Column       Type    Range
Unnamed: 0   int64   0 to 0
repo_id      string  lengths 5 to 186
file_path    string  lengths 15 to 223
content      string  lengths 1 to 32.8M
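The column summary above reads like a pandas export (the leftover "Unnamed: 0" index column is typical of DataFrame.to_csv). Below is a minimal sketch of loading and querying such a dump; the file name repo_files.csv and the CSV container are assumptions, only the four columns are given by the summary.

import pandas as pd

# Assumed container and file name; only the column layout is known from the summary above.
df = pd.read_csv("repo_files.csv")

# Drop the leftover pandas index column if it survived the export.
df = df.drop(columns=["Unnamed: 0"], errors="ignore")

# Sanity-check the columns against the summary (string lengths, largest file).
print(df.dtypes)
print(df["file_path"].str.len().agg(["min", "max"]))  # expected roughly 15 to 223
print(df["content"].str.len().max())                  # largest file on the order of 32.8M characters

# Fetch one file's source by its path.
path = "repos/DirectXShaderCompiler/lib/Analysis/LibCallSemantics.cpp"
match = df.loc[df["file_path"] == path, "content"]
if not match.empty:
    print(match.iloc[0][:200])  # first 200 characters of the file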
Unnamed: 0: 0
repo_id:    repos/DirectXShaderCompiler/lib
file_path:  repos/DirectXShaderCompiler/lib/Analysis/LibCallSemantics.cpp
content:
//===- LibCallSemantics.cpp - Describe library semantics ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements interfaces that can be used to describe language
// specific runtime library interfaces (e.g. libc, libm, etc) to LLVM
// optimizers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LibCallSemantics.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/IR/Function.h"
using namespace llvm;

/// This impl pointer in ~LibCallInfo is actually a StringMap. This
/// helper does the cast.
static StringMap<const LibCallFunctionInfo*> *getMap(void *Ptr) {
  return static_cast<StringMap<const LibCallFunctionInfo*> *>(Ptr);
}

LibCallInfo::~LibCallInfo() {
  delete getMap(Impl);
}

const LibCallLocationInfo &LibCallInfo::getLocationInfo(unsigned LocID) const {
  // Get location info on the first call.
  if (NumLocations == 0)
    NumLocations = getLocationInfo(Locations);

  assert(LocID < NumLocations && "Invalid location ID!");
  return Locations[LocID];
}

/// Return the LibCallFunctionInfo object corresponding to
/// the specified function if we have it. If not, return null.
const LibCallFunctionInfo *
LibCallInfo::getFunctionInfo(const Function *F) const {
  StringMap<const LibCallFunctionInfo*> *Map = getMap(Impl);

  /// If this is the first time we are querying for this info, lazily construct
  /// the StringMap to index it.
  if (!Map) {
    Impl = Map = new StringMap<const LibCallFunctionInfo*>();

    const LibCallFunctionInfo *Array = getFunctionInfoArray();
    if (!Array) return nullptr;

    // We now have the array of entries. Populate the StringMap.
    for (unsigned i = 0; Array[i].Name; ++i)
      (*Map)[Array[i].Name] = Array+i;
  }

  // Look up this function in the string map.
  return Map->lookup(F->getName());
}

/// See if the given exception handling personality function is one that we
/// understand. If so, return a description of it; otherwise return Unknown.
EHPersonality llvm::classifyEHPersonality(const Value *Pers) {
  const Function *F = dyn_cast<Function>(Pers->stripPointerCasts());
  if (!F)
    return EHPersonality::Unknown;
  return StringSwitch<EHPersonality>(F->getName())
    .Case("__gnat_eh_personality", EHPersonality::GNU_Ada)
    .Case("__gxx_personality_v0", EHPersonality::GNU_CXX)
    .Case("__gcc_personality_v0", EHPersonality::GNU_C)
    .Case("__objc_personality_v0", EHPersonality::GNU_ObjC)
    .Case("_except_handler3", EHPersonality::MSVC_X86SEH)
    .Case("_except_handler4", EHPersonality::MSVC_X86SEH)
    .Case("__C_specific_handler", EHPersonality::MSVC_Win64SEH)
    .Case("__CxxFrameHandler3", EHPersonality::MSVC_CXX)
    .Default(EHPersonality::Unknown);
}

bool llvm::canSimplifyInvokeNoUnwind(const Function *F) {
  EHPersonality Personality = classifyEHPersonality(F->getPersonalityFn());
  // We can't simplify any invokes to nounwind functions if the personality
  // function wants to catch asynch exceptions. The nounwind attribute only
  // implies that the function does not throw synchronous exceptions.
  return !isAsynchronousEHPersonality(Personality);
}
Unnamed: 0: 0
repo_id:    repos/DirectXShaderCompiler/lib
file_path:  repos/DirectXShaderCompiler/lib/Analysis/AliasAnalysisEvaluator.cpp
content:
//===- AliasAnalysisEvaluator.cpp - Alias Analysis Accuracy Evaluator -----===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements a simple N^2 alias analysis accuracy evaluator. // Basically, for each function in the program, it simply queries to see how the // alias analysis implementation answers alias queries between each pair of // pointers in the function. // // This is inspired and adapted from code by: Naveen Neelakantam, Francesco // Spadini, and Wojciech Stryjewski. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/Passes.h" #include "llvm/ADT/SetVector.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Function.h" #include "llvm/IR/InstIterator.h" #include "llvm/IR/Instructions.h" #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; static cl::opt<bool> PrintAll("print-all-alias-modref-info", cl::ReallyHidden); static cl::opt<bool> PrintNoAlias("print-no-aliases", cl::ReallyHidden); static cl::opt<bool> PrintMayAlias("print-may-aliases", cl::ReallyHidden); static cl::opt<bool> PrintPartialAlias("print-partial-aliases", cl::ReallyHidden); static cl::opt<bool> PrintMustAlias("print-must-aliases", cl::ReallyHidden); static cl::opt<bool> PrintNoModRef("print-no-modref", cl::ReallyHidden); static cl::opt<bool> PrintMod("print-mod", cl::ReallyHidden); static cl::opt<bool> PrintRef("print-ref", cl::ReallyHidden); static cl::opt<bool> PrintModRef("print-modref", cl::ReallyHidden); static cl::opt<bool> EvalAAMD("evaluate-aa-metadata", cl::ReallyHidden); namespace { class AAEval : public FunctionPass { unsigned NoAliasCount, MayAliasCount, PartialAliasCount, MustAliasCount; unsigned NoModRefCount, ModCount, RefCount, ModRefCount; public: static char ID; // Pass identification, replacement for typeid AAEval() : FunctionPass(ID) { initializeAAEvalPass(*PassRegistry::getPassRegistry()); } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<AliasAnalysis>(); AU.setPreservesAll(); } bool doInitialization(Module &M) override { NoAliasCount = MayAliasCount = PartialAliasCount = MustAliasCount = 0; NoModRefCount = ModCount = RefCount = ModRefCount = 0; if (PrintAll) { PrintNoAlias = PrintMayAlias = true; PrintPartialAlias = PrintMustAlias = true; PrintNoModRef = PrintMod = PrintRef = PrintModRef = true; } return false; } bool runOnFunction(Function &F) override; bool doFinalization(Module &M) override; }; } char AAEval::ID = 0; INITIALIZE_PASS_BEGIN(AAEval, "aa-eval", "Exhaustive Alias Analysis Precision Evaluator", false, true) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_END(AAEval, "aa-eval", "Exhaustive Alias Analysis Precision Evaluator", false, true) FunctionPass *llvm::createAAEvalPass() { return new AAEval(); } static void PrintResults(const char *Msg, bool P, const Value *V1, const Value *V2, const Module *M) { if (P) { std::string o1, o2; { raw_string_ostream os1(o1), os2(o2); V1->printAsOperand(os1, true, M); V2->printAsOperand(os2, true, M); } if (o2 < o1) std::swap(o1, o2); errs() << " " << Msg << ":\t" << o1 << ", " << o2 << "\n"; } } static inline void PrintModRefResults(const char *Msg, bool P, Instruction 
*I, Value *Ptr, Module *M) { if (P) { errs() << " " << Msg << ": Ptr: "; Ptr->printAsOperand(errs(), true, M); errs() << "\t<->" << *I << '\n'; } } static inline void PrintModRefResults(const char *Msg, bool P, CallSite CSA, CallSite CSB, Module *M) { if (P) { errs() << " " << Msg << ": " << *CSA.getInstruction() << " <-> " << *CSB.getInstruction() << '\n'; } } static inline void PrintLoadStoreResults(const char *Msg, bool P, const Value *V1, const Value *V2, const Module *M) { if (P) { errs() << " " << Msg << ": " << *V1 << " <-> " << *V2 << '\n'; } } static inline bool isInterestingPointer(Value *V) { return V->getType()->isPointerTy() && !isa<ConstantPointerNull>(V); } bool AAEval::runOnFunction(Function &F) { AliasAnalysis &AA = getAnalysis<AliasAnalysis>(); SetVector<Value *> Pointers; SetVector<CallSite> CallSites; SetVector<Value *> Loads; SetVector<Value *> Stores; for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) if (I->getType()->isPointerTy()) // Add all pointer arguments. Pointers.insert(I); for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) { if (I->getType()->isPointerTy()) // Add all pointer instructions. Pointers.insert(&*I); if (EvalAAMD && isa<LoadInst>(&*I)) Loads.insert(&*I); if (EvalAAMD && isa<StoreInst>(&*I)) Stores.insert(&*I); Instruction &Inst = *I; if (auto CS = CallSite(&Inst)) { Value *Callee = CS.getCalledValue(); // Skip actual functions for direct function calls. if (!isa<Function>(Callee) && isInterestingPointer(Callee)) Pointers.insert(Callee); // Consider formals. for (CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end(); AI != AE; ++AI) if (isInterestingPointer(*AI)) Pointers.insert(*AI); CallSites.insert(CS); } else { // Consider all operands. for (Instruction::op_iterator OI = Inst.op_begin(), OE = Inst.op_end(); OI != OE; ++OI) if (isInterestingPointer(*OI)) Pointers.insert(*OI); } } if (PrintNoAlias || PrintMayAlias || PrintPartialAlias || PrintMustAlias || PrintNoModRef || PrintMod || PrintRef || PrintModRef) errs() << "Function: " << F.getName() << ": " << Pointers.size() << " pointers, " << CallSites.size() << " call sites\n"; // iterate over the worklist, and run the full (n^2)/2 disambiguations for (SetVector<Value *>::iterator I1 = Pointers.begin(), E = Pointers.end(); I1 != E; ++I1) { uint64_t I1Size = MemoryLocation::UnknownSize; Type *I1ElTy = cast<PointerType>((*I1)->getType())->getElementType(); if (I1ElTy->isSized()) I1Size = AA.getTypeStoreSize(I1ElTy); for (SetVector<Value *>::iterator I2 = Pointers.begin(); I2 != I1; ++I2) { uint64_t I2Size = MemoryLocation::UnknownSize; Type *I2ElTy =cast<PointerType>((*I2)->getType())->getElementType(); if (I2ElTy->isSized()) I2Size = AA.getTypeStoreSize(I2ElTy); switch (AA.alias(*I1, I1Size, *I2, I2Size)) { case NoAlias: PrintResults("NoAlias", PrintNoAlias, *I1, *I2, F.getParent()); ++NoAliasCount; break; case MayAlias: PrintResults("MayAlias", PrintMayAlias, *I1, *I2, F.getParent()); ++MayAliasCount; break; case PartialAlias: PrintResults("PartialAlias", PrintPartialAlias, *I1, *I2, F.getParent()); ++PartialAliasCount; break; case MustAlias: PrintResults("MustAlias", PrintMustAlias, *I1, *I2, F.getParent()); ++MustAliasCount; break; } } } if (EvalAAMD) { // iterate over all pairs of load, store for (SetVector<Value *>::iterator I1 = Loads.begin(), E = Loads.end(); I1 != E; ++I1) { for (SetVector<Value *>::iterator I2 = Stores.begin(), E2 = Stores.end(); I2 != E2; ++I2) { switch (AA.alias(MemoryLocation::get(cast<LoadInst>(*I1)), 
MemoryLocation::get(cast<StoreInst>(*I2)))) { case NoAlias: PrintLoadStoreResults("NoAlias", PrintNoAlias, *I1, *I2, F.getParent()); ++NoAliasCount; break; case MayAlias: PrintLoadStoreResults("MayAlias", PrintMayAlias, *I1, *I2, F.getParent()); ++MayAliasCount; break; case PartialAlias: PrintLoadStoreResults("PartialAlias", PrintPartialAlias, *I1, *I2, F.getParent()); ++PartialAliasCount; break; case MustAlias: PrintLoadStoreResults("MustAlias", PrintMustAlias, *I1, *I2, F.getParent()); ++MustAliasCount; break; } } } // iterate over all pairs of store, store for (SetVector<Value *>::iterator I1 = Stores.begin(), E = Stores.end(); I1 != E; ++I1) { for (SetVector<Value *>::iterator I2 = Stores.begin(); I2 != I1; ++I2) { switch (AA.alias(MemoryLocation::get(cast<StoreInst>(*I1)), MemoryLocation::get(cast<StoreInst>(*I2)))) { case NoAlias: PrintLoadStoreResults("NoAlias", PrintNoAlias, *I1, *I2, F.getParent()); ++NoAliasCount; break; case MayAlias: PrintLoadStoreResults("MayAlias", PrintMayAlias, *I1, *I2, F.getParent()); ++MayAliasCount; break; case PartialAlias: PrintLoadStoreResults("PartialAlias", PrintPartialAlias, *I1, *I2, F.getParent()); ++PartialAliasCount; break; case MustAlias: PrintLoadStoreResults("MustAlias", PrintMustAlias, *I1, *I2, F.getParent()); ++MustAliasCount; break; } } } } // Mod/ref alias analysis: compare all pairs of calls and values for (SetVector<CallSite>::iterator C = CallSites.begin(), Ce = CallSites.end(); C != Ce; ++C) { Instruction *I = C->getInstruction(); for (SetVector<Value *>::iterator V = Pointers.begin(), Ve = Pointers.end(); V != Ve; ++V) { uint64_t Size = MemoryLocation::UnknownSize; Type *ElTy = cast<PointerType>((*V)->getType())->getElementType(); if (ElTy->isSized()) Size = AA.getTypeStoreSize(ElTy); switch (AA.getModRefInfo(*C, *V, Size)) { case AliasAnalysis::NoModRef: PrintModRefResults("NoModRef", PrintNoModRef, I, *V, F.getParent()); ++NoModRefCount; break; case AliasAnalysis::Mod: PrintModRefResults("Just Mod", PrintMod, I, *V, F.getParent()); ++ModCount; break; case AliasAnalysis::Ref: PrintModRefResults("Just Ref", PrintRef, I, *V, F.getParent()); ++RefCount; break; case AliasAnalysis::ModRef: PrintModRefResults("Both ModRef", PrintModRef, I, *V, F.getParent()); ++ModRefCount; break; } } } // Mod/ref alias analysis: compare all pairs of calls for (SetVector<CallSite>::iterator C = CallSites.begin(), Ce = CallSites.end(); C != Ce; ++C) { for (SetVector<CallSite>::iterator D = CallSites.begin(); D != Ce; ++D) { if (D == C) continue; switch (AA.getModRefInfo(*C, *D)) { case AliasAnalysis::NoModRef: PrintModRefResults("NoModRef", PrintNoModRef, *C, *D, F.getParent()); ++NoModRefCount; break; case AliasAnalysis::Mod: PrintModRefResults("Just Mod", PrintMod, *C, *D, F.getParent()); ++ModCount; break; case AliasAnalysis::Ref: PrintModRefResults("Just Ref", PrintRef, *C, *D, F.getParent()); ++RefCount; break; case AliasAnalysis::ModRef: PrintModRefResults("Both ModRef", PrintModRef, *C, *D, F.getParent()); ++ModRefCount; break; } } } return false; } static void PrintPercent(unsigned Num, unsigned Sum) { errs() << "(" << Num*100ULL/Sum << "." 
<< ((Num*1000ULL/Sum) % 10) << "%)\n"; } bool AAEval::doFinalization(Module &M) { unsigned AliasSum = NoAliasCount + MayAliasCount + PartialAliasCount + MustAliasCount; errs() << "===== Alias Analysis Evaluator Report =====\n"; if (AliasSum == 0) { errs() << " Alias Analysis Evaluator Summary: No pointers!\n"; } else { errs() << " " << AliasSum << " Total Alias Queries Performed\n"; errs() << " " << NoAliasCount << " no alias responses "; PrintPercent(NoAliasCount, AliasSum); errs() << " " << MayAliasCount << " may alias responses "; PrintPercent(MayAliasCount, AliasSum); errs() << " " << PartialAliasCount << " partial alias responses "; PrintPercent(PartialAliasCount, AliasSum); errs() << " " << MustAliasCount << " must alias responses "; PrintPercent(MustAliasCount, AliasSum); errs() << " Alias Analysis Evaluator Pointer Alias Summary: " << NoAliasCount * 100 / AliasSum << "%/" << MayAliasCount * 100 / AliasSum << "%/" << PartialAliasCount * 100 / AliasSum << "%/" << MustAliasCount * 100 / AliasSum << "%\n"; } // Display the summary for mod/ref analysis unsigned ModRefSum = NoModRefCount + ModCount + RefCount + ModRefCount; if (ModRefSum == 0) { errs() << " Alias Analysis Mod/Ref Evaluator Summary: no " "mod/ref!\n"; } else { errs() << " " << ModRefSum << " Total ModRef Queries Performed\n"; errs() << " " << NoModRefCount << " no mod/ref responses "; PrintPercent(NoModRefCount, ModRefSum); errs() << " " << ModCount << " mod responses "; PrintPercent(ModCount, ModRefSum); errs() << " " << RefCount << " ref responses "; PrintPercent(RefCount, ModRefSum); errs() << " " << ModRefCount << " mod & ref responses "; PrintPercent(ModRefCount, ModRefSum); errs() << " Alias Analysis Evaluator Mod/Ref Summary: " << NoModRefCount * 100 / ModRefSum << "%/" << ModCount * 100 / ModRefSum << "%/" << RefCount * 100 / ModRefSum << "%/" << ModRefCount * 100 / ModRefSum << "%\n"; } return false; }
Unnamed: 0: 0
repo_id:    repos/DirectXShaderCompiler/lib
file_path:  repos/DirectXShaderCompiler/lib/Analysis/CMakeLists.txt
content:
set(LLVM_OPTIONAL_SOURCES regioninfo.cpp regionprinter.cpp) # HLSL Change - ignore file

add_llvm_library(LLVMAnalysis
  AliasAnalysis.cpp
  AliasAnalysisCounter.cpp
  AliasAnalysisEvaluator.cpp
  AliasDebugger.cpp
  AliasSetTracker.cpp
  Analysis.cpp
  AssumptionCache.cpp
  BasicAliasAnalysis.cpp
  BlockFrequencyInfo.cpp
  BlockFrequencyInfoImpl.cpp
  BranchProbabilityInfo.cpp
  CFG.cpp
  CFGPrinter.cpp
  CFLAliasAnalysis.cpp
  CGSCCPassManager.cpp
  CaptureTracking.cpp
  CostModel.cpp
  CodeMetrics.cpp
  ConstantFolding.cpp
  Delinearization.cpp
  DependenceAnalysis.cpp
  DivergenceAnalysis.cpp
  DomPrinter.cpp
  DominanceFrontier.cpp
  DxilConstantFolding.cpp
  DxilConstantFoldingExt.cpp
  DxilSimplify.cpp
  DxilValueCache.cpp
  IVUsers.cpp
  InstCount.cpp
  InstructionSimplify.cpp
  Interval.cpp
  IntervalPartition.cpp
  IteratedDominanceFrontier.cpp
  LazyCallGraph.cpp
  LazyValueInfo.cpp
  LibCallAliasAnalysis.cpp
  LibCallSemantics.cpp
  Lint.cpp
  Loads.cpp
  LoopAccessAnalysis.cpp
  LoopInfo.cpp
  LoopPass.cpp
  MemDepPrinter.cpp
  MemDerefPrinter.cpp
  MemoryBuiltins.cpp
  MemoryDependenceAnalysis.cpp
  MemoryLocation.cpp
  ModuleDebugInfoPrinter.cpp
  NoAliasAnalysis.cpp
  PHITransAddr.cpp
  PostDominators.cpp
  PtrUseVisitor.cpp
  ReducibilityAnalysis.cpp
  regioninfo.cpp
  RegionPass.cpp
  regionprinter.cpp
  ScalarEvolution.cpp
  ScalarEvolutionAliasAnalysis.cpp
  ScalarEvolutionExpander.cpp
  ScalarEvolutionNormalization.cpp
  SparsePropagation.cpp
  TargetLibraryInfo.cpp
  TargetTransformInfo.cpp
  Trace.cpp
  TypeBasedAliasAnalysis.cpp
  ScopedNoAliasAA.cpp
  ValueTracking.cpp
  VectorUtils.cpp
  VectorUtils2.cpp

  ADDITIONAL_HEADER_DIRS
  ${LLVM_MAIN_INCLUDE_DIR}/llvm/Analysis
  )

add_dependencies(LLVMAnalysis intrinsics_gen)

add_subdirectory(IPA)
Unnamed: 0: 0
repo_id:    repos/DirectXShaderCompiler/lib
file_path:  repos/DirectXShaderCompiler/lib/Analysis/MemoryBuiltins.cpp
content:
//===------ MemoryBuiltins.cpp - Identify calls to memory builtins --------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This family of functions identifies calls to builtin functions that allocate // or free memory. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/MemoryBuiltins.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/Metadata.h" #include "llvm/IR/Module.h" #include "llvm/Support/Debug.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/Local.h" using namespace llvm; #define DEBUG_TYPE "memory-builtins" enum AllocType { OpNewLike = 1<<0, // allocates; never returns null MallocLike = 1<<1 | OpNewLike, // allocates; may return null CallocLike = 1<<2, // allocates + bzero ReallocLike = 1<<3, // reallocates StrDupLike = 1<<4, AllocLike = MallocLike | CallocLike | StrDupLike, AnyAlloc = AllocLike | ReallocLike }; struct AllocFnsTy { LibFunc::Func Func; AllocType AllocTy; unsigned char NumParams; // First and Second size parameters (or -1 if unused) signed char FstParam, SndParam; }; // FIXME: certain users need more information. E.g., SimplifyLibCalls needs to // know which functions are nounwind, noalias, nocapture parameters, etc. static const AllocFnsTy AllocationFnData[] = { {LibFunc::malloc, MallocLike, 1, 0, -1}, {LibFunc::valloc, MallocLike, 1, 0, -1}, {LibFunc::Znwj, OpNewLike, 1, 0, -1}, // new(unsigned int) {LibFunc::ZnwjRKSt9nothrow_t, MallocLike, 2, 0, -1}, // new(unsigned int, nothrow) {LibFunc::Znwm, OpNewLike, 1, 0, -1}, // new(unsigned long) {LibFunc::ZnwmRKSt9nothrow_t, MallocLike, 2, 0, -1}, // new(unsigned long, nothrow) {LibFunc::Znaj, OpNewLike, 1, 0, -1}, // new[](unsigned int) {LibFunc::ZnajRKSt9nothrow_t, MallocLike, 2, 0, -1}, // new[](unsigned int, nothrow) {LibFunc::Znam, OpNewLike, 1, 0, -1}, // new[](unsigned long) {LibFunc::ZnamRKSt9nothrow_t, MallocLike, 2, 0, -1}, // new[](unsigned long, nothrow) {LibFunc::calloc, CallocLike, 2, 0, 1}, {LibFunc::realloc, ReallocLike, 2, 1, -1}, {LibFunc::reallocf, ReallocLike, 2, 1, -1}, {LibFunc::strdup, StrDupLike, 1, -1, -1}, {LibFunc::strndup, StrDupLike, 2, 1, -1} // TODO: Handle "int posix_memalign(void **, size_t, size_t)" }; static Function *getCalledFunction(const Value *V, bool LookThroughBitCast) { if (LookThroughBitCast) V = V->stripPointerCasts(); CallSite CS(const_cast<Value*>(V)); if (!CS.getInstruction()) return nullptr; if (CS.isNoBuiltin()) return nullptr; Function *Callee = CS.getCalledFunction(); if (!Callee || !Callee->isDeclaration()) return nullptr; return Callee; } /// \brief Returns the allocation data for the given value if it is a call to a /// known allocation function, and NULL otherwise. static const AllocFnsTy *getAllocationData(const Value *V, AllocType AllocTy, const TargetLibraryInfo *TLI, bool LookThroughBitCast = false) { // Skip intrinsics if (isa<IntrinsicInst>(V)) return nullptr; Function *Callee = getCalledFunction(V, LookThroughBitCast); if (!Callee) return nullptr; // Make sure that the function is available. 
StringRef FnName = Callee->getName(); LibFunc::Func TLIFn; if (!TLI || !TLI->getLibFunc(FnName, TLIFn) || !TLI->has(TLIFn)) return nullptr; unsigned i = 0; bool found = false; for ( ; i < array_lengthof(AllocationFnData); ++i) { if (AllocationFnData[i].Func == TLIFn) { found = true; break; } } if (!found) return nullptr; const AllocFnsTy *FnData = &AllocationFnData[i]; if ((FnData->AllocTy & AllocTy) != FnData->AllocTy) return nullptr; // Check function prototype. int FstParam = FnData->FstParam; int SndParam = FnData->SndParam; FunctionType *FTy = Callee->getFunctionType(); if (FTy->getReturnType() == Type::getInt8PtrTy(FTy->getContext()) && FTy->getNumParams() == FnData->NumParams && (FstParam < 0 || (FTy->getParamType(FstParam)->isIntegerTy(32) || FTy->getParamType(FstParam)->isIntegerTy(64))) && (SndParam < 0 || FTy->getParamType(SndParam)->isIntegerTy(32) || FTy->getParamType(SndParam)->isIntegerTy(64))) return FnData; return nullptr; } static bool hasNoAliasAttr(const Value *V, bool LookThroughBitCast) { ImmutableCallSite CS(LookThroughBitCast ? V->stripPointerCasts() : V); return CS && CS.hasFnAttr(Attribute::NoAlias); } /// \brief Tests if a value is a call or invoke to a library function that /// allocates or reallocates memory (either malloc, calloc, realloc, or strdup /// like). bool llvm::isAllocationFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast) { return getAllocationData(V, AnyAlloc, TLI, LookThroughBitCast); } /// \brief Tests if a value is a call or invoke to a function that returns a /// NoAlias pointer (including malloc/calloc/realloc/strdup-like functions). bool llvm::isNoAliasFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast) { // it's safe to consider realloc as noalias since accessing the original // pointer is undefined behavior return isAllocationFn(V, TLI, LookThroughBitCast) || hasNoAliasAttr(V, LookThroughBitCast); } /// \brief Tests if a value is a call or invoke to a library function that /// allocates uninitialized memory (such as malloc). bool llvm::isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast) { return getAllocationData(V, MallocLike, TLI, LookThroughBitCast); } /// \brief Tests if a value is a call or invoke to a library function that /// allocates zero-filled memory (such as calloc). bool llvm::isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast) { return getAllocationData(V, CallocLike, TLI, LookThroughBitCast); } /// \brief Tests if a value is a call or invoke to a library function that /// allocates memory (either malloc, calloc, or strdup like). bool llvm::isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast) { return getAllocationData(V, AllocLike, TLI, LookThroughBitCast); } /// \brief Tests if a value is a call or invoke to a library function that /// reallocates memory (such as realloc). bool llvm::isReallocLikeFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast) { return getAllocationData(V, ReallocLike, TLI, LookThroughBitCast); } /// \brief Tests if a value is a call or invoke to a library function that /// allocates memory and never returns null (such as operator new). bool llvm::isOperatorNewLikeFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast) { return getAllocationData(V, OpNewLike, TLI, LookThroughBitCast); } /// extractMallocCall - Returns the corresponding CallInst if the instruction /// is a malloc call. 
Since CallInst::CreateMalloc() only creates calls, we /// ignore InvokeInst here. const CallInst *llvm::extractMallocCall(const Value *I, const TargetLibraryInfo *TLI) { return isMallocLikeFn(I, TLI) ? dyn_cast<CallInst>(I) : nullptr; } static Value *computeArraySize(const CallInst *CI, const DataLayout &DL, const TargetLibraryInfo *TLI, bool LookThroughSExt = false) { if (!CI) return nullptr; // The size of the malloc's result type must be known to determine array size. Type *T = getMallocAllocatedType(CI, TLI); if (!T || !T->isSized()) return nullptr; unsigned ElementSize = DL.getTypeAllocSize(T); if (StructType *ST = dyn_cast<StructType>(T)) ElementSize = DL.getStructLayout(ST)->getSizeInBytes(); // If malloc call's arg can be determined to be a multiple of ElementSize, // return the multiple. Otherwise, return NULL. Value *MallocArg = CI->getArgOperand(0); Value *Multiple = nullptr; if (ComputeMultiple(MallocArg, ElementSize, Multiple, LookThroughSExt)) return Multiple; return nullptr; } /// getMallocType - Returns the PointerType resulting from the malloc call. /// The PointerType depends on the number of bitcast uses of the malloc call: /// 0: PointerType is the calls' return type. /// 1: PointerType is the bitcast's result type. /// >1: Unique PointerType cannot be determined, return NULL. PointerType *llvm::getMallocType(const CallInst *CI, const TargetLibraryInfo *TLI) { assert(isMallocLikeFn(CI, TLI) && "getMallocType and not malloc call"); PointerType *MallocType = nullptr; unsigned NumOfBitCastUses = 0; // Determine if CallInst has a bitcast use. for (Value::const_user_iterator UI = CI->user_begin(), E = CI->user_end(); UI != E;) if (const BitCastInst *BCI = dyn_cast<BitCastInst>(*UI++)) { MallocType = cast<PointerType>(BCI->getDestTy()); NumOfBitCastUses++; } // Malloc call has 1 bitcast use, so type is the bitcast's destination type. if (NumOfBitCastUses == 1) return MallocType; // Malloc call was not bitcast, so type is the malloc function's return type. if (NumOfBitCastUses == 0) return cast<PointerType>(CI->getType()); // Type could not be determined. return nullptr; } /// getMallocAllocatedType - Returns the Type allocated by malloc call. /// The Type depends on the number of bitcast uses of the malloc call: /// 0: PointerType is the malloc calls' return type. /// 1: PointerType is the bitcast's result type. /// >1: Unique PointerType cannot be determined, return NULL. Type *llvm::getMallocAllocatedType(const CallInst *CI, const TargetLibraryInfo *TLI) { PointerType *PT = getMallocType(CI, TLI); return PT ? PT->getElementType() : nullptr; } /// getMallocArraySize - Returns the array size of a malloc call. If the /// argument passed to malloc is a multiple of the size of the malloced type, /// then return that multiple. For non-array mallocs, the multiple is /// constant 1. Otherwise, return NULL for mallocs whose array size cannot be /// determined. Value *llvm::getMallocArraySize(CallInst *CI, const DataLayout &DL, const TargetLibraryInfo *TLI, bool LookThroughSExt) { assert(isMallocLikeFn(CI, TLI) && "getMallocArraySize and not malloc call"); return computeArraySize(CI, DL, TLI, LookThroughSExt); } /// extractCallocCall - Returns the corresponding CallInst if the instruction /// is a calloc call. const CallInst *llvm::extractCallocCall(const Value *I, const TargetLibraryInfo *TLI) { return isCallocLikeFn(I, TLI) ? 
cast<CallInst>(I) : nullptr; } /// isFreeCall - Returns non-null if the value is a call to the builtin free() const CallInst *llvm::isFreeCall(const Value *I, const TargetLibraryInfo *TLI) { const CallInst *CI = dyn_cast<CallInst>(I); if (!CI || isa<IntrinsicInst>(CI)) return nullptr; Function *Callee = CI->getCalledFunction(); if (Callee == nullptr) return nullptr; StringRef FnName = Callee->getName(); LibFunc::Func TLIFn; if (!TLI || !TLI->getLibFunc(FnName, TLIFn) || !TLI->has(TLIFn)) return nullptr; unsigned ExpectedNumParams; if (TLIFn == LibFunc::free || TLIFn == LibFunc::ZdlPv || // operator delete(void*) TLIFn == LibFunc::ZdaPv) // operator delete[](void*) ExpectedNumParams = 1; else if (TLIFn == LibFunc::ZdlPvj || // delete(void*, uint) TLIFn == LibFunc::ZdlPvm || // delete(void*, ulong) TLIFn == LibFunc::ZdlPvRKSt9nothrow_t || // delete(void*, nothrow) TLIFn == LibFunc::ZdaPvj || // delete[](void*, uint) TLIFn == LibFunc::ZdaPvm || // delete[](void*, ulong) TLIFn == LibFunc::ZdaPvRKSt9nothrow_t) // delete[](void*, nothrow) ExpectedNumParams = 2; else return nullptr; // Check free prototype. // FIXME: workaround for PR5130, this will be obsolete when a nobuiltin // attribute will exist. FunctionType *FTy = Callee->getFunctionType(); if (!FTy->getReturnType()->isVoidTy()) return nullptr; if (FTy->getNumParams() != ExpectedNumParams) return nullptr; if (FTy->getParamType(0) != Type::getInt8PtrTy(Callee->getContext())) return nullptr; return CI; } //===----------------------------------------------------------------------===// // Utility functions to compute size of objects. // /// \brief Compute the size of the object pointed by Ptr. Returns true and the /// object size in Size if successful, and false otherwise. /// If RoundToAlign is true, then Size is rounded up to the aligment of allocas, /// byval arguments, and global variables. bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL, const TargetLibraryInfo *TLI, bool RoundToAlign) { ObjectSizeOffsetVisitor Visitor(DL, TLI, Ptr->getContext(), RoundToAlign); SizeOffsetType Data = Visitor.compute(const_cast<Value*>(Ptr)); if (!Visitor.bothKnown(Data)) return false; APInt ObjSize = Data.first, Offset = Data.second; // check for overflow if (Offset.slt(0) || ObjSize.ult(Offset)) Size = 0; else Size = (ObjSize - Offset).getZExtValue(); return true; } STATISTIC(ObjectVisitorArgument, "Number of arguments with unsolved size and offset"); STATISTIC(ObjectVisitorLoad, "Number of load instructions with unsolved size and offset"); APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Align) { if (RoundToAlign && Align) return APInt(IntTyBits, RoundUpToAlignment(Size.getZExtValue(), Align)); return Size; } ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout &DL, const TargetLibraryInfo *TLI, LLVMContext &Context, bool RoundToAlign) : DL(DL), TLI(TLI), RoundToAlign(RoundToAlign) { // Pointer size must be rechecked for each object visited since it could have // a different address space. } SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) { IntTyBits = DL.getPointerTypeSizeInBits(V->getType()); Zero = APInt::getNullValue(IntTyBits); V = V->stripPointerCasts(); if (Instruction *I = dyn_cast<Instruction>(V)) { // If we have already seen this instruction, bail out. Cycles can happen in // unreachable code after constant propagation. 
if (!SeenInsts.insert(I).second) return unknown(); if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) return visitGEPOperator(*GEP); return visit(*I); } if (Argument *A = dyn_cast<Argument>(V)) return visitArgument(*A); if (ConstantPointerNull *P = dyn_cast<ConstantPointerNull>(V)) return visitConstantPointerNull(*P); if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) return visitGlobalAlias(*GA); if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) return visitGlobalVariable(*GV); if (UndefValue *UV = dyn_cast<UndefValue>(V)) return visitUndefValue(*UV); if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) { if (CE->getOpcode() == Instruction::IntToPtr) return unknown(); // clueless if (CE->getOpcode() == Instruction::GetElementPtr) return visitGEPOperator(cast<GEPOperator>(*CE)); } DEBUG(dbgs() << "ObjectSizeOffsetVisitor::compute() unhandled value: " << *V << '\n'); return unknown(); } SizeOffsetType ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) { if (!I.getAllocatedType()->isSized()) return unknown(); APInt Size(IntTyBits, DL.getTypeAllocSize(I.getAllocatedType())); if (!I.isArrayAllocation()) return std::make_pair(align(Size, I.getAlignment()), Zero); Value *ArraySize = I.getArraySize(); if (const ConstantInt *C = dyn_cast<ConstantInt>(ArraySize)) { Size *= C->getValue().zextOrSelf(IntTyBits); return std::make_pair(align(Size, I.getAlignment()), Zero); } return unknown(); } SizeOffsetType ObjectSizeOffsetVisitor::visitArgument(Argument &A) { // no interprocedural analysis is done at the moment if (!A.hasByValOrInAllocaAttr()) { ++ObjectVisitorArgument; return unknown(); } PointerType *PT = cast<PointerType>(A.getType()); APInt Size(IntTyBits, DL.getTypeAllocSize(PT->getElementType())); return std::make_pair(align(Size, A.getParamAlignment()), Zero); } SizeOffsetType ObjectSizeOffsetVisitor::visitCallSite(CallSite CS) { const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc, TLI); if (!FnData) return unknown(); // handle strdup-like functions separately if (FnData->AllocTy == StrDupLike) { APInt Size(IntTyBits, GetStringLength(CS.getArgument(0))); if (!Size) return unknown(); // strndup limits strlen if (FnData->FstParam > 0) { ConstantInt *Arg= dyn_cast<ConstantInt>(CS.getArgument(FnData->FstParam)); if (!Arg) return unknown(); APInt MaxSize = Arg->getValue().zextOrSelf(IntTyBits); if (Size.ugt(MaxSize)) Size = MaxSize + 1; } return std::make_pair(Size, Zero); } ConstantInt *Arg = dyn_cast<ConstantInt>(CS.getArgument(FnData->FstParam)); if (!Arg) return unknown(); APInt Size = Arg->getValue().zextOrSelf(IntTyBits); // size determined by just 1 parameter if (FnData->SndParam < 0) return std::make_pair(Size, Zero); Arg = dyn_cast<ConstantInt>(CS.getArgument(FnData->SndParam)); if (!Arg) return unknown(); Size *= Arg->getValue().zextOrSelf(IntTyBits); return std::make_pair(Size, Zero); // TODO: handle more standard functions (+ wchar cousins): // - strdup / strndup // - strcpy / strncpy // - strcat / strncat // - memcpy / memmove // - strcat / strncat // - memset } SizeOffsetType ObjectSizeOffsetVisitor::visitConstantPointerNull(ConstantPointerNull&) { return std::make_pair(Zero, Zero); } SizeOffsetType ObjectSizeOffsetVisitor::visitExtractElementInst(ExtractElementInst&) { return unknown(); } SizeOffsetType ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst&) { // Easy cases were already folded by previous passes. 
return unknown(); } SizeOffsetType ObjectSizeOffsetVisitor::visitGEPOperator(GEPOperator &GEP) { SizeOffsetType PtrData = compute(GEP.getPointerOperand()); APInt Offset(IntTyBits, 0); if (!bothKnown(PtrData) || !GEP.accumulateConstantOffset(DL, Offset)) return unknown(); return std::make_pair(PtrData.first, PtrData.second + Offset); } SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalAlias(GlobalAlias &GA) { if (GA.mayBeOverridden()) return unknown(); return compute(GA.getAliasee()); } SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalVariable(GlobalVariable &GV){ if (!GV.hasDefinitiveInitializer()) return unknown(); APInt Size(IntTyBits, DL.getTypeAllocSize(GV.getType()->getElementType())); return std::make_pair(align(Size, GV.getAlignment()), Zero); } SizeOffsetType ObjectSizeOffsetVisitor::visitIntToPtrInst(IntToPtrInst&) { // clueless return unknown(); } SizeOffsetType ObjectSizeOffsetVisitor::visitLoadInst(LoadInst&) { ++ObjectVisitorLoad; return unknown(); } SizeOffsetType ObjectSizeOffsetVisitor::visitPHINode(PHINode&) { // too complex to analyze statically. return unknown(); } SizeOffsetType ObjectSizeOffsetVisitor::visitSelectInst(SelectInst &I) { SizeOffsetType TrueSide = compute(I.getTrueValue()); SizeOffsetType FalseSide = compute(I.getFalseValue()); if (bothKnown(TrueSide) && bothKnown(FalseSide) && TrueSide == FalseSide) return TrueSide; return unknown(); } SizeOffsetType ObjectSizeOffsetVisitor::visitUndefValue(UndefValue&) { return std::make_pair(Zero, Zero); } SizeOffsetType ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) { DEBUG(dbgs() << "ObjectSizeOffsetVisitor unknown instruction:" << I << '\n'); return unknown(); } ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator( const DataLayout &DL, const TargetLibraryInfo *TLI, LLVMContext &Context, bool RoundToAlign) : DL(DL), TLI(TLI), Context(Context), Builder(Context, TargetFolder(DL)), RoundToAlign(RoundToAlign) { // IntTy and Zero must be set for each compute() since the address space may // be different for later objects. } SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute(Value *V) { // XXX - Are vectors of pointers possible here? IntTy = cast<IntegerType>(DL.getIntPtrType(V->getType())); Zero = ConstantInt::get(IntTy, 0); SizeOffsetEvalType Result = compute_(V); if (!bothKnown(Result)) { // erase everything that was computed in this iteration from the cache, so // that no dangling references are left behind. We could be a bit smarter if // we kept a dependency graph. It's probably not worth the complexity. 
for (PtrSetTy::iterator I=SeenVals.begin(), E=SeenVals.end(); I != E; ++I) { CacheMapTy::iterator CacheIt = CacheMap.find(*I); // non-computable results can be safely cached if (CacheIt != CacheMap.end() && anyKnown(CacheIt->second)) CacheMap.erase(CacheIt); } } SeenVals.clear(); return Result; } SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) { ObjectSizeOffsetVisitor Visitor(DL, TLI, Context, RoundToAlign); SizeOffsetType Const = Visitor.compute(V); if (Visitor.bothKnown(Const)) return std::make_pair(ConstantInt::get(Context, Const.first), ConstantInt::get(Context, Const.second)); V = V->stripPointerCasts(); // check cache CacheMapTy::iterator CacheIt = CacheMap.find(V); if (CacheIt != CacheMap.end()) return CacheIt->second; // always generate code immediately before the instruction being // processed, so that the generated code dominates the same BBs Instruction *PrevInsertPoint = Builder.GetInsertPoint(); if (Instruction *I = dyn_cast<Instruction>(V)) Builder.SetInsertPoint(I); // now compute the size and offset SizeOffsetEvalType Result; // Record the pointers that were handled in this run, so that they can be // cleaned later if something fails. We also use this set to break cycles that // can occur in dead code. if (!SeenVals.insert(V).second) { Result = unknown(); } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { Result = visitGEPOperator(*GEP); } else if (Instruction *I = dyn_cast<Instruction>(V)) { Result = visit(*I); } else if (isa<Argument>(V) || (isa<ConstantExpr>(V) && cast<ConstantExpr>(V)->getOpcode() == Instruction::IntToPtr) || isa<GlobalAlias>(V) || isa<GlobalVariable>(V)) { // ignore values where we cannot do more than what ObjectSizeVisitor can Result = unknown(); } else { DEBUG(dbgs() << "ObjectSizeOffsetEvaluator::compute() unhandled value: " << *V << '\n'); Result = unknown(); } if (PrevInsertPoint) Builder.SetInsertPoint(PrevInsertPoint); // Don't reuse CacheIt since it may be invalid at this point. 
CacheMap[V] = Result; return Result; } SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitAllocaInst(AllocaInst &I) { if (!I.getAllocatedType()->isSized()) return unknown(); // must be a VLA assert(I.isArrayAllocation()); Value *ArraySize = I.getArraySize(); Value *Size = ConstantInt::get(ArraySize->getType(), DL.getTypeAllocSize(I.getAllocatedType())); Size = Builder.CreateMul(Size, ArraySize); return std::make_pair(Size, Zero); } SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitCallSite(CallSite CS) { const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc, TLI); if (!FnData) return unknown(); // handle strdup-like functions separately if (FnData->AllocTy == StrDupLike) { // TODO return unknown(); } Value *FirstArg = CS.getArgument(FnData->FstParam); FirstArg = Builder.CreateZExt(FirstArg, IntTy); if (FnData->SndParam < 0) return std::make_pair(FirstArg, Zero); Value *SecondArg = CS.getArgument(FnData->SndParam); SecondArg = Builder.CreateZExt(SecondArg, IntTy); Value *Size = Builder.CreateMul(FirstArg, SecondArg); return std::make_pair(Size, Zero); // TODO: handle more standard functions (+ wchar cousins): // - strdup / strndup // - strcpy / strncpy // - strcat / strncat // - memcpy / memmove // - strcat / strncat // - memset } SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitExtractElementInst(ExtractElementInst&) { return unknown(); } SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitExtractValueInst(ExtractValueInst&) { return unknown(); } SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitGEPOperator(GEPOperator &GEP) { SizeOffsetEvalType PtrData = compute_(GEP.getPointerOperand()); if (!bothKnown(PtrData)) return unknown(); Value *Offset = EmitGEPOffset(&Builder, DL, &GEP, /*NoAssumptions=*/true); Offset = Builder.CreateAdd(PtrData.second, Offset); return std::make_pair(PtrData.first, Offset); } SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitIntToPtrInst(IntToPtrInst&) { // clueless return unknown(); } SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitLoadInst(LoadInst&) { return unknown(); } SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitPHINode(PHINode &PHI) { // create 2 PHIs: one for size and another for offset PHINode *SizePHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues()); PHINode *OffsetPHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues()); // insert right away in the cache to handle recursive PHIs CacheMap[&PHI] = std::make_pair(SizePHI, OffsetPHI); // compute offset/size for each PHI incoming pointer for (unsigned i = 0, e = PHI.getNumIncomingValues(); i != e; ++i) { Builder.SetInsertPoint(PHI.getIncomingBlock(i)->getFirstInsertionPt()); SizeOffsetEvalType EdgeData = compute_(PHI.getIncomingValue(i)); if (!bothKnown(EdgeData)) { OffsetPHI->replaceAllUsesWith(UndefValue::get(IntTy)); OffsetPHI->eraseFromParent(); SizePHI->replaceAllUsesWith(UndefValue::get(IntTy)); SizePHI->eraseFromParent(); return unknown(); } SizePHI->addIncoming(EdgeData.first, PHI.getIncomingBlock(i)); OffsetPHI->addIncoming(EdgeData.second, PHI.getIncomingBlock(i)); } Value *Size = SizePHI, *Offset = OffsetPHI, *Tmp; if ((Tmp = SizePHI->hasConstantValue())) { Size = Tmp; SizePHI->replaceAllUsesWith(Size); SizePHI->eraseFromParent(); } if ((Tmp = OffsetPHI->hasConstantValue())) { Offset = Tmp; OffsetPHI->replaceAllUsesWith(Offset); OffsetPHI->eraseFromParent(); } return std::make_pair(Size, Offset); } SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitSelectInst(SelectInst &I) { SizeOffsetEvalType TrueSide = compute_(I.getTrueValue()); SizeOffsetEvalType 
FalseSide = compute_(I.getFalseValue()); if (!bothKnown(TrueSide) || !bothKnown(FalseSide)) return unknown(); if (TrueSide == FalseSide) return TrueSide; Value *Size = Builder.CreateSelect(I.getCondition(), TrueSide.first, FalseSide.first); Value *Offset = Builder.CreateSelect(I.getCondition(), TrueSide.second, FalseSide.second); return std::make_pair(Size, Offset); } SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitInstruction(Instruction &I) { DEBUG(dbgs() << "ObjectSizeOffsetEvaluator unknown instruction:" << I <<'\n'); return unknown(); }
Unnamed: 0: 0
repo_id:    repos/DirectXShaderCompiler/lib
file_path:  repos/DirectXShaderCompiler/lib/Analysis/ValueTracking.cpp
content:
//===- ValueTracking.cpp - Walk computations to compute properties --------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains routines that help analyze properties that chains of // computations have. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/ValueTracking.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/MemoryBuiltins.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/ConstantRange.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/GetElementPtrTypeIterator.h" #include "llvm/IR/GlobalAlias.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Metadata.h" #include "llvm/IR/Operator.h" #include "llvm/IR/PatternMatch.h" #include "llvm/IR/Statepoint.h" #include "llvm/Support/Debug.h" #include "llvm/Support/MathExtras.h" #include <cstring> using namespace llvm; using namespace llvm::PatternMatch; const unsigned MaxDepth = 6; #if 0 // HLSL Change Starts - option pending /// Enable an experimental feature to leverage information about dominating /// conditions to compute known bits. The individual options below control how /// hard we search. The defaults are choosen to be fairly aggressive. If you /// run into compile time problems when testing, scale them back and report /// your findings. static cl::opt<bool> EnableDomConditions("value-tracking-dom-conditions", cl::Hidden, cl::init(false)); // This is expensive, so we only do it for the top level query value. // (TODO: evaluate cost vs profit, consider higher thresholds) static cl::opt<unsigned> DomConditionsMaxDepth("dom-conditions-max-depth", cl::Hidden, cl::init(1)); /// How many dominating blocks should be scanned looking for dominating /// conditions? static cl::opt<unsigned> DomConditionsMaxDomBlocks("dom-conditions-dom-blocks", cl::Hidden, cl::init(20000)); // Controls the number of uses of the value searched for possible // dominating comparisons. static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses", cl::Hidden, cl::init(2000)); // If true, don't consider only compares whose only use is a branch. static cl::opt<bool> DomConditionsSingleCmpUse("dom-conditions-single-cmp-use", cl::Hidden, cl::init(false)); #else static const bool EnableDomConditions = false; static const unsigned DomConditionsMaxDepth = 1; static const unsigned DomConditionsMaxDomBlocks = 2000; static const unsigned DomConditionsMaxUses = 2000; static const bool DomConditionsSingleCmpUse = false; #endif // HLSL Change Ends /// Returns the bitwidth of the given scalar or pointer type (if unknown returns /// 0). For vector types, returns the element type's bitwidth. static unsigned getBitWidth(Type *Ty, const DataLayout &DL) { if (unsigned BitWidth = Ty->getScalarSizeInBits()) return BitWidth; return DL.getPointerTypeSizeInBits(Ty); } // Many of these functions have internal versions that take an assumption // exclusion set. This is because of the potential for mutual recursion to // cause computeKnownBits to repeatedly visit the same assume intrinsic. 
The // classic case of this is assume(x = y), which will attempt to determine // bits in x from bits in y, which will attempt to determine bits in y from // bits in x, etc. Regarding the mutual recursion, computeKnownBits can call // isKnownNonZero, which calls computeKnownBits and ComputeSignBit and // isKnownToBeAPowerOfTwo (all of which can call computeKnownBits), and so on. typedef SmallPtrSet<const Value *, 8> ExclInvsSet; namespace { // Simplifying using an assume can only be done in a particular control-flow // context (the context instruction provides that context). If an assume and // the context instruction are not in the same block then the DT helps in // figuring out if we can use it. struct Query { ExclInvsSet ExclInvs; AssumptionCache *AC; const Instruction *CxtI; const DominatorTree *DT; Query(AssumptionCache *AC = nullptr, const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr) : AC(AC), CxtI(CxtI), DT(DT) {} Query(const Query &Q, const Value *NewExcl) : ExclInvs(Q.ExclInvs), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT) { ExclInvs.insert(NewExcl); } }; } // end anonymous namespace // Given the provided Value and, potentially, a context instruction, return // the preferred context instruction (if any). static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) { // If we've been provided with a context instruction, then use that (provided // it has been inserted). if (CxtI && CxtI->getParent()) return CxtI; // If the value is really an already-inserted instruction, then use that. CxtI = dyn_cast<Instruction>(V); if (CxtI && CxtI->getParent()) return CxtI; return nullptr; } static void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne, const DataLayout &DL, unsigned Depth, const Query &Q); void llvm::computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne, const DataLayout &DL, unsigned Depth, AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT) { ::computeKnownBits(V, KnownZero, KnownOne, DL, Depth, Query(AC, safeCxtI(V, CxtI), DT)); } bool llvm::haveNoCommonBitsSet(Value *LHS, Value *RHS, const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT) { assert(LHS->getType() == RHS->getType() && "LHS and RHS should have the same type"); assert(LHS->getType()->isIntOrIntVectorTy() && "LHS and RHS should be integers"); IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType()); APInt LHSKnownZero(IT->getBitWidth(), 0), LHSKnownOne(IT->getBitWidth(), 0); APInt RHSKnownZero(IT->getBitWidth(), 0), RHSKnownOne(IT->getBitWidth(), 0); computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, 0, AC, CxtI, DT); computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, 0, AC, CxtI, DT); return (LHSKnownZero | RHSKnownZero).isAllOnesValue(); } static void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne, const DataLayout &DL, unsigned Depth, const Query &Q); void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne, const DataLayout &DL, unsigned Depth, AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT) { ::ComputeSignBit(V, KnownZero, KnownOne, DL, Depth, Query(AC, safeCxtI(V, CxtI), DT)); } static bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth, const Query &Q, const DataLayout &DL); bool llvm::isKnownToBeAPowerOfTwo(Value *V, const DataLayout &DL, bool OrZero, unsigned Depth, AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT) { return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth, Query(AC, safeCxtI(V, CxtI), DT), DL); } static 
bool isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth, const Query &Q); bool llvm::isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth, AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT) { return ::isKnownNonZero(V, DL, Depth, Query(AC, safeCxtI(V, CxtI), DT)); } static bool MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL, unsigned Depth, const Query &Q); bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL, unsigned Depth, AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT) { return ::MaskedValueIsZero(V, Mask, DL, Depth, Query(AC, safeCxtI(V, CxtI), DT)); } static unsigned ComputeNumSignBits(Value *V, const DataLayout &DL, unsigned Depth, const Query &Q); unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout &DL, unsigned Depth, AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT) { return ::ComputeNumSignBits(V, DL, Depth, Query(AC, safeCxtI(V, CxtI), DT)); } static void computeKnownBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW, APInt &KnownZero, APInt &KnownOne, APInt &KnownZero2, APInt &KnownOne2, const DataLayout &DL, unsigned Depth, const Query &Q) { if (!Add) { if (ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) { // We know that the top bits of C-X are clear if X contains less bits // than C (i.e. no wrap-around can happen). For example, 20-X is // positive if we can prove that X is >= 0 and < 16. if (!CLHS->getValue().isNegative()) { unsigned BitWidth = KnownZero.getBitWidth(); unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros(); // NLZ can't be BitWidth with no sign bit APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1); computeKnownBits(Op1, KnownZero2, KnownOne2, DL, Depth + 1, Q); // If all of the MaskV bits are known to be zero, then we know the // output top bits are zero, because we now know that the output is // from [0-C]. if ((KnownZero2 & MaskV) == MaskV) { unsigned NLZ2 = CLHS->getValue().countLeadingZeros(); // Top bits known zero. KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2); } } } } unsigned BitWidth = KnownZero.getBitWidth(); // If an initial sequence of bits in the result is not needed, the // corresponding bits in the operands are not needed. APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0); computeKnownBits(Op0, LHSKnownZero, LHSKnownOne, DL, Depth + 1, Q); computeKnownBits(Op1, KnownZero2, KnownOne2, DL, Depth + 1, Q); // Carry in a 1 for a subtract, rather than a 0. APInt CarryIn(BitWidth, 0); if (!Add) { // Sum = LHS + ~RHS + 1 std::swap(KnownZero2, KnownOne2); CarryIn.setBit(0); } APInt PossibleSumZero = ~LHSKnownZero + ~KnownZero2 + CarryIn; APInt PossibleSumOne = LHSKnownOne + KnownOne2 + CarryIn; // Compute known bits of the carry. APInt CarryKnownZero = ~(PossibleSumZero ^ LHSKnownZero ^ KnownZero2); APInt CarryKnownOne = PossibleSumOne ^ LHSKnownOne ^ KnownOne2; // Compute set of known bits (where all three relevant bits are known). APInt LHSKnown = LHSKnownZero | LHSKnownOne; APInt RHSKnown = KnownZero2 | KnownOne2; APInt CarryKnown = CarryKnownZero | CarryKnownOne; APInt Known = LHSKnown & RHSKnown & CarryKnown; assert((PossibleSumZero & Known) == (PossibleSumOne & Known) && "known bits of sum differ"); // Compute known bits of the result. KnownZero = ~PossibleSumOne & Known; KnownOne = PossibleSumOne & Known; // Are we still trying to solve for the sign bit? 
if (!Known.isNegative()) { if (NSW) { // Adding two non-negative numbers, or subtracting a negative number from // a non-negative one, can't wrap into negative. if (LHSKnownZero.isNegative() && KnownZero2.isNegative()) KnownZero |= APInt::getSignBit(BitWidth); // Adding two negative numbers, or subtracting a non-negative number from // a negative one, can't wrap into non-negative. else if (LHSKnownOne.isNegative() && KnownOne2.isNegative()) KnownOne |= APInt::getSignBit(BitWidth); } } } static void computeKnownBitsMul(Value *Op0, Value *Op1, bool NSW, APInt &KnownZero, APInt &KnownOne, APInt &KnownZero2, APInt &KnownOne2, const DataLayout &DL, unsigned Depth, const Query &Q) { unsigned BitWidth = KnownZero.getBitWidth(); computeKnownBits(Op1, KnownZero, KnownOne, DL, Depth + 1, Q); computeKnownBits(Op0, KnownZero2, KnownOne2, DL, Depth + 1, Q); bool isKnownNegative = false; bool isKnownNonNegative = false; // If the multiplication is known not to overflow, compute the sign bit. if (NSW) { if (Op0 == Op1) { // The product of a number with itself is non-negative. isKnownNonNegative = true; } else { bool isKnownNonNegativeOp1 = KnownZero.isNegative(); bool isKnownNonNegativeOp0 = KnownZero2.isNegative(); bool isKnownNegativeOp1 = KnownOne.isNegative(); bool isKnownNegativeOp0 = KnownOne2.isNegative(); // The product of two numbers with the same sign is non-negative. isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) || (isKnownNonNegativeOp1 && isKnownNonNegativeOp0); // The product of a negative number and a non-negative number is either // negative or zero. if (!isKnownNonNegative) isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 && isKnownNonZero(Op0, DL, Depth, Q)) || (isKnownNegativeOp0 && isKnownNonNegativeOp1 && isKnownNonZero(Op1, DL, Depth, Q)); } } // If low bits are zero in either operand, output low known-0 bits. // Also compute a conserative estimate for high known-0 bits. // More trickiness is possible, but this is sufficient for the // interesting case of alignment computation. KnownOne.clearAllBits(); unsigned TrailZ = KnownZero.countTrailingOnes() + KnownZero2.countTrailingOnes(); unsigned LeadZ = std::max(KnownZero.countLeadingOnes() + KnownZero2.countLeadingOnes(), BitWidth) - BitWidth; TrailZ = std::min(TrailZ, BitWidth); LeadZ = std::min(LeadZ, BitWidth); KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) | APInt::getHighBitsSet(BitWidth, LeadZ); // Only make use of no-wrap flags if we failed to compute the sign bit // directly. This matters if the multiplication always overflows, in // which case we prefer to follow the result of the direct computation, // though as the program is invoking undefined behaviour we can choose // whatever we like here. if (isKnownNonNegative && !KnownOne.isNegative()) KnownZero.setBit(BitWidth - 1); else if (isKnownNegative && !KnownZero.isNegative()) KnownOne.setBit(BitWidth - 1); } void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges, APInt &KnownZero) { unsigned BitWidth = KnownZero.getBitWidth(); unsigned NumRanges = Ranges.getNumOperands() / 2; assert(NumRanges >= 1); // Use the high end of the ranges to find leading zeros. 
unsigned MinLeadingZeros = BitWidth; for (unsigned i = 0; i < NumRanges; ++i) { ConstantInt *Lower = mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0)); ConstantInt *Upper = mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1)); ConstantRange Range(Lower->getValue(), Upper->getValue()); if (Range.isWrappedSet()) MinLeadingZeros = 0; // -1 has no zeros unsigned LeadingZeros = (Upper->getValue() - 1).countLeadingZeros(); MinLeadingZeros = std::min(LeadingZeros, MinLeadingZeros); } KnownZero = APInt::getHighBitsSet(BitWidth, MinLeadingZeros); } static bool isEphemeralValueOf(Instruction *I, const Value *E) { SmallVector<const Value *, 16> WorkSet(1, I); SmallPtrSet<const Value *, 32> Visited; SmallPtrSet<const Value *, 16> EphValues; while (!WorkSet.empty()) { const Value *V = WorkSet.pop_back_val(); if (!Visited.insert(V).second) continue; // If all uses of this value are ephemeral, then so is this value. bool FoundNEUse = false; for (const User *I : V->users()) if (!EphValues.count(I)) { FoundNEUse = true; break; } if (!FoundNEUse) { if (V == E) return true; EphValues.insert(V); if (const User *U = dyn_cast<User>(V)) for (User::const_op_iterator J = U->op_begin(), JE = U->op_end(); J != JE; ++J) { if (isSafeToSpeculativelyExecute(*J)) WorkSet.push_back(*J); } } } return false; } // Is this an intrinsic that cannot be speculated but also cannot trap? static bool isAssumeLikeIntrinsic(const Instruction *I) { if (const CallInst *CI = dyn_cast<CallInst>(I)) if (Function *F = CI->getCalledFunction()) switch (F->getIntrinsicID()) { default: break; // FIXME: This list is repeated from NoTTI::getIntrinsicCost. case Intrinsic::assume: case Intrinsic::dbg_declare: case Intrinsic::dbg_value: case Intrinsic::invariant_start: case Intrinsic::invariant_end: case Intrinsic::lifetime_start: case Intrinsic::lifetime_end: case Intrinsic::objectsize: case Intrinsic::ptr_annotation: case Intrinsic::var_annotation: return true; } return false; } static bool isValidAssumeForContext(Value *V, const Query &Q) { Instruction *Inv = cast<Instruction>(V); // There are two restrictions on the use of an assume: // 1. The assume must dominate the context (or the control flow must // reach the assume whenever it reaches the context). // 2. The context must not be in the assume's set of ephemeral values // (otherwise we will use the assume to prove that the condition // feeding the assume is trivially true, thus causing the removal of // the assume). if (Q.DT) { if (Q.DT->dominates(Inv, Q.CxtI)) { return true; } else if (Inv->getParent() == Q.CxtI->getParent()) { // The context comes first, but they're both in the same block. Make sure // there is nothing in between that might interrupt the control flow. for (BasicBlock::const_iterator I = std::next(BasicBlock::const_iterator(Q.CxtI)), IE(Inv); I != IE; ++I) if (!isSafeToSpeculativelyExecute(I) && !isAssumeLikeIntrinsic(I)) return false; return !isEphemeralValueOf(Inv, Q.CxtI); } return false; } // When we don't have a DT, we do a limited search... if (Inv->getParent() == Q.CxtI->getParent()->getSinglePredecessor()) { return true; } else if (Inv->getParent() == Q.CxtI->getParent()) { // Search forward from the assume until we reach the context (or the end // of the block); the common case is that the assume will come first. for (BasicBlock::iterator I = std::next(BasicBlock::iterator(Inv)), IE = Inv->getParent()->end(); I != IE; ++I) if (I == Q.CxtI) return true; // The context must come first... 
for (BasicBlock::const_iterator I = std::next(BasicBlock::const_iterator(Q.CxtI)), IE(Inv); I != IE; ++I) if (!isSafeToSpeculativelyExecute(I) && !isAssumeLikeIntrinsic(I)) return false; return !isEphemeralValueOf(Inv, Q.CxtI); } return false; } bool llvm::isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT) { return ::isValidAssumeForContext(const_cast<Instruction *>(I), Query(nullptr, CxtI, DT)); } template<typename LHS, typename RHS> inline match_combine_or<CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>, CmpClass_match<RHS, LHS, ICmpInst, ICmpInst::Predicate>> m_c_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) { return m_CombineOr(m_ICmp(Pred, L, R), m_ICmp(Pred, R, L)); } template<typename LHS, typename RHS> inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::And>, BinaryOp_match<RHS, LHS, Instruction::And>> m_c_And(const LHS &L, const RHS &R) { return m_CombineOr(m_And(L, R), m_And(R, L)); } template<typename LHS, typename RHS> inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::Or>, BinaryOp_match<RHS, LHS, Instruction::Or>> m_c_Or(const LHS &L, const RHS &R) { return m_CombineOr(m_Or(L, R), m_Or(R, L)); } template<typename LHS, typename RHS> inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::Xor>, BinaryOp_match<RHS, LHS, Instruction::Xor>> m_c_Xor(const LHS &L, const RHS &R) { return m_CombineOr(m_Xor(L, R), m_Xor(R, L)); } /// Compute known bits in 'V' under the assumption that the condition 'Cmp' is /// true (at the context instruction.) This is mostly a utility function for /// the prototype dominating conditions reasoning below. static void computeKnownBitsFromTrueCondition(Value *V, ICmpInst *Cmp, APInt &KnownZero, APInt &KnownOne, const DataLayout &DL, unsigned Depth, const Query &Q) { Value *LHS = Cmp->getOperand(0); Value *RHS = Cmp->getOperand(1); // TODO: We could potentially be more aggressive here. This would be worth // evaluating. If we can, explore commoning this code with the assume // handling logic. if (LHS != V && RHS != V) return; const unsigned BitWidth = KnownZero.getBitWidth(); switch (Cmp->getPredicate()) { default: // We know nothing from this condition break; // TODO: implement unsigned bound from below (known one bits) // TODO: common condition check implementations with assumes // TODO: implement other patterns from assume (e.g. V & B == A) case ICmpInst::ICMP_SGT: if (LHS == V) { APInt KnownZeroTemp(BitWidth, 0), KnownOneTemp(BitWidth, 0); computeKnownBits(RHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q); if (KnownOneTemp.isAllOnesValue() || KnownZeroTemp.isNegative()) { // We know that the sign bit is zero. 
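        // For example, a dominating (icmp sgt %v, -1): the all-ones RHS makes
        // KnownOneTemp all ones, so %v is known non-negative here.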
KnownZero |= APInt::getSignBit(BitWidth); } } break; case ICmpInst::ICMP_EQ: { APInt KnownZeroTemp(BitWidth, 0), KnownOneTemp(BitWidth, 0); if (LHS == V) computeKnownBits(RHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q); else if (RHS == V) computeKnownBits(LHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q); else llvm_unreachable("missing use?"); KnownZero |= KnownZeroTemp; KnownOne |= KnownOneTemp; } break; case ICmpInst::ICMP_ULE: if (LHS == V) { APInt KnownZeroTemp(BitWidth, 0), KnownOneTemp(BitWidth, 0); computeKnownBits(RHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q); // The known zero bits carry over unsigned SignBits = KnownZeroTemp.countLeadingOnes(); KnownZero |= APInt::getHighBitsSet(BitWidth, SignBits); } break; case ICmpInst::ICMP_ULT: if (LHS == V) { APInt KnownZeroTemp(BitWidth, 0), KnownOneTemp(BitWidth, 0); computeKnownBits(RHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q); // Whatever high bits in rhs are zero are known to be zero (if rhs is a // power of 2, then one more). unsigned SignBits = KnownZeroTemp.countLeadingOnes(); if (isKnownToBeAPowerOfTwo(RHS, false, Depth + 1, Query(Q, Cmp), DL)) SignBits++; KnownZero |= APInt::getHighBitsSet(BitWidth, SignBits); } break; }; } /// Compute known bits in 'V' from conditions which are known to be true along /// all paths leading to the context instruction. In particular, look for /// cases where one branch of an interesting condition dominates the context /// instruction. This does not do general dataflow. /// NOTE: This code is EXPERIMENTAL and currently off by default. static void computeKnownBitsFromDominatingCondition(Value *V, APInt &KnownZero, APInt &KnownOne, const DataLayout &DL, unsigned Depth, const Query &Q) { // Need both the dominator tree and the query location to do anything useful if (!Q.DT || !Q.CxtI) return; Instruction *Cxt = const_cast<Instruction *>(Q.CxtI); // Avoid useless work if (auto VI = dyn_cast<Instruction>(V)) if (VI->getParent() == Cxt->getParent()) return; // Note: We currently implement two options. It's not clear which of these // will survive long term, we need data for that. // Option 1 - Try walking the dominator tree looking for conditions which // might apply. This works well for local conditions (loop guards, etc..), // but not as well for things far from the context instruction (presuming a // low max blocks explored). If we can set an high enough limit, this would // be all we need. // Option 2 - We restrict out search to those conditions which are uses of // the value we're interested in. This is independent of dom structure, // but is slightly less powerful without looking through lots of use chains. // It does handle conditions far from the context instruction (e.g. early // function exits on entry) really well though. // Option 1 - Search the dom tree unsigned NumBlocksExplored = 0; BasicBlock *Current = Cxt->getParent(); while (true) { // Stop searching if we've gone too far up the chain if (NumBlocksExplored >= DomConditionsMaxDomBlocks) break; NumBlocksExplored++; if (!Q.DT->getNode(Current)->getIDom()) break; Current = Q.DT->getNode(Current)->getIDom()->getBlock(); if (!Current) // found function entry break; BranchInst *BI = dyn_cast<BranchInst>(Current->getTerminator()); if (!BI || BI->isUnconditional()) continue; ICmpInst *Cmp = dyn_cast<ICmpInst>(BI->getCondition()); if (!Cmp) continue; // We're looking for conditions that are guaranteed to hold at the context // instruction. 
Finding a condition where one path dominates the context // isn't enough because both the true and false cases could merge before // the context instruction we're actually interested in. Instead, we need // to ensure that the taken *edge* dominates the context instruction. BasicBlock *BB0 = BI->getSuccessor(0); BasicBlockEdge Edge(BI->getParent(), BB0); if (!Edge.isSingleEdge() || !Q.DT->dominates(Edge, Q.CxtI->getParent())) continue; computeKnownBitsFromTrueCondition(V, Cmp, KnownZero, KnownOne, DL, Depth, Q); } // Option 2 - Search the other uses of V unsigned NumUsesExplored = 0; for (auto U : V->users()) { // Avoid massive lists if (NumUsesExplored >= DomConditionsMaxUses) break; NumUsesExplored++; // Consider only compare instructions uniquely controlling a branch ICmpInst *Cmp = dyn_cast<ICmpInst>(U); if (!Cmp) continue; if (DomConditionsSingleCmpUse && !Cmp->hasOneUse()) continue; for (auto *CmpU : Cmp->users()) { BranchInst *BI = dyn_cast<BranchInst>(CmpU); if (!BI || BI->isUnconditional()) continue; // We're looking for conditions that are guaranteed to hold at the // context instruction. Finding a condition where one path dominates // the context isn't enough because both the true and false cases could // merge before the context instruction we're actually interested in. // Instead, we need to ensure that the taken *edge* dominates the context // instruction. BasicBlock *BB0 = BI->getSuccessor(0); BasicBlockEdge Edge(BI->getParent(), BB0); if (!Edge.isSingleEdge() || !Q.DT->dominates(Edge, Q.CxtI->getParent())) continue; computeKnownBitsFromTrueCondition(V, Cmp, KnownZero, KnownOne, DL, Depth, Q); } } } static void computeKnownBitsFromAssume(Value *V, APInt &KnownZero, APInt &KnownOne, const DataLayout &DL, unsigned Depth, const Query &Q) { // Use of assumptions is context-sensitive. If we don't have a context, we // cannot use them! if (!Q.AC || !Q.CxtI) return; unsigned BitWidth = KnownZero.getBitWidth(); for (auto &AssumeVH : Q.AC->assumptions()) { if (!AssumeVH) continue; CallInst *I = cast<CallInst>(AssumeVH); assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() && "Got assumption for the wrong function!"); if (Q.ExclInvs.count(I)) continue; // Warning: This loop can end up being somewhat performance sensetive. // We're running this loop for once for each value queried resulting in a // runtime of ~O(#assumes * #values). assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume && "must be an assume intrinsic"); Value *Arg = I->getArgOperand(0); if (Arg == V && isValidAssumeForContext(I, Q)) { assert(BitWidth == 1 && "assume operand is not i1?"); KnownZero.clearAllBits(); KnownOne.setAllBits(); return; } // The remaining tests are all recursive, so bail out if we hit the limit. 
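    // For example, the (v & b = a) pattern below turns
    // assume(icmp eq (and i32 %v, 7), 0) into three known-zero low bits
    // for %v.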
if (Depth == MaxDepth) continue; Value *A, *B; auto m_V = m_CombineOr(m_Specific(V), m_CombineOr(m_PtrToInt(m_Specific(V)), m_BitCast(m_Specific(V)))); CmpInst::Predicate Pred; ConstantInt *C; // assume(v = a) if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) && Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) { APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); KnownZero |= RHSKnownZero; KnownOne |= RHSKnownOne; // assume(v & b = a) } else if (match(Arg, m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) && Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) { APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0); computeKnownBits(B, MaskKnownZero, MaskKnownOne, DL, Depth+1, Query(Q, I)); // For those bits in the mask that are known to be one, we can propagate // known bits from the RHS to V. KnownZero |= RHSKnownZero & MaskKnownOne; KnownOne |= RHSKnownOne & MaskKnownOne; // assume(~(v & b) = a) } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))), m_Value(A))) && Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) { APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0); computeKnownBits(B, MaskKnownZero, MaskKnownOne, DL, Depth+1, Query(Q, I)); // For those bits in the mask that are known to be one, we can propagate // inverted known bits from the RHS to V. KnownZero |= RHSKnownOne & MaskKnownOne; KnownOne |= RHSKnownZero & MaskKnownOne; // assume(v | b = a) } else if (match(Arg, m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) && Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) { APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0); computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I)); // For those bits in B that are known to be zero, we can propagate known // bits from the RHS to V. KnownZero |= RHSKnownZero & BKnownZero; KnownOne |= RHSKnownOne & BKnownZero; // assume(~(v | b) = a) } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))), m_Value(A))) && Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) { APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0); computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I)); // For those bits in B that are known to be zero, we can propagate // inverted known bits from the RHS to V. KnownZero |= RHSKnownOne & BKnownZero; KnownOne |= RHSKnownZero & BKnownZero; // assume(v ^ b = a) } else if (match(Arg, m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) && Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) { APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0); computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I)); // For those bits in B that are known to be zero, we can propagate known // bits from the RHS to V. 
For those bits in B that are known to be one, // we can propagate inverted known bits from the RHS to V. KnownZero |= RHSKnownZero & BKnownZero; KnownOne |= RHSKnownOne & BKnownZero; KnownZero |= RHSKnownOne & BKnownOne; KnownOne |= RHSKnownZero & BKnownOne; // assume(~(v ^ b) = a) } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))), m_Value(A))) && Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) { APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0); computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I)); // For those bits in B that are known to be zero, we can propagate // inverted known bits from the RHS to V. For those bits in B that are // known to be one, we can propagate known bits from the RHS to V. KnownZero |= RHSKnownOne & BKnownZero; KnownOne |= RHSKnownZero & BKnownZero; KnownZero |= RHSKnownZero & BKnownOne; KnownOne |= RHSKnownOne & BKnownOne; // assume(v << c = a) } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)), m_Value(A))) && Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) { APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); // For those bits in RHS that are known, we can propagate them to known // bits in V shifted to the right by C. KnownZero |= RHSKnownZero.lshr(C->getZExtValue()); KnownOne |= RHSKnownOne.lshr(C->getZExtValue()); // assume(~(v << c) = a) } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))), m_Value(A))) && Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) { APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); // For those bits in RHS that are known, we can propagate them inverted // to known bits in V shifted to the right by C. KnownZero |= RHSKnownOne.lshr(C->getZExtValue()); KnownOne |= RHSKnownZero.lshr(C->getZExtValue()); // assume(v >> c = a) } else if (match(Arg, m_c_ICmp(Pred, m_CombineOr(m_LShr(m_V, m_ConstantInt(C)), m_AShr(m_V, m_ConstantInt(C))), m_Value(A))) && Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) { APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); // For those bits in RHS that are known, we can propagate them to known // bits in V shifted to the right by C. KnownZero |= RHSKnownZero << C->getZExtValue(); KnownOne |= RHSKnownOne << C->getZExtValue(); // assume(~(v >> c) = a) } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_CombineOr( m_LShr(m_V, m_ConstantInt(C)), m_AShr(m_V, m_ConstantInt(C)))), m_Value(A))) && Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) { APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); // For those bits in RHS that are known, we can propagate them inverted // to known bits in V shifted to the right by C. 
KnownZero |= RHSKnownOne << C->getZExtValue(); KnownOne |= RHSKnownZero << C->getZExtValue(); // assume(v >=_s c) where c is non-negative } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) && Pred == ICmpInst::ICMP_SGE && isValidAssumeForContext(I, Q)) { APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); if (RHSKnownZero.isNegative()) { // We know that the sign bit is zero. KnownZero |= APInt::getSignBit(BitWidth); } // assume(v >_s c) where c is at least -1. } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) && Pred == ICmpInst::ICMP_SGT && isValidAssumeForContext(I, Q)) { APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); if (RHSKnownOne.isAllOnesValue() || RHSKnownZero.isNegative()) { // We know that the sign bit is zero. KnownZero |= APInt::getSignBit(BitWidth); } // assume(v <=_s c) where c is negative } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) && Pred == ICmpInst::ICMP_SLE && isValidAssumeForContext(I, Q)) { APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); if (RHSKnownOne.isNegative()) { // We know that the sign bit is one. KnownOne |= APInt::getSignBit(BitWidth); } // assume(v <_s c) where c is non-positive } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) && Pred == ICmpInst::ICMP_SLT && isValidAssumeForContext(I, Q)) { APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); if (RHSKnownZero.isAllOnesValue() || RHSKnownOne.isNegative()) { // We know that the sign bit is one. KnownOne |= APInt::getSignBit(BitWidth); } // assume(v <=_u c) } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) && Pred == ICmpInst::ICMP_ULE && isValidAssumeForContext(I, Q)) { APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); // Whatever high bits in c are zero are known to be zero. KnownZero |= APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes()); // assume(v <_u c) } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) && Pred == ICmpInst::ICMP_ULT && isValidAssumeForContext(I, Q)) { APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); // Whatever high bits in c are zero are known to be zero (if c is a power // of 2, then one more). if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I), DL)) KnownZero |= APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes()+1); else KnownZero |= APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes()); } } } static void computeKnownBitsFromOperator(Operator *I, APInt &KnownZero, APInt &KnownOne, const DataLayout &DL, unsigned Depth, const Query &Q) { unsigned BitWidth = KnownZero.getBitWidth(); APInt KnownZero2(KnownZero), KnownOne2(KnownOne); switch (I->getOpcode()) { default: break; case Instruction::Load: if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range)) computeKnownBitsFromRangeMetadata(*MD, KnownZero); break; case Instruction::And: { // If either the LHS or the RHS are Zero, the result is zero. computeKnownBits(I->getOperand(1), KnownZero, KnownOne, DL, Depth + 1, Q); computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q); // Output known-1 bits are only known if set in both the LHS & RHS. 
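    // For example, in (and i8 %x, -16) the constant contributes
    // KnownZero = 0x0F, so the low four bits of the result are known zero
    // regardless of what is known about %x.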
KnownOne &= KnownOne2; // Output known-0 are known to be clear if zero in either the LHS | RHS. KnownZero |= KnownZero2; break; } case Instruction::Or: { computeKnownBits(I->getOperand(1), KnownZero, KnownOne, DL, Depth + 1, Q); computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q); // Output known-0 bits are only known if clear in both the LHS & RHS. KnownZero &= KnownZero2; // Output known-1 are known to be set if set in either the LHS | RHS. KnownOne |= KnownOne2; break; } case Instruction::Xor: { computeKnownBits(I->getOperand(1), KnownZero, KnownOne, DL, Depth + 1, Q); computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q); // Output known-0 bits are known if clear or set in both the LHS & RHS. APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2); // Output known-1 are known to be set if set in only one of the LHS, RHS. KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2); KnownZero = KnownZeroOut; break; } case Instruction::Mul: { bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap(); computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, KnownZero, KnownOne, KnownZero2, KnownOne2, DL, Depth, Q); break; } case Instruction::UDiv: { // For the purposes of computing leading zeros we can conservatively // treat a udiv as a logical right shift by the power of 2 known to // be less than the denominator. computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q); unsigned LeadZ = KnownZero2.countLeadingOnes(); KnownOne2.clearAllBits(); KnownZero2.clearAllBits(); computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, DL, Depth + 1, Q); unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros(); if (RHSUnknownLeadingOnes != BitWidth) LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSUnknownLeadingOnes - 1); KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ); break; } case Instruction::Select: computeKnownBits(I->getOperand(2), KnownZero, KnownOne, DL, Depth + 1, Q); computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, DL, Depth + 1, Q); // Only known if known in both the LHS and RHS. KnownOne &= KnownOne2; KnownZero &= KnownZero2; break; case Instruction::FPTrunc: case Instruction::FPExt: case Instruction::FPToUI: case Instruction::FPToSI: case Instruction::SIToFP: case Instruction::UIToFP: break; // Can't work with floating point. case Instruction::PtrToInt: case Instruction::IntToPtr: case Instruction::AddrSpaceCast: // Pointers could be different sizes. // FALL THROUGH and handle them the same as zext/trunc. case Instruction::ZExt: case Instruction::Trunc: { Type *SrcTy = I->getOperand(0)->getType(); unsigned SrcBitWidth; // Note that we handle pointer operands here because of inttoptr/ptrtoint // which fall through here. SrcBitWidth = DL.getTypeSizeInBits(SrcTy->getScalarType()); assert(SrcBitWidth && "SrcBitWidth can't be zero"); KnownZero = KnownZero.zextOrTrunc(SrcBitWidth); KnownOne = KnownOne.zextOrTrunc(SrcBitWidth); computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q); KnownZero = KnownZero.zextOrTrunc(BitWidth); KnownOne = KnownOne.zextOrTrunc(BitWidth); // Any top bits are known to be zero. 
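    // For example, (zext i8 %x to i32) has its top 24 bits known zero.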
if (BitWidth > SrcBitWidth) KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth); break; } case Instruction::BitCast: { Type *SrcTy = I->getOperand(0)->getType(); if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && // TODO: For now, not handling conversions like: // (bitcast i64 %x to <2 x i32>) !I->getType()->isVectorTy()) { computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q); break; } break; } case Instruction::SExt: { // Compute the bits in the result that are not present in the input. unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits(); KnownZero = KnownZero.trunc(SrcBitWidth); KnownOne = KnownOne.trunc(SrcBitWidth); computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q); KnownZero = KnownZero.zext(BitWidth); KnownOne = KnownOne.zext(BitWidth); // If the sign bit of the input is known set or clear, then we know the // top bits of the result. if (KnownZero[SrcBitWidth-1]) // Input sign bit known zero KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth); else if (KnownOne[SrcBitWidth-1]) // Input sign bit known set KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth); break; } case Instruction::Shl: // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) { uint64_t ShiftAmt = SA->getLimitedValue(BitWidth); computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q); KnownZero <<= ShiftAmt; KnownOne <<= ShiftAmt; KnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt); // low bits known 0 } break; case Instruction::LShr: // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) { // Compute the new bits that are at the top now. uint64_t ShiftAmt = SA->getLimitedValue(BitWidth); // Unsigned shift right. computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q); KnownZero = APIntOps::lshr(KnownZero, ShiftAmt); KnownOne = APIntOps::lshr(KnownOne, ShiftAmt); // high bits known zero. KnownZero |= APInt::getHighBitsSet(BitWidth, ShiftAmt); } break; case Instruction::AShr: // (ashr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) { // Compute the new bits that are at the top now. uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1); // Signed shift right. computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q); KnownZero = APIntOps::lshr(KnownZero, ShiftAmt); KnownOne = APIntOps::lshr(KnownOne, ShiftAmt); APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt)); if (KnownZero[BitWidth-ShiftAmt-1]) // New bits are known zero. KnownZero |= HighBits; else if (KnownOne[BitWidth-ShiftAmt-1]) // New bits are known one. 
KnownOne |= HighBits; } break; case Instruction::Sub: { bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap(); computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW, KnownZero, KnownOne, KnownZero2, KnownOne2, DL, Depth, Q); break; } case Instruction::Add: { bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap(); computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW, KnownZero, KnownOne, KnownZero2, KnownOne2, DL, Depth, Q); break; } case Instruction::SRem: if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) { APInt RA = Rem->getValue().abs(); if (RA.isPowerOf2()) { APInt LowBits = RA - 1; computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q); // The low bits of the first operand are unchanged by the srem. KnownZero = KnownZero2 & LowBits; KnownOne = KnownOne2 & LowBits; // If the first operand is non-negative or has all low bits zero, then // the upper bits are all zero. if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits)) KnownZero |= ~LowBits; // If the first operand is negative and not all low bits are zero, then // the upper bits are all one. if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0)) KnownOne |= ~LowBits; assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); } } // The sign bit is the LHS's sign bit, except when the result of the // remainder is zero. if (KnownZero.isNonNegative()) { APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0); computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, DL, Depth + 1, Q); // If it's known zero, our sign bit is also zero. if (LHSKnownZero.isNegative()) KnownZero.setBit(BitWidth - 1); } break; case Instruction::URem: { if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) { APInt RA = Rem->getValue(); if (RA.isPowerOf2()) { APInt LowBits = (RA - 1); computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q); KnownZero |= ~LowBits; KnownOne &= LowBits; break; } } // Since the result is less than or equal to either operand, any leading // zero bits in either operand must also exist in the result. computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q); computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, DL, Depth + 1, Q); unsigned Leaders = std::max(KnownZero.countLeadingOnes(), KnownZero2.countLeadingOnes()); KnownOne.clearAllBits(); KnownZero = APInt::getHighBitsSet(BitWidth, Leaders); break; } case Instruction::Alloca: { AllocaInst *AI = cast<AllocaInst>(I); unsigned Align = AI->getAlignment(); if (Align == 0) Align = DL.getABITypeAlignment(AI->getType()->getElementType()); if (Align > 0) KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align)); break; } case Instruction::GetElementPtr: { // Analyze all of the subscripts of this getelementptr instruction // to determine if we can prove known low zero bits. APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0); computeKnownBits(I->getOperand(0), LocalKnownZero, LocalKnownOne, DL, Depth + 1, Q); unsigned TrailZ = LocalKnownZero.countTrailingOnes(); gep_type_iterator GTI = gep_type_begin(I); for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) { Value *Index = I->getOperand(i); if (StructType *STy = dyn_cast<StructType>(*GTI)) { // Handle struct member offset arithmetic. 
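        // For example, indexing field 1 of { i64, i64 } adds a byte offset of
        // 8, contributing three trailing zero bits to the result.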
// Handle case when index is vector zeroinitializer Constant *CIndex = cast<Constant>(Index); if (CIndex->isZeroValue()) continue; if (CIndex->getType()->isVectorTy()) Index = CIndex->getSplatValue(); unsigned Idx = cast<ConstantInt>(Index)->getZExtValue(); const StructLayout *SL = DL.getStructLayout(STy); uint64_t Offset = SL->getElementOffset(Idx); TrailZ = std::min<unsigned>(TrailZ, countTrailingZeros(Offset)); } else { // Handle array index arithmetic. Type *IndexedTy = GTI.getIndexedType(); if (!IndexedTy->isSized()) { TrailZ = 0; break; } unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits(); uint64_t TypeSize = DL.getTypeAllocSize(IndexedTy); LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0); computeKnownBits(Index, LocalKnownZero, LocalKnownOne, DL, Depth + 1, Q); TrailZ = std::min(TrailZ, unsigned(countTrailingZeros(TypeSize) + LocalKnownZero.countTrailingOnes())); } } KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ); break; } case Instruction::PHI: { PHINode *P = cast<PHINode>(I); // Handle the case of a simple two-predecessor recurrence PHI. // There's a lot more that could theoretically be done here, but // this is sufficient to catch some interesting cases. if (P->getNumIncomingValues() == 2) { for (unsigned i = 0; i != 2; ++i) { Value *L = P->getIncomingValue(i); Value *R = P->getIncomingValue(!i); Operator *LU = dyn_cast<Operator>(L); if (!LU) continue; unsigned Opcode = LU->getOpcode(); // Check for operations that have the property that if // both their operands have low zero bits, the result // will have low zero bits. if (Opcode == Instruction::Add || Opcode == Instruction::Sub || Opcode == Instruction::And || Opcode == Instruction::Or || Opcode == Instruction::Mul) { Value *LL = LU->getOperand(0); Value *LR = LU->getOperand(1); // Find a recurrence. if (LL == I) L = LR; else if (LR == I) L = LL; else break; // Ok, we have a PHI of the form L op= R. Check for low // zero bits. computeKnownBits(R, KnownZero2, KnownOne2, DL, Depth + 1, Q); // We need to take the minimum number of known bits APInt KnownZero3(KnownZero), KnownOne3(KnownOne); computeKnownBits(L, KnownZero3, KnownOne3, DL, Depth + 1, Q); KnownZero = APInt::getLowBitsSet(BitWidth, std::min(KnownZero2.countTrailingOnes(), KnownZero3.countTrailingOnes())); break; } } } // Unreachable blocks may have zero-operand PHI nodes. if (P->getNumIncomingValues() == 0) break; // Otherwise take the unions of the known bit sets of the operands, // taking conservative care to avoid excessive recursion. if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) { // Skip if every incoming value references to ourself. if (dyn_cast_or_null<UndefValue>(P->hasConstantValue())) break; KnownZero = APInt::getAllOnesValue(BitWidth); KnownOne = APInt::getAllOnesValue(BitWidth); for (Value *IncValue : P->incoming_values()) { // Skip direct self references. if (IncValue == P) continue; KnownZero2 = APInt(BitWidth, 0); KnownOne2 = APInt(BitWidth, 0); // Recurse, but cap the recursion to one level, because we don't // want to waste time spinning around in loops. computeKnownBits(IncValue, KnownZero2, KnownOne2, DL, MaxDepth - 1, Q); KnownZero &= KnownZero2; KnownOne &= KnownOne2; // If all bits have been ruled out, there's no need to check // more operands. 
if (!KnownZero && !KnownOne) break; } } break; } case Instruction::Call: case Instruction::Invoke: if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range)) computeKnownBitsFromRangeMetadata(*MD, KnownZero); // If a range metadata is attached to this IntrinsicInst, intersect the // explicit range specified by the metadata and the implicit range of // the intrinsic. if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { switch (II->getIntrinsicID()) { default: break; case Intrinsic::ctlz: case Intrinsic::cttz: { unsigned LowBits = Log2_32(BitWidth)+1; // If this call is undefined for 0, the result will be less than 2^n. if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext())) LowBits -= 1; KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits); break; } case Intrinsic::ctpop: { unsigned LowBits = Log2_32(BitWidth)+1; KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits); break; } #if 0 // HLSL Change - remove platform intrinsics case Intrinsic::x86_sse42_crc32_64_64: KnownZero |= APInt::getHighBitsSet(64, 32); break; #endif // HLSL Change - remove platform intrinsics } } break; case Instruction::ExtractValue: if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) { ExtractValueInst *EVI = cast<ExtractValueInst>(I); if (EVI->getNumIndices() != 1) break; if (EVI->getIndices()[0] == 0) { switch (II->getIntrinsicID()) { default: break; case Intrinsic::uadd_with_overflow: case Intrinsic::sadd_with_overflow: computeKnownBitsAddSub(true, II->getArgOperand(0), II->getArgOperand(1), false, KnownZero, KnownOne, KnownZero2, KnownOne2, DL, Depth, Q); break; case Intrinsic::usub_with_overflow: case Intrinsic::ssub_with_overflow: computeKnownBitsAddSub(false, II->getArgOperand(0), II->getArgOperand(1), false, KnownZero, KnownOne, KnownZero2, KnownOne2, DL, Depth, Q); break; case Intrinsic::umul_with_overflow: case Intrinsic::smul_with_overflow: computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false, KnownZero, KnownOne, KnownZero2, KnownOne2, DL, Depth, Q); break; } } } } } /// Determine which bits of V are known to be either zero or one and return /// them in the KnownZero/KnownOne bit sets. /// /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that /// we cannot optimize based on the assumption that it is zero without changing /// it to be an explicit zero. If we don't change it to zero, other code could /// optimized based on the contradictory assumption that it is non-zero. /// Because instcombine aggressively folds operations with undef args anyway, /// this won't lose us code quality. /// /// This function is defined on values with integer type, values with pointer /// type, and vectors of integers. In the case /// where V is a vector, known zero, and known one values are the /// same width as the vector element, and the bit is set only if it is true /// for all of the elements in the vector. 
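/// For example, for the constant vector <2 x i8> <i8 4, i8 12>, bit 2 is
/// known one and bit 3 is in neither set, because the two elements disagree
/// on it.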
void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne, const DataLayout &DL, unsigned Depth, const Query &Q) { assert(V && "No Value?"); assert(Depth <= MaxDepth && "Limit Search Depth"); unsigned BitWidth = KnownZero.getBitWidth(); assert((V->getType()->isIntOrIntVectorTy() || V->getType()->getScalarType()->isPointerTy()) && "Not integer or pointer type!"); assert((DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) && (!V->getType()->isIntOrIntVectorTy() || V->getType()->getScalarSizeInBits() == BitWidth) && KnownZero.getBitWidth() == BitWidth && KnownOne.getBitWidth() == BitWidth && "V, KnownOne and KnownZero should have same BitWidth"); if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) { // We know all of the bits for a constant! KnownOne = CI->getValue(); KnownZero = ~KnownOne; return; } // Null and aggregate-zero are all-zeros. if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) { KnownOne.clearAllBits(); KnownZero = APInt::getAllOnesValue(BitWidth); return; } // Handle a constant vector by taking the intersection of the known bits of // each element. There is no real need to handle ConstantVector here, because // we don't handle undef in any particularly useful way. if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) { // We know that CDS must be a vector of integers. Take the intersection of // each element. KnownZero.setAllBits(); KnownOne.setAllBits(); APInt Elt(KnownZero.getBitWidth(), 0); for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) { Elt = CDS->getElementAsInteger(i); KnownZero &= ~Elt; KnownOne &= Elt; } return; } // The address of an aligned GlobalValue has trailing zeros. if (auto *GO = dyn_cast<GlobalObject>(V)) { unsigned Align = GO->getAlignment(); if (Align == 0) { if (auto *GVar = dyn_cast<GlobalVariable>(GO)) { Type *ObjectType = GVar->getType()->getElementType(); if (ObjectType->isSized()) { // If the object is defined in the current Module, we'll be giving // it the preferred alignment. Otherwise, we have to assume that it // may only have the minimum ABI alignment. if (GVar->isStrongDefinitionForLinker()) Align = DL.getPreferredAlignment(GVar); else Align = DL.getABITypeAlignment(ObjectType); } } } if (Align > 0) KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align)); else KnownZero.clearAllBits(); KnownOne.clearAllBits(); return; } if (Argument *A = dyn_cast<Argument>(V)) { unsigned Align = A->getType()->isPointerTy() ? A->getParamAlignment() : 0; if (!Align && A->hasStructRetAttr()) { // An sret parameter has at least the ABI alignment of the return type. Type *EltTy = cast<PointerType>(A->getType())->getElementType(); if (EltTy->isSized()) Align = DL.getABITypeAlignment(EltTy); } if (Align) KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align)); else KnownZero.clearAllBits(); KnownOne.clearAllBits(); // Don't give up yet... there might be an assumption that provides more // information... computeKnownBitsFromAssume(V, KnownZero, KnownOne, DL, Depth, Q); // Or a dominating condition for that matter if (EnableDomConditions && Depth <= DomConditionsMaxDepth) computeKnownBitsFromDominatingCondition(V, KnownZero, KnownOne, DL, Depth, Q); return; } // Start out not knowing anything. KnownZero.clearAllBits(); KnownOne.clearAllBits(); // Limit search depth. // All recursive calls that increase depth must come after this. if (Depth == MaxDepth) return; // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has // the bits of its aliasee. 
if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { if (!GA->mayBeOverridden()) computeKnownBits(GA->getAliasee(), KnownZero, KnownOne, DL, Depth + 1, Q); return; } if (Operator *I = dyn_cast<Operator>(V)) computeKnownBitsFromOperator(I, KnownZero, KnownOne, DL, Depth, Q); // computeKnownBitsFromAssume and computeKnownBitsFromDominatingCondition // strictly refines KnownZero and KnownOne. Therefore, we run them after // computeKnownBitsFromOperator. // Check whether a nearby assume intrinsic can determine some known bits. computeKnownBitsFromAssume(V, KnownZero, KnownOne, DL, Depth, Q); // Check whether there's a dominating condition which implies something about // this value at the given context. if (EnableDomConditions && Depth <= DomConditionsMaxDepth) computeKnownBitsFromDominatingCondition(V, KnownZero, KnownOne, DL, Depth, Q); assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); } /// Determine whether the sign bit is known to be zero or one. /// Convenience wrapper around computeKnownBits. void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne, const DataLayout &DL, unsigned Depth, const Query &Q) { unsigned BitWidth = getBitWidth(V->getType(), DL); if (!BitWidth) { KnownZero = false; KnownOne = false; return; } APInt ZeroBits(BitWidth, 0); APInt OneBits(BitWidth, 0); computeKnownBits(V, ZeroBits, OneBits, DL, Depth, Q); KnownOne = OneBits[BitWidth - 1]; KnownZero = ZeroBits[BitWidth - 1]; } /// Return true if the given value is known to have exactly one /// bit set when defined. For vectors return true if every element is known to /// be a power of two when defined. Supports values with integer or pointer /// types and vectors of integers. bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth, const Query &Q, const DataLayout &DL) { if (Constant *C = dyn_cast<Constant>(V)) { if (C->isNullValue()) return OrZero; if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) return CI->getValue().isPowerOf2(); // TODO: Handle vector constants. } // 1 << X is clearly a power of two if the one is not shifted off the end. If // it is shifted off the end then the result is undefined. if (match(V, m_Shl(m_One(), m_Value()))) return true; // (signbit) >>l X is clearly a power of two if the one is not shifted off the // bottom. If it is shifted off the bottom then the result is undefined. if (match(V, m_LShr(m_SignBit(), m_Value()))) return true; // The remaining tests are all recursive, so bail out if we hit the limit. if (Depth++ == MaxDepth) return false; Value *X = nullptr, *Y = nullptr; // A shift of a power of two is a power of two or zero. if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) || match(V, m_Shr(m_Value(X), m_Value())))) return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q, DL); if (ZExtInst *ZI = dyn_cast<ZExtInst>(V)) return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q, DL); if (SelectInst *SI = dyn_cast<SelectInst>(V)) return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q, DL) && isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q, DL); if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) { // A power of two and'd with anything is a power of two or zero. if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q, DL) || isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q, DL)) return true; // X & (-X) is always a power of two or zero. 
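    // For example, X = 0b0110100 gives -X = 0b1001100 and
    // X & -X = 0b0000100, the lowest set bit of X.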
if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X)))) return true; return false; } // Adding a power-of-two or zero to the same power-of-two or zero yields // either the original power-of-two, a larger power-of-two or zero. if (match(V, m_Add(m_Value(X), m_Value(Y)))) { OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V); if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) { if (match(X, m_And(m_Specific(Y), m_Value())) || match(X, m_And(m_Value(), m_Specific(Y)))) if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q, DL)) return true; if (match(Y, m_And(m_Specific(X), m_Value())) || match(Y, m_And(m_Value(), m_Specific(X)))) if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q, DL)) return true; unsigned BitWidth = V->getType()->getScalarSizeInBits(); APInt LHSZeroBits(BitWidth, 0), LHSOneBits(BitWidth, 0); computeKnownBits(X, LHSZeroBits, LHSOneBits, DL, Depth, Q); APInt RHSZeroBits(BitWidth, 0), RHSOneBits(BitWidth, 0); computeKnownBits(Y, RHSZeroBits, RHSOneBits, DL, Depth, Q); // If i8 V is a power of two or zero: // ZeroBits: 1 1 1 0 1 1 1 1 // ~ZeroBits: 0 0 0 1 0 0 0 0 if ((~(LHSZeroBits & RHSZeroBits)).isPowerOf2()) // If OrZero isn't set, we cannot give back a zero result. // Make sure either the LHS or RHS has a bit set. if (OrZero || RHSOneBits.getBoolValue() || LHSOneBits.getBoolValue()) return true; } } // An exact divide or right shift can only shift off zero bits, so the result // is a power of two only if the first operand is a power of two and not // copying a sign bit (sdiv int_min, 2). if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) || match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) { return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero, Depth, Q, DL); } return false; } /// \brief Test whether a GEP's result is known to be non-null. /// /// Uses properties inherent in a GEP to try to determine whether it is known /// to be non-null. /// /// Currently this routine does not support vector GEPs. static bool isGEPKnownNonNull(GEPOperator *GEP, const DataLayout &DL, unsigned Depth, const Query &Q) { if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0) return false; // FIXME: Support vector-GEPs. assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP"); // If the base pointer is non-null, we cannot walk to a null address with an // inbounds GEP in address space zero. if (isKnownNonZero(GEP->getPointerOperand(), DL, Depth, Q)) return true; // Walk the GEP operands and see if any operand introduces a non-zero offset. // If so, then the GEP cannot produce a null pointer, as doing so would // inherently violate the inbounds contract within address space zero. for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP); GTI != GTE; ++GTI) { // Struct types are easy -- they must always be indexed by a constant. if (StructType *STy = dyn_cast<StructType>(*GTI)) { ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand()); unsigned ElementIdx = OpC->getZExtValue(); const StructLayout *SL = DL.getStructLayout(STy); uint64_t ElementOffset = SL->getElementOffset(ElementIdx); if (ElementOffset > 0) return true; continue; } // If we have a zero-sized type, the index doesn't matter. Keep looping. if (DL.getTypeAllocSize(GTI.getIndexedType()) == 0) continue; // Fast path the constant operand case both for efficiency and so we don't // increment Depth when just zipping down an all-constant GEP. 
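    // For example, (getelementptr inbounds i32, i32* %p, i64 4) is treated as
    // non-null here: the constant index is non-zero over a sized type.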
if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) { if (!OpC->isZero()) return true; continue; } // We post-increment Depth here because while isKnownNonZero increments it // as well, when we pop back up that increment won't persist. We don't want // to recurse 10k times just because we have 10k GEP operands. We don't // bail completely out because we want to handle constant GEPs regardless // of depth. if (Depth++ >= MaxDepth) continue; if (isKnownNonZero(GTI.getOperand(), DL, Depth, Q)) return true; } return false; } /// Does the 'Range' metadata (which must be a valid MD_range operand list) /// ensure that the value it's attached to is never Value? 'RangeType' is /// is the type of the value described by the range. static bool rangeMetadataExcludesValue(MDNode* Ranges, const APInt& Value) { const unsigned NumRanges = Ranges->getNumOperands() / 2; assert(NumRanges >= 1); for (unsigned i = 0; i < NumRanges; ++i) { ConstantInt *Lower = mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0)); ConstantInt *Upper = mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1)); ConstantRange Range(Lower->getValue(), Upper->getValue()); if (Range.contains(Value)) return false; } return true; } /// Return true if the given value is known to be non-zero when defined. /// For vectors return true if every element is known to be non-zero when /// defined. Supports values with integer or pointer type and vectors of /// integers. bool isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth, const Query &Q) { if (Constant *C = dyn_cast<Constant>(V)) { if (C->isNullValue()) return false; if (isa<ConstantInt>(C)) // Must be non-zero due to null test above. return true; // TODO: Handle vectors return false; } if (Instruction* I = dyn_cast<Instruction>(V)) { if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) { // If the possible ranges don't contain zero, then the value is // definitely non-zero. if (IntegerType* Ty = dyn_cast<IntegerType>(V->getType())) { const APInt ZeroValue(Ty->getBitWidth(), 0); if (rangeMetadataExcludesValue(Ranges, ZeroValue)) return true; } } } // The remaining tests are all recursive, so bail out if we hit the limit. if (Depth++ >= MaxDepth) return false; // Check for pointer simplifications. if (V->getType()->isPointerTy()) { if (isKnownNonNull(V)) return true; if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) if (isGEPKnownNonNull(GEP, DL, Depth, Q)) return true; } unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), DL); // X | Y != 0 if X != 0 or Y != 0. Value *X = nullptr, *Y = nullptr; if (match(V, m_Or(m_Value(X), m_Value(Y)))) return isKnownNonZero(X, DL, Depth, Q) || isKnownNonZero(Y, DL, Depth, Q); // ext X != 0 if X != 0. if (isa<SExtInst>(V) || isa<ZExtInst>(V)) return isKnownNonZero(cast<Instruction>(V)->getOperand(0), DL, Depth, Q); // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined // if the lowest bit is shifted off the end. if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) { // shl nuw can't remove any non-zero bits. OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); if (BO->hasNoUnsignedWrap()) return isKnownNonZero(X, DL, Depth, Q); APInt KnownZero(BitWidth, 0); APInt KnownOne(BitWidth, 0); computeKnownBits(X, KnownZero, KnownOne, DL, Depth, Q); if (KnownOne[0]) return true; } // shr X, Y != 0 if X is negative. Note that the value of the shift is not // defined if the sign bit is shifted off the end. 
else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) { // shr exact can only shift out zero bits. PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V); if (BO->isExact()) return isKnownNonZero(X, DL, Depth, Q); bool XKnownNonNegative, XKnownNegative; ComputeSignBit(X, XKnownNonNegative, XKnownNegative, DL, Depth, Q); if (XKnownNegative) return true; } // div exact can only produce a zero if the dividend is zero. else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) { return isKnownNonZero(X, DL, Depth, Q); } // X + Y. else if (match(V, m_Add(m_Value(X), m_Value(Y)))) { bool XKnownNonNegative, XKnownNegative; bool YKnownNonNegative, YKnownNegative; ComputeSignBit(X, XKnownNonNegative, XKnownNegative, DL, Depth, Q); ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, DL, Depth, Q); // If X and Y are both non-negative (as signed values) then their sum is not // zero unless both X and Y are zero. if (XKnownNonNegative && YKnownNonNegative) if (isKnownNonZero(X, DL, Depth, Q) || isKnownNonZero(Y, DL, Depth, Q)) return true; // If X and Y are both negative (as signed values) then their sum is not // zero unless both X and Y equal INT_MIN. if (BitWidth && XKnownNegative && YKnownNegative) { APInt KnownZero(BitWidth, 0); APInt KnownOne(BitWidth, 0); APInt Mask = APInt::getSignedMaxValue(BitWidth); // The sign bit of X is set. If some other bit is set then X is not equal // to INT_MIN. computeKnownBits(X, KnownZero, KnownOne, DL, Depth, Q); if ((KnownOne & Mask) != 0) return true; // The sign bit of Y is set. If some other bit is set then Y is not equal // to INT_MIN. computeKnownBits(Y, KnownZero, KnownOne, DL, Depth, Q); if ((KnownOne & Mask) != 0) return true; } // The sum of a non-negative number and a power of two is not zero. if (XKnownNonNegative && isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q, DL)) return true; if (YKnownNonNegative && isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q, DL)) return true; } // X * Y. else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) { OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); // If X and Y are non-zero then so is X * Y as long as the multiplication // does not overflow. if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) && isKnownNonZero(X, DL, Depth, Q) && isKnownNonZero(Y, DL, Depth, Q)) return true; } // (C ? X : Y) != 0 if X != 0 and Y != 0. else if (SelectInst *SI = dyn_cast<SelectInst>(V)) { if (isKnownNonZero(SI->getTrueValue(), DL, Depth, Q) && isKnownNonZero(SI->getFalseValue(), DL, Depth, Q)) return true; } if (!BitWidth) return false; APInt KnownZero(BitWidth, 0); APInt KnownOne(BitWidth, 0); computeKnownBits(V, KnownZero, KnownOne, DL, Depth, Q); return KnownOne != 0; } /// Return true if 'V & Mask' is known to be zero. We use this predicate to /// simplify operations downstream. Mask is known to be zero for bits that V /// cannot have. /// /// This function is defined on values with integer type, values with pointer /// type, and vectors of integers. In the case /// where V is a vector, the mask, known zero, and known one values are the /// same width as the vector element, and the bit is set only if it is true /// for all of the elements in the vector. 
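/// For example, MaskedValueIsZero(V, 0x3) returns true when V is
/// (shl i32 %x, 2), since the shift leaves the low two bits known zero.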
bool MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL, unsigned Depth, const Query &Q) { APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0); computeKnownBits(V, KnownZero, KnownOne, DL, Depth, Q); return (KnownZero & Mask) == Mask; } /// Return the number of times the sign bit of the register is replicated into /// the other bits. We know that at least 1 bit is always equal to the sign bit /// (itself), but other cases can give us information. For example, immediately /// after an "ashr X, 2", we know that the top 3 bits are all equal to each /// other, so we return 3. /// /// 'Op' must have a scalar integer type. /// unsigned ComputeNumSignBits(Value *V, const DataLayout &DL, unsigned Depth, const Query &Q) { unsigned TyBits = DL.getTypeSizeInBits(V->getType()->getScalarType()); unsigned Tmp, Tmp2; unsigned FirstAnswer = 1; // Note that ConstantInt is handled by the general computeKnownBits case // below. if (Depth == 6) return 1; // Limit search depth. Operator *U = dyn_cast<Operator>(V); switch (Operator::getOpcode(V)) { default: break; case Instruction::SExt: Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits(); return ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q) + Tmp; case Instruction::SDiv: { const APInt *Denominator; // sdiv X, C -> adds log(C) sign bits. if (match(U->getOperand(1), m_APInt(Denominator))) { // Ignore non-positive denominator. if (!Denominator->isStrictlyPositive()) break; // Calculate the incoming numerator bits. unsigned NumBits = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q); // Add floor(log(C)) bits to the numerator bits. return std::min(TyBits, NumBits + Denominator->logBase2()); } break; } case Instruction::SRem: { const APInt *Denominator; // srem X, C -> we know that the result is within [-C+1,C) when C is a // positive constant. This let us put a lower bound on the number of sign // bits. if (match(U->getOperand(1), m_APInt(Denominator))) { // Ignore non-positive denominator. if (!Denominator->isStrictlyPositive()) break; // Calculate the incoming numerator bits. SRem by a positive constant // can't lower the number of sign bits. unsigned NumrBits = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q); // Calculate the leading sign bit constraints by examining the // denominator. Given that the denominator is positive, there are two // cases: // // 1. the numerator is positive. The result range is [0,C) and [0,C) u< // (1 << ceilLogBase2(C)). // // 2. the numerator is negative. Then the result range is (-C,0] and // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)). // // Thus a lower bound on the number of sign bits is `TyBits - // ceilLogBase2(C)`. unsigned ResBits = TyBits - Denominator->ceilLogBase2(); return std::max(NumrBits, ResBits); } break; } case Instruction::AShr: { Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q); // ashr X, C -> adds C sign bits. Vectors too. const APInt *ShAmt; if (match(U->getOperand(1), m_APInt(ShAmt))) { Tmp += ShAmt->getZExtValue(); if (Tmp > TyBits) Tmp = TyBits; } return Tmp; } case Instruction::Shl: { const APInt *ShAmt; if (match(U->getOperand(1), m_APInt(ShAmt))) { // shl destroys sign bits. Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q); Tmp2 = ShAmt->getZExtValue(); if (Tmp2 >= TyBits || // Bad shift. Tmp2 >= Tmp) break; // Shifted all sign bits out. return Tmp - Tmp2; } break; } case Instruction::And: case Instruction::Or: case Instruction::Xor: // NOT is handled here. 
// Logical binary ops preserve the number of sign bits at the worst. Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q); if (Tmp != 1) { Tmp2 = ComputeNumSignBits(U->getOperand(1), DL, Depth + 1, Q); FirstAnswer = std::min(Tmp, Tmp2); // We computed what we know about the sign bits as our first // answer. Now proceed to the generic code that uses // computeKnownBits, and pick whichever answer is better. } break; case Instruction::Select: Tmp = ComputeNumSignBits(U->getOperand(1), DL, Depth + 1, Q); if (Tmp == 1) return 1; // Early out. Tmp2 = ComputeNumSignBits(U->getOperand(2), DL, Depth + 1, Q); return std::min(Tmp, Tmp2); case Instruction::Add: // Add can have at most one carry bit. Thus we know that the output // is, at worst, one more bit than the inputs. Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q); if (Tmp == 1) return 1; // Early out. // Special case decrementing a value (ADD X, -1): if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1))) if (CRHS->isAllOnesValue()) { APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0); computeKnownBits(U->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q); // If the input is known to be 0 or 1, the output is 0/-1, which is all // sign bits set. if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue()) return TyBits; // If we are subtracting one from a positive number, there is no carry // out of the result. if (KnownZero.isNegative()) return Tmp; } Tmp2 = ComputeNumSignBits(U->getOperand(1), DL, Depth + 1, Q); if (Tmp2 == 1) return 1; return std::min(Tmp, Tmp2)-1; case Instruction::Sub: Tmp2 = ComputeNumSignBits(U->getOperand(1), DL, Depth + 1, Q); if (Tmp2 == 1) return 1; // Handle NEG. if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0))) if (CLHS->isNullValue()) { APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0); computeKnownBits(U->getOperand(1), KnownZero, KnownOne, DL, Depth + 1, Q); // If the input is known to be 0 or 1, the output is 0/-1, which is all // sign bits set. if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue()) return TyBits; // If the input is known to be positive (the sign bit is known clear), // the output of the NEG has the same number of sign bits as the input. if (KnownZero.isNegative()) return Tmp2; // Otherwise, we treat this like a SUB. } // Sub can have at most one carry bit. Thus we know that the output // is, at worst, one more bit than the inputs. Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q); if (Tmp == 1) return 1; // Early out. return std::min(Tmp, Tmp2)-1; case Instruction::PHI: { PHINode *PN = cast<PHINode>(U); unsigned NumIncomingValues = PN->getNumIncomingValues(); // Don't analyze large in-degree PHIs. if (NumIncomingValues > 4) break; // Unreachable blocks may have zero-operand PHI nodes. if (NumIncomingValues == 0) break; // Take the minimum of all incoming values. This can't infinitely loop // because of our depth threshold. Tmp = ComputeNumSignBits(PN->getIncomingValue(0), DL, Depth + 1, Q); for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) { if (Tmp == 1) return Tmp; Tmp = std::min( Tmp, ComputeNumSignBits(PN->getIncomingValue(i), DL, Depth + 1, Q)); } return Tmp; } case Instruction::Trunc: // FIXME: it's tricky to do anything useful for this, but it is an important // case for targets like X86. break; } // Finally, if we can prove that the top bits of the result are 0's or 1's, // use this information. 
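  // Editorial note (worked example, not in the original source): for an i8
  // value whose KnownZero mask is 0xE0, the sign bit and the two bits below
  // it are all known to be zero, so the value has at least three sign bits;
  // the countLeadingZeros() computation below reports exactly that bound,
  // and we keep the larger of it and FirstAnswer.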
APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0); APInt Mask; computeKnownBits(V, KnownZero, KnownOne, DL, Depth, Q); if (KnownZero.isNegative()) { // sign bit is 0 Mask = KnownZero; } else if (KnownOne.isNegative()) { // sign bit is 1; Mask = KnownOne; } else { // Nothing known. return FirstAnswer; } // Okay, we know that the sign bit in Mask is set. Use CLZ to determine // the number of identical bits in the top of the input value. Mask = ~Mask; Mask <<= Mask.getBitWidth()-TyBits; // Return # leading zeros. We use 'min' here in case Val was zero before // shifting. We don't want to return '64' as for an i32 "0". return std::max(FirstAnswer, std::min(TyBits, Mask.countLeadingZeros())); } /// This function computes the integer multiple of Base that equals V. /// If successful, it returns true and returns the multiple in /// Multiple. If unsuccessful, it returns false. It looks /// through SExt instructions only if LookThroughSExt is true. bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple, bool LookThroughSExt, unsigned Depth) { const unsigned MaxDepth = 6; assert(V && "No Value?"); assert(Depth <= MaxDepth && "Limit Search Depth"); assert(V->getType()->isIntegerTy() && "Not integer or pointer type!"); Type *T = V->getType(); ConstantInt *CI = dyn_cast<ConstantInt>(V); if (Base == 0) return false; if (Base == 1) { Multiple = V; return true; } ConstantExpr *CO = dyn_cast<ConstantExpr>(V); Constant *BaseVal = ConstantInt::get(T, Base); if (CO && CO == BaseVal) { // Multiple is 1. Multiple = ConstantInt::get(T, 1); return true; } if (CI && CI->getZExtValue() % Base == 0) { Multiple = ConstantInt::get(T, CI->getZExtValue() / Base); return true; } if (Depth == MaxDepth) return false; // Limit search depth. Operator *I = dyn_cast<Operator>(V); if (!I) return false; switch (I->getOpcode()) { default: break; case Instruction::SExt: if (!LookThroughSExt) return false; // otherwise fall through to ZExt LLVM_FALLTHROUGH; // HLSL Change case Instruction::ZExt: return ComputeMultiple(I->getOperand(0), Base, Multiple, LookThroughSExt, Depth+1); case Instruction::Shl: case Instruction::Mul: { Value *Op0 = I->getOperand(0); Value *Op1 = I->getOperand(1); if (I->getOpcode() == Instruction::Shl) { ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1); if (!Op1CI) return false; // Turn Op0 << Op1 into Op0 * 2^Op1 APInt Op1Int = Op1CI->getValue(); uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1); APInt API(Op1Int.getBitWidth(), 0); API.setBit(BitToSet); Op1 = ConstantInt::get(V->getContext(), API); } Value *Mul0 = nullptr; if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) { if (Constant *Op1C = dyn_cast<Constant>(Op1)) if (Constant *MulC = dyn_cast<Constant>(Mul0)) { if (Op1C->getType()->getPrimitiveSizeInBits() < MulC->getType()->getPrimitiveSizeInBits()) Op1C = ConstantExpr::getZExt(Op1C, MulC->getType()); if (Op1C->getType()->getPrimitiveSizeInBits() > MulC->getType()->getPrimitiveSizeInBits()) MulC = ConstantExpr::getZExt(MulC, Op1C->getType()); // V == Base * (Mul0 * Op1), so return (Mul0 * Op1) Multiple = ConstantExpr::getMul(MulC, Op1C); return true; } if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0)) if (Mul0CI->getValue() == 1) { // V == Base * Op1, so return Op1 Multiple = Op1; return true; } } Value *Mul1 = nullptr; if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) { if (Constant *Op0C = dyn_cast<Constant>(Op0)) if (Constant *MulC = dyn_cast<Constant>(Mul1)) { if (Op0C->getType()->getPrimitiveSizeInBits() < 
MulC->getType()->getPrimitiveSizeInBits()) Op0C = ConstantExpr::getZExt(Op0C, MulC->getType()); if (Op0C->getType()->getPrimitiveSizeInBits() > MulC->getType()->getPrimitiveSizeInBits()) MulC = ConstantExpr::getZExt(MulC, Op0C->getType()); // V == Base * (Mul1 * Op0), so return (Mul1 * Op0) Multiple = ConstantExpr::getMul(MulC, Op0C); return true; } if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1)) if (Mul1CI->getValue() == 1) { // V == Base * Op0, so return Op0 Multiple = Op0; return true; } } } } // We could not determine if V is a multiple of Base. return false; } /// Return true if we can prove that the specified FP value is never equal to /// -0.0. /// /// NOTE: this function will need to be revisited when we support non-default /// rounding modes! /// bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) { if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) return !CFP->getValueAPF().isNegZero(); // FIXME: Magic number! At the least, this should be given a name because it's // used similarly in CannotBeOrderedLessThanZero(). A better fix may be to // expose it as a parameter, so it can be used for testing / experimenting. if (Depth == 6) return false; // Limit search depth. const Operator *I = dyn_cast<Operator>(V); if (!I) return false; // Check if the nsz fast-math flag is set if (const FPMathOperator *FPOp = dyn_cast<FPMathOperator>(I)) // HLSL Change - FPO -> FPOp (macro collision) if (FPOp->hasNoSignedZeros()) // HLSL Change - FPO -> FPOp (macro collision) return true; // (add x, 0.0) is guaranteed to return +0.0, not -0.0. if (I->getOpcode() == Instruction::FAdd) if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1))) if (CFP->isNullValue()) return true; // sitofp and uitofp turn into +0.0 for zero. if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I)) return true; if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) // sqrt(-0.0) = -0.0, no other negative results are possible. if (II->getIntrinsicID() == Intrinsic::sqrt) return CannotBeNegativeZero(II->getArgOperand(0), Depth+1); if (const CallInst *CI = dyn_cast<CallInst>(I)) if (const Function *F = CI->getCalledFunction()) { if (F->isDeclaration()) { // abs(x) != -0.0 if (F->getName() == "abs") return true; // fabs[lf](x) != -0.0 if (F->getName() == "fabs") return true; if (F->getName() == "fabsf") return true; if (F->getName() == "fabsl") return true; if (F->getName() == "sqrt" || F->getName() == "sqrtf" || F->getName() == "sqrtl") return CannotBeNegativeZero(CI->getArgOperand(0), Depth+1); } } return false; } bool llvm::CannotBeOrderedLessThanZero(const Value *V, unsigned Depth) { if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) return !CFP->getValueAPF().isNegative() || CFP->getValueAPF().isZero(); // FIXME: Magic number! At the least, this should be given a name because it's // used similarly in CannotBeNegativeZero(). A better fix may be to // expose it as a parameter, so it can be used for testing / experimenting. if (Depth == 6) return false; // Limit search depth. const Operator *I = dyn_cast<Operator>(V); if (!I) return false; switch (I->getOpcode()) { default: break; case Instruction::FMul: // x*x is always non-negative or a NaN. 
if (I->getOperand(0) == I->getOperand(1)) return true; LLVM_FALLTHROUGH; // HLSL Change case Instruction::FAdd: case Instruction::FDiv: case Instruction::FRem: return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1) && CannotBeOrderedLessThanZero(I->getOperand(1), Depth+1); case Instruction::FPExt: case Instruction::FPTrunc: // Widening/narrowing never change sign. return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1); case Instruction::Call: if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) switch (II->getIntrinsicID()) { default: break; case Intrinsic::exp: case Intrinsic::exp2: case Intrinsic::fabs: case Intrinsic::sqrt: return true; case Intrinsic::powi: if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) { // powi(x,n) is non-negative if n is even. if (CI->getBitWidth() <= 64 && CI->getSExtValue() % 2u == 0) return true; } return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1); case Intrinsic::fma: case Intrinsic::fmuladd: // x*x+y is non-negative if y is non-negative. return I->getOperand(0) == I->getOperand(1) && CannotBeOrderedLessThanZero(I->getOperand(2), Depth+1); } break; } return false; } /// If the specified value can be set by repeating the same byte in memory, /// return the i8 value that it is represented with. This is /// true for all i8 values obviously, but is also true for i32 0, i32 -1, /// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated /// byte store (e.g. i16 0x1234), return null. Value *llvm::isBytewiseValue(Value *V) { // All byte-wide stores are splatable, even of arbitrary variables. if (V->getType()->isIntegerTy(8)) return V; // Handle 'null' ConstantArrayZero etc. if (Constant *C = dyn_cast<Constant>(V)) if (C->isNullValue()) return Constant::getNullValue(Type::getInt8Ty(V->getContext())); // Constant float and double values can be handled as integer values if the // corresponding integer value is "byteable". An important case is 0.0. if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) { if (CFP->getType()->isFloatTy()) V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext())); if (CFP->getType()->isDoubleTy()) V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext())); // Don't handle long double formats, which have strange constraints. } // We can handle constant integers that are multiple of 8 bits. if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) { if (CI->getBitWidth() % 8 == 0) { assert(CI->getBitWidth() > 8 && "8 bits should be handled above!"); if (!CI->getValue().isSplat(8)) return nullptr; return ConstantInt::get(V->getContext(), CI->getValue().trunc(8)); } } // A ConstantDataArray/Vector is splatable if all its members are equal and // also splatable. if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) { Value *Elt = CA->getElementAsConstant(0); Value *Val = isBytewiseValue(Elt); if (!Val) return nullptr; for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I) if (CA->getElementAsConstant(I) != Elt) return nullptr; return Val; } // Conceptually, we could handle things like: // %a = zext i8 %X to i16 // %b = shl i16 %a, 8 // %c = or i16 %a, %b // but until there is an example that actually needs this, it doesn't seem // worth worrying about. return nullptr; } // This is the recursive version of BuildSubAggregate. It takes a few different // arguments. Idxs is the index within the nested struct From that we are // looking at now (which is of type IndexedType). 
IdxSkip is the number of // indices from Idxs that should be left out when inserting into the resulting // struct. To is the result struct built so far, new insertvalue instructions // build on that. static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType, SmallVectorImpl<unsigned> &Idxs, unsigned IdxSkip, Instruction *InsertBefore) { llvm::StructType *STy = dyn_cast<llvm::StructType>(IndexedType); if (STy) { // Save the original To argument so we can modify it Value *OrigTo = To; // General case, the type indexed by Idxs is a struct for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { // Process each struct element recursively Idxs.push_back(i); Value *PrevTo = To; To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip, InsertBefore); Idxs.pop_back(); if (!To) { // Couldn't find any inserted value for this index? Cleanup while (PrevTo != OrigTo) { InsertValueInst* Del = cast<InsertValueInst>(PrevTo); PrevTo = Del->getAggregateOperand(); Del->eraseFromParent(); } // Stop processing elements break; } } // If we successfully found a value for each of our subaggregates if (To) return To; } // Base case, the type indexed by SourceIdxs is not a struct, or not all of // the struct's elements had a value that was inserted directly. In the latter // case, perhaps we can't determine each of the subelements individually, but // we might be able to find the complete struct somewhere. // Find the value that is at that particular spot Value *V = FindInsertedValue(From, Idxs); if (!V) return nullptr; // Insert the value in the new (sub) aggregrate return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip), "tmp", InsertBefore); } // This helper takes a nested struct and extracts a part of it (which is again a // struct) into a new value. For example, given the struct: // { a, { b, { c, d }, e } } // and the indices "1, 1" this returns // { c, d }. // // It does this by inserting an insertvalue for each element in the resulting // struct, as opposed to just inserting a single struct. This will only work if // each of the elements of the substruct are known (ie, inserted into From by an // insertvalue instruction somewhere). // // All inserted insertvalue instructions are inserted before InsertBefore static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range, Instruction *InsertBefore) { assert(InsertBefore && "Must have someplace to insert!"); Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(), idx_range); Value *To = UndefValue::get(IndexedType); SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end()); unsigned IdxSkip = Idxs.size(); return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore); } /// Given an aggregrate and an sequence of indices, see if /// the scalar value indexed is already around as a register, for example if it /// were inserted directly into the aggregrate. /// /// If InsertBefore is not null, this function will duplicate (modified) /// insertvalues when a part of a nested struct is extracted. Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range, Instruction *InsertBefore) { // Nothing to index? Just return V then (this is useful at the end of our // recursion). if (idx_range.empty()) return V; // We have indices, so V should have an indexable type. 
assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) && "Not looking at a struct or array?"); assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) && "Invalid indices for type?"); if (Constant *C = dyn_cast<Constant>(V)) { C = C->getAggregateElement(idx_range[0]); if (!C) return nullptr; return FindInsertedValue(C, idx_range.slice(1), InsertBefore); } if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) { // Loop the indices for the insertvalue instruction in parallel with the // requested indices const unsigned *req_idx = idx_range.begin(); for (const unsigned *i = I->idx_begin(), *e = I->idx_end(); i != e; ++i, ++req_idx) { if (req_idx == idx_range.end()) { // We can't handle this without inserting insertvalues if (!InsertBefore) return nullptr; // The requested index identifies a part of a nested aggregate. Handle // this specially. For example, // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1 // %C = extractvalue {i32, { i32, i32 } } %B, 1 // This can be changed into // %A = insertvalue {i32, i32 } undef, i32 10, 0 // %C = insertvalue {i32, i32 } %A, i32 11, 1 // which allows the unused 0,0 element from the nested struct to be // removed. return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx), InsertBefore); } // This insert value inserts something else than what we are looking for. // See if the (aggregrate) value inserted into has the value we are // looking for, then. if (*req_idx != *i) return FindInsertedValue(I->getAggregateOperand(), idx_range, InsertBefore); } // If we end up here, the indices of the insertvalue match with those // requested (though possibly only partially). Now we recursively look at // the inserted value, passing any remaining indices. return FindInsertedValue(I->getInsertedValueOperand(), makeArrayRef(req_idx, idx_range.end()), InsertBefore); } if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) { // If we're extracting a value from an aggregrate that was extracted from // something else, we can extract from that something else directly instead. // However, we will need to chain I's indices with the requested indices. // Calculate the number of indices required unsigned size = I->getNumIndices() + idx_range.size(); // Allocate some space to put the new indices in SmallVector<unsigned, 5> Idxs; Idxs.reserve(size); // Add indices from the extract value instruction Idxs.append(I->idx_begin(), I->idx_end()); // Add requested indices Idxs.append(idx_range.begin(), idx_range.end()); assert(Idxs.size() == size && "Number of indices added not correct?"); return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore); } // Otherwise, we don't know (such as, extracting from a function return value // or load instruction) return nullptr; } /// Analyze the specified pointer to see if it can be expressed as a base /// pointer plus a constant offset. Return the base and offset to the caller. 
Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL) { unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType()); APInt ByteOffset(BitWidth, 0); while (1) { if (Ptr->getType()->isVectorTy()) break; if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) { APInt GEPOffset(BitWidth, 0); if (!GEP->accumulateConstantOffset(DL, GEPOffset)) break; ByteOffset += GEPOffset; Ptr = GEP->getPointerOperand(); } else if (Operator::getOpcode(Ptr) == Instruction::BitCast || Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) { Ptr = cast<Operator>(Ptr)->getOperand(0); } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) { if (GA->mayBeOverridden()) break; Ptr = GA->getAliasee(); } else { break; } } Offset = ByteOffset.getSExtValue(); return Ptr; } /// This function computes the length of a null-terminated C string pointed to /// by V. If successful, it returns true and returns the string in Str. /// If unsuccessful, it returns false. bool llvm::getConstantStringInfo(const Value *V, StringRef &Str, uint64_t Offset, bool TrimAtNul) { assert(V); // Look through bitcast instructions and geps. V = V->stripPointerCasts(); // If the value is a GEP instruction or constant expression, treat it as an // offset. if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { // Make sure the GEP has exactly three arguments. if (GEP->getNumOperands() != 3) return false; // Make sure the index-ee is a pointer to array of i8. PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType()); ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType()); if (!AT || !AT->getElementType()->isIntegerTy(8)) return false; // Check to make sure that the first operand of the GEP is an integer and // has value 0 so that we are sure we're indexing into the initializer. const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1)); if (!FirstIdx || !FirstIdx->isZero()) return false; // If the second index isn't a ConstantInt, then this is a variable index // into the array. If this occurs, we can't say anything meaningful about // the string. uint64_t StartIdx = 0; if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2))) StartIdx = CI->getZExtValue(); else return false; return getConstantStringInfo(GEP->getOperand(0), Str, StartIdx + Offset, TrimAtNul); } // The GEP instruction, constant or instruction, must reference a global // variable that is a constant and is initialized. The referenced constant // initializer is the array that we'll use for optimization. const GlobalVariable *GV = dyn_cast<GlobalVariable>(V); if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer()) return false; // Handle the all-zeros case if (GV->getInitializer()->isNullValue()) { // This is a degenerate case. The initializer is constant zero so the // length of the string must be zero. Str = ""; return true; } // Must be a Constant Array const ConstantDataArray *Array = dyn_cast<ConstantDataArray>(GV->getInitializer()); if (!Array || !Array->isString()) return false; // Get the number of elements in the array uint64_t NumElts = Array->getType()->getArrayNumElements(); // Start out with the entire array in the StringRef. Str = Array->getAsString(); if (Offset > NumElts) return false; // Skip over 'offset' bytes. Str = Str.substr(Offset); if (TrimAtNul) { // Trim off the \0 and anything after it. If the array is not nul // terminated, we just return the whole end of string. The client may know // some other way that the string is length-bound. 
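    // Editorial note (worked example, not in the original source): for an
    // initializer of c"hello\00world\00" with Offset == 0, the untrimmed
    // StringRef spans all twelve bytes; with TrimAtNul the substr() below
    // cuts it down to just "hello".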
Str = Str.substr(0, Str.find('\0')); } return true; } // These next two are very similar to the above, but also look through PHI // nodes. // TODO: See if we can integrate these two together. /// If we can compute the length of the string pointed to by /// the specified pointer, return 'len+1'. If we can't, return 0. static uint64_t GetStringLengthH(Value *V, SmallPtrSetImpl<PHINode*> &PHIs) { // Look through noop bitcast instructions. V = V->stripPointerCasts(); // If this is a PHI node, there are two cases: either we have already seen it // or we haven't. if (PHINode *PN = dyn_cast<PHINode>(V)) { if (!PHIs.insert(PN).second) return ~0ULL; // already in the set. // If it was new, see if all the input strings are the same length. uint64_t LenSoFar = ~0ULL; for (Value *IncValue : PN->incoming_values()) { uint64_t Len = GetStringLengthH(IncValue, PHIs); if (Len == 0) return 0; // Unknown length -> unknown. if (Len == ~0ULL) continue; if (Len != LenSoFar && LenSoFar != ~0ULL) return 0; // Disagree -> unknown. LenSoFar = Len; } // Success, all agree. return LenSoFar; } // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y) if (SelectInst *SI = dyn_cast<SelectInst>(V)) { uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs); if (Len1 == 0) return 0; uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs); if (Len2 == 0) return 0; if (Len1 == ~0ULL) return Len2; if (Len2 == ~0ULL) return Len1; if (Len1 != Len2) return 0; return Len1; } // Otherwise, see if we can read the string. StringRef StrData; if (!getConstantStringInfo(V, StrData)) return 0; return StrData.size()+1; } /// If we can compute the length of the string pointed to by /// the specified pointer, return 'len+1'. If we can't, return 0. uint64_t llvm::GetStringLength(Value *V) { if (!V->getType()->isPointerTy()) return 0; SmallPtrSet<PHINode*, 32> PHIs; uint64_t Len = GetStringLengthH(V, PHIs); // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return // an empty string as a length. return Len == ~0ULL ? 1 : Len; } /// \brief \p PN defines a loop-variant pointer to an object. Check if the /// previous iteration of the loop was referring to the same object as \p PN. static bool isSameUnderlyingObjectInLoop(PHINode *PN, LoopInfo *LI) { // Find the loop-defined value. Loop *L = LI->getLoopFor(PN->getParent()); if (PN->getNumIncomingValues() != 2) return true; // Find the value from previous iteration. auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0)); if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L) PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1)); if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L) return true; // If a new pointer is loaded in the loop, the pointer references a different // object in every iteration. E.g.: // for (i) // int *p = a[i]; // ... 
if (auto *Load = dyn_cast<LoadInst>(PrevValue)) if (!L->isLoopInvariant(Load->getPointerOperand())) return false; return true; } Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL, unsigned MaxLookup) { if (!V->getType()->isPointerTy()) return V; for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) { if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { V = GEP->getPointerOperand(); } else if (Operator::getOpcode(V) == Instruction::BitCast || Operator::getOpcode(V) == Instruction::AddrSpaceCast) { V = cast<Operator>(V)->getOperand(0); } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { if (GA->mayBeOverridden()) return V; V = GA->getAliasee(); } else { // See if InstructionSimplify knows any relevant tricks. if (Instruction *I = dyn_cast<Instruction>(V)) // TODO: Acquire a DominatorTree and AssumptionCache and use them. if (Value *Simplified = SimplifyInstruction(I, DL, nullptr)) { V = Simplified; continue; } return V; } assert(V->getType()->isPointerTy() && "Unexpected operand type!"); } return V; } void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects, const DataLayout &DL, LoopInfo *LI, unsigned MaxLookup) { SmallPtrSet<Value *, 4> Visited; SmallVector<Value *, 4> Worklist; Worklist.push_back(V); do { Value *P = Worklist.pop_back_val(); P = GetUnderlyingObject(P, DL, MaxLookup); if (!Visited.insert(P).second) continue; if (SelectInst *SI = dyn_cast<SelectInst>(P)) { Worklist.push_back(SI->getTrueValue()); Worklist.push_back(SI->getFalseValue()); continue; } if (PHINode *PN = dyn_cast<PHINode>(P)) { // If this PHI changes the underlying object in every iteration of the // loop, don't look through it. Consider: // int **A; // for (i) { // Prev = Curr; // Prev = PHI (Prev_0, Curr) // Curr = A[i]; // *Prev, *Curr; // // Prev is tracking Curr one iteration behind so they refer to different // underlying objects. if (!LI || !LI->isLoopHeader(PN->getParent()) || isSameUnderlyingObjectInLoop(PN, LI)) for (Value *IncValue : PN->incoming_values()) Worklist.push_back(IncValue); continue; } Objects.push_back(P); } while (!Worklist.empty()); } /// Return true if the only users of this pointer are lifetime markers. 
bool llvm::onlyUsedByLifetimeMarkers(const Value *V) { for (const User *U : V->users()) { const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U); if (!II) return false; if (II->getIntrinsicID() != Intrinsic::lifetime_start && II->getIntrinsicID() != Intrinsic::lifetime_end) return false; } return true; } static bool isDereferenceableFromAttribute(const Value *BV, APInt Offset, Type *Ty, const DataLayout &DL, const Instruction *CtxI, const DominatorTree *DT, const TargetLibraryInfo *TLI) { assert(Offset.isNonNegative() && "offset can't be negative"); assert(Ty->isSized() && "must be sized"); APInt DerefBytes(Offset.getBitWidth(), 0); bool CheckForNonNull = false; if (const Argument *A = dyn_cast<Argument>(BV)) { DerefBytes = A->getDereferenceableBytes(); if (!DerefBytes.getBoolValue()) { DerefBytes = A->getDereferenceableOrNullBytes(); CheckForNonNull = true; } } else if (auto CS = ImmutableCallSite(BV)) { DerefBytes = CS.getDereferenceableBytes(0); if (!DerefBytes.getBoolValue()) { DerefBytes = CS.getDereferenceableOrNullBytes(0); CheckForNonNull = true; } } else if (const LoadInst *LI = dyn_cast<LoadInst>(BV)) { if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) { ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0)); DerefBytes = CI->getLimitedValue(); } if (!DerefBytes.getBoolValue()) { if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable_or_null)) { ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0)); DerefBytes = CI->getLimitedValue(); } CheckForNonNull = true; } } if (DerefBytes.getBoolValue()) if (DerefBytes.uge(Offset + DL.getTypeStoreSize(Ty))) if (!CheckForNonNull || isKnownNonNullAt(BV, CtxI, DT, TLI)) return true; return false; } static bool isDereferenceableFromAttribute(const Value *V, const DataLayout &DL, const Instruction *CtxI, const DominatorTree *DT, const TargetLibraryInfo *TLI) { Type *VTy = V->getType(); Type *Ty = VTy->getPointerElementType(); if (!Ty->isSized()) return false; APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0); return isDereferenceableFromAttribute(V, Offset, Ty, DL, CtxI, DT, TLI); } /// Return true if Value is always a dereferenceable pointer. /// /// Test if V is always a pointer to allocated and suitably aligned memory for /// a simple load or store. static bool isDereferenceablePointer(const Value *V, const DataLayout &DL, const Instruction *CtxI, const DominatorTree *DT, const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited) { // Note that it is not safe to speculate into a malloc'd region because // malloc may return null. // These are obviously ok. if (isa<AllocaInst>(V)) return true; // It's not always safe to follow a bitcast, for example: // bitcast i8* (alloca i8) to i32* // would result in a 4-byte load from a 1-byte alloca. However, // if we're casting from a pointer from a type of larger size // to a type of smaller size (or the same size), and the alignment // is at least as large as for the resulting pointer type, then // we can look through the bitcast. if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) { Type *STy = BC->getSrcTy()->getPointerElementType(), *DTy = BC->getDestTy()->getPointerElementType(); if (STy->isSized() && DTy->isSized() && (DL.getTypeStoreSize(STy) >= DL.getTypeStoreSize(DTy)) && (DL.getABITypeAlignment(STy) >= DL.getABITypeAlignment(DTy))) return isDereferenceablePointer(BC->getOperand(0), DL, CtxI, DT, TLI, Visited); } // Global variables which can't collapse to null are ok. 
if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) return !GV->hasExternalWeakLinkage(); // byval arguments are okay. if (const Argument *A = dyn_cast<Argument>(V)) if (A->hasByValAttr()) return true; if (isDereferenceableFromAttribute(V, DL, CtxI, DT, TLI)) return true; // For GEPs, determine if the indexing lands within the allocated object. if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { Type *VTy = GEP->getType(); Type *Ty = VTy->getPointerElementType(); const Value *Base = GEP->getPointerOperand(); // Conservatively require that the base pointer be fully dereferenceable. if (!Visited.insert(Base).second) return false; if (!isDereferenceablePointer(Base, DL, CtxI, DT, TLI, Visited)) return false; APInt Offset(DL.getPointerTypeSizeInBits(VTy), 0); if (!GEP->accumulateConstantOffset(DL, Offset)) return false; // Check if the load is within the bounds of the underlying object. uint64_t LoadSize = DL.getTypeStoreSize(Ty); Type *BaseType = Base->getType()->getPointerElementType(); return (Offset + LoadSize).ule(DL.getTypeAllocSize(BaseType)); } // For gc.relocate, look through relocations if (const IntrinsicInst *I = dyn_cast<IntrinsicInst>(V)) if (I->getIntrinsicID() == Intrinsic::experimental_gc_relocate) { GCRelocateOperands RelocateInst(I); return isDereferenceablePointer(RelocateInst.getDerivedPtr(), DL, CtxI, DT, TLI, Visited); } if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V)) return isDereferenceablePointer(ASC->getOperand(0), DL, CtxI, DT, TLI, Visited); // If we don't know, assume the worst. return false; } bool llvm::isDereferenceablePointer(const Value *V, const DataLayout &DL, const Instruction *CtxI, const DominatorTree *DT, const TargetLibraryInfo *TLI) { // When dereferenceability information is provided by a dereferenceable // attribute, we know exactly how many bytes are dereferenceable. If we can // determine the exact offset to the attributed variable, we can use that // information here. Type *VTy = V->getType(); Type *Ty = VTy->getPointerElementType(); if (Ty->isSized()) { APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0); const Value *BV = V->stripAndAccumulateInBoundsConstantOffsets(DL, Offset); if (Offset.isNonNegative()) if (isDereferenceableFromAttribute(BV, Offset, Ty, DL, CtxI, DT, TLI)) return true; } SmallPtrSet<const Value *, 32> Visited; return ::isDereferenceablePointer(V, DL, CtxI, DT, TLI, Visited); } bool llvm::isSafeToSpeculativelyExecute(const Value *V, const Instruction *CtxI, const DominatorTree *DT, const TargetLibraryInfo *TLI) { const Operator *Inst = dyn_cast<Operator>(V); if (!Inst) return false; for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i) if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i))) if (C->canTrap()) return false; switch (Inst->getOpcode()) { default: return true; case Instruction::UDiv: case Instruction::URem: { // x / y is undefined if y == 0. const APInt *V; if (match(Inst->getOperand(1), m_APInt(V))) return *V != 0; return false; } case Instruction::SDiv: case Instruction::SRem: { // x / y is undefined if y == 0 or x == INT_MIN and y == -1 const APInt *Numerator, *Denominator; if (!match(Inst->getOperand(1), m_APInt(Denominator))) return false; // We cannot hoist this division if the denominator is 0. if (*Denominator == 0) return false; // It's safe to hoist if the denominator is not 0 or -1. if (*Denominator != -1) return true; // At this point we know that the denominator is -1. It is safe to hoist as // long we know that the numerator is not INT_MIN. 
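    // Editorial note (worked example, not in the original source): for i8,
    // INT8_MIN / -1 would be +128, which is not representable in i8, so
    // "sdiv i8 -128, -1" has undefined behavior; that is the one remaining
    // case the numerator check below must rule out.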
if (match(Inst->getOperand(0), m_APInt(Numerator))) return !Numerator->isMinSignedValue(); // The numerator *might* be MinSignedValue. return false; } case Instruction::Load: { const LoadInst *LI = cast<LoadInst>(Inst); if (!LI->isUnordered() || // Speculative load may create a race that did not exist in the source. LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread)) return false; const DataLayout &DL = LI->getModule()->getDataLayout(); return isDereferenceablePointer(LI->getPointerOperand(), DL, CtxI, DT, TLI); } case Instruction::Call: { if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) { switch (II->getIntrinsicID()) { // These synthetic intrinsics have no side-effects and just mark // information about their operands. // FIXME: There are other no-op synthetic instructions that potentially // should be considered at least *safe* to speculate... case Intrinsic::dbg_declare: case Intrinsic::dbg_value: return true; case Intrinsic::bswap: case Intrinsic::ctlz: case Intrinsic::ctpop: case Intrinsic::cttz: case Intrinsic::objectsize: case Intrinsic::sadd_with_overflow: case Intrinsic::smul_with_overflow: case Intrinsic::ssub_with_overflow: case Intrinsic::uadd_with_overflow: case Intrinsic::umul_with_overflow: case Intrinsic::usub_with_overflow: return true; // Sqrt should be OK, since the llvm sqrt intrinsic isn't defined to set // errno like libm sqrt would. case Intrinsic::sqrt: case Intrinsic::fma: case Intrinsic::fmuladd: case Intrinsic::fabs: case Intrinsic::minnum: case Intrinsic::maxnum: return true; // TODO: some fp intrinsics are marked as having the same error handling // as libm. They're safe to speculate when they won't error. // TODO: are convert_{from,to}_fp16 safe? // TODO: can we list target-specific intrinsics here? default: break; } } return false; // The called function could have undefined behavior or // side-effects, even if marked readnone nounwind. } case Instruction::VAArg: case Instruction::Alloca: case Instruction::Invoke: case Instruction::PHI: case Instruction::Store: case Instruction::Ret: case Instruction::Br: case Instruction::IndirectBr: case Instruction::Switch: case Instruction::Unreachable: case Instruction::Fence: case Instruction::LandingPad: case Instruction::AtomicRMW: case Instruction::AtomicCmpXchg: case Instruction::Resume: return false; // Misc instructions which have effects } } /// Return true if we know that the specified value is never null. bool llvm::isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI) { // Alloca never returns null, malloc might. if (isa<AllocaInst>(V)) return true; // A byval, inalloca, or nonnull argument is never null. if (const Argument *A = dyn_cast<Argument>(V)) return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr(); // Global values are not null unless extern weak. if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) return !GV->hasExternalWeakLinkage(); // A Load tagged w/nonnull metadata is never null. if (const LoadInst *LI = dyn_cast<LoadInst>(V)) return LI->getMetadata(LLVMContext::MD_nonnull); if (auto CS = ImmutableCallSite(V)) if (CS.isReturnNonNull()) return true; // operator new never returns null. 
if (isOperatorNewLikeFn(V, TLI, /*LookThroughBitCast=*/true)) return true; return false; } static bool isKnownNonNullFromDominatingCondition(const Value *V, const Instruction *CtxI, const DominatorTree *DT) { unsigned NumUsesExplored = 0; for (auto U : V->users()) { // Avoid massive lists if (NumUsesExplored >= DomConditionsMaxUses) break; NumUsesExplored++; // Consider only compare instructions uniquely controlling a branch const ICmpInst *Cmp = dyn_cast<ICmpInst>(U); if (!Cmp) continue; if (DomConditionsSingleCmpUse && !Cmp->hasOneUse()) continue; for (auto *CmpU : Cmp->users()) { const BranchInst *BI = dyn_cast<BranchInst>(CmpU); if (!BI) continue; assert(BI->isConditional() && "uses a comparison!"); BasicBlock *NonNullSuccessor = nullptr; CmpInst::Predicate Pred; if (match(const_cast<ICmpInst*>(Cmp), m_c_ICmp(Pred, m_Specific(V), m_Zero()))) { if (Pred == ICmpInst::ICMP_EQ) NonNullSuccessor = BI->getSuccessor(1); else if (Pred == ICmpInst::ICMP_NE) NonNullSuccessor = BI->getSuccessor(0); } if (NonNullSuccessor) { BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor); if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent())) return true; } } } return false; } bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI, const DominatorTree *DT, const TargetLibraryInfo *TLI) { if (isKnownNonNull(V, TLI)) return true; return CtxI ? ::isKnownNonNullFromDominatingCondition(V, CtxI, DT) : false; } OverflowResult llvm::computeOverflowForUnsignedMul(Value *LHS, Value *RHS, const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT) { // Multiplying n * m significant bits yields a result of n + m significant // bits. If the total number of significant bits does not exceed the // result bit width (minus 1), there is no overflow. // This means if we have enough leading zero bits in the operands // we can guarantee that the result does not overflow. // Ref: "Hacker's Delight" by Henry Warren unsigned BitWidth = LHS->getType()->getScalarSizeInBits(); APInt LHSKnownZero(BitWidth, 0); APInt LHSKnownOne(BitWidth, 0); APInt RHSKnownZero(BitWidth, 0); APInt RHSKnownOne(BitWidth, 0); computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, /*Depth=*/0, AC, CxtI, DT); computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, /*Depth=*/0, AC, CxtI, DT); // Note that underestimating the number of zero bits gives a more // conservative answer. unsigned ZeroBits = LHSKnownZero.countLeadingOnes() + RHSKnownZero.countLeadingOnes(); // First handle the easy case: if we have enough zero bits there's // definitely no overflow. if (ZeroBits >= BitWidth) return OverflowResult::NeverOverflows; // Get the largest possible values for each operand. APInt LHSMax = ~LHSKnownZero; APInt RHSMax = ~RHSKnownZero; // We know the multiply operation doesn't overflow if the maximum values for // each operand will not overflow after we multiply them together. bool MaxOverflow; LHSMax.umul_ov(RHSMax, MaxOverflow); if (!MaxOverflow) return OverflowResult::NeverOverflows; // We know it always overflows if multiplying the smallest possible values for // the operands also results in overflow. 
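  // Editorial note (worked example, not in the original source): the
  // known-one bits give an unsigned lower bound on each operand. E.g. for i8
  // operands with LHSKnownOne == 0x80 and RHSKnownOne == 0x02, every possible
  // product is at least 0x100, which cannot fit in 8 bits, so the check below
  // reports AlwaysOverflows.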
bool MinOverflow; LHSKnownOne.umul_ov(RHSKnownOne, MinOverflow); if (MinOverflow) return OverflowResult::AlwaysOverflows; return OverflowResult::MayOverflow; } OverflowResult llvm::computeOverflowForUnsignedAdd(Value *LHS, Value *RHS, const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT) { bool LHSKnownNonNegative, LHSKnownNegative; ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0, AC, CxtI, DT); if (LHSKnownNonNegative || LHSKnownNegative) { bool RHSKnownNonNegative, RHSKnownNegative; ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0, AC, CxtI, DT); if (LHSKnownNegative && RHSKnownNegative) { // The sign bit is set in both cases: this MUST overflow. // Create a simple add instruction, and insert it into the struct. return OverflowResult::AlwaysOverflows; } if (LHSKnownNonNegative && RHSKnownNonNegative) { // The sign bit is clear in both cases: this CANNOT overflow. // Create a simple add instruction, and insert it into the struct. return OverflowResult::NeverOverflows; } } return OverflowResult::MayOverflow; } static SelectPatternFlavor matchSelectPattern(ICmpInst::Predicate Pred, Value *CmpLHS, Value *CmpRHS, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS) { LHS = CmpLHS; RHS = CmpRHS; // (icmp X, Y) ? X : Y if (TrueVal == CmpLHS && FalseVal == CmpRHS) { switch (Pred) { default: return SPF_UNKNOWN; // Equality. case ICmpInst::ICMP_UGT: case ICmpInst::ICMP_UGE: return SPF_UMAX; case ICmpInst::ICMP_SGT: case ICmpInst::ICMP_SGE: return SPF_SMAX; case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: return SPF_UMIN; case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: return SPF_SMIN; } } // (icmp X, Y) ? Y : X if (TrueVal == CmpRHS && FalseVal == CmpLHS) { switch (Pred) { default: return SPF_UNKNOWN; // Equality. case ICmpInst::ICMP_UGT: case ICmpInst::ICMP_UGE: return SPF_UMIN; case ICmpInst::ICMP_SGT: case ICmpInst::ICMP_SGE: return SPF_SMIN; case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: return SPF_UMAX; case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: return SPF_SMAX; } } if (ConstantInt *C1 = dyn_cast<ConstantInt>(CmpRHS)) { if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) || (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) { // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X if (Pred == ICmpInst::ICMP_SGT && (C1->isZero() || C1->isMinusOne())) { return (CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS; } // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X if (Pred == ICmpInst::ICMP_SLT && (C1->isZero() || C1->isOne())) { return (CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS; } } // Y >s C ? ~Y : ~C == ~Y <s ~C ? ~Y : ~C = SMIN(~Y, ~C) if (const auto *C2 = dyn_cast<ConstantInt>(FalseVal)) { if (C1->getType() == C2->getType() && ~C1->getValue() == C2->getValue() && (match(TrueVal, m_Not(m_Specific(CmpLHS))) || match(CmpLHS, m_Not(m_Specific(TrueVal))))) { LHS = TrueVal; RHS = FalseVal; return SPF_SMIN; } } } // TODO: (X > 4) ? X : 5 --> (X >= 5) ? 
X : 5 --> MAX(X, 5) return SPF_UNKNOWN; } static Constant *lookThroughCast(ICmpInst *CmpI, Value *V1, Value *V2, Instruction::CastOps *CastOp) { CastInst *CI = dyn_cast<CastInst>(V1); Constant *C = dyn_cast<Constant>(V2); if (!CI || !C) return nullptr; *CastOp = CI->getOpcode(); if (isa<SExtInst>(CI) && CmpI->isSigned()) { Constant *T = ConstantExpr::getTrunc(C, CI->getSrcTy()); // This is only valid if the truncated value can be sign-extended // back to the original value. if (ConstantExpr::getSExt(T, C->getType()) == C) return T; return nullptr; } if (isa<ZExtInst>(CI) && CmpI->isUnsigned()) return ConstantExpr::getTrunc(C, CI->getSrcTy()); if (isa<TruncInst>(CI)) return ConstantExpr::getIntegerCast(C, CI->getSrcTy(), CmpI->isSigned()); return nullptr; } SelectPatternFlavor llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp) { SelectInst *SI = dyn_cast<SelectInst>(V); if (!SI) return SPF_UNKNOWN; ICmpInst *CmpI = dyn_cast<ICmpInst>(SI->getCondition()); if (!CmpI) return SPF_UNKNOWN; ICmpInst::Predicate Pred = CmpI->getPredicate(); Value *CmpLHS = CmpI->getOperand(0); Value *CmpRHS = CmpI->getOperand(1); Value *TrueVal = SI->getTrueValue(); Value *FalseVal = SI->getFalseValue(); // Bail out early. if (CmpI->isEquality()) return SPF_UNKNOWN; // Deal with type mismatches. if (CastOp && CmpLHS->getType() != TrueVal->getType()) { if (Constant *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) return ::matchSelectPattern(Pred, CmpLHS, CmpRHS, cast<CastInst>(TrueVal)->getOperand(0), C, LHS, RHS); if (Constant *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) return ::matchSelectPattern(Pred, CmpLHS, CmpRHS, C, cast<CastInst>(FalseVal)->getOperand(0), LHS, RHS); } return ::matchSelectPattern(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS); }
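// ---------------------------------------------------------------------------
// Editorial sketch (not part of the original LLVM sources): a minimal,
// hypothetical example of how a client might call matchSelectPattern() to
// recognize a signed-max select. The helper name isSignedMaxSelect is an
// illustration only and is kept inside '#if 0' so it is never compiled and
// the behavior of this file is unchanged.
#if 0
static bool isSignedMaxSelect(Value *V) {
  Value *LHS = nullptr, *RHS = nullptr;
  // SPF_SMAX means V evaluates to smax(LHS, RHS); the optional CastOps
  // out-parameter (omitted here) reports any sext/zext looked through.
  return matchSelectPattern(V, LHS, RHS) == SPF_SMAX;
}
#endif
// ---------------------------------------------------------------------------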
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/LLVMBuild.txt
;===- ./lib/Analysis/LLVMBuild.txt -----------------------------*- Conf -*--===; ; ; The LLVM Compiler Infrastructure ; ; This file is distributed under the University of Illinois Open Source ; License. See LICENSE.TXT for details. ; ;===------------------------------------------------------------------------===; ; ; This is an LLVMBuild description file for the components in this subdirectory. ; ; For more information on the LLVMBuild system, please see: ; ; http://llvm.org/docs/LLVMBuild.html ; ;===------------------------------------------------------------------------===; [common] subdirectories = IPA [component_0] type = Library name = Analysis parent = Libraries required_libraries = Core DXIL Support
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/BlockFrequencyInfoImpl.cpp
//===- BlockFrequencyImplInfo.cpp - Block Frequency Info Implementation ---===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Loops should be simplified before this analysis. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/BlockFrequencyInfoImpl.h" #include "llvm/ADT/SCCIterator.h" #include "llvm/Support/raw_ostream.h" #include <numeric> using namespace llvm; using namespace llvm::bfi_detail; #define DEBUG_TYPE "block-freq" ScaledNumber<uint64_t> BlockMass::toScaled() const { if (isFull()) return ScaledNumber<uint64_t>(1, 0); return ScaledNumber<uint64_t>(getMass() + 1, -64); } void BlockMass::dump() const { print(dbgs()); } static char getHexDigit(int N) { assert(N < 16); if (N < 10) return '0' + N; return 'a' + N - 10; } raw_ostream &BlockMass::print(raw_ostream &OS) const { for (int Digits = 0; Digits < 16; ++Digits) OS << getHexDigit(Mass >> (60 - Digits * 4) & 0xf); return OS; } namespace { typedef BlockFrequencyInfoImplBase::BlockNode BlockNode; typedef BlockFrequencyInfoImplBase::Distribution Distribution; typedef BlockFrequencyInfoImplBase::Distribution::WeightList WeightList; typedef BlockFrequencyInfoImplBase::Scaled64 Scaled64; typedef BlockFrequencyInfoImplBase::LoopData LoopData; typedef BlockFrequencyInfoImplBase::Weight Weight; typedef BlockFrequencyInfoImplBase::FrequencyData FrequencyData; /// \brief Dithering mass distributer. /// /// This class splits up a single mass into portions by weight, dithering to /// spread out error. No mass is lost. The dithering precision depends on the /// precision of the product of \a BlockMass and \a BranchProbability. /// /// The distribution algorithm follows. /// /// 1. Initialize by saving the sum of the weights in \a RemWeight and the /// mass to distribute in \a RemMass. /// /// 2. For each portion: /// /// 1. Construct a branch probability, P, as the portion's weight divided /// by the current value of \a RemWeight. /// 2. Calculate the portion's mass as \a RemMass times P. /// 3. Update \a RemWeight and \a RemMass at each portion by subtracting /// the current portion's weight and mass. struct DitheringDistributer { uint32_t RemWeight; BlockMass RemMass; DitheringDistributer(Distribution &Dist, const BlockMass &Mass); BlockMass takeMass(uint32_t Weight); }; } // end namespace DitheringDistributer::DitheringDistributer(Distribution &Dist, const BlockMass &Mass) { Dist.normalize(); RemWeight = Dist.Total; RemMass = Mass; } BlockMass DitheringDistributer::takeMass(uint32_t Weight) { assert(Weight && "invalid weight"); assert(Weight <= RemWeight); BlockMass Mass = RemMass * BranchProbability(Weight, RemWeight); // Decrement totals (dither). RemWeight -= Weight; RemMass -= Mass; return Mass; } void Distribution::add(const BlockNode &Node, uint64_t Amount, Weight::DistType Type) { assert(Amount && "invalid weight of 0"); uint64_t NewTotal = Total + Amount; // Check for overflow. It should be impossible to overflow twice. bool IsOverflow = NewTotal < Total; assert(!(DidOverflow && IsOverflow) && "unexpected repeated overflow"); DidOverflow |= IsOverflow; // Update the total. Total = NewTotal; // Save the weight. 
Weights.push_back(Weight(Type, Node, Amount)); } static void combineWeight(Weight &W, const Weight &OtherW) { assert(OtherW.TargetNode.isValid()); if (!W.Amount) { W = OtherW; return; } assert(W.Type == OtherW.Type); assert(W.TargetNode == OtherW.TargetNode); assert(OtherW.Amount && "Expected non-zero weight"); if (W.Amount > W.Amount + OtherW.Amount) // Saturate on overflow. W.Amount = UINT64_MAX; else W.Amount += OtherW.Amount; } static void combineWeightsBySorting(WeightList &Weights) { // Sort so edges to the same node are adjacent. std::sort(Weights.begin(), Weights.end(), [](const Weight &L, const Weight &R) { return L.TargetNode < R.TargetNode; }); // Combine adjacent edges. WeightList::iterator O = Weights.begin(); for (WeightList::const_iterator I = O, L = O, E = Weights.end(); I != E; ++O, (I = L)) { *O = *I; // Find the adjacent weights to the same node. for (++L; L != E && I->TargetNode == L->TargetNode; ++L) combineWeight(*O, *L); } // Erase extra entries. Weights.erase(O, Weights.end()); return; } static void combineWeightsByHashing(WeightList &Weights) { // Collect weights into a DenseMap. typedef DenseMap<BlockNode::IndexType, Weight> HashTable; HashTable Combined(NextPowerOf2(2 * Weights.size())); for (const Weight &W : Weights) combineWeight(Combined[W.TargetNode.Index], W); // Check whether anything changed. if (Weights.size() == Combined.size()) return; // Fill in the new weights. Weights.clear(); Weights.reserve(Combined.size()); for (const auto &I : Combined) Weights.push_back(I.second); } static void combineWeights(WeightList &Weights) { // Use a hash table for many successors to keep this linear. if (Weights.size() > 128) { combineWeightsByHashing(Weights); return; } combineWeightsBySorting(Weights); } static uint64_t shiftRightAndRound(uint64_t N, int Shift) { assert(Shift >= 0); assert(Shift < 64); if (!Shift) return N; return (N >> Shift) + (UINT64_C(1) & N >> (Shift - 1)); } void Distribution::normalize() { // Early exit for termination nodes. if (Weights.empty()) return; // Only bother if there are multiple successors. if (Weights.size() > 1) combineWeights(Weights); // Early exit when combined into a single successor. if (Weights.size() == 1) { Total = 1; Weights.front().Amount = 1; return; } // Determine how much to shift right so that the total fits into 32-bits. // // If we shift at all, shift by 1 extra. Otherwise, the lower limit of 1 // for each weight can cause a 32-bit overflow. int Shift = 0; if (DidOverflow) Shift = 33; else if (Total > UINT32_MAX) Shift = 33 - countLeadingZeros(Total); // Early exit if nothing needs to be scaled. if (!Shift) { // If we didn't overflow then combineWeights() shouldn't have changed the // sum of the weights, but let's double-check. assert(Total == std::accumulate(Weights.begin(), Weights.end(), UINT64_C(0), [](uint64_t Sum, const Weight &W) { return Sum + W.Amount; }) && "Expected total to be correct"); return; } // Recompute the total through accumulation (rather than shifting it) so that // it's accurate after shifting and any changes combineWeights() made above. Total = 0; // Sum the weights to each node and shift right if necessary. for (Weight &W : Weights) { // Scale down below UINT32_MAX. Since Shift is larger than necessary, we // can round here without concern about overflow. assert(W.TargetNode.isValid()); W.Amount = std::max(UINT64_C(1), shiftRightAndRound(W.Amount, Shift)); assert(W.Amount <= UINT32_MAX); // Update the total. 
Total += W.Amount; } assert(Total <= UINT32_MAX); } void BlockFrequencyInfoImplBase::clear() { // Swap with a default-constructed std::vector, since std::vector<>::clear() // does not actually clear heap storage. std::vector<FrequencyData>().swap(Freqs); std::vector<WorkingData>().swap(Working); Loops.clear(); } /// \brief Clear all memory not needed downstream. /// /// Releases all memory not used downstream. In particular, saves Freqs. static void cleanup(BlockFrequencyInfoImplBase &BFI) { std::vector<FrequencyData> SavedFreqs(std::move(BFI.Freqs)); BFI.clear(); BFI.Freqs = std::move(SavedFreqs); } bool BlockFrequencyInfoImplBase::addToDist(Distribution &Dist, const LoopData *OuterLoop, const BlockNode &Pred, const BlockNode &Succ, uint64_t Weight) { if (!Weight) Weight = 1; auto isLoopHeader = [&OuterLoop](const BlockNode &Node) { return OuterLoop && OuterLoop->isHeader(Node); }; BlockNode Resolved = Working[Succ.Index].getResolvedNode(); #ifndef NDEBUG auto debugSuccessor = [&](const char *Type) { dbgs() << " =>" << " [" << Type << "] weight = " << Weight; if (!isLoopHeader(Resolved)) dbgs() << ", succ = " << getBlockName(Succ); if (Resolved != Succ) dbgs() << ", resolved = " << getBlockName(Resolved); dbgs() << "\n"; }; (void)debugSuccessor; #endif if (isLoopHeader(Resolved)) { DEBUG(debugSuccessor("backedge")); Dist.addBackedge(Resolved, Weight); return true; } if (Working[Resolved.Index].getContainingLoop() != OuterLoop) { DEBUG(debugSuccessor(" exit ")); Dist.addExit(Resolved, Weight); return true; } if (Resolved < Pred) { if (!isLoopHeader(Pred)) { // If OuterLoop is an irreducible loop, we can't actually handle this. assert((!OuterLoop || !OuterLoop->isIrreducible()) && "unhandled irreducible control flow"); // Irreducible backedge. Abort. DEBUG(debugSuccessor("abort!!!")); return false; } // If "Pred" is a loop header, then this isn't really a backedge; rather, // OuterLoop must be irreducible. These false backedges can come only from // secondary loop headers. assert(OuterLoop && OuterLoop->isIrreducible() && !isLoopHeader(Resolved) && "unhandled irreducible control flow"); } DEBUG(debugSuccessor(" local ")); Dist.addLocal(Resolved, Weight); return true; } bool BlockFrequencyInfoImplBase::addLoopSuccessorsToDist( const LoopData *OuterLoop, LoopData &Loop, Distribution &Dist) { // Copy the exit map into Dist. for (const auto &I : Loop.Exits) if (!addToDist(Dist, OuterLoop, Loop.getHeader(), I.first, I.second.getMass())) // Irreducible backedge. return false; return true; } /// \brief Compute the loop scale for a loop. void BlockFrequencyInfoImplBase::computeLoopScale(LoopData &Loop) { // Compute loop scale. DEBUG(dbgs() << "compute-loop-scale: " << getLoopName(Loop) << "\n"); // Infinite loops need special handling. If we give the back edge an infinite // mass, they may saturate all the other scales in the function down to 1, // making all the other region temperatures look exactly the same. Choose an // arbitrary scale to avoid these issues. // // FIXME: An alternate way would be to select a symbolic scale which is later // replaced to be the maximum of all computed scales plus 1. This would // appropriately describe the loop as having a large scale, without skewing // the final frequency computation. 
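  // Editorial note (worked example, not in the original source): if the
  // header starts with full mass and 3/4 of it flows back along the
  // backedges, then ExitMass == 1/4 and the loop scale computed below is 4,
  // i.e. on average the loop body runs four times per entry into the loop.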
const Scaled64 InifiniteLoopScale(1, 12); // LoopScale == 1 / ExitMass // ExitMass == HeadMass - BackedgeMass BlockMass TotalBackedgeMass; for (auto &Mass : Loop.BackedgeMass) TotalBackedgeMass += Mass; BlockMass ExitMass = BlockMass::getFull() - TotalBackedgeMass; // Block scale stores the inverse of the scale. If this is an infinite loop, // its exit mass will be zero. In this case, use an arbitrary scale for the // loop scale. Loop.Scale = ExitMass.isEmpty() ? InifiniteLoopScale : ExitMass.toScaled().inverse(); DEBUG(dbgs() << " - exit-mass = " << ExitMass << " (" << BlockMass::getFull() << " - " << TotalBackedgeMass << ")\n" << " - scale = " << Loop.Scale << "\n"); } /// \brief Package up a loop. void BlockFrequencyInfoImplBase::packageLoop(LoopData &Loop) { DEBUG(dbgs() << "packaging-loop: " << getLoopName(Loop) << "\n"); // Clear the subloop exits to prevent quadratic memory usage. for (const BlockNode &M : Loop.Nodes) { if (auto *Loop = Working[M.Index].getPackagedLoop()) Loop->Exits.clear(); DEBUG(dbgs() << " - node: " << getBlockName(M.Index) << "\n"); } Loop.IsPackaged = true; } #ifndef NDEBUG static void debugAssign(const BlockFrequencyInfoImplBase &BFI, const DitheringDistributer &D, const BlockNode &T, const BlockMass &M, const char *Desc) { dbgs() << " => assign " << M << " (" << D.RemMass << ")"; if (Desc) dbgs() << " [" << Desc << "]"; if (T.isValid()) dbgs() << " to " << BFI.getBlockName(T); dbgs() << "\n"; } #endif void BlockFrequencyInfoImplBase::distributeMass(const BlockNode &Source, LoopData *OuterLoop, Distribution &Dist) { BlockMass Mass = Working[Source.Index].getMass(); DEBUG(dbgs() << " => mass: " << Mass << "\n"); // Distribute mass to successors as laid out in Dist. DitheringDistributer D(Dist, Mass); for (const Weight &W : Dist.Weights) { // Check for a local edge (non-backedge and non-exit). BlockMass Taken = D.takeMass(W.Amount); if (W.Type == Weight::Local) { Working[W.TargetNode.Index].getMass() += Taken; DEBUG(debugAssign(*this, D, W.TargetNode, Taken, nullptr)); continue; } // Backedges and exits only make sense if we're processing a loop. assert(OuterLoop && "backedge or exit outside of loop"); // Check for a backedge. if (W.Type == Weight::Backedge) { OuterLoop->BackedgeMass[OuterLoop->getHeaderIndex(W.TargetNode)] += Taken; DEBUG(debugAssign(*this, D, W.TargetNode, Taken, "back")); continue; } // This must be an exit. assert(W.Type == Weight::Exit); OuterLoop->Exits.push_back(std::make_pair(W.TargetNode, Taken)); DEBUG(debugAssign(*this, D, W.TargetNode, Taken, "exit")); } } static void convertFloatingToInteger(BlockFrequencyInfoImplBase &BFI, const Scaled64 &Min, const Scaled64 &Max) { // Scale the Factor to a size that creates integers. Ideally, integers would // be scaled so that Max == UINT64_MAX so that they can be best // differentiated. However, in the presence of large frequency values, small // frequencies are scaled down to 1, making it impossible to differentiate // small, unequal numbers. When the spread between Min and Max frequencies // fits well within MaxBits, we make the scale be at least 8. const unsigned MaxBits = 64; const unsigned SpreadBits = (Max / Min).lg(); Scaled64 ScalingFactor; if (SpreadBits <= MaxBits - 3) { // If the values are small enough, make the scaling factor at least 8 to // allow distinguishing small values. 
ScalingFactor = Min.inverse(); ScalingFactor <<= 3; } else { // If the values need more than MaxBits to be represented, saturate small // frequency values down to 1 by using a scaling factor that benefits large // frequency values. ScalingFactor = Scaled64(1, MaxBits) / Max; } // Translate the floats to integers. DEBUG(dbgs() << "float-to-int: min = " << Min << ", max = " << Max << ", factor = " << ScalingFactor << "\n"); for (size_t Index = 0; Index < BFI.Freqs.size(); ++Index) { Scaled64 Scaled = BFI.Freqs[Index].Scaled * ScalingFactor; BFI.Freqs[Index].Integer = std::max(UINT64_C(1), Scaled.toInt<uint64_t>()); DEBUG(dbgs() << " - " << BFI.getBlockName(Index) << ": float = " << BFI.Freqs[Index].Scaled << ", scaled = " << Scaled << ", int = " << BFI.Freqs[Index].Integer << "\n"); } } /// \brief Unwrap a loop package. /// /// Visits all the members of a loop, adjusting their BlockData according to /// the loop's pseudo-node. static void unwrapLoop(BlockFrequencyInfoImplBase &BFI, LoopData &Loop) { DEBUG(dbgs() << "unwrap-loop-package: " << BFI.getLoopName(Loop) << ": mass = " << Loop.Mass << ", scale = " << Loop.Scale << "\n"); Loop.Scale *= Loop.Mass.toScaled(); Loop.IsPackaged = false; DEBUG(dbgs() << " => combined-scale = " << Loop.Scale << "\n"); // Propagate the head scale through the loop. Since members are visited in // RPO, the head scale will be updated by the loop scale first, and then the // final head scale will be used for updated the rest of the members. for (const BlockNode &N : Loop.Nodes) { const auto &Working = BFI.Working[N.Index]; Scaled64 &F = Working.isAPackage() ? Working.getPackagedLoop()->Scale : BFI.Freqs[N.Index].Scaled; Scaled64 New = Loop.Scale * F; DEBUG(dbgs() << " - " << BFI.getBlockName(N) << ": " << F << " => " << New << "\n"); F = New; } } void BlockFrequencyInfoImplBase::unwrapLoops() { // Set initial frequencies from loop-local masses. for (size_t Index = 0; Index < Working.size(); ++Index) Freqs[Index].Scaled = Working[Index].Mass.toScaled(); for (LoopData &Loop : Loops) unwrapLoop(*this, Loop); } void BlockFrequencyInfoImplBase::finalizeMetrics() { // Unwrap loop packages in reverse post-order, tracking min and max // frequencies. auto Min = Scaled64::getLargest(); auto Max = Scaled64::getZero(); for (size_t Index = 0; Index < Working.size(); ++Index) { // Update min/max scale. Min = std::min(Min, Freqs[Index].Scaled); Max = std::max(Max, Freqs[Index].Scaled); } // Convert to integers. convertFloatingToInteger(*this, Min, Max); // Clean up data structures. cleanup(*this); // Print out the final stats. DEBUG(dump()); } BlockFrequency BlockFrequencyInfoImplBase::getBlockFreq(const BlockNode &Node) const { if (!Node.isValid()) return 0; return Freqs[Node.Index].Integer; } Scaled64 BlockFrequencyInfoImplBase::getFloatingBlockFreq(const BlockNode &Node) const { if (!Node.isValid()) return Scaled64::getZero(); return Freqs[Node.Index].Scaled; } std::string BlockFrequencyInfoImplBase::getBlockName(const BlockNode &Node) const { return std::string(); } std::string BlockFrequencyInfoImplBase::getLoopName(const LoopData &Loop) const { return getBlockName(Loop.getHeader()) + (Loop.isIrreducible() ? 
"**" : "*"); } raw_ostream & BlockFrequencyInfoImplBase::printBlockFreq(raw_ostream &OS, const BlockNode &Node) const { return OS << getFloatingBlockFreq(Node); } raw_ostream & BlockFrequencyInfoImplBase::printBlockFreq(raw_ostream &OS, const BlockFrequency &Freq) const { Scaled64 Block(Freq.getFrequency(), 0); Scaled64 Entry(getEntryFreq(), 0); return OS << Block / Entry; } void IrreducibleGraph::addNodesInLoop(const BFIBase::LoopData &OuterLoop) { Start = OuterLoop.getHeader(); Nodes.reserve(OuterLoop.Nodes.size()); for (auto N : OuterLoop.Nodes) addNode(N); indexNodes(); } void IrreducibleGraph::addNodesInFunction() { Start = 0; for (uint32_t Index = 0; Index < BFI.Working.size(); ++Index) if (!BFI.Working[Index].isPackaged()) addNode(Index); indexNodes(); } void IrreducibleGraph::indexNodes() { for (auto &I : Nodes) Lookup[I.Node.Index] = &I; } void IrreducibleGraph::addEdge(IrrNode &Irr, const BlockNode &Succ, const BFIBase::LoopData *OuterLoop) { if (OuterLoop && OuterLoop->isHeader(Succ)) return; auto L = Lookup.find(Succ.Index); if (L == Lookup.end()) return; IrrNode &SuccIrr = *L->second; Irr.Edges.push_back(&SuccIrr); SuccIrr.Edges.push_front(&Irr); ++SuccIrr.NumIn; } namespace llvm { template <> struct GraphTraits<IrreducibleGraph> { typedef bfi_detail::IrreducibleGraph GraphT; typedef const GraphT::IrrNode NodeType; typedef GraphT::IrrNode::iterator ChildIteratorType; static const NodeType *getEntryNode(const GraphT &G) { return G.StartIrr; } static ChildIteratorType child_begin(NodeType *N) { return N->succ_begin(); } static ChildIteratorType child_end(NodeType *N) { return N->succ_end(); } }; } /// \brief Find extra irreducible headers. /// /// Find entry blocks and other blocks with backedges, which exist when \c G /// contains irreducible sub-SCCs. static void findIrreducibleHeaders( const BlockFrequencyInfoImplBase &BFI, const IrreducibleGraph &G, const std::vector<const IrreducibleGraph::IrrNode *> &SCC, LoopData::NodeList &Headers, LoopData::NodeList &Others) { // Map from nodes in the SCC to whether it's an entry block. SmallDenseMap<const IrreducibleGraph::IrrNode *, bool, 8> InSCC; // InSCC also acts the set of nodes in the graph. Seed it. for (const auto *I : SCC) InSCC[I] = false; for (auto I = InSCC.begin(), E = InSCC.end(); I != E; ++I) { auto &Irr = *I->first; for (const auto *P : make_range(Irr.pred_begin(), Irr.pred_end())) { if (InSCC.count(P)) continue; // This is an entry block. I->second = true; Headers.push_back(Irr.Node); DEBUG(dbgs() << " => entry = " << BFI.getBlockName(Irr.Node) << "\n"); break; } } assert(Headers.size() >= 2 && "Expected irreducible CFG; -loop-info is likely invalid"); if (Headers.size() == InSCC.size()) { // Every block is a header. std::sort(Headers.begin(), Headers.end()); return; } // Look for extra headers from irreducible sub-SCCs. for (const auto &I : InSCC) { // Entry blocks are already headers. if (I.second) continue; auto &Irr = *I.first; for (const auto *P : make_range(Irr.pred_begin(), Irr.pred_end())) { // Skip forward edges. if (P->Node < Irr.Node) continue; // Skip predecessors from entry blocks. These can have inverted // ordering. if (InSCC.lookup(P)) continue; // Store the extra header. Headers.push_back(Irr.Node); DEBUG(dbgs() << " => extra = " << BFI.getBlockName(Irr.Node) << "\n"); break; } if (Headers.back() == Irr.Node) // Added this as a header. continue; // This is not a header. 
Others.push_back(Irr.Node); DEBUG(dbgs() << " => other = " << BFI.getBlockName(Irr.Node) << "\n"); } std::sort(Headers.begin(), Headers.end()); std::sort(Others.begin(), Others.end()); } static void createIrreducibleLoop( BlockFrequencyInfoImplBase &BFI, const IrreducibleGraph &G, LoopData *OuterLoop, std::list<LoopData>::iterator Insert, const std::vector<const IrreducibleGraph::IrrNode *> &SCC) { // Translate the SCC into RPO. DEBUG(dbgs() << " - found-scc\n"); LoopData::NodeList Headers; LoopData::NodeList Others; findIrreducibleHeaders(BFI, G, SCC, Headers, Others); auto Loop = BFI.Loops.emplace(Insert, OuterLoop, Headers.begin(), Headers.end(), Others.begin(), Others.end()); // Update loop hierarchy. for (const auto &N : Loop->Nodes) if (BFI.Working[N.Index].isLoopHeader()) BFI.Working[N.Index].Loop->Parent = &*Loop; else BFI.Working[N.Index].Loop = &*Loop; } iterator_range<std::list<LoopData>::iterator> BlockFrequencyInfoImplBase::analyzeIrreducible( const IrreducibleGraph &G, LoopData *OuterLoop, std::list<LoopData>::iterator Insert) { assert((OuterLoop == nullptr) == (Insert == Loops.begin())); auto Prev = OuterLoop ? std::prev(Insert) : Loops.end(); for (auto I = scc_begin(G); !I.isAtEnd(); ++I) { if (I->size() < 2) continue; // Translate the SCC into RPO. createIrreducibleLoop(*this, G, OuterLoop, Insert, *I); } if (OuterLoop) return make_range(std::next(Prev), Insert); return make_range(Loops.begin(), Insert); } void BlockFrequencyInfoImplBase::updateLoopWithIrreducible(LoopData &OuterLoop) { OuterLoop.Exits.clear(); for (auto &Mass : OuterLoop.BackedgeMass) Mass = BlockMass::getEmpty(); auto O = OuterLoop.Nodes.begin() + 1; for (auto I = O, E = OuterLoop.Nodes.end(); I != E; ++I) if (!Working[I->Index].isPackaged()) *O++ = *I; OuterLoop.Nodes.erase(O, OuterLoop.Nodes.end()); } void BlockFrequencyInfoImplBase::adjustLoopHeaderMass(LoopData &Loop) { assert(Loop.isIrreducible() && "this only makes sense on irreducible loops"); // Since the loop has more than one header block, the mass flowing back into // each header will be different. Adjust the mass in each header loop to // reflect the masses flowing through back edges. // // To do this, we distribute the initial mass using the backedge masses // as weights for the distribution. BlockMass LoopMass = BlockMass::getFull(); Distribution Dist; DEBUG(dbgs() << "adjust-loop-header-mass:\n"); for (uint32_t H = 0; H < Loop.NumHeaders; ++H) { auto &HeaderNode = Loop.Nodes[H]; auto &BackedgeMass = Loop.BackedgeMass[Loop.getHeaderIndex(HeaderNode)]; DEBUG(dbgs() << " - Add back edge mass for node " << getBlockName(HeaderNode) << ": " << BackedgeMass << "\n"); Dist.addLocal(HeaderNode, BackedgeMass.getMass()); } DitheringDistributer D(Dist, LoopMass); DEBUG(dbgs() << " Distribute loop mass " << LoopMass << " to headers using above weights\n"); for (const Weight &W : Dist.Weights) { BlockMass Taken = D.takeMass(W.Amount); assert(W.Type == Weight::Local && "all weights should be local"); Working[W.TargetNode.Index].getMass() = Taken; DEBUG(debugAssign(*this, D, W.TargetNode, Taken, nullptr)); } }
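A minimal standalone sketch, assuming nothing beyond the C++ standard library, of the two arithmetic helpers the distribution code above leans on: the saturating 64-bit addition performed by combineWeight() and the shift-right-with-rounding used when Distribution::normalize() scales totals below UINT32_MAX. It illustrates the arithmetic only and is not a substitute for the LLVM implementation.

#include <cassert>
#include <cstdint>
#include <iostream>

// Saturating unsigned add: detect wrap-around the same way combineWeight() does.
static uint64_t saturatingAdd(uint64_t A, uint64_t B) {
  uint64_t Sum = A + B;
  return Sum < A ? UINT64_MAX : Sum; // Saturate on overflow.
}

// Divide by 2^Shift, rounding to nearest by adding back the highest bit that
// was shifted out.
static uint64_t shiftRightAndRound(uint64_t N, int Shift) {
  assert(Shift >= 0 && Shift < 64);
  if (!Shift)
    return N;
  return (N >> Shift) + ((N >> (Shift - 1)) & 1);
}

int main() {
  std::cout << saturatingAdd(UINT64_MAX - 1, 5) << "\n"; // prints UINT64_MAX
  std::cout << shiftRightAndRound(7, 1) << "\n";         // 4: 3.5 rounds up
  std::cout << shiftRightAndRound(5, 2) << "\n";         // 1: 1.25 rounds down
  return 0;
}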
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/PHITransAddr.cpp
//===- PHITransAddr.cpp - PHI Translation for Addresses -------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the PHITransAddr class. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/PHITransAddr.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Instructions.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; static bool CanPHITrans(Instruction *Inst) { if (isa<PHINode>(Inst) || isa<GetElementPtrInst>(Inst)) return true; if (isa<CastInst>(Inst) && isSafeToSpeculativelyExecute(Inst)) return true; if (Inst->getOpcode() == Instruction::Add && isa<ConstantInt>(Inst->getOperand(1))) return true; // cerr << "MEMDEP: Could not PHI translate: " << *Pointer; // if (isa<BitCastInst>(PtrInst) || isa<GetElementPtrInst>(PtrInst)) // cerr << "OP:\t\t\t\t" << *PtrInst->getOperand(0); return false; } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void PHITransAddr::dump() const { if (!Addr) { dbgs() << "PHITransAddr: null\n"; return; } dbgs() << "PHITransAddr: " << *Addr << "\n"; for (unsigned i = 0, e = InstInputs.size(); i != e; ++i) dbgs() << " Input #" << i << " is " << *InstInputs[i] << "\n"; } #endif static bool VerifySubExpr(Value *Expr, SmallVectorImpl<Instruction*> &InstInputs) { // If this is a non-instruction value, there is nothing to do. Instruction *I = dyn_cast<Instruction>(Expr); if (!I) return true; // If it's an instruction, it is either in Tmp or its operands recursively // are. SmallVectorImpl<Instruction*>::iterator Entry = std::find(InstInputs.begin(), InstInputs.end(), I); if (Entry != InstInputs.end()) { InstInputs.erase(Entry); return true; } // If it isn't in the InstInputs list it is a subexpr incorporated into the // address. Sanity check that it is phi translatable. if (!CanPHITrans(I)) { errs() << "Instruction in PHITransAddr is not phi-translatable:\n"; errs() << *I << '\n'; llvm_unreachable("Either something is missing from InstInputs or " "CanPHITrans is wrong."); } // Validate the operands of the instruction. for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) if (!VerifySubExpr(I->getOperand(i), InstInputs)) return false; return true; } /// Verify - Check internal consistency of this data structure. If the /// structure is valid, it returns true. If invalid, it prints errors and /// returns false. bool PHITransAddr::Verify() const { if (!Addr) return true; SmallVector<Instruction*, 8> Tmp(InstInputs.begin(), InstInputs.end()); if (!VerifySubExpr(Addr, Tmp)) return false; if (!Tmp.empty()) { errs() << "PHITransAddr contains extra instructions:\n"; for (unsigned i = 0, e = InstInputs.size(); i != e; ++i) errs() << " InstInput #" << i << " is " << *InstInputs[i] << "\n"; llvm_unreachable("This is unexpected."); } // a-ok. return true; } /// IsPotentiallyPHITranslatable - If this needs PHI translation, return true /// if we have some hope of doing it. This should be used as a filter to /// avoid calling PHITranslateValue in hopeless situations. 
bool PHITransAddr::IsPotentiallyPHITranslatable() const { // If the input value is not an instruction, or if it is not defined in CurBB, // then we don't need to phi translate it. Instruction *Inst = dyn_cast<Instruction>(Addr); return !Inst || CanPHITrans(Inst); } static void RemoveInstInputs(Value *V, SmallVectorImpl<Instruction*> &InstInputs) { Instruction *I = dyn_cast<Instruction>(V); if (!I) return; // If the instruction is in the InstInputs list, remove it. SmallVectorImpl<Instruction*>::iterator Entry = std::find(InstInputs.begin(), InstInputs.end(), I); if (Entry != InstInputs.end()) { InstInputs.erase(Entry); return; } assert(!isa<PHINode>(I) && "Error, removing something that isn't an input"); // Otherwise, it must have instruction inputs itself. Zap them recursively. for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { if (Instruction *Op = dyn_cast<Instruction>(I->getOperand(i))) RemoveInstInputs(Op, InstInputs); } } Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB, BasicBlock *PredBB, const DominatorTree *DT) { // If this is a non-instruction value, it can't require PHI translation. Instruction *Inst = dyn_cast<Instruction>(V); if (!Inst) return V; // Determine whether 'Inst' is an input to our PHI translatable expression. bool isInput = std::find(InstInputs.begin(), InstInputs.end(), Inst) != InstInputs.end(); // Handle inputs instructions if needed. if (isInput) { if (Inst->getParent() != CurBB) { // If it is an input defined in a different block, then it remains an // input. return Inst; } // If 'Inst' is defined in this block and is an input that needs to be phi // translated, we need to incorporate the value into the expression or fail. // In either case, the instruction itself isn't an input any longer. InstInputs.erase(std::find(InstInputs.begin(), InstInputs.end(), Inst)); // If this is a PHI, go ahead and translate it. if (PHINode *PN = dyn_cast<PHINode>(Inst)) return AddAsInput(PN->getIncomingValueForBlock(PredBB)); // If this is a non-phi value, and it is analyzable, we can incorporate it // into the expression by making all instruction operands be inputs. if (!CanPHITrans(Inst)) return nullptr; // All instruction operands are now inputs (and of course, they may also be // defined in this block, so they may need to be phi translated themselves. for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i) if (Instruction *Op = dyn_cast<Instruction>(Inst->getOperand(i))) InstInputs.push_back(Op); } // Ok, it must be an intermediate result (either because it started that way // or because we just incorporated it into the expression). See if its // operands need to be phi translated, and if so, reconstruct it. if (CastInst *Cast = dyn_cast<CastInst>(Inst)) { if (!isSafeToSpeculativelyExecute(Cast)) return nullptr; Value *PHIIn = PHITranslateSubExpr(Cast->getOperand(0), CurBB, PredBB, DT); if (!PHIIn) return nullptr; if (PHIIn == Cast->getOperand(0)) return Cast; // Find an available version of this cast. // Constants are trivial to find. if (Constant *C = dyn_cast<Constant>(PHIIn)) return AddAsInput(ConstantExpr::getCast(Cast->getOpcode(), C, Cast->getType())); // Otherwise we have to see if a casted version of the incoming pointer // is available. If so, we can use it, otherwise we have to fail. 
for (User *U : PHIIn->users()) { if (CastInst *CastI = dyn_cast<CastInst>(U)) if (CastI->getOpcode() == Cast->getOpcode() && CastI->getType() == Cast->getType() && (!DT || DT->dominates(CastI->getParent(), PredBB))) return CastI; } return nullptr; } // Handle getelementptr with at least one PHI translatable operand. if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) { SmallVector<Value*, 8> GEPOps; bool AnyChanged = false; for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) { Value *GEPOp = PHITranslateSubExpr(GEP->getOperand(i), CurBB, PredBB, DT); if (!GEPOp) return nullptr; AnyChanged |= GEPOp != GEP->getOperand(i); GEPOps.push_back(GEPOp); } if (!AnyChanged) return GEP; // Simplify the GEP to handle 'gep x, 0' -> x etc. if (Value *V = SimplifyGEPInst(GEPOps, DL, TLI, DT, AC)) { for (unsigned i = 0, e = GEPOps.size(); i != e; ++i) RemoveInstInputs(GEPOps[i], InstInputs); return AddAsInput(V); } // Scan to see if we have this GEP available. Value *APHIOp = GEPOps[0]; for (User *U : APHIOp->users()) { if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) if (GEPI->getType() == GEP->getType() && GEPI->getNumOperands() == GEPOps.size() && GEPI->getParent()->getParent() == CurBB->getParent() && (!DT || DT->dominates(GEPI->getParent(), PredBB))) { if (std::equal(GEPOps.begin(), GEPOps.end(), GEPI->op_begin())) return GEPI; } } return nullptr; } // Handle add with a constant RHS. if (Inst->getOpcode() == Instruction::Add && isa<ConstantInt>(Inst->getOperand(1))) { // PHI translate the LHS. Constant *RHS = cast<ConstantInt>(Inst->getOperand(1)); bool isNSW = cast<BinaryOperator>(Inst)->hasNoSignedWrap(); bool isNUW = cast<BinaryOperator>(Inst)->hasNoUnsignedWrap(); Value *LHS = PHITranslateSubExpr(Inst->getOperand(0), CurBB, PredBB, DT); if (!LHS) return nullptr; // If the PHI translated LHS is an add of a constant, fold the immediates. if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(LHS)) if (BOp->getOpcode() == Instruction::Add) if (ConstantInt *CI = dyn_cast<ConstantInt>(BOp->getOperand(1))) { LHS = BOp->getOperand(0); RHS = ConstantExpr::getAdd(RHS, CI); isNSW = isNUW = false; // If the old 'LHS' was an input, add the new 'LHS' as an input. if (std::find(InstInputs.begin(), InstInputs.end(), BOp) != InstInputs.end()) { RemoveInstInputs(BOp, InstInputs); AddAsInput(LHS); } } // See if the add simplifies away. if (Value *Res = SimplifyAddInst(LHS, RHS, isNSW, isNUW, DL, TLI, DT, AC)) { // If we simplified the operands, the LHS is no longer an input, but Res // is. RemoveInstInputs(LHS, InstInputs); return AddAsInput(Res); } // If we didn't modify the add, just return it. if (LHS == Inst->getOperand(0) && RHS == Inst->getOperand(1)) return Inst; // Otherwise, see if we have this add available somewhere. for (User *U : LHS->users()) { if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) if (BO->getOpcode() == Instruction::Add && BO->getOperand(0) == LHS && BO->getOperand(1) == RHS && BO->getParent()->getParent() == CurBB->getParent() && (!DT || DT->dominates(BO->getParent(), PredBB))) return BO; } return nullptr; } // Otherwise, we failed. return nullptr; } /// PHITranslateValue - PHI translate the current address up the CFG from /// CurBB to Pred, updating our state to reflect any needed changes. If /// 'MustDominate' is true, the translated value must dominate /// PredBB. This returns true on failure and sets Addr to null. 
bool PHITransAddr::PHITranslateValue(BasicBlock *CurBB, BasicBlock *PredBB, const DominatorTree *DT, bool MustDominate) { assert(DT || !MustDominate); assert(Verify() && "Invalid PHITransAddr!"); if (DT && DT->isReachableFromEntry(PredBB)) Addr = PHITranslateSubExpr(Addr, CurBB, PredBB, MustDominate ? DT : nullptr); else Addr = nullptr; assert(Verify() && "Invalid PHITransAddr!"); if (MustDominate) // Make sure the value is live in the predecessor. if (Instruction *Inst = dyn_cast_or_null<Instruction>(Addr)) if (!DT->dominates(Inst->getParent(), PredBB)) Addr = nullptr; return Addr == nullptr; } /// PHITranslateWithInsertion - PHI translate this value into the specified /// predecessor block, inserting a computation of the value if it is /// unavailable. /// /// All newly created instructions are added to the NewInsts list. This /// returns null on failure. /// Value *PHITransAddr:: PHITranslateWithInsertion(BasicBlock *CurBB, BasicBlock *PredBB, const DominatorTree &DT, SmallVectorImpl<Instruction*> &NewInsts) { unsigned NISize = NewInsts.size(); // Attempt to PHI translate with insertion. Addr = InsertPHITranslatedSubExpr(Addr, CurBB, PredBB, DT, NewInsts); // If successful, return the new value. if (Addr) return Addr; // If not, destroy any intermediate instructions inserted. while (NewInsts.size() != NISize) NewInsts.pop_back_val()->eraseFromParent(); return nullptr; } /// InsertPHITranslatedPointer - Insert a computation of the PHI translated /// version of 'V' for the edge PredBB->CurBB into the end of the PredBB /// block. All newly created instructions are added to the NewInsts list. /// This returns null on failure. /// Value *PHITransAddr:: InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB, BasicBlock *PredBB, const DominatorTree &DT, SmallVectorImpl<Instruction*> &NewInsts) { // See if we have a version of this value already available and dominating // PredBB. If so, there is no need to insert a new instance of it. PHITransAddr Tmp(InVal, DL, AC); if (!Tmp.PHITranslateValue(CurBB, PredBB, &DT, /*MustDominate=*/true)) return Tmp.getAddr(); // We don't need to PHI translate values which aren't instructions. auto *Inst = dyn_cast<Instruction>(InVal); if (!Inst) return nullptr; // Handle cast of PHI translatable value. if (CastInst *Cast = dyn_cast<CastInst>(Inst)) { if (!isSafeToSpeculativelyExecute(Cast)) return nullptr; Value *OpVal = InsertPHITranslatedSubExpr(Cast->getOperand(0), CurBB, PredBB, DT, NewInsts); if (!OpVal) return nullptr; // Otherwise insert a cast at the end of PredBB. CastInst *New = CastInst::Create(Cast->getOpcode(), OpVal, InVal->getType(), InVal->getName() + ".phi.trans.insert", PredBB->getTerminator()); New->setDebugLoc(Inst->getDebugLoc()); NewInsts.push_back(New); return New; } // Handle getelementptr with at least one PHI operand. 
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) { SmallVector<Value*, 8> GEPOps; BasicBlock *CurBB = GEP->getParent(); for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) { Value *OpVal = InsertPHITranslatedSubExpr(GEP->getOperand(i), CurBB, PredBB, DT, NewInsts); if (!OpVal) return nullptr; GEPOps.push_back(OpVal); } GetElementPtrInst *Result = GetElementPtrInst::Create( GEP->getSourceElementType(), GEPOps[0], makeArrayRef(GEPOps).slice(1), InVal->getName() + ".phi.trans.insert", PredBB->getTerminator()); Result->setDebugLoc(Inst->getDebugLoc()); Result->setIsInBounds(GEP->isInBounds()); NewInsts.push_back(Result); return Result; } #if 0 // FIXME: This code works, but it is unclear that we actually want to insert // a big chain of computation in order to make a value available in a block. // This needs to be evaluated carefully to consider its cost trade offs. // Handle add with a constant RHS. if (Inst->getOpcode() == Instruction::Add && isa<ConstantInt>(Inst->getOperand(1))) { // PHI translate the LHS. Value *OpVal = InsertPHITranslatedSubExpr(Inst->getOperand(0), CurBB, PredBB, DT, NewInsts); if (OpVal == 0) return 0; BinaryOperator *Res = BinaryOperator::CreateAdd(OpVal, Inst->getOperand(1), InVal->getName()+".phi.trans.insert", PredBB->getTerminator()); Res->setHasNoSignedWrap(cast<BinaryOperator>(Inst)->hasNoSignedWrap()); Res->setHasNoUnsignedWrap(cast<BinaryOperator>(Inst)->hasNoUnsignedWrap()); NewInsts.push_back(Res); return Res; } #endif return nullptr; }
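To make the translation step easier to follow, here is a standalone sketch, using only the C++ standard library, of the core idea: for a chosen predecessor edge, an address expression that involves a PHI node is rewritten in terms of the value the PHI receives along that edge. The structs and names below are hypothetical simplifications for illustration and are not the PHITransAddr API; in particular, the real code must also prove that the translated value is available (and, when requested, dominates the predecessor).

#include <iostream>
#include <map>
#include <string>

// A PHI node modeled as a map from predecessor block name to incoming value.
struct Phi {
  std::map<std::string, std::string> Incoming;
};

// An address modeled as "Base + Index", where the index is a PHI.
struct Addr {
  std::string Base;
  const Phi *Index;
};

// "PHI translate" the address into predecessor Pred: substitute the value that
// flows into the PHI along that edge.
static std::string translate(const Addr &A, const std::string &Pred) {
  auto It = A.Index->Incoming.find(Pred);
  if (It == A.Index->Incoming.end())
    return "<translation failed>";
  return A.Base + " + " + It->second;
}

int main() {
  Phi P{{{"pred1", "%a"}, {"pred2", "%b"}}};
  Addr A{"%base", &P};
  std::cout << translate(A, "pred1") << "\n"; // %base + %a
  std::cout << translate(A, "pred2") << "\n"; // %base + %b
  return 0;
}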
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/Delinearization.cpp
//===---- Delinearization.cpp - MultiDimensional Index Delinearization ----===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This implements an analysis pass that tries to delinearize all GEP // instructions in all loops using the SCEV analysis functionality. This pass is // only used for testing purposes: if your pass needs delinearization, please // use the on-demand SCEVAddRecExpr::delinearize() function. // //===----------------------------------------------------------------------===// #include "llvm/IR/Constants.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/Passes.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/ScalarEvolutionExpressions.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Function.h" #include "llvm/IR/InstIterator.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Type.h" #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; #define DL_NAME "delinearize" #define DEBUG_TYPE DL_NAME namespace { class Delinearization : public FunctionPass { Delinearization(const Delinearization &); // do not implement protected: Function *F; LoopInfo *LI; ScalarEvolution *SE; public: static char ID; // Pass identification, replacement for typeid Delinearization() : FunctionPass(ID) { initializeDelinearizationPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function &F) override; void getAnalysisUsage(AnalysisUsage &AU) const override; void print(raw_ostream &O, const Module *M = nullptr) const override; }; } // end anonymous namespace void Delinearization::getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesAll(); AU.addRequired<LoopInfoWrapperPass>(); AU.addRequired<ScalarEvolution>(); } bool Delinearization::runOnFunction(Function &F) { this->F = &F; SE = &getAnalysis<ScalarEvolution>(); LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); return false; } static Value *getPointerOperand(Instruction &Inst) { if (LoadInst *Load = dyn_cast<LoadInst>(&Inst)) return Load->getPointerOperand(); else if (StoreInst *Store = dyn_cast<StoreInst>(&Inst)) return Store->getPointerOperand(); else if (GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(&Inst)) return Gep->getPointerOperand(); return nullptr; } void Delinearization::print(raw_ostream &O, const Module *) const { O << "Delinearization on function " << F->getName() << ":\n"; for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) { Instruction *Inst = &(*I); // Only analyze loads and stores. if (!isa<StoreInst>(Inst) && !isa<LoadInst>(Inst) && !isa<GetElementPtrInst>(Inst)) continue; const BasicBlock *BB = Inst->getParent(); // Delinearize the memory access as analyzed in all the surrounding loops. // Do not analyze memory accesses outside loops. for (Loop *L = LI->getLoopFor(BB); L != nullptr; L = L->getParentLoop()) { const SCEV *AccessFn = SE->getSCEVAtScope(getPointerOperand(*Inst), L); const SCEVUnknown *BasePointer = dyn_cast<SCEVUnknown>(SE->getPointerBase(AccessFn)); // Do not delinearize if we cannot find the base pointer. 
if (!BasePointer) break; AccessFn = SE->getMinusSCEV(AccessFn, BasePointer); const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(AccessFn); // Do not try to delinearize memory accesses that are not AddRecs. if (!AR) break; O << "\n"; O << "Inst:" << *Inst << "\n"; O << "In Loop with Header: " << L->getHeader()->getName() << "\n"; O << "AddRec: " << *AR << "\n"; SmallVector<const SCEV *, 3> Subscripts, Sizes; SE->delinearize(AR, Subscripts, Sizes, SE->getElementSize(Inst)); if (Subscripts.size() == 0 || Sizes.size() == 0 || Subscripts.size() != Sizes.size()) { O << "failed to delinearize\n"; continue; } O << "Base offset: " << *BasePointer << "\n"; O << "ArrayDecl[UnknownSize]"; int Size = Subscripts.size(); for (int i = 0; i < Size - 1; i++) O << "[" << *Sizes[i] << "]"; O << " with elements of " << *Sizes[Size - 1] << " bytes.\n"; O << "ArrayRef"; for (int i = 0; i < Size; i++) O << "[" << *Subscripts[i] << "]"; O << "\n"; } } } char Delinearization::ID = 0; static const char delinearization_name[] = "Delinearization"; INITIALIZE_PASS_BEGIN(Delinearization, DL_NAME, delinearization_name, true, true) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_END(Delinearization, DL_NAME, delinearization_name, true, true) FunctionPass *llvm::createDelinearizationPass() { return new Delinearization; }
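As a standalone illustration of the ArrayDecl/ArrayRef output printed above, the sketch below (plain C++, no LLVM dependencies) shows the kind of linearized access, i * N + j, that delinearization tries to split back into per-dimension subscripts, and checks that the split round-trips exactly. The loop bounds and the inner size N are made-up example values; the real pass derives sizes and subscripts symbolically through SCEV rather than by integer division.

#include <cstddef>
#include <iostream>

int main() {
  const std::size_t N = 8; // Assumed inner dimension size.
  bool OK = true;
  for (std::size_t i = 0; i < 3; ++i)
    for (std::size_t j = 0; j < N; ++j) {
      std::size_t Linear = i * N + j;      // What the flattened GEP computes.
      std::size_t RecoveredI = Linear / N; // Subscripts a delinearizer would
      std::size_t RecoveredJ = Linear % N; // report: ArrayRef[i][j], sizes [*][N].
      if (RecoveredI != i || RecoveredJ != j)
        OK = false;
    }
  std::cout << (OK ? "round-trip holds for i*N+j\n" : "mismatch\n");
  return 0;
}

With the pass registered as above, feeding IR that contains such an access through opt (for this vintage of LLVM, something like "opt -analyze -delinearize") should print the recovered subscripts in the ArrayDecl/ArrayRef form assembled in print(); the exact invocation may differ between releases.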
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/LoopPass.cpp
//===- LoopPass.cpp - Loop Pass and Loop Pass Manager ---------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements LoopPass and LPPassManager. All loop optimization // and transformation passes are derived from LoopPass. LPPassManager is // responsible for managing LoopPasses. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/LoopPass.h" #include "llvm/IR/IRPrintingPasses.h" #include "llvm/IR/LLVMContext.h" #include "llvm/Support/Debug.h" #include "llvm/Support/Timer.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; #define DEBUG_TYPE "loop-pass-manager" namespace { /// PrintLoopPass - Print a Function corresponding to a Loop. /// class PrintLoopPass : public LoopPass { private: std::string Banner; raw_ostream &Out; // raw_ostream to print on. public: static char ID; PrintLoopPass(const std::string &B, raw_ostream &o) : LoopPass(ID), Banner(B), Out(o) {} void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesAll(); } bool runOnLoop(Loop *L, LPPassManager &) override { Out << Banner; for (Loop::block_iterator b = L->block_begin(), be = L->block_end(); b != be; ++b) { if (*b) (*b)->print(Out); else Out << "Printing <null> block"; } return false; } }; char PrintLoopPass::ID = 0; } //===----------------------------------------------------------------------===// // LPPassManager // char LPPassManager::ID = 0; LPPassManager::LPPassManager() : FunctionPass(ID), PMDataManager() { skipThisLoop = false; redoThisLoop = false; LI = nullptr; CurrentLoop = nullptr; } /// Delete loop from the loop queue and loop hierarchy (LoopInfo). void LPPassManager::deleteLoopFromQueue(Loop *L) { LI->updateUnloop(L); // Notify passes that the loop is being deleted. deleteSimpleAnalysisLoop(L); // If L is current loop then skip rest of the passes and let // runOnFunction remove L from LQ. Otherwise, remove L from LQ now // and continue applying other passes on CurrentLoop. if (CurrentLoop == L) skipThisLoop = true; delete L; if (skipThisLoop) return; for (std::deque<Loop *>::iterator I = LQ.begin(), E = LQ.end(); I != E; ++I) { if (*I == L) { LQ.erase(I); break; } } } // Inset loop into loop nest (LoopInfo) and loop queue (LQ). void LPPassManager::insertLoop(Loop *L, Loop *ParentLoop) { assert (CurrentLoop != L && "Cannot insert CurrentLoop"); // Insert into loop nest if (ParentLoop) ParentLoop->addChildLoop(L); else LI->addTopLevelLoop(L); insertLoopIntoQueue(L); } void LPPassManager::insertLoopIntoQueue(Loop *L) { // Insert L into loop queue if (L == CurrentLoop) redoLoop(L); else if (!L->getParentLoop()) // This is top level loop. LQ.push_front(L); else { // Insert L after the parent loop. for (std::deque<Loop *>::iterator I = LQ.begin(), E = LQ.end(); I != E; ++I) { if (*I == L->getParentLoop()) { // deque does not support insert after. ++I; LQ.insert(I, 1, L); break; } } } } // Reoptimize this loop. LPPassManager will re-insert this loop into the // queue. This allows LoopPass to change loop nest for the loop. This // utility may send LPPassManager into infinite loops so use caution. void LPPassManager::redoLoop(Loop *L) { assert (CurrentLoop == L && "Can redo only CurrentLoop"); redoThisLoop = true; } /// cloneBasicBlockSimpleAnalysis - Invoke cloneBasicBlockAnalysis hook for /// all loop passes. 
void LPPassManager::cloneBasicBlockSimpleAnalysis(BasicBlock *From, BasicBlock *To, Loop *L) { for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) { LoopPass *LP = getContainedPass(Index); LP->cloneBasicBlockAnalysis(From, To, L); } } /// deleteSimpleAnalysisValue - Invoke deleteAnalysisValue hook for all passes. void LPPassManager::deleteSimpleAnalysisValue(Value *V, Loop *L) { if (BasicBlock *BB = dyn_cast<BasicBlock>(V)) { for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE; ++BI) { Instruction &I = *BI; deleteSimpleAnalysisValue(&I, L); } } for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) { LoopPass *LP = getContainedPass(Index); LP->deleteAnalysisValue(V, L); } } /// Invoke deleteAnalysisLoop hook for all passes. void LPPassManager::deleteSimpleAnalysisLoop(Loop *L) { for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) { LoopPass *LP = getContainedPass(Index); LP->deleteAnalysisLoop(L); } } // Recurse through all subloops and all loops into LQ. static void addLoopIntoQueue(Loop *L, std::deque<Loop *> &LQ) { LQ.push_back(L); for (Loop::reverse_iterator I = L->rbegin(), E = L->rend(); I != E; ++I) addLoopIntoQueue(*I, LQ); } /// Pass Manager itself does not invalidate any analysis info. void LPPassManager::getAnalysisUsage(AnalysisUsage &Info) const { // LPPassManager needs LoopInfo. In the long term LoopInfo class will // become part of LPPassManager. Info.addRequired<LoopInfoWrapperPass>(); Info.setPreservesAll(); } /// run - Execute all of the passes scheduled for execution. Keep track of /// whether any of the passes modifies the function, and if so, return true. bool LPPassManager::runOnFunction(Function &F) { auto &LIWP = getAnalysis<LoopInfoWrapperPass>(); LI = &LIWP.getLoopInfo(); bool Changed = false; // Collect inherited analysis from Module level pass manager. populateInheritedAnalysis(TPM->activeStack); // Populate the loop queue in reverse program order. There is no clear need to // process sibling loops in either forward or reverse order. There may be some // advantage in deleting uses in a later loop before optimizing the // definitions in an earlier loop. If we find a clear reason to process in // forward order, then a forward variant of LoopPassManager should be created. // // Note that LoopInfo::iterator visits loops in reverse program // order. Here, reverse_iterator gives us a forward order, and the LoopQueue // reverses the order a third time by popping from the back. for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I) addLoopIntoQueue(*I, LQ); if (LQ.empty()) // No loops, skip calling finalizers return false; // Initialization for (std::deque<Loop *>::const_iterator I = LQ.begin(), E = LQ.end(); I != E; ++I) { Loop *L = *I; for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) { LoopPass *P = getContainedPass(Index); Changed |= P->doInitialization(L, *this); } } // Walk Loops while (!LQ.empty()) { CurrentLoop = LQ.back(); skipThisLoop = false; redoThisLoop = false; // Run all passes on the current Loop. for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) { LoopPass *P = getContainedPass(Index); dumpPassInfo(P, EXECUTION_MSG, ON_LOOP_MSG, CurrentLoop->getHeader()->getName()); dumpRequiredSet(P); initializeAnalysisImpl(P); { PassManagerPrettyStackEntry X(P, *CurrentLoop->getHeader()); TimeRegion PassTimer(getPassTimer(P)); Changed |= P->runOnLoop(CurrentLoop, *this); } if (Changed) dumpPassInfo(P, MODIFICATION_MSG, ON_LOOP_MSG, skipThisLoop ? 
"<deleted>" : CurrentLoop->getHeader()->getName()); dumpPreservedSet(P); if (!skipThisLoop) { // Manually check that this loop is still healthy. This is done // instead of relying on LoopInfo::verifyLoop since LoopInfo // is a function pass and it's really expensive to verify every // loop in the function every time. That level of checking can be // enabled with the -verify-loop-info option. { TimeRegion PassTimer(getPassTimer(&LIWP)); CurrentLoop->verifyLoop(); } // Then call the regular verifyAnalysis functions. verifyPreservedAnalysis(P); F.getContext().yield(); } removeNotPreservedAnalysis(P); recordAvailableAnalysis(P); removeDeadPasses(P, skipThisLoop ? "<deleted>" : CurrentLoop->getHeader()->getName(), ON_LOOP_MSG); if (skipThisLoop) // Do not run other passes on this loop. break; } // If the loop was deleted, release all the loop passes. This frees up // some memory, and avoids trouble with the pass manager trying to call // verifyAnalysis on them. if (skipThisLoop) for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) { Pass *P = getContainedPass(Index); freePass(P, "<deleted>", ON_LOOP_MSG); } // Pop the loop from queue after running all passes. LQ.pop_back(); if (redoThisLoop) LQ.push_back(CurrentLoop); } // Finalization for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) { LoopPass *P = getContainedPass(Index); Changed |= P->doFinalization(); } return Changed; } /// Print passes managed by this manager void LPPassManager::dumpPassStructure(unsigned Offset) { errs().indent(Offset*2) << "Loop Pass Manager\n"; for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) { Pass *P = getContainedPass(Index); P->dumpPassStructure(Offset + 1); dumpLastUses(P, Offset+1); } } //===----------------------------------------------------------------------===// // LoopPass Pass *LoopPass::createPrinterPass(raw_ostream &O, const std::string &Banner) const { return new PrintLoopPass(Banner, O); } // Check if this pass is suitable for the current LPPassManager, if // available. This pass P is not suitable for a LPPassManager if P // is not preserving higher level analysis info used by other // LPPassManager passes. In such case, pop LPPassManager from the // stack. This will force assignPassManager() to create new // LPPassManger as expected. void LoopPass::preparePassManager(PMStack &PMS) { // Find LPPassManager while (!PMS.empty() && PMS.top()->getPassManagerType() > PMT_LoopPassManager) PMS.pop(); // If this pass is destroying high level information that is used // by other passes that are managed by LPM then do not insert // this pass in current LPM. Use new LPPassManager. if (PMS.top()->getPassManagerType() == PMT_LoopPassManager && !PMS.top()->preserveHigherLevelAnalysis(this)) PMS.pop(); } /// Assign pass manager to manage this pass. void LoopPass::assignPassManager(PMStack &PMS, PassManagerType PreferredType) { std::unique_ptr<LoopPass> thisPtr(this); // HLSL Change // Find LPPassManager while (!PMS.empty() && PMS.top()->getPassManagerType() > PMT_LoopPassManager) PMS.pop(); LPPassManager *LPPM; if (PMS.top()->getPassManagerType() == PMT_LoopPassManager) LPPM = (LPPassManager*)PMS.top(); else { // Create new Loop Pass Manager if it does not exist. 
assert (!PMS.empty() && "Unable to create Loop Pass Manager"); PMDataManager *PMD = PMS.top(); // [1] Create new Loop Pass Manager LPPM = new LPPassManager(); LPPM->populateInheritedAnalysis(PMS); // [2] Set up new manager's top level manager PMTopLevelManager *TPM = PMD->getTopLevelManager(); TPM->addIndirectPassManager(LPPM); // [3] Assign manager to manage this new manager. This may create // and push new managers into PMS Pass *P = LPPM->getAsPass(); TPM->schedulePass(P); // [4] Push new manager into PMS PMS.push(LPPM); } thisPtr.release(); // HLSL Change LPPM->add(this); } // Containing function has Attribute::OptimizeNone and transformation // passes should skip it. bool LoopPass::skipOptnoneFunction(const Loop *L) const { const Function *F = L->getHeader()->getParent(); if (F && F->hasFnAttribute(Attribute::OptimizeNone)) { // FIXME: Report this to dbgs() only once per function. DEBUG(dbgs() << "Skipping pass '" << getPassName() << "' in function " << F->getName() << "\n"); // FIXME: Delete loop from pass manager's queue? return true; } return false; }
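For context on how this machinery is driven, here is a sketch of a minimal client loop pass, modeled on the PrintLoopPass defined earlier in this file. CountBlocksPass is a hypothetical name, building it requires the same LLVM headers used in this tree, and the usual pass-registration boilerplate (INITIALIZE_PASS, a createXxxPass factory) is omitted.

#include "llvm/Analysis/LoopPass.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

namespace {
// Analysis-only pass: counts the basic blocks of each loop and changes nothing.
class CountBlocksPass : public LoopPass {
public:
  static char ID;
  CountBlocksPass() : LoopPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll(); // We modify no IR, so everything is preserved.
  }

  bool runOnLoop(Loop *L, LPPassManager &) override {
    unsigned NumBlocks = 0;
    for (Loop::block_iterator B = L->block_begin(), E = L->block_end();
         B != E; ++B)
      ++NumBlocks;
    errs() << "loop with header " << L->getHeader()->getName() << " has "
           << NumBlocks << " blocks\n";
    return false; // Report that the loop was not modified.
  }
};
char CountBlocksPass::ID = 0;
} // end anonymous namespace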
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/LoopAccessAnalysis.cpp
//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------==// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // The implementation for the loop memory dependence that was originally // developed for the loop vectorizer. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/LoopAccessAnalysis.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/ScalarEvolutionExpander.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/DiagnosticInfo.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/IRBuilder.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Analysis/VectorUtils.h" using namespace llvm; #define DEBUG_TYPE "loop-accesses" #if 0 // HLSL Change Starts - option pending static cl::opt<unsigned, true> VectorizationFactor("force-vector-width", cl::Hidden, cl::desc("Sets the SIMD width. Zero is autoselect."), cl::location(VectorizerParams::VectorizationFactor)); unsigned VectorizerParams::VectorizationFactor; static cl::opt<unsigned, true> VectorizationInterleave("force-vector-interleave", cl::Hidden, cl::desc("Sets the vectorization interleave count. " "Zero is autoselect."), cl::location( VectorizerParams::VectorizationInterleave)); unsigned VectorizerParams::VectorizationInterleave; static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold( "runtime-memory-check-threshold", cl::Hidden, cl::desc("When performing memory disambiguation checks at runtime do not " "generate more than this number of comparisons (default = 8)."), cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8)); unsigned VectorizerParams::RuntimeMemoryCheckThreshold; /// \brief The maximum iterations used to merge memory checks static cl::opt<unsigned> MemoryCheckMergeThreshold( "memory-check-merge-threshold", cl::Hidden, cl::desc("Maximum number of comparisons done when trying to merge " "runtime memory checks. (default = 100)"), cl::init(100)); /// Maximum SIMD width. const unsigned VectorizerParams::MaxVectorWidth = 64; /// \brief We collect interesting dependences up to this threshold. 
static cl::opt<unsigned> MaxInterestingDependence( "max-interesting-dependences", cl::Hidden, cl::desc("Maximum number of interesting dependences collected by " "loop-access analysis (default = 100)"), cl::init(100)); #else unsigned VectorizerParams::VectorizationInterleave; unsigned VectorizerParams::VectorizationFactor; unsigned VectorizerParams::RuntimeMemoryCheckThreshold = 8; static const unsigned MemoryCheckMergeThreshold = 100; const unsigned VectorizerParams::MaxVectorWidth = 64; static const unsigned MaxInterestingDependence = 100; #endif // HLSL Change Ends bool VectorizerParams::isInterleaveForced() { return false; // HLSL Change - instead of return ::VectorizationInterleave.getNumOccurrences() > 0; } void LoopAccessReport::emitAnalysis(const LoopAccessReport &Message, const Function *TheFunction, const Loop *TheLoop, const char *PassName) { DebugLoc DL = TheLoop->getStartLoc(); if (const Instruction *I = Message.getInstr()) DL = I->getDebugLoc(); emitOptimizationRemarkAnalysis(TheFunction->getContext(), PassName, *TheFunction, DL, Message.str()); } Value *llvm::stripIntegerCast(Value *V) { if (CastInst *CI = dyn_cast<CastInst>(V)) if (CI->getOperand(0)->getType()->isIntegerTy()) return CI->getOperand(0); return V; } const SCEV *llvm::replaceSymbolicStrideSCEV(ScalarEvolution *SE, const ValueToValueMap &PtrToStride, Value *Ptr, Value *OrigPtr) { const SCEV *OrigSCEV = SE->getSCEV(Ptr); // If there is an entry in the map return the SCEV of the pointer with the // symbolic stride replaced by one. ValueToValueMap::const_iterator SI = PtrToStride.find(OrigPtr ? OrigPtr : Ptr); if (SI != PtrToStride.end()) { Value *StrideVal = SI->second; // Strip casts. StrideVal = stripIntegerCast(StrideVal); // Replace symbolic stride by one. Value *One = ConstantInt::get(StrideVal->getType(), 1); ValueToValueMap RewriteMap; RewriteMap[StrideVal] = One; const SCEV *ByOne = SCEVParameterRewriter::rewrite(OrigSCEV, *SE, RewriteMap, true); DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV << " by: " << *ByOne << "\n"); return ByOne; } // Otherwise, just return the SCEV of the original pointer. return SE->getSCEV(Ptr); } void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, bool WritePtr, unsigned DepSetId, unsigned ASId, const ValueToValueMap &Strides) { // Get the stride replaced scev. const SCEV *Sc = replaceSymbolicStrideSCEV(SE, Strides, Ptr); const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc); assert(AR && "Invalid addrec expression"); const SCEV *Ex = SE->getBackedgeTakenCount(Lp); const SCEV *ScEnd = AR->evaluateAtIteration(Ex, *SE); Pointers.emplace_back(Ptr, AR->getStart(), ScEnd, WritePtr, DepSetId, ASId, Sc); } bool RuntimePointerChecking::needsChecking( const CheckingPtrGroup &M, const CheckingPtrGroup &N, const SmallVectorImpl<int> *PtrPartition) const { for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I) for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J) if (needsChecking(M.Members[I], N.Members[J], PtrPartition)) return true; return false; } /// Compare \p I and \p J and return the minimum. /// Return nullptr in case we couldn't find an answer. 
static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J, ScalarEvolution *SE) { const SCEV *Diff = SE->getMinusSCEV(J, I); const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff); if (!C) return nullptr; if (C->getValue()->isNegative()) return J; return I; } bool RuntimePointerChecking::CheckingPtrGroup::addPointer(unsigned Index) { const SCEV *Start = RtCheck.Pointers[Index].Start; const SCEV *End = RtCheck.Pointers[Index].End; // Compare the starts and ends with the known minimum and maximum // of this set. We need to know how we compare against the min/max // of the set in order to be able to emit memchecks. const SCEV *Min0 = getMinFromExprs(Start, Low, RtCheck.SE); if (!Min0) return false; const SCEV *Min1 = getMinFromExprs(End, High, RtCheck.SE); if (!Min1) return false; // Update the low bound expression if we've found a new min value. if (Min0 == Start) Low = Start; // Update the high bound expression if we've found a new max value. if (Min1 != End) High = End; Members.push_back(Index); return true; } void RuntimePointerChecking::groupChecks( MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) { // We build the groups from dependency candidates equivalence classes // because: // - We know that pointers in the same equivalence class share // the same underlying object and therefore there is a chance // that we can compare pointers // - We wouldn't be able to merge two pointers for which we need // to emit a memcheck. The classes in DepCands are already // conveniently built such that no two pointers in the same // class need checking against each other. // We use the following (greedy) algorithm to construct the groups // For every pointer in the equivalence class: // For each existing group: // - if the difference between this pointer and the min/max bounds // of the group is a constant, then make the pointer part of the // group and update the min/max bounds of that group as required. CheckingGroups.clear(); // If we don't have the dependency partitions, construct a new // checking pointer group for each pointer. if (!UseDependencies) { for (unsigned I = 0; I < Pointers.size(); ++I) CheckingGroups.push_back(CheckingPtrGroup(I, *this)); return; } unsigned TotalComparisons = 0; DenseMap<Value *, unsigned> PositionMap; for (unsigned Index = 0; Index < Pointers.size(); ++Index) PositionMap[Pointers[Index].PointerValue] = Index; // We need to keep track of what pointers we've already seen so we // don't process them twice. SmallSet<unsigned, 2> Seen; // Go through all equivalence classes, get the the "pointer check groups" // and add them to the overall solution. We use the order in which accesses // appear in 'Pointers' to enforce determinism. for (unsigned I = 0; I < Pointers.size(); ++I) { // We've seen this pointer before, and therefore already processed // its equivalence class. if (Seen.count(I)) continue; MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue, Pointers[I].IsWritePtr); SmallVector<CheckingPtrGroup, 2> Groups; auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access)); // Because DepCands is constructed by visiting accesses in the order in // which they appear in alias sets (which is deterministic) and the // iteration order within an equivalence class member is only dependent on // the order in which unions and insertions are performed on the // equivalence class, the iteration order is deterministic. 
for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end(); MI != ME; ++MI) { unsigned Pointer = PositionMap[MI->getPointer()]; bool Merged = false; // Mark this pointer as seen. Seen.insert(Pointer); // Go through all the existing sets and see if we can find one // which can include this pointer. for (CheckingPtrGroup &Group : Groups) { // Don't perform more than a certain amount of comparisons. // This should limit the cost of grouping the pointers to something // reasonable. If we do end up hitting this threshold, the algorithm // will create separate groups for all remaining pointers. if (TotalComparisons > MemoryCheckMergeThreshold) break; TotalComparisons++; if (Group.addPointer(Pointer)) { Merged = true; break; } } if (!Merged) // We couldn't add this pointer to any existing set or the threshold // for the number of comparisons has been reached. Create a new group // to hold the current pointer. Groups.push_back(CheckingPtrGroup(Pointer, *this)); } // We've computed the grouped checks for this partition. // Save the results and continue with the next one. std::copy(Groups.begin(), Groups.end(), std::back_inserter(CheckingGroups)); } } bool RuntimePointerChecking::needsChecking( unsigned I, unsigned J, const SmallVectorImpl<int> *PtrPartition) const { const PointerInfo &PointerI = Pointers[I]; const PointerInfo &PointerJ = Pointers[J]; // No need to check if two readonly pointers intersect. if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr) return false; // Only need to check pointers between two different dependency sets. if (PointerI.DependencySetId == PointerJ.DependencySetId) return false; // Only need to check pointers in the same alias set. if (PointerI.AliasSetId != PointerJ.AliasSetId) return false; // If PtrPartition is set omit checks between pointers of the same partition. // Partition number -1 means that the pointer is used in multiple partitions. // In this case we can't omit the check. 
if (PtrPartition && (*PtrPartition)[I] != -1 && (*PtrPartition)[I] == (*PtrPartition)[J]) return false; return true; } void RuntimePointerChecking::print( raw_ostream &OS, unsigned Depth, const SmallVectorImpl<int> *PtrPartition) const { OS.indent(Depth) << "Run-time memory checks:\n"; unsigned N = 0; for (unsigned I = 0; I < CheckingGroups.size(); ++I) for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) if (needsChecking(CheckingGroups[I], CheckingGroups[J], PtrPartition)) { OS.indent(Depth) << "Check " << N++ << ":\n"; OS.indent(Depth + 2) << "Comparing group " << I << ":\n"; for (unsigned K = 0; K < CheckingGroups[I].Members.size(); ++K) { OS.indent(Depth + 2) << *Pointers[CheckingGroups[I].Members[K]].PointerValue << "\n"; if (PtrPartition) OS << " (Partition: " << (*PtrPartition)[CheckingGroups[I].Members[K]] << ")" << "\n"; } OS.indent(Depth + 2) << "Against group " << J << ":\n"; for (unsigned K = 0; K < CheckingGroups[J].Members.size(); ++K) { OS.indent(Depth + 2) << *Pointers[CheckingGroups[J].Members[K]].PointerValue << "\n"; if (PtrPartition) OS << " (Partition: " << (*PtrPartition)[CheckingGroups[J].Members[K]] << ")" << "\n"; } } OS.indent(Depth) << "Grouped accesses:\n"; for (unsigned I = 0; I < CheckingGroups.size(); ++I) { OS.indent(Depth + 2) << "Group " << I << ":\n"; OS.indent(Depth + 4) << "(Low: " << *CheckingGroups[I].Low << " High: " << *CheckingGroups[I].High << ")\n"; for (unsigned J = 0; J < CheckingGroups[I].Members.size(); ++J) { OS.indent(Depth + 6) << "Member: " << *Pointers[CheckingGroups[I].Members[J]].Expr << "\n"; } } } unsigned RuntimePointerChecking::getNumberOfChecks( const SmallVectorImpl<int> *PtrPartition) const { unsigned NumPartitions = CheckingGroups.size(); unsigned CheckCount = 0; for (unsigned I = 0; I < NumPartitions; ++I) for (unsigned J = I + 1; J < NumPartitions; ++J) if (needsChecking(CheckingGroups[I], CheckingGroups[J], PtrPartition)) CheckCount++; return CheckCount; } bool RuntimePointerChecking::needsAnyChecking( const SmallVectorImpl<int> *PtrPartition) const { unsigned NumPointers = Pointers.size(); for (unsigned I = 0; I < NumPointers; ++I) for (unsigned J = I + 1; J < NumPointers; ++J) if (needsChecking(I, J, PtrPartition)) return true; return false; } namespace { /// \brief Analyses memory accesses in a loop. /// /// Checks whether run time pointer checks are needed and builds sets for data /// dependence checking. class AccessAnalysis { public: /// \brief Read or write access location. typedef PointerIntPair<Value *, 1, bool> MemAccessInfo; typedef SmallPtrSet<MemAccessInfo, 8> MemAccessInfoSet; AccessAnalysis(const DataLayout &Dl, AliasAnalysis *AA, LoopInfo *LI, MemoryDepChecker::DepCandidates &DA) : DL(Dl), AST(*AA), LI(LI), DepCands(DA), IsRTCheckAnalysisNeeded(false) {} /// \brief Register a load and whether it is only read from. void addLoad(MemoryLocation &Loc, bool IsReadOnly) { Value *Ptr = const_cast<Value*>(Loc.Ptr); AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags); Accesses.insert(MemAccessInfo(Ptr, false)); if (IsReadOnly) ReadOnlyPtr.insert(Ptr); } /// \brief Register a store. void addStore(MemoryLocation &Loc) { Value *Ptr = const_cast<Value*>(Loc.Ptr); AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags); Accesses.insert(MemAccessInfo(Ptr, true)); } /// \brief Check whether we can check the pointers at runtime for /// non-intersection. /// /// Returns true if we need no check or if we do and we can generate them /// (i.e. the pointers have computable bounds). 
  bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
                       Loop *TheLoop, const ValueToValueMap &Strides,
                       bool ShouldCheckStride = false);

  /// \brief Goes over all memory accesses, checks whether a RT check is needed
  /// and builds sets of dependent accesses.
  void buildDependenceSets() {
    processMemAccesses();
  }

  /// \brief Initial processing of memory accesses determined that we need to
  /// perform dependency checking.
  ///
  /// Note that this can later be cleared if we retry memcheck analysis without
  /// dependency checking (i.e. ShouldRetryWithRuntimeCheck).
  bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }

  /// We decided that no dependence analysis would be used. Reset the state.
  void resetDepChecks(MemoryDepChecker &DepChecker) {
    CheckDeps.clear();
    DepChecker.clearInterestingDependences();
  }

  MemAccessInfoSet &getDependenciesToCheck() { return CheckDeps; }

private:
  typedef SetVector<MemAccessInfo> PtrAccessSet;

  /// \brief Go over all memory accesses and check whether runtime pointer
  /// checks are needed and build sets of dependency check candidates.
  void processMemAccesses();

  /// Set of all accesses.
  PtrAccessSet Accesses;

  const DataLayout &DL;

  /// Set of accesses that need a further dependence check.
  MemAccessInfoSet CheckDeps;

  /// Set of pointers that are read only.
  SmallPtrSet<Value*, 16> ReadOnlyPtr;

  /// An alias set tracker to partition the access set by underlying object and
  /// intrinsic property (such as TBAA metadata).
  AliasSetTracker AST;

  LoopInfo *LI;

  /// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really need
  /// a dependence check.
  MemoryDepChecker::DepCandidates &DepCands;

  /// \brief Initial processing of memory accesses determined that we may need
  /// to add memchecks. Perform the analysis to determine the necessary checks.
  ///
  /// Note that this is different from isDependencyCheckNeeded. When we retry
  /// memcheck analysis without dependency checking
  /// (i.e. ShouldRetryWithRuntimeCheck), isDependencyCheckNeeded is cleared
  /// while this remains set if we have potentially dependent accesses.
  bool IsRTCheckAnalysisNeeded;
};

} // end anonymous namespace

/// \brief Check whether a pointer can participate in a runtime bounds check.
static bool hasComputableBounds(ScalarEvolution *SE,
                                const ValueToValueMap &Strides, Value *Ptr) {
  const SCEV *PtrScev = replaceSymbolicStrideSCEV(SE, Strides, Ptr);
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (!AR)
    return false;

  return AR->isAffine();
}

bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
                                     ScalarEvolution *SE, Loop *TheLoop,
                                     const ValueToValueMap &StridesMap,
                                     bool ShouldCheckStride) {
  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRT = true;

  bool NeedRTCheck = false;
  if (!IsRTCheckAnalysisNeeded) return true;

  bool IsDepCheckNeeded = isDependencyCheckNeeded();

  // We assign a consecutive id to accesses from different alias sets.
  // Accesses between different groups don't need to be checked.
  unsigned ASId = 1;
  for (auto &AS : AST) {
    int NumReadPtrChecks = 0;
    int NumWritePtrChecks = 0;

    // We assign consecutive ids to accesses from different dependence sets.
    // Accesses within the same set don't need a runtime check.
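    // Illustrative example: for a loop that writes A[i] and B[i] through
    // pointers placed in the same dependence set and reads C[i] from a
    // different set (all in one alias set), A and B might both get dependence
    // set id 1 while C gets id 2; only the pairs whose ids differ (A-C and
    // B-C) become runtime-check candidates.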
unsigned RunningDepId = 1; DenseMap<Value *, unsigned> DepSetId; for (auto A : AS) { Value *Ptr = A.getValue(); bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true)); MemAccessInfo Access(Ptr, IsWrite); if (IsWrite) ++NumWritePtrChecks; else ++NumReadPtrChecks; if (hasComputableBounds(SE, StridesMap, Ptr) && // When we run after a failing dependency check we have to make sure // we don't have wrapping pointers. (!ShouldCheckStride || isStridedPtr(SE, Ptr, TheLoop, StridesMap) == 1)) { // The id of the dependence set. unsigned DepId; if (IsDepCheckNeeded) { Value *Leader = DepCands.getLeaderValue(Access).getPointer(); unsigned &LeaderId = DepSetId[Leader]; if (!LeaderId) LeaderId = RunningDepId++; DepId = LeaderId; } else // Each access has its own dependence set. DepId = RunningDepId++; RtCheck.insert(TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap); DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n'); } else { DEBUG(dbgs() << "LAA: Can't find bounds for ptr:" << *Ptr << '\n'); CanDoRT = false; } } // If we have at least two writes or one write and a read then we need to // check them. But there is no need to checks if there is only one // dependence set for this alias set. // // Note that this function computes CanDoRT and NeedRTCheck independently. // For example CanDoRT=false, NeedRTCheck=false means that we have a pointer // for which we couldn't find the bounds but we don't actually need to emit // any checks so it does not matter. if (!(IsDepCheckNeeded && CanDoRT && RunningDepId == 2)) NeedRTCheck |= (NumWritePtrChecks >= 2 || (NumReadPtrChecks >= 1 && NumWritePtrChecks >= 1)); ++ASId; } // If the pointers that we would use for the bounds comparison have different // address spaces, assume the values aren't directly comparable, so we can't // use them for the runtime check. We also have to assume they could // overlap. In the future there should be metadata for whether address spaces // are disjoint. unsigned NumPointers = RtCheck.Pointers.size(); for (unsigned i = 0; i < NumPointers; ++i) { for (unsigned j = i + 1; j < NumPointers; ++j) { // Only need to check pointers between two different dependency sets. if (RtCheck.Pointers[i].DependencySetId == RtCheck.Pointers[j].DependencySetId) continue; // Only need to check pointers in the same alias set. if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId) continue; Value *PtrI = RtCheck.Pointers[i].PointerValue; Value *PtrJ = RtCheck.Pointers[j].PointerValue; unsigned ASi = PtrI->getType()->getPointerAddressSpace(); unsigned ASj = PtrJ->getType()->getPointerAddressSpace(); if (ASi != ASj) { DEBUG(dbgs() << "LAA: Runtime check would require comparison between" " different address spaces\n"); return false; } } } if (NeedRTCheck && CanDoRT) RtCheck.groupChecks(DepCands, IsDepCheckNeeded); DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks(nullptr) << " pointer comparisons.\n"); RtCheck.Need = NeedRTCheck; bool CanDoRTIfNeeded = !NeedRTCheck || CanDoRT; if (!CanDoRTIfNeeded) RtCheck.reset(); return CanDoRTIfNeeded; } void AccessAnalysis::processMemAccesses() { // We process the set twice: first we process read-write pointers, last we // process read-only pointers. This allows us to skip dependence tests for // read-only pointers. DEBUG(dbgs() << "LAA: Processing memory accesses...\n"); DEBUG(dbgs() << " AST: "; AST.dump()); DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n"); DEBUG({ for (auto A : Accesses) dbgs() << "\t" << *A.getPointer() << " (" << (A.getInt() ? 
"write" : (ReadOnlyPtr.count(A.getPointer()) ? "read-only" : "read")) << ")\n"; }); // The AliasSetTracker has nicely partitioned our pointers by metadata // compatibility and potential for underlying-object overlap. As a result, we // only need to check for potential pointer dependencies within each alias // set. for (auto &AS : AST) { // Note that both the alias-set tracker and the alias sets themselves used // linked lists internally and so the iteration order here is deterministic // (matching the original instruction order within each set). bool SetHasWrite = false; // Map of pointers to last access encountered. typedef DenseMap<Value*, MemAccessInfo> UnderlyingObjToAccessMap; UnderlyingObjToAccessMap ObjToLastAccess; // Set of access to check after all writes have been processed. PtrAccessSet DeferredAccesses; // Iterate over each alias set twice, once to process read/write pointers, // and then to process read-only pointers. for (int SetIteration = 0; SetIteration < 2; ++SetIteration) { bool UseDeferred = SetIteration > 0; PtrAccessSet &S = UseDeferred ? DeferredAccesses : Accesses; for (auto AV : AS) { Value *Ptr = AV.getValue(); // For a single memory access in AliasSetTracker, Accesses may contain // both read and write, and they both need to be handled for CheckDeps. for (auto AC : S) { if (AC.getPointer() != Ptr) continue; bool IsWrite = AC.getInt(); // If we're using the deferred access set, then it contains only // reads. bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite; if (UseDeferred && !IsReadOnlyPtr) continue; // Otherwise, the pointer must be in the PtrAccessSet, either as a // read or a write. assert(((IsReadOnlyPtr && UseDeferred) || IsWrite || S.count(MemAccessInfo(Ptr, false))) && "Alias-set pointer not in the access set?"); MemAccessInfo Access(Ptr, IsWrite); DepCands.insert(Access); // Memorize read-only pointers for later processing and skip them in // the first round (they need to be checked after we have seen all // write pointers). Note: we also mark pointer that are not // consecutive as "read-only" pointers (so that we check // "a[b[i]] +="). Hence, we need the second check for "!IsWrite". if (!UseDeferred && IsReadOnlyPtr) { DeferredAccesses.insert(Access); continue; } // If this is a write - check other reads and writes for conflicts. If // this is a read only check other writes for conflicts (but only if // there is no other write to the ptr - this is an optimization to // catch "a[i] = a[i] + " without having to do a dependence check). if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) { CheckDeps.insert(Access); IsRTCheckAnalysisNeeded = true; } if (IsWrite) SetHasWrite = true; // Create sets of pointers connected by a shared alias set and // underlying object. typedef SmallVector<Value *, 16> ValueVector; ValueVector TempObjects; GetUnderlyingObjects(Ptr, TempObjects, DL, LI); DEBUG(dbgs() << "Underlying objects for pointer " << *Ptr << "\n"); for (Value *UnderlyingObj : TempObjects) { UnderlyingObjToAccessMap::iterator Prev = ObjToLastAccess.find(UnderlyingObj); if (Prev != ObjToLastAccess.end()) DepCands.unionSets(Access, Prev->second); ObjToLastAccess[UnderlyingObj] = Access; DEBUG(dbgs() << " " << *UnderlyingObj << "\n"); } } } } } } static bool isInBoundsGep(Value *Ptr) { if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) return GEP->isInBounds(); return false; } /// \brief Return true if an AddRec pointer \p Ptr is unsigned non-wrapping, /// i.e. monotonically increasing/decreasing. 
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
                           ScalarEvolution *SE, const Loop *L) {
  // FIXME: This should probably only return true for NUW.
  if (AR->getNoWrapFlags(SCEV::NoWrapMask))
    return true;

  // Scalar evolution does not propagate the non-wrapping flags to values that
  // are derived from a non-wrapping induction variable because non-wrapping
  // could be flow-sensitive.
  //
  // Look through the potentially overflowing instruction to try to prove
  // non-wrapping for the *specific* value of Ptr.

  // The arithmetic implied by an inbounds GEP can't overflow.
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP || !GEP->isInBounds())
    return false;

  // Make sure there is only one non-const index and analyze that.
  Value *NonConstIndex = nullptr;
  for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
    if (!isa<ConstantInt>(*Index)) {
      if (NonConstIndex)
        return false;
      NonConstIndex = *Index;
    }

  if (!NonConstIndex)
    // The recurrence is on the pointer, ignore for now.
    return false;

  // The index in GEP is signed. It is non-wrapping if it's derived from a NSW
  // AddRec using a NSW operation.
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
    if (OBO->hasNoSignedWrap() &&
        // Assume the other operand is constant so that the AddRec can be
        // easily found.
        isa<ConstantInt>(OBO->getOperand(1))) {
      auto *OpScev = SE->getSCEV(OBO->getOperand(0));

      if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
        return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
    }

  return false;
}

/// \brief Check whether the access through \p Ptr has a constant stride.
int llvm::isStridedPtr(ScalarEvolution *SE, Value *Ptr, const Loop *Lp,
                       const ValueToValueMap &StridesMap) {
  const Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Unexpected non-ptr");

  // Make sure that the pointer does not point to aggregate types.
  const PointerType *PtrTy = cast<PointerType>(Ty);
  if (PtrTy->getElementType()->isAggregateType()) {
    DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type"
          << *Ptr << "\n");
    return 0;
  }

  const SCEV *PtrScev = replaceSymbolicStrideSCEV(SE, StridesMap, Ptr);

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (!AR) {
    DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer "
          << *Ptr << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // The access function must stride over the innermost loop.
  if (Lp != AR->getLoop()) {
    DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
          << *Ptr << " SCEV: " << *PtrScev << "\n");
  }

  // The address calculation must not wrap. Otherwise, a dependence could be
  // inverted.
  // An inbounds getelementptr that is an AddRec with a unit stride
  // cannot wrap per definition. The unit stride requirement is checked later.
  // A getelementptr without an inbounds attribute and unit stride would have
  // to access the pointer value "0" which is undefined behavior in address
  // space 0, therefore we can also vectorize this case.
  bool IsInBoundsGEP = isInBoundsGep(Ptr);
  bool IsNoWrapAddRec = isNoWrapAddRec(Ptr, AR, SE, Lp);
  bool IsInAddressSpaceZero = PtrTy->getAddressSpace() == 0;
  if (!IsNoWrapAddRec && !IsInBoundsGEP && !IsInAddressSpaceZero) {
    DEBUG(dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
          << *Ptr << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // Check the step is constant.
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Calculate the pointer stride and check if it is constant.
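  // Worked example: for an i32 array accessed as A[2*i] the step recurrence
  // is 8 bytes and the element size is 4, so the code below computes
  // Stride = 8 / 4 = 2 with remainder 0; a step of 6 bytes would leave a
  // remainder of 2 and the access would be rejected (return 0).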
const SCEVConstant *C = dyn_cast<SCEVConstant>(Step); if (!C) { DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr << " SCEV: " << *PtrScev << "\n"); return 0; } auto &DL = Lp->getHeader()->getModule()->getDataLayout(); int64_t Size = DL.getTypeAllocSize(PtrTy->getElementType()); const APInt &APStepVal = C->getValue()->getValue(); // Huge step value - give up. if (APStepVal.getBitWidth() > 64) return 0; int64_t StepVal = APStepVal.getSExtValue(); // Strided access. int64_t Stride = StepVal / Size; int64_t Rem = StepVal % Size; if (Rem) return 0; // If the SCEV could wrap but we have an inbounds gep with a unit stride we // know we can't "wrap around the address space". In case of address space // zero we know that this won't happen without triggering undefined behavior. if (!IsNoWrapAddRec && (IsInBoundsGEP || IsInAddressSpaceZero) && Stride != 1 && Stride != -1) return 0; return Stride; } bool MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) { switch (Type) { case NoDep: case Forward: case BackwardVectorizable: return true; case Unknown: case ForwardButPreventsForwarding: case Backward: case BackwardVectorizableButPreventsForwarding: return false; } llvm_unreachable("unexpected DepType!"); } bool MemoryDepChecker::Dependence::isInterestingDependence(DepType Type) { switch (Type) { case NoDep: case Forward: return false; case BackwardVectorizable: case Unknown: case ForwardButPreventsForwarding: case Backward: case BackwardVectorizableButPreventsForwarding: return true; } llvm_unreachable("unexpected DepType!"); } bool MemoryDepChecker::Dependence::isPossiblyBackward() const { switch (Type) { case NoDep: case Forward: case ForwardButPreventsForwarding: return false; case Unknown: case BackwardVectorizable: case Backward: case BackwardVectorizableButPreventsForwarding: return true; } llvm_unreachable("unexpected DepType!"); } bool MemoryDepChecker::couldPreventStoreLoadForward(unsigned Distance, unsigned TypeByteSize) { // If loads occur at a distance that is not a multiple of a feasible vector // factor store-load forwarding does not take place. // Positive dependences might cause troubles because vectorizing them might // prevent store-load forwarding making vectorized code run a lot slower. // a[i] = a[i-3] ^ a[i-8]; // The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and // hence on your typical architecture store-load forwarding does not take // place. Vectorizing in such cases does not make sense. // Store-load forwarding distance. const unsigned NumCyclesForStoreLoadThroughMemory = 8*TypeByteSize; // Maximum vector factor. unsigned MaxVFWithoutSLForwardIssues = VectorizerParams::MaxVectorWidth * TypeByteSize; if(MaxSafeDepDistBytes < MaxVFWithoutSLForwardIssues) MaxVFWithoutSLForwardIssues = MaxSafeDepDistBytes; for (unsigned vf = 2*TypeByteSize; vf <= MaxVFWithoutSLForwardIssues; vf *= 2) { if (Distance % vf && Distance / vf < NumCyclesForStoreLoadThroughMemory) { MaxVFWithoutSLForwardIssues = (vf >>=1); break; } } if (MaxVFWithoutSLForwardIssues< 2*TypeByteSize) { DEBUG(dbgs() << "LAA: Distance " << Distance << " that could cause a store-load forwarding conflict\n"); return true; } if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes && MaxVFWithoutSLForwardIssues != VectorizerParams::MaxVectorWidth * TypeByteSize) MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues; return false; } /// \brief Check the dependence for two accesses with the same stride \p Stride. 
/// \p Distance is the positive distance and \p TypeByteSize is type size in /// bytes. /// /// \returns true if they are independent. static bool areStridedAccessesIndependent(unsigned Distance, unsigned Stride, unsigned TypeByteSize) { assert(Stride > 1 && "The stride must be greater than 1"); assert(TypeByteSize > 0 && "The type size in byte must be non-zero"); assert(Distance > 0 && "The distance must be non-zero"); // Skip if the distance is not multiple of type byte size. if (Distance % TypeByteSize) return false; unsigned ScaledDist = Distance / TypeByteSize; // No dependence if the scaled distance is not multiple of the stride. // E.g. // for (i = 0; i < 1024 ; i += 4) // A[i+2] = A[i] + 1; // // Two accesses in memory (scaled distance is 2, stride is 4): // | A[0] | | | | A[4] | | | | // | | | A[2] | | | | A[6] | | // // E.g. // for (i = 0; i < 1024 ; i += 3) // A[i+4] = A[i] + 1; // // Two accesses in memory (scaled distance is 4, stride is 3): // | A[0] | | | A[3] | | | A[6] | | | // | | | | | A[4] | | | A[7] | | return ScaledDist % Stride; } MemoryDepChecker::Dependence::DepType MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx, const MemAccessInfo &B, unsigned BIdx, const ValueToValueMap &Strides) { assert (AIdx < BIdx && "Must pass arguments in program order"); Value *APtr = A.getPointer(); Value *BPtr = B.getPointer(); bool AIsWrite = A.getInt(); bool BIsWrite = B.getInt(); // Two reads are independent. if (!AIsWrite && !BIsWrite) return Dependence::NoDep; // We cannot check pointers in different address spaces. if (APtr->getType()->getPointerAddressSpace() != BPtr->getType()->getPointerAddressSpace()) return Dependence::Unknown; const SCEV *AScev = replaceSymbolicStrideSCEV(SE, Strides, APtr); const SCEV *BScev = replaceSymbolicStrideSCEV(SE, Strides, BPtr); int StrideAPtr = isStridedPtr(SE, APtr, InnermostLoop, Strides); int StrideBPtr = isStridedPtr(SE, BPtr, InnermostLoop, Strides); const SCEV *Src = AScev; const SCEV *Sink = BScev; // If the induction step is negative we have to invert source and sink of the // dependence. if (StrideAPtr < 0) { //Src = BScev; //Sink = AScev; std::swap(APtr, BPtr); std::swap(Src, Sink); std::swap(AIsWrite, BIsWrite); std::swap(AIdx, BIdx); std::swap(StrideAPtr, StrideBPtr); } const SCEV *Dist = SE->getMinusSCEV(Sink, Src); DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink << "(Induction step: " << StrideAPtr << ")\n"); DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to " << *InstMap[BIdx] << ": " << *Dist << "\n"); // Need accesses with constant stride. We don't want to vectorize // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in // the address space. if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr){ DEBUG(dbgs() << "Pointer access with non-constant stride\n"); return Dependence::Unknown; } const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist); if (!C) { DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n"); ShouldRetryWithRuntimeCheck = true; return Dependence::Unknown; } Type *ATy = APtr->getType()->getPointerElementType(); Type *BTy = BPtr->getType()->getPointerElementType(); auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout(); unsigned TypeByteSize = DL.getTypeAllocSize(ATy); // Negative distances are not plausible dependencies. 
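  // Illustrative example: a store to A[i+1] followed in program order by a
  // load of A[i] gives a distance of minus one element. Such negative
  // distances are forward dependences, which are safe for vectorization
  // unless they defeat store-to-load forwarding or the two accesses use
  // different types; both cases are checked just below.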
const APInt &Val = C->getValue()->getValue(); if (Val.isNegative()) { bool IsTrueDataDependence = (AIsWrite && !BIsWrite); if (IsTrueDataDependence && (couldPreventStoreLoadForward(Val.abs().getZExtValue(), TypeByteSize) || ATy != BTy)) return Dependence::ForwardButPreventsForwarding; DEBUG(dbgs() << "LAA: Dependence is negative: NoDep\n"); return Dependence::Forward; } // Write to the same location with the same size. // Could be improved to assert type sizes are the same (i32 == float, etc). if (Val == 0) { if (ATy == BTy) return Dependence::NoDep; DEBUG(dbgs() << "LAA: Zero dependence difference but different types\n"); return Dependence::Unknown; } assert(Val.isStrictlyPositive() && "Expect a positive value"); if (ATy != BTy) { DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with different types\n"); return Dependence::Unknown; } unsigned Distance = (unsigned) Val.getZExtValue(); unsigned Stride = std::abs(StrideAPtr); if (Stride > 1 && areStridedAccessesIndependent(Distance, Stride, TypeByteSize)) { DEBUG(dbgs() << "LAA: Strided accesses are independent\n"); return Dependence::NoDep; } // Bail out early if passed-in parameters make vectorization not feasible. unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ? VectorizerParams::VectorizationFactor : 1); unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ? VectorizerParams::VectorizationInterleave : 1); // The minimum number of iterations for a vectorized/unrolled version. unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U); // It's not vectorizable if the distance is smaller than the minimum distance // needed for a vectroized/unrolled version. Vectorizing one iteration in // front needs TypeByteSize * Stride. Vectorizing the last iteration needs // TypeByteSize (No need to plus the last gap distance). // // E.g. Assume one char is 1 byte in memory and one int is 4 bytes. // foo(int *A) { // int *B = (int *)((char *)A + 14); // for (i = 0 ; i < 1024 ; i += 2) // B[i] = A[i] + 1; // } // // Two accesses in memory (stride is 2): // | A[0] | | A[2] | | A[4] | | A[6] | | // | B[0] | | B[2] | | B[4] | // // Distance needs for vectorizing iterations except the last iteration: // 4 * 2 * (MinNumIter - 1). Distance needs for the last iteration: 4. // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4. // // If MinNumIter is 2, it is vectorizable as the minimum distance needed is // 12, which is less than distance. // // If MinNumIter is 4 (Say if a user forces the vectorization factor to be 4), // the minimum distance needed is 28, which is greater than distance. It is // not safe to do vectorization. unsigned MinDistanceNeeded = TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize; if (MinDistanceNeeded > Distance) { DEBUG(dbgs() << "LAA: Failure because of positive distance " << Distance << '\n'); return Dependence::Backward; } // Unsafe if the minimum distance needed is greater than max safe distance. if (MinDistanceNeeded > MaxSafeDepDistBytes) { DEBUG(dbgs() << "LAA: Failure because it needs at least " << MinDistanceNeeded << " size in bytes"); return Dependence::Backward; } // Positive distance bigger than max vectorization factor. // FIXME: Should use max factor instead of max distance in bytes, which could // not handle different types. // E.g. Assume one char is 1 byte in memory and one int is 4 bytes. 
// void foo (int *A, char *B) { // for (unsigned i = 0; i < 1024; i++) { // A[i+2] = A[i] + 1; // B[i+2] = B[i] + 1; // } // } // // This case is currently unsafe according to the max safe distance. If we // analyze the two accesses on array B, the max safe dependence distance // is 2. Then we analyze the accesses on array A, the minimum distance needed // is 8, which is less than 2 and forbidden vectorization, But actually // both A and B could be vectorized by 2 iterations. MaxSafeDepDistBytes = Distance < MaxSafeDepDistBytes ? Distance : MaxSafeDepDistBytes; bool IsTrueDataDependence = (!AIsWrite && BIsWrite); if (IsTrueDataDependence && couldPreventStoreLoadForward(Distance, TypeByteSize)) return Dependence::BackwardVectorizableButPreventsForwarding; DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue() << " with max VF = " << MaxSafeDepDistBytes / (TypeByteSize * Stride) << '\n'); return Dependence::BackwardVectorizable; } bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets, MemAccessInfoSet &CheckDeps, const ValueToValueMap &Strides) { MaxSafeDepDistBytes = -1U; while (!CheckDeps.empty()) { MemAccessInfo CurAccess = *CheckDeps.begin(); // Get the relevant memory access set. EquivalenceClasses<MemAccessInfo>::iterator I = AccessSets.findValue(AccessSets.getLeaderValue(CurAccess)); // Check accesses within this set. EquivalenceClasses<MemAccessInfo>::member_iterator AI, AE; AI = AccessSets.member_begin(I), AE = AccessSets.member_end(); // Check every access pair. while (AI != AE) { CheckDeps.erase(*AI); EquivalenceClasses<MemAccessInfo>::member_iterator OI = std::next(AI); while (OI != AE) { // Check every accessing instruction pair in program order. for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(), I1E = Accesses[*AI].end(); I1 != I1E; ++I1) for (std::vector<unsigned>::iterator I2 = Accesses[*OI].begin(), I2E = Accesses[*OI].end(); I2 != I2E; ++I2) { auto A = std::make_pair(&*AI, *I1); auto B = std::make_pair(&*OI, *I2); assert(*I1 != *I2); if (*I1 > *I2) std::swap(A, B); Dependence::DepType Type = isDependent(*A.first, A.second, *B.first, B.second, Strides); SafeForVectorization &= Dependence::isSafeForVectorization(Type); // Gather dependences unless we accumulated MaxInterestingDependence // dependences. In that case return as soon as we find the first // unsafe dependence. This puts a limit on this quadratic // algorithm. 
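          // Note: while dependences are still being recorded the walk keeps
          // going even after an unsafe pair has been found, so clients get the
          // complete (capped) list; once the cap is hit the list is discarded
          // and the first unsafe result below terminates the scan immediately.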
if (RecordInterestingDependences) { if (Dependence::isInterestingDependence(Type)) InterestingDependences.push_back( Dependence(A.second, B.second, Type)); if (InterestingDependences.size() >= MaxInterestingDependence) { RecordInterestingDependences = false; InterestingDependences.clear(); DEBUG(dbgs() << "Too many dependences, stopped recording\n"); } } if (!RecordInterestingDependences && !SafeForVectorization) return false; } ++OI; } AI++; } } DEBUG(dbgs() << "Total Interesting Dependences: " << InterestingDependences.size() << "\n"); return SafeForVectorization; } SmallVector<Instruction *, 4> MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const { MemAccessInfo Access(Ptr, isWrite); auto &IndexVector = Accesses.find(Access)->second; SmallVector<Instruction *, 4> Insts; std::transform(IndexVector.begin(), IndexVector.end(), std::back_inserter(Insts), [&](unsigned Idx) { return this->InstMap[Idx]; }); return Insts; } const char *MemoryDepChecker::Dependence::DepName[] = { "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward", "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"}; void MemoryDepChecker::Dependence::print( raw_ostream &OS, unsigned Depth, const SmallVectorImpl<Instruction *> &Instrs) const { OS.indent(Depth) << DepName[Type] << ":\n"; OS.indent(Depth + 2) << *Instrs[Source] << " -> \n"; OS.indent(Depth + 2) << *Instrs[Destination] << "\n"; } bool LoopAccessInfo::canAnalyzeLoop() { // We need to have a loop header. DEBUG(dbgs() << "LAA: Found a loop: " << TheLoop->getHeader()->getName() << '\n'); // We can only analyze innermost loops. if (!TheLoop->empty()) { DEBUG(dbgs() << "LAA: loop is not the innermost loop\n"); emitAnalysis(LoopAccessReport() << "loop is not the innermost loop"); return false; } // We must have a single backedge. if (TheLoop->getNumBackEdges() != 1) { DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n"); emitAnalysis( LoopAccessReport() << "loop control flow is not understood by analyzer"); return false; } // We must have a single exiting block. if (!TheLoop->getExitingBlock()) { DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n"); emitAnalysis( LoopAccessReport() << "loop control flow is not understood by analyzer"); return false; } // We only handle bottom-tested loops, i.e. loop in which the condition is // checked at the end of each iteration. With that we can assume that all // instructions in the loop are executed the same number of times. if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) { DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n"); emitAnalysis( LoopAccessReport() << "loop control flow is not understood by analyzer"); return false; } // ScalarEvolution needs to be able to find the exit count. const SCEV *ExitCount = SE->getBackedgeTakenCount(TheLoop); if (ExitCount == SE->getCouldNotCompute()) { emitAnalysis(LoopAccessReport() << "could not determine number of loop iterations"); DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n"); return false; } return true; } void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) { typedef SmallVector<Value*, 16> ValueVector; typedef SmallPtrSet<Value*, 16> ValueSet; // Holds the Load and Store *instructions*. ValueVector Loads; ValueVector Stores; // Holds all the different accesses in the loop. 
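  // Note: the counters below feed two early exits further down: a loop with
  // no stores at all is treated as read-only and trivially safe, and a loop
  // with exactly one written pointer and no reads is treated as write-only
  // and needs neither dependence nor runtime checks.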
unsigned NumReads = 0; unsigned NumReadWrites = 0; PtrRtChecking.Pointers.clear(); PtrRtChecking.Need = false; const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel(); // For each block. for (Loop::block_iterator bb = TheLoop->block_begin(), be = TheLoop->block_end(); bb != be; ++bb) { // Scan the BB and collect legal loads and stores. for (BasicBlock::iterator it = (*bb)->begin(), e = (*bb)->end(); it != e; ++it) { // If this is a load, save it. If this instruction can read from memory // but is not a load, then we quit. Notice that we don't handle function // calls that read or write. if (it->mayReadFromMemory()) { // Many math library functions read the rounding mode. We will only // vectorize a loop if it contains known function calls that don't set // the flag. Therefore, it is safe to ignore this read from memory. CallInst *Call = dyn_cast<CallInst>(it); if (Call && getIntrinsicIDForCall(Call, TLI)) continue; // If the function has an explicit vectorized counterpart, we can safely // assume that it can be vectorized. if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() && TLI->isFunctionVectorizable(Call->getCalledFunction()->getName())) continue; LoadInst *Ld = dyn_cast<LoadInst>(it); if (!Ld || (!Ld->isSimple() && !IsAnnotatedParallel)) { emitAnalysis(LoopAccessReport(Ld) << "read with atomic ordering or volatile read"); DEBUG(dbgs() << "LAA: Found a non-simple load.\n"); CanVecMem = false; return; } NumLoads++; Loads.push_back(Ld); DepChecker.addAccess(Ld); continue; } // Save 'store' instructions. Abort if other instructions write to memory. if (it->mayWriteToMemory()) { StoreInst *St = dyn_cast<StoreInst>(it); if (!St) { emitAnalysis(LoopAccessReport(it) << "instruction cannot be vectorized"); CanVecMem = false; return; } if (!St->isSimple() && !IsAnnotatedParallel) { emitAnalysis(LoopAccessReport(St) << "write with atomic ordering or volatile write"); DEBUG(dbgs() << "LAA: Found a non-simple store.\n"); CanVecMem = false; return; } NumStores++; Stores.push_back(St); DepChecker.addAccess(St); } } // Next instr. } // Next block. // Now we have two lists that hold the loads and the stores. // Next, we find the pointers that they use. // Check if we see any stores. If there are no stores, then we don't // care if the pointers are *restrict*. if (!Stores.size()) { DEBUG(dbgs() << "LAA: Found a read-only loop!\n"); CanVecMem = true; return; } MemoryDepChecker::DepCandidates DependentAccesses; AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(), AA, LI, DependentAccesses); // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects // multiple times on the same object. If the ptr is accessed twice, once // for read and once for write, it will only appear once (on the write // list). This is okay, since we are going to check for conflicts between // writes and between reads and writes, but not between reads and reads. ValueSet Seen; ValueVector::iterator I, IE; for (I = Stores.begin(), IE = Stores.end(); I != IE; ++I) { StoreInst *ST = cast<StoreInst>(*I); Value* Ptr = ST->getPointerOperand(); // Check for store to loop invariant address. StoreToLoopInvariantAddress |= isUniform(Ptr); // If we did *not* see this pointer before, insert it to the read-write // list. At this phase it is only a 'write' list. 
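    // Illustrative example: for "A[i] = A[i] + B[i]" the pointer of A is
    // registered here once as a read-write access; the load scan below then
    // leaves it off the read-only list (its index is consecutive), while B's
    // pointer is added as a read-only access.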
if (Seen.insert(Ptr).second) { ++NumReadWrites; MemoryLocation Loc = MemoryLocation::get(ST); // The TBAA metadata could have a control dependency on the predication // condition, so we cannot rely on it when determining whether or not we // need runtime pointer checks. if (blockNeedsPredication(ST->getParent(), TheLoop, DT)) Loc.AATags.TBAA = nullptr; Accesses.addStore(Loc); } } if (IsAnnotatedParallel) { DEBUG(dbgs() << "LAA: A loop annotated parallel, ignore memory dependency " << "checks.\n"); CanVecMem = true; return; } for (I = Loads.begin(), IE = Loads.end(); I != IE; ++I) { LoadInst *LD = cast<LoadInst>(*I); Value* Ptr = LD->getPointerOperand(); // If we did *not* see this pointer before, insert it to the // read list. If we *did* see it before, then it is already in // the read-write list. This allows us to vectorize expressions // such as A[i] += x; Because the address of A[i] is a read-write // pointer. This only works if the index of A[i] is consecutive. // If the address of i is unknown (for example A[B[i]]) then we may // read a few words, modify, and write a few words, and some of the // words may be written to the same address. bool IsReadOnlyPtr = false; if (Seen.insert(Ptr).second || !isStridedPtr(SE, Ptr, TheLoop, Strides)) { ++NumReads; IsReadOnlyPtr = true; } MemoryLocation Loc = MemoryLocation::get(LD); // The TBAA metadata could have a control dependency on the predication // condition, so we cannot rely on it when determining whether or not we // need runtime pointer checks. if (blockNeedsPredication(LD->getParent(), TheLoop, DT)) Loc.AATags.TBAA = nullptr; Accesses.addLoad(Loc, IsReadOnlyPtr); } // If we write (or read-write) to a single destination and there are no // other reads in this loop then is it safe to vectorize. if (NumReadWrites == 1 && NumReads == 0) { DEBUG(dbgs() << "LAA: Found a write-only loop!\n"); CanVecMem = true; return; } // Build dependence sets and check whether we need a runtime pointer bounds // check. Accesses.buildDependenceSets(); // Find pointers with computable bounds. We are going to use this information // to place a runtime bound check. bool CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(PtrRtChecking, SE, TheLoop, Strides); if (!CanDoRTIfNeeded) { emitAnalysis(LoopAccessReport() << "cannot identify array bounds"); DEBUG(dbgs() << "LAA: We can't vectorize because we can't find " << "the array bounds.\n"); CanVecMem = false; return; } DEBUG(dbgs() << "LAA: We can perform a memory runtime check if needed.\n"); CanVecMem = true; if (Accesses.isDependencyCheckNeeded()) { DEBUG(dbgs() << "LAA: Checking memory dependencies\n"); CanVecMem = DepChecker.areDepsSafe( DependentAccesses, Accesses.getDependenciesToCheck(), Strides); MaxSafeDepDistBytes = DepChecker.getMaxSafeDepDistBytes(); if (!CanVecMem && DepChecker.shouldRetryWithRuntimeCheck()) { DEBUG(dbgs() << "LAA: Retrying with memory checks\n"); // Clear the dependency checks. We assume they are not needed. Accesses.resetDepChecks(DepChecker); PtrRtChecking.reset(); PtrRtChecking.Need = true; CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(PtrRtChecking, SE, TheLoop, Strides, true); // Check that we found the bounds for the pointer. if (!CanDoRTIfNeeded) { emitAnalysis(LoopAccessReport() << "cannot check memory dependencies at runtime"); DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n"); CanVecMem = false; return; } CanVecMem = true; } } if (CanVecMem) DEBUG(dbgs() << "LAA: No unsafe dependent memory operations in loop. We" << (PtrRtChecking.Need ? 
"" : " don't") << " need runtime memory checks.\n"); else { emitAnalysis(LoopAccessReport() << "unsafe dependent memory operations in loop"); DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n"); } } bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop, DominatorTree *DT) { assert(TheLoop->contains(BB) && "Unknown block used"); // Blocks that do not dominate the latch need predication. BasicBlock* Latch = TheLoop->getLoopLatch(); return !DT->dominates(BB, Latch); } void LoopAccessInfo::emitAnalysis(LoopAccessReport &Message) { assert(!Report && "Multiple reports generated"); Report = Message; } bool LoopAccessInfo::isUniform(Value *V) const { return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop)); } // FIXME: this function is currently a duplicate of the one in // LoopVectorize.cpp. static Instruction *getFirstInst(Instruction *FirstInst, Value *V, Instruction *Loc) { if (FirstInst) return FirstInst; if (Instruction *I = dyn_cast<Instruction>(V)) return I->getParent() == Loc->getParent() ? I : nullptr; return nullptr; } std::pair<Instruction *, Instruction *> LoopAccessInfo::addRuntimeCheck( Instruction *Loc, const SmallVectorImpl<int> *PtrPartition) const { if (!PtrRtChecking.Need) return std::make_pair(nullptr, nullptr); SmallVector<TrackingVH<Value>, 2> Starts; SmallVector<TrackingVH<Value>, 2> Ends; LLVMContext &Ctx = Loc->getContext(); SCEVExpander Exp(*SE, DL, "induction"); Instruction *FirstInst = nullptr; for (unsigned i = 0; i < PtrRtChecking.CheckingGroups.size(); ++i) { const RuntimePointerChecking::CheckingPtrGroup &CG = PtrRtChecking.CheckingGroups[i]; Value *Ptr = PtrRtChecking.Pointers[CG.Members[0]].PointerValue; const SCEV *Sc = SE->getSCEV(Ptr); if (SE->isLoopInvariant(Sc, TheLoop)) { DEBUG(dbgs() << "LAA: Adding RT check for a loop invariant ptr:" << *Ptr << "\n"); Starts.push_back(Ptr); Ends.push_back(Ptr); } else { unsigned AS = Ptr->getType()->getPointerAddressSpace(); // Use this type for pointer arithmetic. Type *PtrArithTy = Type::getInt8PtrTy(Ctx, AS); Value *Start = nullptr, *End = nullptr; DEBUG(dbgs() << "LAA: Adding RT check for range:\n"); Start = Exp.expandCodeFor(CG.Low, PtrArithTy, Loc); End = Exp.expandCodeFor(CG.High, PtrArithTy, Loc); DEBUG(dbgs() << "Start: " << *CG.Low << " End: " << *CG.High << "\n"); Starts.push_back(Start); Ends.push_back(End); } } IRBuilder<> ChkBuilder(Loc); // Our instructions might fold to a constant. 
Value *MemoryRuntimeCheck = nullptr; for (unsigned i = 0; i < PtrRtChecking.CheckingGroups.size(); ++i) { for (unsigned j = i + 1; j < PtrRtChecking.CheckingGroups.size(); ++j) { const RuntimePointerChecking::CheckingPtrGroup &CGI = PtrRtChecking.CheckingGroups[i]; const RuntimePointerChecking::CheckingPtrGroup &CGJ = PtrRtChecking.CheckingGroups[j]; if (!PtrRtChecking.needsChecking(CGI, CGJ, PtrPartition)) continue; unsigned AS0 = Starts[i]->getType()->getPointerAddressSpace(); unsigned AS1 = Starts[j]->getType()->getPointerAddressSpace(); assert((AS0 == Ends[j]->getType()->getPointerAddressSpace()) && (AS1 == Ends[i]->getType()->getPointerAddressSpace()) && "Trying to bounds check pointers with different address spaces"); Type *PtrArithTy0 = Type::getInt8PtrTy(Ctx, AS0); Type *PtrArithTy1 = Type::getInt8PtrTy(Ctx, AS1); Value *Start0 = ChkBuilder.CreateBitCast(Starts[i], PtrArithTy0, "bc"); Value *Start1 = ChkBuilder.CreateBitCast(Starts[j], PtrArithTy1, "bc"); Value *End0 = ChkBuilder.CreateBitCast(Ends[i], PtrArithTy1, "bc"); Value *End1 = ChkBuilder.CreateBitCast(Ends[j], PtrArithTy0, "bc"); Value *Cmp0 = ChkBuilder.CreateICmpULE(Start0, End1, "bound0"); FirstInst = getFirstInst(FirstInst, Cmp0, Loc); Value *Cmp1 = ChkBuilder.CreateICmpULE(Start1, End0, "bound1"); FirstInst = getFirstInst(FirstInst, Cmp1, Loc); Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict"); FirstInst = getFirstInst(FirstInst, IsConflict, Loc); if (MemoryRuntimeCheck) { IsConflict = ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict, "conflict.rdx"); FirstInst = getFirstInst(FirstInst, IsConflict, Loc); } MemoryRuntimeCheck = IsConflict; } } if (!MemoryRuntimeCheck) return std::make_pair(nullptr, nullptr); // We have to do this trickery because the IRBuilder might fold the check to a // constant expression in which case there is no Instruction anchored in a // the block. Instruction *Check = BinaryOperator::CreateAnd(MemoryRuntimeCheck, ConstantInt::getTrue(Ctx)); ChkBuilder.Insert(Check, "memcheck.conflict"); FirstInst = getFirstInst(FirstInst, Check, Loc); return std::make_pair(FirstInst, Check); } LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE, const DataLayout &DL, const TargetLibraryInfo *TLI, AliasAnalysis *AA, DominatorTree *DT, LoopInfo *LI, const ValueToValueMap &Strides) : PtrRtChecking(SE), DepChecker(SE, L), TheLoop(L), SE(SE), DL(DL), TLI(TLI), AA(AA), DT(DT), LI(LI), NumLoads(0), NumStores(0), MaxSafeDepDistBytes(-1U), CanVecMem(false), StoreToLoopInvariantAddress(false) { if (canAnalyzeLoop()) analyzeLoop(Strides); } void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const { if (CanVecMem) { if (PtrRtChecking.Need) OS.indent(Depth) << "Memory dependences are safe with run-time checks\n"; else OS.indent(Depth) << "Memory dependences are safe\n"; } if (Report) OS.indent(Depth) << "Report: " << Report->str() << "\n"; if (auto *InterestingDependences = DepChecker.getInterestingDependences()) { OS.indent(Depth) << "Interesting Dependences:\n"; for (auto &Dep : *InterestingDependences) { Dep.print(OS, Depth + 2, DepChecker.getMemoryInstructions()); OS << "\n"; } } else OS.indent(Depth) << "Too many interesting dependences, not recorded\n"; // List the pair of accesses need run-time checks to prove independence. PtrRtChecking.print(OS, Depth); OS << "\n"; OS.indent(Depth) << "Store to invariant address was " << (StoreToLoopInvariantAddress ? 
"" : "not ") << "found in loop.\n"; } const LoopAccessInfo & LoopAccessAnalysis::getInfo(Loop *L, const ValueToValueMap &Strides) { auto &LAI = LoopAccessInfoMap[L]; #ifndef NDEBUG assert((!LAI || LAI->NumSymbolicStrides == Strides.size()) && "Symbolic strides changed for loop"); #endif if (!LAI) { const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); LAI = llvm::make_unique<LoopAccessInfo>(L, SE, DL, TLI, AA, DT, LI, Strides); #ifndef NDEBUG LAI->NumSymbolicStrides = Strides.size(); #endif } return *LAI.get(); } void LoopAccessAnalysis::print(raw_ostream &OS, const Module *M) const { LoopAccessAnalysis &LAA = *const_cast<LoopAccessAnalysis *>(this); ValueToValueMap NoSymbolicStrides; for (Loop *TopLevelLoop : *LI) for (Loop *L : depth_first(TopLevelLoop)) { OS.indent(2) << L->getHeader()->getName() << ":\n"; auto &LAI = LAA.getInfo(L, NoSymbolicStrides); LAI.print(OS, 4); } } bool LoopAccessAnalysis::runOnFunction(Function &F) { SE = &getAnalysis<ScalarEvolution>(); auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); TLI = TLIP ? &TLIP->getTLI() : nullptr; AA = &getAnalysis<AliasAnalysis>(); DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); return false; } void LoopAccessAnalysis::getAnalysisUsage(AnalysisUsage &AU) const { AU.addRequired<ScalarEvolution>(); AU.addRequired<AliasAnalysis>(); AU.addRequired<DominatorTreeWrapperPass>(); AU.addRequired<LoopInfoWrapperPass>(); AU.setPreservesAll(); } char LoopAccessAnalysis::ID = 0; static const char laa_name[] = "Loop Access Analysis"; #define LAA_NAME "loop-accesses" INITIALIZE_PASS_BEGIN(LoopAccessAnalysis, LAA_NAME, laa_name, false, true) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_END(LoopAccessAnalysis, LAA_NAME, laa_name, false, true) namespace llvm { Pass *createLAAPass() { return new LoopAccessAnalysis(); } }
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/Lint.cpp
//===-- Lint.cpp - Check for common errors in LLVM IR ---------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass statically checks for common and easily-identified constructs // which produce undefined or likely unintended behavior in LLVM IR. // // It is not a guarantee of correctness, in two ways. First, it isn't // comprehensive. There are checks which could be done statically which are // not yet implemented. Some of these are indicated by TODO comments, but // those aren't comprehensive either. Second, many conditions cannot be // checked statically. This pass does no dynamic instrumentation, so it // can't check for all possible problems. // // Another limitation is that it assumes all code will be executed. A store // through a null pointer in a basic block which is never reached is harmless, // but this pass will warn about it anyway. This is the main reason why most // of these checks live here instead of in the Verifier pass. // // Optimization passes may make conditions that this pass checks for more or // less obvious. If an optimization pass appears to be introducing a warning, // it may be that the optimization pass is merely exposing an existing // condition in the code. // // This code may be run before instcombine. In many cases, instcombine checks // for the same kinds of things and turns instructions with undefined behavior // into unreachable (or equivalent). Because of this, this pass makes some // effort to look through bitcasts and so on. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/Lint.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallSet.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/ConstantFolding.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/Loads.h" #include "llvm/Analysis/Passes.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" #include "llvm/IR/InstVisitor.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/LegacyPassManager.h" #include "llvm/Pass.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; namespace { namespace MemRef { static const unsigned Read = 1; static const unsigned Write = 2; static const unsigned Callee = 4; static const unsigned Branchee = 8; } class Lint : public FunctionPass, public InstVisitor<Lint> { friend class InstVisitor<Lint>; void visitFunction(Function &F); void visitCallSite(CallSite CS); void visitMemoryReference(Instruction &I, Value *Ptr, uint64_t Size, unsigned Align, Type *Ty, unsigned Flags); void visitEHBeginCatch(IntrinsicInst *II); void visitEHEndCatch(IntrinsicInst *II); void visitCallInst(CallInst &I); void visitInvokeInst(InvokeInst &I); void visitReturnInst(ReturnInst &I); void visitLoadInst(LoadInst &I); void visitStoreInst(StoreInst &I); void visitXor(BinaryOperator &I); void visitSub(BinaryOperator &I); void visitLShr(BinaryOperator &I); void visitAShr(BinaryOperator &I); void visitShl(BinaryOperator &I); void visitSDiv(BinaryOperator &I); void visitUDiv(BinaryOperator &I); void visitSRem(BinaryOperator &I); void 
visitURem(BinaryOperator &I); void visitAllocaInst(AllocaInst &I); void visitVAArgInst(VAArgInst &I); void visitIndirectBrInst(IndirectBrInst &I); void visitExtractElementInst(ExtractElementInst &I); void visitInsertElementInst(InsertElementInst &I); void visitUnreachableInst(UnreachableInst &I); Value *findValue(Value *V, const DataLayout &DL, bool OffsetOk) const; Value *findValueImpl(Value *V, const DataLayout &DL, bool OffsetOk, SmallPtrSetImpl<Value *> &Visited) const; public: Module *Mod; AliasAnalysis *AA; AssumptionCache *AC; DominatorTree *DT; TargetLibraryInfo *TLI; std::string Messages; raw_string_ostream MessagesStr; static char ID; // Pass identification, replacement for typeid Lint() : FunctionPass(ID), MessagesStr(Messages) { initializeLintPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function &F) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesAll(); AU.addRequired<AliasAnalysis>(); AU.addRequired<AssumptionCacheTracker>(); AU.addRequired<TargetLibraryInfoWrapperPass>(); AU.addRequired<DominatorTreeWrapperPass>(); } void print(raw_ostream &O, const Module *M) const override {} void WriteValues(ArrayRef<const Value *> Vs) { for (const Value *V : Vs) { if (!V) continue; if (isa<Instruction>(V)) { MessagesStr << *V << '\n'; } else { V->printAsOperand(MessagesStr, true, Mod); MessagesStr << '\n'; } } } /// \brief A check failed, so printout out the condition and the message. /// /// This provides a nice place to put a breakpoint if you want to see why /// something is not correct. void CheckFailed(const Twine &Message) { MessagesStr << Message << '\n'; } /// \brief A check failed (with values to print). /// /// This calls the Message-only version so that the above is easier to set /// a breakpoint on. template <typename T1, typename... Ts> void CheckFailed(const Twine &Message, const T1 &V1, const Ts &...Vs) { CheckFailed(Message); WriteValues({V1, Vs...}); } }; } char Lint::ID = 0; INITIALIZE_PASS_BEGIN(Lint, "lint", "Statically lint-checks LLVM IR", false, true) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_END(Lint, "lint", "Statically lint-checks LLVM IR", false, true) // Assert - We know that cond should be true, if not print an error message. #define Assert(C, ...) \ do { if (!(C)) { CheckFailed(__VA_ARGS__); return; } } while (0) // Lint::run - This is the main Analysis entry point for a // function. // bool Lint::runOnFunction(Function &F) { Mod = F.getParent(); AA = &getAnalysis<AliasAnalysis>(); AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); visit(F); dbgs() << MessagesStr.str(); Messages.clear(); return false; } void Lint::visitFunction(Function &F) { // This isn't undefined behavior, it's just a little unusual, and it's a // fairly common mistake to neglect to name a function. Assert(F.hasName() || F.hasLocalLinkage(), "Unusual: Unnamed function with non-local linkage", &F); // TODO: Check for irreducible control flow. 
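  // Note: the Assert macro used throughout this pass only records a message
  // via CheckFailed and returns from the current visitor; findings accumulate
  // in MessagesStr and are printed from runOnFunction instead of stopping
  // compilation.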
} void Lint::visitCallSite(CallSite CS) { Instruction &I = *CS.getInstruction(); Value *Callee = CS.getCalledValue(); const DataLayout &DL = CS->getModule()->getDataLayout(); visitMemoryReference(I, Callee, MemoryLocation::UnknownSize, 0, nullptr, MemRef::Callee); if (Function *F = dyn_cast<Function>(findValue(Callee, DL, /*OffsetOk=*/false))) { Assert(CS.getCallingConv() == F->getCallingConv(), "Undefined behavior: Caller and callee calling convention differ", &I); FunctionType *FT = F->getFunctionType(); unsigned NumActualArgs = CS.arg_size(); Assert(FT->isVarArg() ? FT->getNumParams() <= NumActualArgs : FT->getNumParams() == NumActualArgs, "Undefined behavior: Call argument count mismatches callee " "argument count", &I); Assert(FT->getReturnType() == I.getType(), "Undefined behavior: Call return type mismatches " "callee return type", &I); // Check argument types (in case the callee was casted) and attributes. // TODO: Verify that caller and callee attributes are compatible. Function::arg_iterator PI = F->arg_begin(), PE = F->arg_end(); CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end(); for (; AI != AE; ++AI) { Value *Actual = *AI; if (PI != PE) { Argument *Formal = PI++; Assert(Formal->getType() == Actual->getType(), "Undefined behavior: Call argument type mismatches " "callee parameter type", &I); // Check that noalias arguments don't alias other arguments. This is // not fully precise because we don't know the sizes of the dereferenced // memory regions. if (Formal->hasNoAliasAttr() && Actual->getType()->isPointerTy()) for (CallSite::arg_iterator BI = CS.arg_begin(); BI != AE; ++BI) if (AI != BI && (*BI)->getType()->isPointerTy()) { AliasResult Result = AA->alias(*AI, *BI); Assert(Result != MustAlias && Result != PartialAlias, "Unusual: noalias argument aliases another argument", &I); } // Check that an sret argument points to valid memory. if (Formal->hasStructRetAttr() && Actual->getType()->isPointerTy()) { Type *Ty = cast<PointerType>(Formal->getType())->getElementType(); visitMemoryReference(I, Actual, AA->getTypeStoreSize(Ty), DL.getABITypeAlignment(Ty), Ty, MemRef::Read | MemRef::Write); } } } } if (CS.isCall() && cast<CallInst>(CS.getInstruction())->isTailCall()) for (CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end(); AI != AE; ++AI) { Value *Obj = findValue(*AI, DL, /*OffsetOk=*/true); Assert(!isa<AllocaInst>(Obj), "Undefined behavior: Call with \"tail\" keyword references " "alloca", &I); } if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) switch (II->getIntrinsicID()) { default: break; // TODO: Check more intrinsics case Intrinsic::memcpy: { MemCpyInst *MCI = cast<MemCpyInst>(&I); // TODO: If the size is known, use it. visitMemoryReference(I, MCI->getDest(), MemoryLocation::UnknownSize, MCI->getAlignment(), nullptr, MemRef::Write); visitMemoryReference(I, MCI->getSource(), MemoryLocation::UnknownSize, MCI->getAlignment(), nullptr, MemRef::Read); // Check that the memcpy arguments don't overlap. The AliasAnalysis API // isn't expressive enough for what we really want to do. Known partial // overlap is not distinguished from the case where nothing is known. 
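      // Note: Size below stays 0 unless the length operand folds to a
      // constant that fits in 32 bits, and the assertion only fires on a
      // MustAlias result, so only a provably exact overlap of source and
      // destination is reported here.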
uint64_t Size = 0; if (const ConstantInt *Len = dyn_cast<ConstantInt>(findValue(MCI->getLength(), DL, /*OffsetOk=*/false))) if (Len->getValue().isIntN(32)) Size = Len->getValue().getZExtValue(); Assert(AA->alias(MCI->getSource(), Size, MCI->getDest(), Size) != MustAlias, "Undefined behavior: memcpy source and destination overlap", &I); break; } case Intrinsic::memmove: { MemMoveInst *MMI = cast<MemMoveInst>(&I); // TODO: If the size is known, use it. visitMemoryReference(I, MMI->getDest(), MemoryLocation::UnknownSize, MMI->getAlignment(), nullptr, MemRef::Write); visitMemoryReference(I, MMI->getSource(), MemoryLocation::UnknownSize, MMI->getAlignment(), nullptr, MemRef::Read); break; } case Intrinsic::memset: { MemSetInst *MSI = cast<MemSetInst>(&I); // TODO: If the size is known, use it. visitMemoryReference(I, MSI->getDest(), MemoryLocation::UnknownSize, MSI->getAlignment(), nullptr, MemRef::Write); break; } case Intrinsic::vastart: Assert(I.getParent()->getParent()->isVarArg(), "Undefined behavior: va_start called in a non-varargs function", &I); visitMemoryReference(I, CS.getArgument(0), MemoryLocation::UnknownSize, 0, nullptr, MemRef::Read | MemRef::Write); break; case Intrinsic::vacopy: visitMemoryReference(I, CS.getArgument(0), MemoryLocation::UnknownSize, 0, nullptr, MemRef::Write); visitMemoryReference(I, CS.getArgument(1), MemoryLocation::UnknownSize, 0, nullptr, MemRef::Read); break; case Intrinsic::vaend: visitMemoryReference(I, CS.getArgument(0), MemoryLocation::UnknownSize, 0, nullptr, MemRef::Read | MemRef::Write); break; case Intrinsic::stackrestore: // Stackrestore doesn't read or write memory, but it sets the // stack pointer, which the compiler may read from or write to // at any time, so check it for both readability and writeability. visitMemoryReference(I, CS.getArgument(0), MemoryLocation::UnknownSize, 0, nullptr, MemRef::Read | MemRef::Write); break; case Intrinsic::eh_begincatch: visitEHBeginCatch(II); break; case Intrinsic::eh_endcatch: visitEHEndCatch(II); break; } } void Lint::visitCallInst(CallInst &I) { return visitCallSite(&I); } void Lint::visitInvokeInst(InvokeInst &I) { return visitCallSite(&I); } void Lint::visitReturnInst(ReturnInst &I) { Function *F = I.getParent()->getParent(); Assert(!F->doesNotReturn(), "Unusual: Return statement in function with noreturn attribute", &I); if (Value *V = I.getReturnValue()) { Value *Obj = findValue(V, F->getParent()->getDataLayout(), /*OffsetOk=*/true); Assert(!isa<AllocaInst>(Obj), "Unusual: Returning alloca value", &I); } } // TODO: Check that the reference is in bounds. // TODO: Check readnone/readonly function attributes. void Lint::visitMemoryReference(Instruction &I, Value *Ptr, uint64_t Size, unsigned Align, Type *Ty, unsigned Flags) { // If no memory is being referenced, it doesn't matter if the pointer // is valid. 
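  // Note: callers pass MemoryLocation::UnknownSize (not zero) when the extent
  // of the reference is unknown, e.g. for the memcpy operands above, so those
  // references are still checked; only a literal zero size skips the checks.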
if (Size == 0) return; Value *UnderlyingObject = findValue(Ptr, I.getModule()->getDataLayout(), /*OffsetOk=*/true); Assert(!isa<ConstantPointerNull>(UnderlyingObject), "Undefined behavior: Null pointer dereference", &I); Assert(!isa<UndefValue>(UnderlyingObject), "Undefined behavior: Undef pointer dereference", &I); Assert(!isa<ConstantInt>(UnderlyingObject) || !cast<ConstantInt>(UnderlyingObject)->isAllOnesValue(), "Unusual: All-ones pointer dereference", &I); Assert(!isa<ConstantInt>(UnderlyingObject) || !cast<ConstantInt>(UnderlyingObject)->isOne(), "Unusual: Address one pointer dereference", &I); if (Flags & MemRef::Write) { if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(UnderlyingObject)) Assert(!GV->isConstant(), "Undefined behavior: Write to read-only memory", &I); Assert(!isa<Function>(UnderlyingObject) && !isa<BlockAddress>(UnderlyingObject), "Undefined behavior: Write to text section", &I); } if (Flags & MemRef::Read) { Assert(!isa<Function>(UnderlyingObject), "Unusual: Load from function body", &I); Assert(!isa<BlockAddress>(UnderlyingObject), "Undefined behavior: Load from block address", &I); } if (Flags & MemRef::Callee) { Assert(!isa<BlockAddress>(UnderlyingObject), "Undefined behavior: Call to block address", &I); } if (Flags & MemRef::Branchee) { Assert(!isa<Constant>(UnderlyingObject) || isa<BlockAddress>(UnderlyingObject), "Undefined behavior: Branch to non-blockaddress", &I); } // Check for buffer overflows and misalignment. // Only handles memory references that read/write something simple like an // alloca instruction or a global variable. auto &DL = I.getModule()->getDataLayout(); int64_t Offset = 0; if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, DL)) { // OK, so the access is to a constant offset from Ptr. Check that Ptr is // something we can handle and if so extract the size of this base object // along with its alignment. uint64_t BaseSize = MemoryLocation::UnknownSize; unsigned BaseAlign = 0; if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) { Type *ATy = AI->getAllocatedType(); if (!AI->isArrayAllocation() && ATy->isSized()) BaseSize = DL.getTypeAllocSize(ATy); BaseAlign = AI->getAlignment(); if (BaseAlign == 0 && ATy->isSized()) BaseAlign = DL.getABITypeAlignment(ATy); } else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) { // If the global may be defined differently in another compilation unit // then don't warn about funky memory accesses. if (GV->hasDefinitiveInitializer()) { Type *GTy = GV->getType()->getElementType(); if (GTy->isSized()) BaseSize = DL.getTypeAllocSize(GTy); BaseAlign = GV->getAlignment(); if (BaseAlign == 0 && GTy->isSized()) BaseAlign = DL.getABITypeAlignment(GTy); } } // Accesses from before the start or after the end of the object are not // defined. Assert(Size == MemoryLocation::UnknownSize || BaseSize == MemoryLocation::UnknownSize || (Offset >= 0 && Offset + Size <= BaseSize), "Undefined behavior: Buffer overflow", &I); // Accesses that say that the memory is more aligned than it is are not // defined. 
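    // For example (illustrative numbers): a base object aligned to 16 bytes
    // that is accessed at offset 4 only guarantees 4-byte alignment
    // (MinAlign(16, 4) == 4), so an access claiming 8- or 16-byte alignment at
    // that address is reported below.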
if (Align == 0 && Ty && Ty->isSized()) Align = DL.getABITypeAlignment(Ty); Assert(!BaseAlign || Align <= MinAlign(BaseAlign, Offset), "Undefined behavior: Memory reference address is misaligned", &I); } } void Lint::visitLoadInst(LoadInst &I) { visitMemoryReference(I, I.getPointerOperand(), AA->getTypeStoreSize(I.getType()), I.getAlignment(), I.getType(), MemRef::Read); } void Lint::visitStoreInst(StoreInst &I) { visitMemoryReference(I, I.getPointerOperand(), AA->getTypeStoreSize(I.getOperand(0)->getType()), I.getAlignment(), I.getOperand(0)->getType(), MemRef::Write); } void Lint::visitXor(BinaryOperator &I) { Assert(!isa<UndefValue>(I.getOperand(0)) || !isa<UndefValue>(I.getOperand(1)), "Undefined result: xor(undef, undef)", &I); } void Lint::visitSub(BinaryOperator &I) { Assert(!isa<UndefValue>(I.getOperand(0)) || !isa<UndefValue>(I.getOperand(1)), "Undefined result: sub(undef, undef)", &I); } void Lint::visitLShr(BinaryOperator &I) { if (ConstantInt *CI = dyn_cast<ConstantInt>( findValue(I.getOperand(1), I.getModule()->getDataLayout(), /*OffsetOk=*/false))) Assert(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()), "Undefined result: Shift count out of range", &I); } void Lint::visitAShr(BinaryOperator &I) { if (ConstantInt *CI = dyn_cast<ConstantInt>(findValue( I.getOperand(1), I.getModule()->getDataLayout(), /*OffsetOk=*/false))) Assert(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()), "Undefined result: Shift count out of range", &I); } void Lint::visitShl(BinaryOperator &I) { if (ConstantInt *CI = dyn_cast<ConstantInt>(findValue( I.getOperand(1), I.getModule()->getDataLayout(), /*OffsetOk=*/false))) Assert(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()), "Undefined result: Shift count out of range", &I); } static bool allPredsCameFromLandingPad(BasicBlock *BB, SmallSet<BasicBlock *, 4> &VisitedBlocks) { VisitedBlocks.insert(BB); if (BB->isLandingPad()) return true; // If we find a block with no predecessors, the search failed. if (pred_empty(BB)) return false; for (BasicBlock *Pred : predecessors(BB)) { if (VisitedBlocks.count(Pred)) continue; if (!allPredsCameFromLandingPad(Pred, VisitedBlocks)) return false; } return true; } static bool allSuccessorsReachEndCatch(BasicBlock *BB, BasicBlock::iterator InstBegin, IntrinsicInst **SecondBeginCatch, SmallSet<BasicBlock *, 4> &VisitedBlocks) { VisitedBlocks.insert(BB); for (BasicBlock::iterator I = InstBegin, E = BB->end(); I != E; ++I) { IntrinsicInst *IC = dyn_cast<IntrinsicInst>(I); if (IC && IC->getIntrinsicID() == Intrinsic::eh_endcatch) return true; // If we find another begincatch while looking for an endcatch, // that's also an error. if (IC && IC->getIntrinsicID() == Intrinsic::eh_begincatch) { *SecondBeginCatch = IC; return false; } } // If we reach a block with no successors while searching, the // search has failed. if (succ_empty(BB)) return false; // Otherwise, search all of the successors. for (BasicBlock *Succ : successors(BB)) { if (VisitedBlocks.count(Succ)) continue; if (!allSuccessorsReachEndCatch(Succ, Succ->begin(), SecondBeginCatch, VisitedBlocks)) return false; } return true; } void Lint::visitEHBeginCatch(IntrinsicInst *II) { // The checks in this function make a potentially dubious assumption about // the CFG, namely that any block involved in a catch is only used for the // catch. This will very likely be true of IR generated by a front end, // but it may cease to be true, for example, if the IR is run through a // pass which combines similar blocks. 
// // In general, if we encounter a block the isn't dominated by the catch // block while we are searching the catch block's successors for a call // to end catch intrinsic, then it is possible that it will be legal for // a path through this block to never reach a call to llvm.eh.endcatch. // An analogous statement could be made about our search for a landing // pad among the catch block's predecessors. // // What is actually required is that no path is possible at runtime that // reaches a call to llvm.eh.begincatch without having previously visited // a landingpad instruction and that no path is possible at runtime that // calls llvm.eh.begincatch and does not subsequently call llvm.eh.endcatch // (mentally adjusting for the fact that in reality these calls will be // removed before code generation). // // Because this is a lint check, we take a pessimistic approach and warn if // the control flow is potentially incorrect. SmallSet<BasicBlock *, 4> VisitedBlocks; BasicBlock *CatchBB = II->getParent(); // The begin catch must occur in a landing pad block or all paths // to it must have come from a landing pad. Assert(allPredsCameFromLandingPad(CatchBB, VisitedBlocks), "llvm.eh.begincatch may be reachable without passing a landingpad", II); // Reset the visited block list. VisitedBlocks.clear(); IntrinsicInst *SecondBeginCatch = nullptr; // This has to be called before it is asserted. Otherwise, the first assert // below can never be hit. bool EndCatchFound = allSuccessorsReachEndCatch( CatchBB, std::next(static_cast<BasicBlock::iterator>(II)), &SecondBeginCatch, VisitedBlocks); Assert( SecondBeginCatch == nullptr, "llvm.eh.begincatch may be called a second time before llvm.eh.endcatch", II, SecondBeginCatch); Assert(EndCatchFound, "Some paths from llvm.eh.begincatch may not reach llvm.eh.endcatch", II); } static bool allPredCameFromBeginCatch( BasicBlock *BB, BasicBlock::reverse_iterator InstRbegin, IntrinsicInst **SecondEndCatch, SmallSet<BasicBlock *, 4> &VisitedBlocks) { VisitedBlocks.insert(BB); // Look for a begincatch in this block. for (BasicBlock::reverse_iterator RI = InstRbegin, RE = BB->rend(); RI != RE; ++RI) { IntrinsicInst *IC = dyn_cast<IntrinsicInst>(&*RI); if (IC && IC->getIntrinsicID() == Intrinsic::eh_begincatch) return true; // If we find another end catch before we find a begin catch, that's // an error. if (IC && IC->getIntrinsicID() == Intrinsic::eh_endcatch) { *SecondEndCatch = IC; return false; } // If we encounter a landingpad instruction, the search failed. if (isa<LandingPadInst>(*RI)) return false; } // If while searching we find a block with no predeccesors, // the search failed. if (pred_empty(BB)) return false; // Search any predecessors we haven't seen before. for (BasicBlock *Pred : predecessors(BB)) { if (VisitedBlocks.count(Pred)) continue; if (!allPredCameFromBeginCatch(Pred, Pred->rbegin(), SecondEndCatch, VisitedBlocks)) return false; } return true; } void Lint::visitEHEndCatch(IntrinsicInst *II) { // The check in this function makes a potentially dubious assumption about // the CFG, namely that any block involved in a catch is only used for the // catch. This will very likely be true of IR generated by a front end, // but it may cease to be true, for example, if the IR is run through a // pass which combines similar blocks. 
  //
  // In general, if we encounter a block that isn't post-dominated by the
  // end catch block while we are searching the end catch block's predecessors
  // for a call to the begin catch intrinsic, then it is possible that it will
  // be legal for a path to reach the end catch block without ever having
  // called llvm.eh.begincatch.
  //
  // What is actually required is that no path is possible at runtime that
  // reaches a call to llvm.eh.endcatch without having previously visited
  // a call to llvm.eh.begincatch (mentally adjusting for the fact that in
  // reality these calls will be removed before code generation).
  //
  // Because this is a lint check, we take a pessimistic approach and warn if
  // the control flow is potentially incorrect.

  BasicBlock *EndCatchBB = II->getParent();

  // All paths to the end catch call must pass through a begin catch call.
  // If llvm.eh.begincatch wasn't called in the current block, the static helper
  // allPredCameFromBeginCatch recursively looks for it in predecessors.
  SmallSet<BasicBlock *, 4> VisitedBlocks;
  IntrinsicInst *SecondEndCatch = nullptr;

  // This has to be called before it is asserted. Otherwise, the first assert
  // below can never be hit.
  bool BeginCatchFound =
      allPredCameFromBeginCatch(EndCatchBB, BasicBlock::reverse_iterator(II),
                                &SecondEndCatch, VisitedBlocks);

  Assert(
      SecondEndCatch == nullptr,
      "llvm.eh.endcatch may be called a second time after llvm.eh.begincatch",
      II, SecondEndCatch);
  Assert(BeginCatchFound,
         "llvm.eh.endcatch may be reachable without passing llvm.eh.begincatch",
         II);
}

static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
                   AssumptionCache *AC) {
  // Assume undef could be zero.
  if (isa<UndefValue>(V))
    return true;

  VectorType *VecTy = dyn_cast<VectorType>(V->getType());
  if (!VecTy) {
    unsigned BitWidth = V->getType()->getIntegerBitWidth();
    APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
    computeKnownBits(V, KnownZero, KnownOne, DL, 0, AC,
                     dyn_cast<Instruction>(V), DT);
    return KnownZero.isAllOnesValue();
  }

  // Per-component check doesn't work with zeroinitializer
  Constant *C = dyn_cast<Constant>(V);
  if (!C)
    return false;

  if (C->isZeroValue())
    return true;

  // For a vector, KnownZero will only be true if all values are zero, so check
  // this per component
  unsigned BitWidth = VecTy->getElementType()->getIntegerBitWidth();
  for (unsigned I = 0, N = VecTy->getNumElements(); I != N; ++I) {
    Constant *Elem = C->getAggregateElement(I);
    if (isa<UndefValue>(Elem))
      return true;

    APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
    computeKnownBits(Elem, KnownZero, KnownOne, DL);
    if (KnownZero.isAllOnesValue())
      return true;
  }

  return false;
}

void Lint::visitSDiv(BinaryOperator &I) {
  Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
         "Undefined behavior: Division by zero", &I);
}

void Lint::visitUDiv(BinaryOperator &I) {
  Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
         "Undefined behavior: Division by zero", &I);
}

void Lint::visitSRem(BinaryOperator &I) {
  Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
         "Undefined behavior: Division by zero", &I);
}

void Lint::visitURem(BinaryOperator &I) {
  Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
         "Undefined behavior: Division by zero", &I);
}

void Lint::visitAllocaInst(AllocaInst &I) {
  if (isa<ConstantInt>(I.getArraySize()))
    // This isn't undefined behavior, it's just an obvious pessimization.
Assert(&I.getParent()->getParent()->getEntryBlock() == I.getParent(), "Pessimization: Static alloca outside of entry block", &I); // TODO: Check for an unusual size (MSB set?) } void Lint::visitVAArgInst(VAArgInst &I) { visitMemoryReference(I, I.getOperand(0), MemoryLocation::UnknownSize, 0, nullptr, MemRef::Read | MemRef::Write); } void Lint::visitIndirectBrInst(IndirectBrInst &I) { visitMemoryReference(I, I.getAddress(), MemoryLocation::UnknownSize, 0, nullptr, MemRef::Branchee); Assert(I.getNumDestinations() != 0, "Undefined behavior: indirectbr with no destinations", &I); } void Lint::visitExtractElementInst(ExtractElementInst &I) { if (ConstantInt *CI = dyn_cast<ConstantInt>( findValue(I.getIndexOperand(), I.getModule()->getDataLayout(), /*OffsetOk=*/false))) Assert(CI->getValue().ult(I.getVectorOperandType()->getNumElements()), "Undefined result: extractelement index out of range", &I); } void Lint::visitInsertElementInst(InsertElementInst &I) { if (ConstantInt *CI = dyn_cast<ConstantInt>( findValue(I.getOperand(2), I.getModule()->getDataLayout(), /*OffsetOk=*/false))) Assert(CI->getValue().ult(I.getType()->getNumElements()), "Undefined result: insertelement index out of range", &I); } void Lint::visitUnreachableInst(UnreachableInst &I) { // This isn't undefined behavior, it's merely suspicious. Assert(&I == I.getParent()->begin() || std::prev(BasicBlock::iterator(&I))->mayHaveSideEffects(), "Unusual: unreachable immediately preceded by instruction without " "side effects", &I); } /// findValue - Look through bitcasts and simple memory reference patterns /// to identify an equivalent, but more informative, value. If OffsetOk /// is true, look through getelementptrs with non-zero offsets too. /// /// Most analysis passes don't require this logic, because instcombine /// will simplify most of these kinds of things away. But it's a goal of /// this Lint pass to be useful even on non-optimized IR. Value *Lint::findValue(Value *V, const DataLayout &DL, bool OffsetOk) const { SmallPtrSet<Value *, 4> Visited; return findValueImpl(V, DL, OffsetOk, Visited); } /// findValueImpl - Implementation helper for findValue. Value *Lint::findValueImpl(Value *V, const DataLayout &DL, bool OffsetOk, SmallPtrSetImpl<Value *> &Visited) const { // Detect self-referential values. if (!Visited.insert(V).second) return UndefValue::get(V->getType()); // TODO: Look through sext or zext cast, when the result is known to // be interpreted as signed or unsigned, respectively. // TODO: Look through eliminable cast pairs. // TODO: Look through calls with unique return values. // TODO: Look through vector insert/extract/shuffle. V = OffsetOk ? 
GetUnderlyingObject(V, DL) : V->stripPointerCasts(); if (LoadInst *L = dyn_cast<LoadInst>(V)) { BasicBlock::iterator BBI = L; BasicBlock *BB = L->getParent(); SmallPtrSet<BasicBlock *, 4> VisitedBlocks; for (;;) { if (!VisitedBlocks.insert(BB).second) break; if (Value *U = FindAvailableLoadedValue(L->getPointerOperand(), BB, BBI, 6, AA)) return findValueImpl(U, DL, OffsetOk, Visited); if (BBI != BB->begin()) break; BB = BB->getUniquePredecessor(); if (!BB) break; BBI = BB->end(); } } else if (PHINode *PN = dyn_cast<PHINode>(V)) { if (Value *W = PN->hasConstantValue()) if (W != V) return findValueImpl(W, DL, OffsetOk, Visited); } else if (CastInst *CI = dyn_cast<CastInst>(V)) { if (CI->isNoopCast(DL)) return findValueImpl(CI->getOperand(0), DL, OffsetOk, Visited); } else if (ExtractValueInst *Ex = dyn_cast<ExtractValueInst>(V)) { if (Value *W = FindInsertedValue(Ex->getAggregateOperand(), Ex->getIndices())) if (W != V) return findValueImpl(W, DL, OffsetOk, Visited); } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) { // Same as above, but for ConstantExpr instead of Instruction. if (Instruction::isCast(CE->getOpcode())) { if (CastInst::isNoopCast(Instruction::CastOps(CE->getOpcode()), CE->getOperand(0)->getType(), CE->getType(), DL.getIntPtrType(V->getType()))) return findValueImpl(CE->getOperand(0), DL, OffsetOk, Visited); } else if (CE->getOpcode() == Instruction::ExtractValue) { ArrayRef<unsigned> Indices = CE->getIndices(); if (Value *W = FindInsertedValue(CE->getOperand(0), Indices)) if (W != V) return findValueImpl(W, DL, OffsetOk, Visited); } } // As a last resort, try SimplifyInstruction or constant folding. if (Instruction *Inst = dyn_cast<Instruction>(V)) { if (Value *W = SimplifyInstruction(Inst, DL, TLI, DT, AC)) return findValueImpl(W, DL, OffsetOk, Visited); } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) { if (Value *W = ConstantFoldConstantExpression(CE, DL, TLI)) if (W != V) return findValueImpl(W, DL, OffsetOk, Visited); } return V; } //===----------------------------------------------------------------------===// // Implement the public interfaces to this file... //===----------------------------------------------------------------------===// FunctionPass *llvm::createLintPass() { return new Lint(); } /// lintFunction - Check a function for errors, printing messages on stderr. /// void llvm::lintFunction(const Function &f) { Function &F = const_cast<Function&>(f); assert(!F.isDeclaration() && "Cannot lint external functions"); legacy::FunctionPassManager FPM(F.getParent()); Lint *V = new Lint(); FPM.add(V); FPM.run(F); } /// lintModule - Check a module for errors, printing messages on stderr. /// void llvm::lintModule(const Module &M) { legacy::PassManager PM; Lint *V = new Lint(); PM.add(V); PM.run(const_cast<Module&>(M)); }
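// Usage sketch (illustrative, not part of the pass): IR along the lines of the
// snippet below, fed through lintModule() above or through a pass pipeline
// that includes createLintPass(), produces the corresponding messages on
// stderr.
//
//   define void @f() {
//     store i32 0, i32* null   ; "Undefined behavior: Null pointer dereference"
//     %q = udiv i32 1, 0       ; "Undefined behavior: Division by zero"
//     ret void
//   }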
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/BasicAliasAnalysis.cpp
//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the primary stateless implementation of the // Alias Analysis interface that implements identities (two different // globals cannot alias, etc), but does no stateful analysis. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/Passes.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/CFG.h" #include "llvm/Analysis/CaptureTracking.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/MemoryBuiltins.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" #include "llvm/IR/GetElementPtrTypeIterator.h" #include "llvm/IR/GlobalAlias.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Operator.h" #include "llvm/Pass.h" #include "llvm/Support/ErrorHandling.h" #include <algorithm> using namespace llvm; /// Cutoff after which to stop analysing a set of phi nodes potentially involved /// in a cycle. Because we are analysing 'through' phi nodes we need to be /// careful with value equivalence. We use reachability to make sure a value /// cannot be involved in a cycle. const unsigned MaxNumPhiBBsValueReachabilityCheck = 20; // The max limit of the search depth in DecomposeGEPExpression() and // GetUnderlyingObject(), both functions need to use the same search // depth otherwise the algorithm in aliasGEP will assert. static const unsigned MaxLookupSearchDepth = 6; //===----------------------------------------------------------------------===// // Useful predicates //===----------------------------------------------------------------------===// /// isNonEscapingLocalObject - Return true if the pointer is to a function-local /// object that never escapes from the function. static bool isNonEscapingLocalObject(const Value *V) { // If this is a local allocation, check to see if it escapes. if (isa<AllocaInst>(V) || isNoAliasCall(V)) // Set StoreCaptures to True so that we can assume in our callers that the // pointer is not the result of a load instruction. Currently // PointerMayBeCaptured doesn't have any special analysis for the // StoreCaptures=false case; if it did, our callers could be refined to be // more precise. return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true); // If this is an argument that corresponds to a byval or noalias argument, // then it has not escaped before entering the function. Check if it escapes // inside the function. if (const Argument *A = dyn_cast<Argument>(V)) if (A->hasByValAttr() || A->hasNoAliasAttr()) // Note even if the argument is marked nocapture we still need to check // for copies made inside the function. The nocapture attribute only // specifies that there are no copies made that outlive the function. 
return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true); return false; } /// isEscapeSource - Return true if the pointer is one which would have /// been considered an escape by isNonEscapingLocalObject. static bool isEscapeSource(const Value *V) { if (isa<CallInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V)) return true; // The load case works because isNonEscapingLocalObject considers all // stores to be escapes (it passes true for the StoreCaptures argument // to PointerMayBeCaptured). if (isa<LoadInst>(V)) return true; return false; } /// getObjectSize - Return the size of the object specified by V, or /// UnknownSize if unknown. static uint64_t getObjectSize(const Value *V, const DataLayout &DL, const TargetLibraryInfo &TLI, bool RoundToAlign = false) { uint64_t Size; if (getObjectSize(V, Size, DL, &TLI, RoundToAlign)) return Size; return MemoryLocation::UnknownSize; } /// isObjectSmallerThan - Return true if we can prove that the object specified /// by V is smaller than Size. static bool isObjectSmallerThan(const Value *V, uint64_t Size, const DataLayout &DL, const TargetLibraryInfo &TLI) { // Note that the meanings of the "object" are slightly different in the // following contexts: // c1: llvm::getObjectSize() // c2: llvm.objectsize() intrinsic // c3: isObjectSmallerThan() // c1 and c2 share the same meaning; however, the meaning of "object" in c3 // refers to the "entire object". // // Consider this example: // char *p = (char*)malloc(100) // char *q = p+80; // // In the context of c1 and c2, the "object" pointed by q refers to the // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20. // // However, in the context of c3, the "object" refers to the chunk of memory // being allocated. So, the "object" has 100 bytes, and q points to the middle // the "object". In case q is passed to isObjectSmallerThan() as the 1st // parameter, before the llvm::getObjectSize() is called to get the size of // entire object, we should: // - either rewind the pointer q to the base-address of the object in // question (in this case rewind to p), or // - just give up. It is up to caller to make sure the pointer is pointing // to the base address the object. // // We go for 2nd option for simplicity. if (!isIdentifiedObject(V)) return false; // This function needs to use the aligned object size because we allow // reads a bit past the end given sufficient alignment. uint64_t ObjectSize = getObjectSize(V, DL, TLI, /*RoundToAlign*/true); return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size; } /// isObjectSize - Return true if we can prove that the object specified /// by V has size Size. 
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL, const TargetLibraryInfo &TLI) { uint64_t ObjectSize = getObjectSize(V, DL, TLI); return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size; } //===----------------------------------------------------------------------===// // GetElementPtr Instruction Decomposition and Analysis //===----------------------------------------------------------------------===// namespace { enum ExtensionKind { EK_NotExtended, EK_SignExt, EK_ZeroExt }; struct VariableGEPIndex { const Value *V; ExtensionKind Extension; int64_t Scale; bool operator==(const VariableGEPIndex &Other) const { return V == Other.V && Extension == Other.Extension && Scale == Other.Scale; } bool operator!=(const VariableGEPIndex &Other) const { return !operator==(Other); } }; } /// GetLinearExpression - Analyze the specified value as a linear expression: /// "A*V + B", where A and B are constant integers. Return the scale and offset /// values as APInts and return V as a Value*, and return whether we looked /// through any sign or zero extends. The incoming Value is known to have /// IntegerType and it may already be sign or zero extended. /// /// Note that this looks through extends, so the high bits may not be /// represented in the result. static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset, ExtensionKind &Extension, const DataLayout &DL, unsigned Depth, AssumptionCache *AC, DominatorTree *DT) { assert(V->getType()->isIntegerTy() && "Not an integer value"); // Limit our recursion depth. if (Depth == 6) { Scale = 1; Offset = 0; return V; } if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) { if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) { switch (BOp->getOpcode()) { default: break; case Instruction::Or: // X|C == X+C if all the bits in C are unset in X. Otherwise we can't // analyze it. if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC, BOp, DT)) break; LLVM_FALLTHROUGH; // HLSL Change case Instruction::Add: V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension, DL, Depth + 1, AC, DT); Offset += RHSC->getValue(); return V; case Instruction::Mul: V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension, DL, Depth + 1, AC, DT); Offset *= RHSC->getValue(); Scale *= RHSC->getValue(); return V; case Instruction::Shl: V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension, DL, Depth + 1, AC, DT); Offset <<= RHSC->getValue().getLimitedValue(); Scale <<= RHSC->getValue().getLimitedValue(); return V; } } } // Since GEP indices are sign extended anyway, we don't care about the high // bits of a sign or zero extended value - just scales and offsets. The // extensions have to be consistent though. if ((isa<SExtInst>(V) && Extension != EK_ZeroExt) || (isa<ZExtInst>(V) && Extension != EK_SignExt)) { Value *CastOp = cast<CastInst>(V)->getOperand(0); unsigned OldWidth = Scale.getBitWidth(); unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits(); Scale = Scale.trunc(SmallWidth); Offset = Offset.trunc(SmallWidth); Extension = isa<SExtInst>(V) ? EK_SignExt : EK_ZeroExt; Value *Result = GetLinearExpression(CastOp, Scale, Offset, Extension, DL, Depth + 1, AC, DT); Scale = Scale.zext(OldWidth); Offset = Offset.zext(OldWidth); return Result; } Scale = 1; Offset = 0; return V; } /// DecomposeGEPExpression - If V is a symbolic pointer expression, decompose it /// into a base pointer with a constant offset and a number of scaled symbolic /// offsets. 
/// /// The scaled symbolic offsets (represented by pairs of a Value* and a scale in /// the VarIndices vector) are Value*'s that are known to be scaled by the /// specified amount, but which may have other unrepresented high bits. As such, /// the gep cannot necessarily be reconstructed from its decomposed form. /// /// When DataLayout is around, this function is capable of analyzing everything /// that GetUnderlyingObject can look through. To be able to do that /// GetUnderlyingObject and DecomposeGEPExpression must use the same search /// depth (MaxLookupSearchDepth). /// When DataLayout not is around, it just looks through pointer casts. /// static const Value * DecomposeGEPExpression(const Value *V, int64_t &BaseOffs, SmallVectorImpl<VariableGEPIndex> &VarIndices, bool &MaxLookupReached, const DataLayout &DL, AssumptionCache *AC, DominatorTree *DT) { // Limit recursion depth to limit compile time in crazy cases. unsigned MaxLookup = MaxLookupSearchDepth; MaxLookupReached = false; BaseOffs = 0; do { // See if this is a bitcast or GEP. const Operator *Op = dyn_cast<Operator>(V); if (!Op) { // The only non-operator case we can handle are GlobalAliases. if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { if (!GA->mayBeOverridden()) { V = GA->getAliasee(); continue; } } return V; } if (Op->getOpcode() == Instruction::BitCast || Op->getOpcode() == Instruction::AddrSpaceCast) { V = Op->getOperand(0); continue; } const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op); if (!GEPOp) { // If it's not a GEP, hand it off to SimplifyInstruction to see if it // can come up with something. This matches what GetUnderlyingObject does. if (const Instruction *I = dyn_cast<Instruction>(V)) // TODO: Get a DominatorTree and AssumptionCache and use them here // (these are both now available in this function, but this should be // updated when GetUnderlyingObject is updated). TLI should be // provided also. if (const Value *Simplified = SimplifyInstruction(const_cast<Instruction *>(I), DL)) { V = Simplified; continue; } return V; } // Don't attempt to analyze GEPs over unsized objects. if (!GEPOp->getOperand(0)->getType()->getPointerElementType()->isSized()) return V; unsigned AS = GEPOp->getPointerAddressSpace(); // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices. gep_type_iterator GTI = gep_type_begin(GEPOp); for (User::const_op_iterator I = GEPOp->op_begin()+1, E = GEPOp->op_end(); I != E; ++I) { Value *Index = *I; // Compute the (potentially symbolic) offset in bytes for this index. if (StructType *STy = dyn_cast<StructType>(*GTI++)) { // For a struct, add the member offset. unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue(); if (FieldNo == 0) continue; BaseOffs += DL.getStructLayout(STy)->getElementOffset(FieldNo); continue; } // For an array/pointer, add the element offset, explicitly scaled. if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) { if (CIdx->isZero()) continue; BaseOffs += DL.getTypeAllocSize(*GTI) * CIdx->getSExtValue(); continue; } uint64_t Scale = DL.getTypeAllocSize(*GTI); ExtensionKind Extension = EK_NotExtended; // If the integer type is smaller than the pointer size, it is implicitly // sign extended to pointer size. unsigned Width = Index->getType()->getIntegerBitWidth(); if (DL.getPointerSizeInBits(AS) > Width) Extension = EK_SignExt; // Use GetLinearExpression to decompose the index into a C1*V+C2 form. 
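      // Worked example (illustrative): for an index of the form (X + 3) * 4
      // into an array whose elements are 8 bytes wide, GetLinearExpression
      // yields Scale = 4 and Offset = 12, so the code below adds 12 * 8 = 96 to
      // BaseOffs and (on a 64-bit target) records X with a combined scale of
      // 4 * 8 = 32.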
APInt IndexScale(Width, 0), IndexOffset(Width, 0); Index = GetLinearExpression(Index, IndexScale, IndexOffset, Extension, DL, 0, AC, DT); // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale. // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale. BaseOffs += IndexOffset.getSExtValue()*Scale; Scale *= IndexScale.getSExtValue(); // If we already had an occurrence of this index variable, merge this // scale into it. For example, we want to handle: // A[x][x] -> x*16 + x*4 -> x*20 // This also ensures that 'x' only appears in the index list once. for (unsigned i = 0, e = VarIndices.size(); i != e; ++i) { if (VarIndices[i].V == Index && VarIndices[i].Extension == Extension) { Scale += VarIndices[i].Scale; VarIndices.erase(VarIndices.begin()+i); break; } } // Make sure that we have a scale that makes sense for this target's // pointer size. if (unsigned ShiftBits = 64 - DL.getPointerSizeInBits(AS)) { Scale <<= ShiftBits; Scale = (int64_t)Scale >> ShiftBits; } if (Scale) { VariableGEPIndex Entry = {Index, Extension, static_cast<int64_t>(Scale)}; VarIndices.push_back(Entry); } } // Analyze the base pointer next. V = GEPOp->getOperand(0); } while (--MaxLookup); // If the chain of expressions is too deep, just return early. MaxLookupReached = true; return V; } //===----------------------------------------------------------------------===// // BasicAliasAnalysis Pass //===----------------------------------------------------------------------===// #ifndef NDEBUG static const Function *getParent(const Value *V) { if (const Instruction *inst = dyn_cast<Instruction>(V)) return inst->getParent()->getParent(); if (const Argument *arg = dyn_cast<Argument>(V)) return arg->getParent(); return nullptr; } static bool notDifferentParent(const Value *O1, const Value *O2) { const Function *F1 = getParent(O1); const Function *F2 = getParent(O2); return !F1 || !F2 || F1 == F2; } #endif namespace { /// BasicAliasAnalysis - This is the primary alias analysis implementation. struct BasicAliasAnalysis : public ImmutablePass, public AliasAnalysis { static char ID; // Class identification, replacement for typeinfo BasicAliasAnalysis() : ImmutablePass(ID) { initializeBasicAliasAnalysisPass(*PassRegistry::getPassRegistry()); } bool doInitialization(Module &M) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<AliasAnalysis>(); AU.addRequired<AssumptionCacheTracker>(); AU.addRequired<TargetLibraryInfoWrapperPass>(); } AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) override { assert(AliasCache.empty() && "AliasCache must be cleared after use!"); assert(notDifferentParent(LocA.Ptr, LocB.Ptr) && "BasicAliasAnalysis doesn't support interprocedural queries."); AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr, LocB.Size, LocB.AATags); // AliasCache rarely has more than 1 or 2 elements, always use // shrink_and_clear so it quickly returns to the inline capacity of the // SmallDenseMap if it ever grows larger. // FIXME: This should really be shrink_to_inline_capacity_and_clear(). AliasCache.shrink_and_clear(); VisitedPhiBBs.clear(); return Alias; } ModRefResult getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) override; ModRefResult getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) override; /// pointsToConstantMemory - Chase pointers until we find a (constant /// global) or not. 
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal) override; /// Get the location associated with a pointer argument of a callsite. ModRefResult getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) override; /// getModRefBehavior - Return the behavior when calling the given /// call site. ModRefBehavior getModRefBehavior(ImmutableCallSite CS) override; /// getModRefBehavior - Return the behavior when calling the given function. /// For use when the call site is not known. ModRefBehavior getModRefBehavior(const Function *F) override; /// getAdjustedAnalysisPointer - This method is used when a pass implements /// an analysis interface through multiple inheritance. If needed, it /// should override this to adjust the this pointer as needed for the /// specified pass info. void *getAdjustedAnalysisPointer(const void *ID) override { if (ID == &AliasAnalysis::ID) return (AliasAnalysis*)this; return this; } private: // AliasCache - Track alias queries to guard against recursion. typedef std::pair<MemoryLocation, MemoryLocation> LocPair; typedef SmallDenseMap<LocPair, AliasResult, 8> AliasCacheTy; AliasCacheTy AliasCache; /// \brief Track phi nodes we have visited. When interpret "Value" pointer /// equality as value equality we need to make sure that the "Value" is not /// part of a cycle. Otherwise, two uses could come from different /// "iterations" of a cycle and see different values for the same "Value" /// pointer. /// The following example shows the problem: /// %p = phi(%alloca1, %addr2) /// %l = load %ptr /// %addr1 = gep, %alloca2, 0, %l /// %addr2 = gep %alloca2, 0, (%l + 1) /// alias(%p, %addr1) -> MayAlias ! /// store %l, ... SmallPtrSet<const BasicBlock*, 8> VisitedPhiBBs; // Visited - Track instructions visited by pointsToConstantMemory. SmallPtrSet<const Value*, 16> Visited; /// \brief Check whether two Values can be considered equivalent. /// /// In addition to pointer equivalence of \p V1 and \p V2 this checks /// whether they can not be part of a cycle in the value graph by looking at /// all visited phi nodes an making sure that the phis cannot reach the /// value. We have to do this because we are looking through phi nodes (That /// is we say noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB). bool isValueEqualInPotentialCycles(const Value *V1, const Value *V2); /// \brief Dest and Src are the variable indices from two decomposed /// GetElementPtr instructions GEP1 and GEP2 which have common base /// pointers. Subtract the GEP2 indices from GEP1 to find the symbolic /// difference between the two pointers. void GetIndexDifference(SmallVectorImpl<VariableGEPIndex> &Dest, const SmallVectorImpl<VariableGEPIndex> &Src); // aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP // instruction against another. AliasResult aliasGEP(const GEPOperator *V1, uint64_t V1Size, const AAMDNodes &V1AAInfo, const Value *V2, uint64_t V2Size, const AAMDNodes &V2AAInfo, const Value *UnderlyingV1, const Value *UnderlyingV2); // aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI // instruction against another. AliasResult aliasPHI(const PHINode *PN, uint64_t PNSize, const AAMDNodes &PNAAInfo, const Value *V2, uint64_t V2Size, const AAMDNodes &V2AAInfo); /// aliasSelect - Disambiguate a Select instruction against another value. 
AliasResult aliasSelect(const SelectInst *SI, uint64_t SISize, const AAMDNodes &SIAAInfo, const Value *V2, uint64_t V2Size, const AAMDNodes &V2AAInfo); AliasResult aliasCheck(const Value *V1, uint64_t V1Size, AAMDNodes V1AATag, const Value *V2, uint64_t V2Size, AAMDNodes V2AATag); }; } // End of anonymous namespace // Register this pass... char BasicAliasAnalysis::ID = 0; INITIALIZE_AG_PASS_BEGIN(BasicAliasAnalysis, AliasAnalysis, "basicaa", "Basic Alias Analysis (stateless AA impl)", false, true, false) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) INITIALIZE_AG_PASS_END(BasicAliasAnalysis, AliasAnalysis, "basicaa", "Basic Alias Analysis (stateless AA impl)", false, true, false) ImmutablePass *llvm::createBasicAliasAnalysisPass() { return new BasicAliasAnalysis(); } /// pointsToConstantMemory - Returns whether the given pointer value /// points to memory that is local to the function, with global constants being /// considered local to all functions. bool BasicAliasAnalysis::pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal) { assert(Visited.empty() && "Visited must be cleared after use!"); unsigned MaxLookup = 8; SmallVector<const Value *, 16> Worklist; Worklist.push_back(Loc.Ptr); do { const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), *DL); if (!Visited.insert(V).second) { Visited.clear(); return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal); } // An alloca instruction defines local memory. if (OrLocal && isa<AllocaInst>(V)) continue; // A global constant counts as local memory for our purposes. if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) { // Note: this doesn't require GV to be "ODR" because it isn't legal for a // global to be marked constant in some modules and non-constant in // others. GV may even be a declaration, not a definition. if (!GV->isConstant()) { Visited.clear(); return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal); } continue; } // If both select values point to local memory, then so does the select. if (const SelectInst *SI = dyn_cast<SelectInst>(V)) { Worklist.push_back(SI->getTrueValue()); Worklist.push_back(SI->getFalseValue()); continue; } // If all values incoming to a phi node point to local memory, then so does // the phi. if (const PHINode *PN = dyn_cast<PHINode>(V)) { // Don't bother inspecting phi nodes with many operands. if (PN->getNumIncomingValues() > MaxLookup) { Visited.clear(); return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal); } for (Value *IncValue : PN->incoming_values()) Worklist.push_back(IncValue); continue; } // Otherwise be conservative. Visited.clear(); return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal); } while (!Worklist.empty() && --MaxLookup); Visited.clear(); return Worklist.empty(); } // FIXME: This code is duplicated with MemoryLocation and should be hoisted to // some common utility location. static bool isMemsetPattern16(const Function *MS, const TargetLibraryInfo &TLI) { if (TLI.has(LibFunc::memset_pattern16) && MS->getName() == "memset_pattern16") { FunctionType *MemsetType = MS->getFunctionType(); if (!MemsetType->isVarArg() && MemsetType->getNumParams() == 3 && isa<PointerType>(MemsetType->getParamType(0)) && isa<PointerType>(MemsetType->getParamType(1)) && isa<IntegerType>(MemsetType->getParamType(2))) return true; } return false; } /// getModRefBehavior - Return the behavior when calling the given call site. 
AliasAnalysis::ModRefBehavior BasicAliasAnalysis::getModRefBehavior(ImmutableCallSite CS) { if (CS.doesNotAccessMemory()) // Can't do better than this. return DoesNotAccessMemory; ModRefBehavior Min = UnknownModRefBehavior; // If the callsite knows it only reads memory, don't return worse // than that. if (CS.onlyReadsMemory()) Min = OnlyReadsMemory; if (CS.onlyAccessesArgMemory()) Min = ModRefBehavior(Min & OnlyAccessesArgumentPointees); // The AliasAnalysis base class has some smarts, lets use them. return ModRefBehavior(AliasAnalysis::getModRefBehavior(CS) & Min); } /// getModRefBehavior - Return the behavior when calling the given function. /// For use when the call site is not known. AliasAnalysis::ModRefBehavior BasicAliasAnalysis::getModRefBehavior(const Function *F) { // If the function declares it doesn't access memory, we can't do better. if (F->doesNotAccessMemory()) return DoesNotAccessMemory; // For intrinsics, we can check the table. if (Intrinsic::ID iid = F->getIntrinsicID()) { #define GET_INTRINSIC_MODREF_BEHAVIOR #include "llvm/IR/Intrinsics.gen" #undef GET_INTRINSIC_MODREF_BEHAVIOR } ModRefBehavior Min = UnknownModRefBehavior; // If the function declares it only reads memory, go with that. if (F->onlyReadsMemory()) Min = OnlyReadsMemory; if (F->onlyAccessesArgMemory()) Min = ModRefBehavior(Min & OnlyAccessesArgumentPointees); const TargetLibraryInfo &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); if (isMemsetPattern16(F, TLI)) Min = OnlyAccessesArgumentPointees; // Otherwise be conservative. return ModRefBehavior(AliasAnalysis::getModRefBehavior(F) & Min); } AliasAnalysis::ModRefResult BasicAliasAnalysis::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) { if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) switch (II->getIntrinsicID()) { default: break; case Intrinsic::memset: case Intrinsic::memcpy: case Intrinsic::memmove: assert((ArgIdx == 0 || ArgIdx == 1) && "Invalid argument index for memory intrinsic"); return ArgIdx ? Ref : Mod; } // We can bound the aliasing properties of memset_pattern16 just as we can // for memcpy/memset. This is particularly important because the // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16 // whenever possible. if (CS.getCalledFunction() && isMemsetPattern16(CS.getCalledFunction(), *TLI)) { assert((ArgIdx == 0 || ArgIdx == 1) && "Invalid argument index for memset_pattern16"); return ArgIdx ? Ref : Mod; } // FIXME: Handle memset_pattern4 and memset_pattern8 also. return AliasAnalysis::getArgModRefInfo(CS, ArgIdx); } static bool isAssumeIntrinsic(ImmutableCallSite CS) { const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction()); if (II && II->getIntrinsicID() == Intrinsic::assume) return true; return false; } bool BasicAliasAnalysis::doInitialization(Module &M) { InitializeAliasAnalysis(this, &M.getDataLayout()); return true; } /// getModRefInfo - Check to see if the specified callsite can clobber the /// specified memory object. Since we only look at local properties of this /// function, we really can't say much about this query. We do, however, use /// simple "address taken" analysis on local objects. 
AliasAnalysis::ModRefResult BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) { assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) && "AliasAnalysis query involving multiple functions!"); const Value *Object = GetUnderlyingObject(Loc.Ptr, *DL); // If this is a tail call and Loc.Ptr points to a stack location, we know that // the tail call cannot access or modify the local stack. // We cannot exclude byval arguments here; these belong to the caller of // the current function not to the current function, and a tail callee // may reference them. if (isa<AllocaInst>(Object)) if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) if (CI->isTailCall()) return NoModRef; // If the pointer is to a locally allocated object that does not escape, // then the call can not mod/ref the pointer unless the call takes the pointer // as an argument, and itself doesn't capture it. if (!isa<Constant>(Object) && CS.getInstruction() != Object && isNonEscapingLocalObject(Object)) { bool PassedAsArg = false; unsigned ArgNo = 0; for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end(); CI != CE; ++CI, ++ArgNo) { // Only look at the no-capture or byval pointer arguments. If this // pointer were passed to arguments that were neither of these, then it // couldn't be no-capture. if (!(*CI)->getType()->isPointerTy() || (!CS.doesNotCapture(ArgNo) && !CS.isByValArgument(ArgNo))) continue; // If this is a no-capture pointer argument, see if we can tell that it // is impossible to alias the pointer we're checking. If not, we have to // assume that the call could touch the pointer, even though it doesn't // escape. if (!isNoAlias(MemoryLocation(*CI), MemoryLocation(Object))) { PassedAsArg = true; break; } } if (!PassedAsArg) return NoModRef; } // While the assume intrinsic is marked as arbitrarily writing so that // proper control dependencies will be maintained, it never aliases any // particular memory location. if (isAssumeIntrinsic(CS)) return NoModRef; // The AliasAnalysis base class has some smarts, lets use them. return AliasAnalysis::getModRefInfo(CS, Loc); } AliasAnalysis::ModRefResult BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) { // While the assume intrinsic is marked as arbitrarily writing so that // proper control dependencies will be maintained, it never aliases any // particular memory location. if (isAssumeIntrinsic(CS1) || isAssumeIntrinsic(CS2)) return NoModRef; // The AliasAnalysis base class has some smarts, lets use them. return AliasAnalysis::getModRefInfo(CS1, CS2); } /// \brief Provide ad-hoc rules to disambiguate accesses through two GEP /// operators, both having the exact same pointer operand. static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1, uint64_t V1Size, const GEPOperator *GEP2, uint64_t V2Size, const DataLayout &DL) { assert(GEP1->getPointerOperand() == GEP2->getPointerOperand() && "Expected GEPs with the same pointer operand"); // Try to determine whether GEP1 and GEP2 index through arrays, into structs, // such that the struct field accesses provably cannot alias. // We also need at least two indices (the pointer, and the struct field). if (GEP1->getNumIndices() != GEP2->getNumIndices() || GEP1->getNumIndices() < 2) return MayAlias; // If we don't know the size of the accesses through both GEPs, we can't // determine whether the struct fields accessed can't alias. 
if (V1Size == MemoryLocation::UnknownSize || V2Size == MemoryLocation::UnknownSize) return MayAlias; ConstantInt *C1 = dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1)); ConstantInt *C2 = dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1)); // If the last (struct) indices aren't constants, we can't say anything. // If they're identical, the other indices might be also be dynamically // equal, so the GEPs can alias. if (!C1 || !C2 || C1 == C2) return MayAlias; // Find the last-indexed type of the GEP, i.e., the type you'd get if // you stripped the last index. // On the way, look at each indexed type. If there's something other // than an array, different indices can lead to different final types. SmallVector<Value *, 8> IntermediateIndices; // Insert the first index; we don't need to check the type indexed // through it as it only drops the pointer indirection. assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine"); IntermediateIndices.push_back(GEP1->getOperand(1)); // Insert all the remaining indices but the last one. // Also, check that they all index through arrays. for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) { if (!isa<ArrayType>(GetElementPtrInst::getIndexedType( GEP1->getSourceElementType(), IntermediateIndices))) return MayAlias; IntermediateIndices.push_back(GEP1->getOperand(i + 1)); } StructType *LastIndexedStruct = dyn_cast<StructType>(GetElementPtrInst::getIndexedType( GEP1->getSourceElementType(), IntermediateIndices)); if (!LastIndexedStruct) return MayAlias; // We know that: // - both GEPs begin indexing from the exact same pointer; // - the last indices in both GEPs are constants, indexing into a struct; // - said indices are different, hence, the pointed-to fields are different; // - both GEPs only index through arrays prior to that. // // This lets us determine that the struct that GEP1 indexes into and the // struct that GEP2 indexes into must either precisely overlap or be // completely disjoint. Because they cannot partially overlap, indexing into // different non-overlapping fields of the struct will never alias. // Therefore, the only remaining thing needed to show that both GEPs can't // alias is that the fields are not overlapping. const StructLayout *SL = DL.getStructLayout(LastIndexedStruct); const uint64_t StructSize = SL->getSizeInBytes(); const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue()); const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue()); auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size, uint64_t V2Off, uint64_t V2Size) { return V1Off < V2Off && V1Off + V1Size <= V2Off && ((V2Off + V2Size <= StructSize) || (V2Off + V2Size - StructSize <= V1Off)); }; if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) || EltsDontOverlap(V2Off, V2Size, V1Off, V1Size)) return NoAlias; return MayAlias; } /// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction /// against another pointer. We know that V1 is a GEP, but we don't know /// anything about V2. UnderlyingV1 is GetUnderlyingObject(GEP1, DL), /// UnderlyingV2 is the same for V2. 
/// AliasResult BasicAliasAnalysis::aliasGEP( const GEPOperator *GEP1, uint64_t V1Size, const AAMDNodes &V1AAInfo, const Value *V2, uint64_t V2Size, const AAMDNodes &V2AAInfo, const Value *UnderlyingV1, const Value *UnderlyingV2) { int64_t GEP1BaseOffset; bool GEP1MaxLookupReached; SmallVector<VariableGEPIndex, 4> GEP1VariableIndices; // We have to get two AssumptionCaches here because GEP1 and V2 may be from // different functions. // FIXME: This really doesn't make any sense. We get a dominator tree below // that can only refer to a single function. But this function (aliasGEP) is // a method on an immutable pass that can be called when there *isn't* // a single function. The old pass management layer makes this "work", but // this isn't really a clean solution. AssumptionCacheTracker &ACT = getAnalysis<AssumptionCacheTracker>(); AssumptionCache *AC1 = nullptr, *AC2 = nullptr; if (auto *GEP1I = dyn_cast<Instruction>(GEP1)) AC1 = &ACT.getAssumptionCache( const_cast<Function &>(*GEP1I->getParent()->getParent())); if (auto *I2 = dyn_cast<Instruction>(V2)) AC2 = &ACT.getAssumptionCache( const_cast<Function &>(*I2->getParent()->getParent())); DominatorTreeWrapperPass *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>(); DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr; // If we have two gep instructions with must-alias or not-alias'ing base // pointers, figure out if the indexes to the GEP tell us anything about the // derived pointer. if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) { // Do the base pointers alias? AliasResult BaseAlias = aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize, AAMDNodes(), UnderlyingV2, MemoryLocation::UnknownSize, AAMDNodes()); // Check for geps of non-aliasing underlying pointers where the offsets are // identical. if ((BaseAlias == MayAlias) && V1Size == V2Size) { // Do the base pointers alias assuming type and size. AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size, V1AAInfo, UnderlyingV2, V2Size, V2AAInfo); if (PreciseBaseAlias == NoAlias) { // See if the computed offset from the common pointer tells us about the // relation of the resulting pointer. int64_t GEP2BaseOffset; bool GEP2MaxLookupReached; SmallVector<VariableGEPIndex, 4> GEP2VariableIndices; const Value *GEP2BasePtr = DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, GEP2MaxLookupReached, *DL, AC2, DT); const Value *GEP1BasePtr = DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, GEP1MaxLookupReached, *DL, AC1, DT); // DecomposeGEPExpression and GetUnderlyingObject should return the // same result except when DecomposeGEPExpression has no DataLayout. if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) { assert(!DL && "DecomposeGEPExpression and GetUnderlyingObject disagree!"); return MayAlias; } // If the max search depth is reached the result is undefined if (GEP2MaxLookupReached || GEP1MaxLookupReached) return MayAlias; // Same offsets. if (GEP1BaseOffset == GEP2BaseOffset && GEP1VariableIndices == GEP2VariableIndices) return NoAlias; GEP1VariableIndices.clear(); } } // If we get a No or May, then return it immediately, no amount of analysis // will improve this situation. if (BaseAlias != MustAlias) return BaseAlias; // Otherwise, we have a MustAlias. Since the base pointers alias each other // exactly, see if the computed offset from the common pointer tells us // about the relation of the resulting pointer. 
const Value *GEP1BasePtr = DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, GEP1MaxLookupReached, *DL, AC1, DT); int64_t GEP2BaseOffset; bool GEP2MaxLookupReached; SmallVector<VariableGEPIndex, 4> GEP2VariableIndices; const Value *GEP2BasePtr = DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, GEP2MaxLookupReached, *DL, AC2, DT); // DecomposeGEPExpression and GetUnderlyingObject should return the // same result except when DecomposeGEPExpression has no DataLayout. if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) { assert(!DL && "DecomposeGEPExpression and GetUnderlyingObject disagree!"); return MayAlias; } // If we know the two GEPs are based off of the exact same pointer (and not // just the same underlying object), see if that tells us anything about // the resulting pointers. if (DL && GEP1->getPointerOperand() == GEP2->getPointerOperand()) { AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, *DL); // If we couldn't find anything interesting, don't abandon just yet. if (R != MayAlias) return R; } // If the max search depth is reached the result is undefined if (GEP2MaxLookupReached || GEP1MaxLookupReached) return MayAlias; // Subtract the GEP2 pointer from the GEP1 pointer to find out their // symbolic difference. GEP1BaseOffset -= GEP2BaseOffset; GetIndexDifference(GEP1VariableIndices, GEP2VariableIndices); } else { // Check to see if these two pointers are related by the getelementptr // instruction. If one pointer is a GEP with a non-zero index of the other // pointer, we know they cannot alias. // If both accesses are unknown size, we can't do anything useful here. if (V1Size == MemoryLocation::UnknownSize && V2Size == MemoryLocation::UnknownSize) return MayAlias; AliasResult R = aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize, AAMDNodes(), V2, V2Size, V2AAInfo); if (R != MustAlias) // If V2 may alias GEP base pointer, conservatively returns MayAlias. // If V2 is known not to alias GEP base pointer, then the two values // cannot alias per GEP semantics: "A pointer value formed from a // getelementptr instruction is associated with the addresses associated // with the first operand of the getelementptr". return R; const Value *GEP1BasePtr = DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, GEP1MaxLookupReached, *DL, AC1, DT); // DecomposeGEPExpression and GetUnderlyingObject should return the // same result except when DecomposeGEPExpression has no DataLayout. if (GEP1BasePtr != UnderlyingV1) { assert(!DL && "DecomposeGEPExpression and GetUnderlyingObject disagree!"); return MayAlias; } // If the max search depth is reached the result is undefined if (GEP1MaxLookupReached) return MayAlias; } // In the two GEP Case, if there is no difference in the offsets of the // computed pointers, the resultant pointers are a must alias. This // hapens when we have two lexically identical GEP's (for example). // // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2 // must aliases the GEP, the end result is a must alias also. if (GEP1BaseOffset == 0 && GEP1VariableIndices.empty()) return MustAlias; // If there is a constant difference between the pointers, but the difference // is less than the size of the associated memory object, then we know // that the objects are partially overlapping. If the difference is // greater, we know they do not overlap. 
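  // For instance (illustrative sizes): a constant difference of 4 bytes against
  // a 16-byte access yields PartialAlias, since the offset pointer still lands
  // inside the other access; a difference of 20 bytes yields NoAlias.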
if (GEP1BaseOffset != 0 && GEP1VariableIndices.empty()) { if (GEP1BaseOffset >= 0) { if (V2Size != MemoryLocation::UnknownSize) { if ((uint64_t)GEP1BaseOffset < V2Size) return PartialAlias; return NoAlias; } } else { // We have the situation where: // + + // | BaseOffset | // ---------------->| // |-->V1Size |-------> V2Size // GEP1 V2 // We need to know that V2Size is not unknown, otherwise we might have // stripped a gep with negative index ('gep <ptr>, -1, ...). if (V1Size != MemoryLocation::UnknownSize && V2Size != MemoryLocation::UnknownSize) { if (-(uint64_t)GEP1BaseOffset < V1Size) return PartialAlias; return NoAlias; } } } // Try to distinguish something like &A[i][1] against &A[42][0]. // Grab the least significant bit set in any of the scales. if (!GEP1VariableIndices.empty()) { uint64_t Modulo = 0; for (unsigned i = 0, e = GEP1VariableIndices.size(); i != e; ++i) Modulo |= (uint64_t) GEP1VariableIndices[i].Scale; Modulo = Modulo ^ (Modulo & (Modulo - 1)); // We can compute the difference between the two addresses // mod Modulo. Check whether that difference guarantees that the // two locations do not alias. uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1); if (V1Size != MemoryLocation::UnknownSize && V2Size != MemoryLocation::UnknownSize && ModOffset >= V2Size && V1Size <= Modulo - ModOffset) return NoAlias; } // Statically, we can see that the base objects are the same, but the // pointers have dynamic offsets which we can't resolve. And none of our // little tricks above worked. // // TODO: Returning PartialAlias instead of MayAlias is a mild hack; the // practical effect of this is protecting TBAA in the case of dynamic // indices into arrays of unions or malloc'd memory. return PartialAlias; } static AliasResult MergeAliasResults(AliasResult A, AliasResult B) { // If the results agree, take it. if (A == B) return A; // A mix of PartialAlias and MustAlias is PartialAlias. if ((A == PartialAlias && B == MustAlias) || (B == PartialAlias && A == MustAlias)) return PartialAlias; // Otherwise, we don't know anything. return MayAlias; } /// aliasSelect - Provide a bunch of ad-hoc rules to disambiguate a Select /// instruction against another. AliasResult BasicAliasAnalysis::aliasSelect(const SelectInst *SI, uint64_t SISize, const AAMDNodes &SIAAInfo, const Value *V2, uint64_t V2Size, const AAMDNodes &V2AAInfo) { // If the values are Selects with the same condition, we can do a more precise // check: just check for aliases between the values on corresponding arms. if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2)) if (SI->getCondition() == SI2->getCondition()) { AliasResult Alias = aliasCheck(SI->getTrueValue(), SISize, SIAAInfo, SI2->getTrueValue(), V2Size, V2AAInfo); if (Alias == MayAlias) return MayAlias; AliasResult ThisAlias = aliasCheck(SI->getFalseValue(), SISize, SIAAInfo, SI2->getFalseValue(), V2Size, V2AAInfo); return MergeAliasResults(ThisAlias, Alias); } // If both arms of the Select node NoAlias or MustAlias V2, then returns // NoAlias / MustAlias. Otherwise, returns MayAlias. AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(), SISize, SIAAInfo); if (Alias == MayAlias) return MayAlias; AliasResult ThisAlias = aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo); return MergeAliasResults(ThisAlias, Alias); } // aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI instruction // against another. 
AliasResult BasicAliasAnalysis::aliasPHI(const PHINode *PN, uint64_t PNSize, const AAMDNodes &PNAAInfo, const Value *V2, uint64_t V2Size, const AAMDNodes &V2AAInfo) { // Track phi nodes we have visited. We use this information when we determine // value equivalence. VisitedPhiBBs.insert(PN->getParent()); // If the values are PHIs in the same block, we can do a more precise // as well as efficient check: just check for aliases between the values // on corresponding edges. if (const PHINode *PN2 = dyn_cast<PHINode>(V2)) if (PN2->getParent() == PN->getParent()) { LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo), MemoryLocation(V2, V2Size, V2AAInfo)); if (PN > V2) std::swap(Locs.first, Locs.second); // Analyse the PHIs' inputs under the assumption that the PHIs are // NoAlias. // If the PHIs are May/MustAlias there must be (recursively) an input // operand from outside the PHIs' cycle that is MayAlias/MustAlias or // there must be an operation on the PHIs within the PHIs' value cycle // that causes a MayAlias. // Pretend the phis do not alias. AliasResult Alias = NoAlias; assert(AliasCache.count(Locs) && "There must exist an entry for the phi node"); AliasResult OrigAliasResult = AliasCache[Locs]; AliasCache[Locs] = NoAlias; for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { AliasResult ThisAlias = aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo, PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size, V2AAInfo); Alias = MergeAliasResults(ThisAlias, Alias); if (Alias == MayAlias) break; } // Reset if speculation failed. if (Alias != NoAlias) AliasCache[Locs] = OrigAliasResult; return Alias; } SmallPtrSet<Value*, 4> UniqueSrc; SmallVector<Value*, 4> V1Srcs; for (Value *PV1 : PN->incoming_values()) { if (isa<PHINode>(PV1)) // If any of the source itself is a PHI, return MayAlias conservatively // to avoid compile time explosion. The worst possible case is if both // sides are PHI nodes. In which case, this is O(m x n) time where 'm' // and 'n' are the number of PHI sources. return MayAlias; if (UniqueSrc.insert(PV1).second) V1Srcs.push_back(PV1); } AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0], PNSize, PNAAInfo); // Early exit if the check of the first PHI source against V2 is MayAlias. // Other results are not possible. if (Alias == MayAlias) return MayAlias; // If all sources of the PHI node NoAlias or MustAlias V2, then returns // NoAlias / MustAlias. Otherwise, returns MayAlias. for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) { Value *V = V1Srcs[i]; AliasResult ThisAlias = aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo); Alias = MergeAliasResults(ThisAlias, Alias); if (Alias == MayAlias) break; } return Alias; } // aliasCheck - Provide a bunch of ad-hoc rules to disambiguate in common cases, // such as array references. // AliasResult BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size, AAMDNodes V1AAInfo, const Value *V2, uint64_t V2Size, AAMDNodes V2AAInfo) { // If either of the memory references is empty, it doesn't matter what the // pointer values are. if (V1Size == 0 || V2Size == 0) return NoAlias; // Strip off any casts if they exist. V1 = V1->stripPointerCasts(); V2 = V2->stripPointerCasts(); // If V1 or V2 is undef, the result is NoAlias because we can always pick a // value for undef that aliases nothing in the program. if (isa<UndefValue>(V1) || isa<UndefValue>(V2)) return NoAlias; // Are we checking for alias of the same value? 
// Because we look 'through' phi nodes we could look at "Value" pointers from // different iterations. We must therefore make sure that this is not the // case. The function isValueEqualInPotentialCycles ensures that this cannot // happen by looking at the visited phi nodes and making sure they cannot // reach the value. if (isValueEqualInPotentialCycles(V1, V2)) return MustAlias; if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy()) return NoAlias; // Scalars cannot alias each other // Figure out what objects these things are pointing to if we can. const Value *O1 = GetUnderlyingObject(V1, *DL, MaxLookupSearchDepth); const Value *O2 = GetUnderlyingObject(V2, *DL, MaxLookupSearchDepth); // Null values in the default address space don't point to any object, so they // don't alias any other pointer. if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1)) if (CPN->getType()->getAddressSpace() == 0) return NoAlias; if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2)) if (CPN->getType()->getAddressSpace() == 0) return NoAlias; if (O1 != O2) { // If V1/V2 point to two different objects we know that we have no alias. if (isIdentifiedObject(O1) && isIdentifiedObject(O2)) return NoAlias; // Constant pointers can't alias with non-const isIdentifiedObject objects. if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) || (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1))) return NoAlias; // Function arguments can't alias with things that are known to be // unambigously identified at the function level. if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) || (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1))) return NoAlias; // Most objects can't alias null. if ((isa<ConstantPointerNull>(O2) && isKnownNonNull(O1)) || (isa<ConstantPointerNull>(O1) && isKnownNonNull(O2))) return NoAlias; // If one pointer is the result of a call/invoke or load and the other is a // non-escaping local object within the same function, then we know the // object couldn't escape to a point where the call could return it. // // Note that if the pointers are in different functions, there are a // variety of complications. A call with a nocapture argument may still // temporary store the nocapture argument's value in a temporary memory // location if that memory location doesn't escape. Or it may pass a // nocapture value to other functions as long as they don't capture it. if (isEscapeSource(O1) && isNonEscapingLocalObject(O2)) return NoAlias; if (isEscapeSource(O2) && isNonEscapingLocalObject(O1)) return NoAlias; } // If the size of one access is larger than the entire object on the other // side, then we know such behavior is undefined and can assume no alias. if (DL) if ((V1Size != MemoryLocation::UnknownSize && isObjectSmallerThan(O2, V1Size, *DL, *TLI)) || (V2Size != MemoryLocation::UnknownSize && isObjectSmallerThan(O1, V2Size, *DL, *TLI))) return NoAlias; // Check the cache before climbing up use-def chains. This also terminates // otherwise infinitely recursive queries. LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo), MemoryLocation(V2, V2Size, V2AAInfo)); if (V1 > V2) std::swap(Locs.first, Locs.second); std::pair<AliasCacheTy::iterator, bool> Pair = AliasCache.insert(std::make_pair(Locs, MayAlias)); if (!Pair.second) return Pair.first->second; // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the // GEP can't simplify, we don't even look at the PHI cases. 
if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) { std::swap(V1, V2); std::swap(V1Size, V2Size); std::swap(O1, O2); std::swap(V1AAInfo, V2AAInfo); } if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) { AliasResult Result = aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2); if (Result != MayAlias) return AliasCache[Locs] = Result; } if (isa<PHINode>(V2) && !isa<PHINode>(V1)) { std::swap(V1, V2); std::swap(V1Size, V2Size); std::swap(V1AAInfo, V2AAInfo); } if (const PHINode *PN = dyn_cast<PHINode>(V1)) { AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo, V2, V2Size, V2AAInfo); if (Result != MayAlias) return AliasCache[Locs] = Result; } if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) { std::swap(V1, V2); std::swap(V1Size, V2Size); std::swap(V1AAInfo, V2AAInfo); } if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) { AliasResult Result = aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo); if (Result != MayAlias) return AliasCache[Locs] = Result; } // If both pointers are pointing into the same object and one of them // accesses is accessing the entire object, then the accesses must // overlap in some way. if (DL && O1 == O2) if ((V1Size != MemoryLocation::UnknownSize && isObjectSize(O1, V1Size, *DL, *TLI)) || (V2Size != MemoryLocation::UnknownSize && isObjectSize(O2, V2Size, *DL, *TLI))) return AliasCache[Locs] = PartialAlias; AliasResult Result = AliasAnalysis::alias(MemoryLocation(V1, V1Size, V1AAInfo), MemoryLocation(V2, V2Size, V2AAInfo)); return AliasCache[Locs] = Result; } bool BasicAliasAnalysis::isValueEqualInPotentialCycles(const Value *V, const Value *V2) { if (V != V2) return false; const Instruction *Inst = dyn_cast<Instruction>(V); if (!Inst) return true; if (VisitedPhiBBs.empty()) return true; if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck) return false; // Use dominance or loop info if available. DominatorTreeWrapperPass *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>(); DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr; auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>(); LoopInfo *LI = LIWP ? &LIWP->getLoopInfo() : nullptr; // Make sure that the visited phis cannot reach the Value. This ensures that // the Values cannot come from different iterations of a potential cycle the // phi nodes could be involved in. for (auto *P : VisitedPhiBBs) if (isPotentiallyReachable(P->begin(), Inst, DT, LI)) return false; return true; } /// GetIndexDifference - Dest and Src are the variable indices from two /// decomposed GetElementPtr instructions GEP1 and GEP2 which have common base /// pointers. Subtract the GEP2 indices from GEP1 to find the symbolic /// difference between the two pointers. void BasicAliasAnalysis::GetIndexDifference( SmallVectorImpl<VariableGEPIndex> &Dest, const SmallVectorImpl<VariableGEPIndex> &Src) { if (Src.empty()) return; for (unsigned i = 0, e = Src.size(); i != e; ++i) { const Value *V = Src[i].V; ExtensionKind Extension = Src[i].Extension; int64_t Scale = Src[i].Scale; // Find V in Dest. This is N^2, but pointer indices almost never have more // than a few variable indexes. for (unsigned j = 0, e = Dest.size(); j != e; ++j) { if (!isValueEqualInPotentialCycles(Dest[j].V, V) || Dest[j].Extension != Extension) continue; // If we found it, subtract off Scale V's from the entry in Dest. If it // goes to zero, remove the entry. if (Dest[j].Scale != Scale) Dest[j].Scale -= Scale; else Dest.erase(Dest.begin() + j); Scale = 0; break; } // If we didn't consume this entry, add it to the end of the Dest list. 
    if (Scale) {
      VariableGEPIndex Entry = { V, Extension, -Scale };
      Dest.push_back(Entry);
    }
  }
}
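The "least significant scale bit" trick used in aliasGEP above (to separate accesses such as &A[i][1] and &A[42][0]) can be checked in isolation. The following stand-alone sketch is illustrative only: provablyDisjointByScaleModulo and its parameters are hypothetical names, and the concrete numbers assume 4-byte ints laid out in rows of two.

#include <cassert>
#include <cstdint>

// Illustrative stand-alone model (hypothetical names, not part of the pass):
// ScaleBits is the OR of all variable-index scales; BaseOffset is the
// constant part of the decomposed GEP1 - GEP2 difference.
static bool provablyDisjointByScaleModulo(int64_t BaseOffset, uint64_t ScaleBits,
                                          uint64_t V1Size, uint64_t V2Size) {
  if (ScaleBits == 0)
    return false;
  // Lowest set bit among the scales, as in the pass above.
  uint64_t Modulo = ScaleBits & (~ScaleBits + 1);
  uint64_t ModOffset = (uint64_t)BaseOffset & (Modulo - 1);
  return ModOffset >= V2Size && V1Size <= Modulo - ModOffset;
}

int main() {
  // int A[N][2]: &A[i][1] - &A[42][0] = 8*i - 332, so BaseOffset = -332 and
  // the only variable scale is 8. Two 4-byte accesses cannot overlap.
  assert(provablyDisjointByScaleModulo(-332, /*ScaleBits=*/8,
                                       /*V1Size=*/4, /*V2Size=*/4));
  return 0;
}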
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/LibCallAliasAnalysis.cpp
//===- LibCallAliasAnalysis.cpp - Implement AliasAnalysis for libcalls ----===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the LibCallAliasAnalysis class. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/LibCallAliasAnalysis.h" #include "llvm/Analysis/LibCallSemantics.h" #include "llvm/Analysis/Passes.h" #include "llvm/IR/Function.h" #include "llvm/Pass.h" using namespace llvm; // Register this pass... char LibCallAliasAnalysis::ID = 0; INITIALIZE_AG_PASS(LibCallAliasAnalysis, AliasAnalysis, "libcall-aa", "LibCall Alias Analysis", false, true, false) FunctionPass *llvm::createLibCallAliasAnalysisPass(LibCallInfo *LCI) { return new LibCallAliasAnalysis(LCI); } LibCallAliasAnalysis::~LibCallAliasAnalysis() { delete LCI; } void LibCallAliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const { AliasAnalysis::getAnalysisUsage(AU); AU.setPreservesAll(); // Does not transform code } bool LibCallAliasAnalysis::runOnFunction(Function &F) { // set up super class InitializeAliasAnalysis(this, &F.getParent()->getDataLayout()); return false; } /// AnalyzeLibCallDetails - Given a call to a function with the specified /// LibCallFunctionInfo, see if we can improve the mod/ref footprint of the call /// vs the specified pointer/size. AliasAnalysis::ModRefResult LibCallAliasAnalysis::AnalyzeLibCallDetails(const LibCallFunctionInfo *FI, ImmutableCallSite CS, const MemoryLocation &Loc) { // If we have a function, check to see what kind of mod/ref effects it // has. Start by including any info globally known about the function. AliasAnalysis::ModRefResult MRInfo = FI->UniversalBehavior; if (MRInfo == NoModRef) return MRInfo; // If that didn't tell us that the function is 'readnone', check to see // if we have detailed info and if 'P' is any of the locations we know // about. const LibCallFunctionInfo::LocationMRInfo *Details = FI->LocationDetails; if (Details == nullptr) return MRInfo; // If the details array is of the 'DoesNot' kind, we only know something if // the pointer is a match for one of the locations in 'Details'. If we find a // match, we can prove some interactions cannot happen. // if (FI->DetailsType == LibCallFunctionInfo::DoesNot) { // Find out if the pointer refers to a known location. for (unsigned i = 0; Details[i].LocationID != ~0U; ++i) { const LibCallLocationInfo &LocInfo = LCI->getLocationInfo(Details[i].LocationID); LibCallLocationInfo::LocResult Res = LocInfo.isLocation(CS, Loc); if (Res != LibCallLocationInfo::Yes) continue; // If we find a match against a location that we 'do not' interact with, // learn this info into MRInfo. return ModRefResult(MRInfo & ~Details[i].MRInfo); } return MRInfo; } // If the details are of the 'DoesOnly' sort, we know something if the pointer // is a match for one of the locations in 'Details'. Also, if we can prove // that the pointers is *not* one of the locations in 'Details', we know that // the call is NoModRef. assert(FI->DetailsType == LibCallFunctionInfo::DoesOnly); // Find out if the pointer refers to a known location. 
bool NoneMatch = true; for (unsigned i = 0; Details[i].LocationID != ~0U; ++i) { const LibCallLocationInfo &LocInfo = LCI->getLocationInfo(Details[i].LocationID); LibCallLocationInfo::LocResult Res = LocInfo.isLocation(CS, Loc); if (Res == LibCallLocationInfo::No) continue; // If we don't know if this pointer points to the location, then we have to // assume it might alias in some case. if (Res == LibCallLocationInfo::Unknown) { NoneMatch = false; continue; } // If we know that this pointer definitely is pointing into the location, // merge in this information. return ModRefResult(MRInfo & Details[i].MRInfo); } // If we found that the pointer is guaranteed to not match any of the // locations in our 'DoesOnly' rule, then we know that the pointer must point // to some other location. Since the libcall doesn't mod/ref any other // locations, return NoModRef. if (NoneMatch) return NoModRef; // Otherwise, return any other info gained so far. return MRInfo; } // getModRefInfo - Check to see if the specified callsite can clobber the // specified memory object. // AliasAnalysis::ModRefResult LibCallAliasAnalysis::getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) { ModRefResult MRInfo = ModRef; // If this is a direct call to a function that LCI knows about, get the // information about the runtime function. if (LCI) { if (const Function *F = CS.getCalledFunction()) { if (const LibCallFunctionInfo *FI = LCI->getFunctionInfo(F)) { MRInfo = ModRefResult(MRInfo & AnalyzeLibCallDetails(FI, CS, Loc)); if (MRInfo == NoModRef) return NoModRef; } } } // The AliasAnalysis base class has some smarts, lets use them. return (ModRefResult)(MRInfo | AliasAnalysis::getModRefInfo(CS, Loc)); }
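The DoesOnly branch of AnalyzeLibCallDetails is easiest to follow with the types stripped away. The sketch below is a minimal stand-alone model, not the real LLVM interfaces: LocAnswer stands in for LibCallLocationInfo::LocResult, LocationRule for LocationMRInfo, and ModRefBits for ModRefResult.

#include <cstdio>
#include <vector>

enum ModRefBits { NoModRef = 0, Ref = 1, Mod = 2, ModRef = 3 };
enum LocAnswer { No, Unknown, Yes };

struct LocationRule {
  LocAnswer PointsAtLocation; // does the queried pointer hit this location?
  ModRefBits Effect;          // what the call does to that location
};

// 'DoesOnly' rules: the call touches *only* the listed locations.
static ModRefBits refineDoesOnly(ModRefBits Known,
                                 const std::vector<LocationRule> &Rules) {
  bool NoneMatch = true;
  for (const LocationRule &R : Rules) {
    if (R.PointsAtLocation == No)
      continue;                          // provably not this location
    if (R.PointsAtLocation == Unknown) {
      NoneMatch = false;                 // might be this location, stay conservative
      continue;
    }
    return ModRefBits(Known & R.Effect); // definitely this location
  }
  // Provably none of the listed locations: the call cannot touch the pointer.
  return NoneMatch ? NoModRef : Known;
}

int main() {
  // A call that writes only through one known location, queried with a
  // pointer that provably is not that location: the result is NoModRef.
  std::vector<LocationRule> Rules = {{No, Mod}};
  std::printf("%d\n", refineDoesOnly(ModRef, Rules)); // prints 0
  return 0;
}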
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/DxilValueCache.cpp
//===---------- DxilValueCache.cpp - Dxil Constant Value Cache ------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Utility to compute and cache constant values for instructions. // #include "dxc/DXIL/DxilConstants.h" #include "dxc/Support/Global.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/ConstantFolding.h" #include "llvm/Analysis/DxilSimplify.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/IR/CFG.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/IR/ModuleSlotTracker.h" #include "llvm/IR/Operator.h" #include "llvm/Pass.h" #include "llvm/Support/Debug.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Analysis/DxilValueCache.h" #include <unordered_map> #include <unordered_set> #define DEBUG_TYPE "dxil-value-cache" using namespace llvm; static bool IsConstantTrue(const Value *V) { if (const ConstantInt *C = dyn_cast<ConstantInt>(V)) return C->getLimitedValue() != 0; return false; } static bool IsConstantFalse(const Value *V) { if (const ConstantInt *C = dyn_cast<ConstantInt>(V)) return C->getLimitedValue() == 0; return false; } static bool IsEntryBlock(const BasicBlock *BB) { return BB == &BB->getParent()->getEntryBlock(); } void DxilValueCache::MarkUnreachable(BasicBlock *BB) { Map.Set(BB, ConstantInt::get(Type::getInt1Ty(BB->getContext()), 0)); } bool DxilValueCache::MayBranchTo(BasicBlock *A, BasicBlock *B) { TerminatorInst *Term = A->getTerminator(); if (BranchInst *Br = dyn_cast<BranchInst>(Term)) { if (Br->isUnconditional() && Br->getSuccessor(0) == B) return true; if (ConstantInt *C = dyn_cast<ConstantInt>(TryGetCachedValue(Br->getCondition()))) { unsigned SuccIndex = C->getLimitedValue() != 0 ? 
0 : 1; return Br->getSuccessor(SuccIndex) == B; } } else if (SwitchInst *Sw = dyn_cast<SwitchInst>(Term)) { if (ConstantInt *C = dyn_cast<ConstantInt>(TryGetCachedValue(Sw->getCondition()))) { for (auto Case : Sw->cases()) { if (Case.getCaseValue() == C) return Case.getCaseSuccessor() == B; } return Sw->getDefaultDest() == B; } } else if (isa<ReturnInst>(Term) || isa<UnreachableInst>(Term)) { return false; } else { // Should not see: IndirectBrInst, InvokeInst, ResumeInst DXASSERT(false, "otherwise, unexpected terminator instruction."); } return true; } bool DxilValueCache::IsUnreachable_(BasicBlock *BB) { if (Value *V = Map.Get(BB)) if (IsConstantFalse(V)) return true; return false; } Value *DxilValueCache::ProcessAndSimplify_PHI(Instruction *I, DominatorTree *DT) { PHINode *PN = cast<PHINode>(I); BasicBlock *SoleIncoming = nullptr; bool Unreachable = true; Value *Simplified = nullptr; Value *SimplifiedNotDominating = nullptr; for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) { BasicBlock *PredBB = PN->getIncomingBlock(i); if (IsUnreachable_(PredBB)) continue; Unreachable = false; if (MayBranchTo(PredBB, PN->getParent())) { if (SoleIncoming) { SoleIncoming = nullptr; break; } SoleIncoming = PredBB; } } if (Unreachable) { return UndefValue::get(I->getType()); } if (SoleIncoming) { Value *V = TryGetCachedValue(PN->getIncomingValueForBlock(SoleIncoming)); if (isa<Constant>(V)) Simplified = V; else if (Instruction *I = dyn_cast<Instruction>(V)) { // If this is an instruction, we have to make sure it // dominates this PHI. // There are several conditions that qualify: // 1. There's only one predecessor // 2. If the instruction is in the entry block, then it must dominate // 3. If we are provided with a Dominator tree, and it decides that // it dominates. if (PN->getNumIncomingValues() == 1 || IsEntryBlock(I->getParent()) || (DT && DT->dominates(I, PN))) { Simplified = I; } else { SimplifiedNotDominating = I; } } } // If we have a value but it's not dominating our PHI, see if it has a cached // value that were computed previously. if (!Simplified) { if (SimplifiedNotDominating) if (Value *CachedV = Map.Get(SimplifiedNotDominating)) Simplified = CachedV; } // If we coulnd't deduce it, run the LLVM stock simplification to see // if we could do anything. if (!Simplified) Simplified = llvm::SimplifyInstruction(I, I->getModule()->getDataLayout()); // One last step, to check if we have anything cached for whatever we // simplified to. 
if (Simplified) Simplified = TryGetCachedValue(Simplified); return Simplified; } Value *DxilValueCache::ProcessAndSimplify_Switch(Instruction *I, DominatorTree *DT) { SwitchInst *Sw = cast<SwitchInst>(I); BasicBlock *BB = Sw->getParent(); Value *Cond = TryGetCachedValue(Sw->getCondition()); if (IsUnreachable_(BB)) { for (unsigned i = 0; i < Sw->getNumSuccessors(); i++) { BasicBlock *Succ = Sw->getSuccessor(i); if (Succ->getUniquePredecessor()) MarkUnreachable(Succ); } } else if (isa<Constant>(Cond)) { BasicBlock *ConstDest = nullptr; for (auto Case : Sw->cases()) { BasicBlock *Succ = Case.getCaseSuccessor(); if (Case.getCaseValue() == Cond) { ConstDest = Succ; break; } } if (!ConstDest) { ConstDest = Sw->getDefaultDest(); } DXASSERT_NOMSG(ConstDest); if (ConstDest) { for (unsigned i = 0; i < Sw->getNumSuccessors(); i++) { BasicBlock *Succ = Sw->getSuccessor(i); if (Succ != ConstDest && Succ->getUniquePredecessor()) { MarkUnreachable(Succ); } } } } return nullptr; } Value *DxilValueCache::ProcessAndSimplify_Br(Instruction *I, DominatorTree *DT) { // The *only* reason we're paying special attention to the // branch inst, is to mark certain Basic Blocks as always // reachable or unreachable. BranchInst *Br = cast<BranchInst>(I); BasicBlock *BB = Br->getParent(); if (Br->isConditional()) { BasicBlock *TrueSucc = Br->getSuccessor(0); BasicBlock *FalseSucc = Br->getSuccessor(1); Value *Cond = TryGetCachedValue(Br->getCondition()); if (IsUnreachable_(BB)) { if (FalseSucc->getSinglePredecessor()) MarkUnreachable(FalseSucc); if (TrueSucc->getSinglePredecessor()) MarkUnreachable(TrueSucc); } else if (IsConstantTrue(Cond)) { if (FalseSucc->getSinglePredecessor()) MarkUnreachable(FalseSucc); } else if (IsConstantFalse(Cond)) { if (TrueSucc->getSinglePredecessor()) MarkUnreachable(TrueSucc); } } else { BasicBlock *Succ = Br->getSuccessor(0); if (Succ->getSinglePredecessor() && IsUnreachable_(BB)) MarkUnreachable(Succ); } return nullptr; } Value *DxilValueCache::ProcessAndSimplify_Load(Instruction *I, DominatorTree *DT) { LoadInst *LI = cast<LoadInst>(I); Value *V = TryGetCachedValue(LI->getPointerOperand()); if (Constant *ConstPtr = dyn_cast<Constant>(V)) { const DataLayout &DL = I->getModule()->getDataLayout(); return llvm::ConstantFoldLoadFromConstPtr(ConstPtr, DL); } return nullptr; } Value *DxilValueCache::SimplifyAndCacheResult(Instruction *I, DominatorTree *DT) { if (ShouldSkipCallback && ShouldSkipCallback(I)) return nullptr; const DataLayout &DL = I->getModule()->getDataLayout(); Value *Simplified = nullptr; if (Instruction::Br == I->getOpcode()) { Simplified = ProcessAndSimplify_Br(I, DT); } if (Instruction::Switch == I->getOpcode()) { Simplified = ProcessAndSimplify_Switch(I, DT); } else if (Instruction::PHI == I->getOpcode()) { Simplified = ProcessAndSimplify_PHI(I, DT); } else if (Instruction::Load == I->getOpcode()) { Simplified = ProcessAndSimplify_Load(I, DT); } else if (Instruction::GetElementPtr == I->getOpcode()) { SmallVector<Value *, 4> Ops; for (unsigned i = 0; i < I->getNumOperands(); i++) Ops.push_back(TryGetCachedValue(I->getOperand(i))); Simplified = llvm::SimplifyGEPInst(Ops, DL, nullptr, DT); } else if (Instruction::Call == I->getOpcode()) { Module *M = I->getModule(); CallInst *CI = cast<CallInst>(I); Value *Callee = CI->getCalledValue(); Function *CalledFunction = dyn_cast<Function>(Callee); if (CalledFunction && CalledFunction->getName() == hlsl::DXIL::kDxBreakFuncName) { llvm::Type *i1Ty = llvm::Type::getInt1Ty(M->getContext()); Simplified = llvm::ConstantInt::get(i1Ty, 
1); } else { SmallVector<Value *, 16> Args; for (unsigned i = 0; i < CI->getNumArgOperands(); i++) { Args.push_back(TryGetCachedValue(CI->getArgOperand(i))); } if (CalledFunction && hlsl::CanSimplify(CalledFunction)) { Simplified = hlsl::SimplifyDxilCall(CalledFunction, Args, CI, /* MayInsert */ false); } else { Simplified = llvm::SimplifyCall(Callee, Args, DL, nullptr, DT); } } } // The rest of the checks use LLVM stock simplifications else if (I->isBinaryOp()) { if (FPMathOperator *FPOp = dyn_cast<FPMathOperator>(I)) { Simplified = llvm::SimplifyFPBinOp( I->getOpcode(), TryGetCachedValue(I->getOperand(0)), TryGetCachedValue(I->getOperand(1)), FPOp->getFastMathFlags(), DL); } else { Simplified = llvm::SimplifyBinOp(I->getOpcode(), TryGetCachedValue(I->getOperand(0)), TryGetCachedValue(I->getOperand(1)), DL); } } else if (GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(I)) { SmallVector<Value *, 4> Values; for (Value *V : Gep->operand_values()) { Values.push_back(TryGetCachedValue(V)); } Simplified = llvm::SimplifyGEPInst(Values, DL, nullptr, DT, nullptr, nullptr); } else if (CmpInst *Cmp = dyn_cast<CmpInst>(I)) { if (FPMathOperator *FPOp = dyn_cast<FPMathOperator>(I)) { Simplified = llvm::SimplifyFCmpInst( Cmp->getPredicate(), TryGetCachedValue(I->getOperand(0)), TryGetCachedValue(I->getOperand(1)), FPOp->getFastMathFlags(), DL); } else { Simplified = llvm::SimplifyCmpInst( Cmp->getPredicate(), TryGetCachedValue(I->getOperand(0)), TryGetCachedValue(I->getOperand(1)), DL); } } else if (SelectInst *Select = dyn_cast<SelectInst>(I)) { Simplified = llvm::SimplifySelectInst( TryGetCachedValue(Select->getCondition()), TryGetCachedValue(Select->getTrueValue()), TryGetCachedValue(Select->getFalseValue()), DL); } else if (ExtractElementInst *IE = dyn_cast<ExtractElementInst>(I)) { Simplified = llvm::SimplifyExtractElementInst( TryGetCachedValue(IE->getVectorOperand()), TryGetCachedValue(IE->getIndexOperand()), DL, nullptr, DT); } else if (CastInst *Cast = dyn_cast<CastInst>(I)) { Simplified = llvm::SimplifyCastInst(Cast->getOpcode(), TryGetCachedValue(Cast->getOperand(0)), Cast->getType(), DL); } if (Simplified && isa<Constant>(Simplified)) Map.Set(I, Simplified); return Simplified; } bool DxilValueCache::WeakValueMap::Seen(Value *V) { auto FindIt = Map.find(V); if (FindIt == Map.end()) return false; auto &Entry = FindIt->second; if (Entry.IsStale()) return false; return Entry.Value; } Value *DxilValueCache::WeakValueMap::Get(Value *V) { auto FindIt = Map.find(V); if (FindIt == Map.end()) return nullptr; auto &Entry = FindIt->second; if (Entry.IsStale()) return nullptr; Value *Result = Entry.Value; if (Result == GetSentinel(V->getContext())) return nullptr; return Result; } void DxilValueCache::WeakValueMap::SetSentinel(Value *Key) { Map[Key].Set(Key, GetSentinel(Key->getContext())); } Value *DxilValueCache::WeakValueMap::GetSentinel(LLVMContext &Ctx) { if (!Sentinel) { Sentinel.reset(PHINode::Create(Type::getInt1Ty(Ctx), 0)); } return Sentinel.get(); } void DxilValueCache::WeakValueMap::ResetAll() { Map.clear(); } void DxilValueCache::WeakValueMap::ResetUnknowns() { if (!Sentinel) return; for (auto it = Map.begin(); it != Map.end();) { auto nextIt = std::next(it); if (it->second.Value == Sentinel.get()) Map.erase(it); it = nextIt; } } LLVM_DUMP_METHOD void DxilValueCache::WeakValueMap::dump() const { std::unordered_map<const Module *, std::unique_ptr<ModuleSlotTracker>> MSTs; for (auto It = Map.begin(), E = Map.end(); It != E; It++) { const Value *Key = It->first; if (It->second.IsStale()) 
continue; if (!Key) continue; ModuleSlotTracker *MST = nullptr; { const Module *M = nullptr; if (auto I = dyn_cast<Instruction>(Key)) M = I->getModule(); else if (auto BB = dyn_cast<BasicBlock>(Key)) M = BB->getModule(); else { errs() << *Key; llvm_unreachable("How can a key be neither an instruction or BB?"); } std::unique_ptr<ModuleSlotTracker> &optMst = MSTs[M]; if (!optMst) { optMst = llvm::make_unique<ModuleSlotTracker>(M); } MST = optMst.get(); } const Value *V = It->second.Value; bool IsSentinel = Sentinel && V == Sentinel.get(); if (const BasicBlock *BB = dyn_cast<BasicBlock>(Key)) { dbgs() << "[BB]"; BB->printAsOperand(dbgs(), false, *MST); dbgs() << " -> "; if (IsSentinel) dbgs() << "NO_VALUE"; else { if (IsConstantTrue(V)) dbgs() << "Always Reachable!"; else if (IsConstantFalse(V)) dbgs() << "Never Reachable!"; } } else { dbgs() << *Key << " -> "; if (IsSentinel) dbgs() << "NO_VALUE"; else dbgs() << *V; } dbgs() << "\n"; } } void DxilValueCache::WeakValueMap::Set(Value *Key, Value *V) { Map[Key].Set(Key, V); } // If there's a cached value, return it. Otherwise, return // the value itself. Value *DxilValueCache::TryGetCachedValue(Value *V) { if (Value *Simplified = Map.Get(V)) return Simplified; return V; } DxilValueCache::DxilValueCache() : ImmutablePass(ID) { initializeDxilValueCachePass(*PassRegistry::getPassRegistry()); } StringRef DxilValueCache::getPassName() const { return "Dxil Value Cache"; } Value *DxilValueCache::GetValue(Value *V, DominatorTree *DT) { if (dyn_cast<Constant>(V)) return V; if (Value *NewV = Map.Get(V)) return NewV; return ProcessValue(V, DT); } Constant *DxilValueCache::GetConstValue(Value *V, DominatorTree *DT) { if (Value *NewV = GetValue(V)) return dyn_cast<Constant>(NewV); return nullptr; } ConstantInt *DxilValueCache::GetConstInt(Value *V, DominatorTree *DT) { if (Value *NewV = GetValue(V)) return dyn_cast<ConstantInt>(NewV); return nullptr; } bool DxilValueCache::IsUnreachable(BasicBlock *BB, DominatorTree *DT) { ProcessValue(BB, DT); return IsUnreachable_(BB); } LLVM_DUMP_METHOD void DxilValueCache::dump() const { Map.dump(); } void DxilValueCache::getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesAll(); } Value *DxilValueCache::ProcessValue(Value *NewV, DominatorTree *DT) { if (NewV->getType()->isVoidTy()) return nullptr; Value *Result = nullptr; SmallVector<Value *, 16> WorkList; // Although we accept all values for convenience, we only process // Instructions. if (Instruction *I = dyn_cast<Instruction>(NewV)) { WorkList.push_back(I); } else if (BasicBlock *BB = dyn_cast<BasicBlock>(NewV)) { WorkList.push_back(BB->getTerminator()); WorkList.push_back(BB); } else { return nullptr; } // Unconditionally process this one instruction, whether we've seen // it or not. The simplification might be able to do something to // simplify it even when we don't have its value cached. // This is a basic DFS setup. while (WorkList.size()) { Value *V = WorkList.back(); // If we haven't seen this value, go in and push things it depends on // into the worklist. 
if (!Map.Seen(V)) { Map.SetSentinel(V); if (Instruction *I = dyn_cast<Instruction>(V)) { for (Use &U : I->operands()) { Instruction *UseI = dyn_cast<Instruction>(U.get()); if (!UseI) continue; if (!Map.Seen(UseI)) WorkList.push_back(UseI); } if (PHINode *PN = dyn_cast<PHINode>(I)) { for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) { BasicBlock *BB = PN->getIncomingBlock(i); TerminatorInst *Term = BB->getTerminator(); if (!Map.Seen(Term)) WorkList.push_back(Term); if (!Map.Seen(BB)) WorkList.push_back(BB); } } } else if (BasicBlock *BB = dyn_cast<BasicBlock>(V)) { for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; PI++) { BasicBlock *PredBB = *PI; TerminatorInst *Term = PredBB->getTerminator(); if (!Map.Seen(Term)) WorkList.push_back(Term); if (!Map.Seen(PredBB)) WorkList.push_back(PredBB); } } } // If we've seen this values, all its dependencies must have been processed // as well. else { WorkList.pop_back(); if (Instruction *I = dyn_cast<Instruction>(V)) { Value *SimplifiedValue = SimplifyAndCacheResult(I, DT); // Set the result if this is the input inst. // SimplifyInst may not have cached the value // so we return it directly. if (I == NewV) Result = SimplifiedValue; } else if (BasicBlock *BB = dyn_cast<BasicBlock>(V)) { // Deduce the basic block's reachability based on // other analysis. if (!IsEntryBlock(BB)) { bool AllNeverReachable = true; for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; PI++) { if (!IsUnreachable_(*PI)) { AllNeverReachable = false; break; } } if (AllNeverReachable) MarkUnreachable(BB); } } } } return Result; } char DxilValueCache::ID; Pass *llvm::createDxilValueCachePass() { return new DxilValueCache(); } INITIALIZE_PASS(DxilValueCache, DEBUG_TYPE, "Dxil Value Cache", false, false)
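A hypothetical client of this cache is sketched below, under the assumption that createDxilValueCachePass() has been added to the same legacy pass manager so getAnalysis<> can find it; the pass name and the folding logic are illustrative, not part of this file. It simply substitutes branch conditions the cache proves constant.

#include "llvm/Analysis/DxilValueCache.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Pass.h"
using namespace llvm;

namespace {
// Hypothetical pass: replace branch conditions the cache proves constant.
// Pass registration is omitted for brevity.
struct FoldCachedConditions : public FunctionPass {
  static char ID;
  FoldCachedConditions() : FunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DxilValueCache>();
  }

  bool runOnFunction(Function &F) override {
    DxilValueCache &DVC = getAnalysis<DxilValueCache>();
    bool Changed = false;
    for (BasicBlock &BB : F) {
      BranchInst *Br = dyn_cast<BranchInst>(BB.getTerminator());
      if (!Br || !Br->isConditional())
        continue;
      // If the cache proves the condition constant, substitute it so a later
      // CFG cleanup can delete the dead edge.
      if (ConstantInt *C = DVC.GetConstInt(Br->getCondition(), /*DT*/ nullptr)) {
        Br->setCondition(C);
        Changed = true;
      }
    }
    return Changed;
  }
};
} // namespace
char FoldCachedConditions::ID = 0;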
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/README.txt
Analysis Opportunities:

//===---------------------------------------------------------------------===//

In test/Transforms/LoopStrengthReduce/quadradic-exit-value.ll, the
ScalarEvolution expression for %r is this:

  {1,+,3,+,2}<loop>

Outside the loop, this could be evaluated simply as (%n * %n), however
ScalarEvolution currently evaluates it as

  (-2 + (2 * (trunc i65 (((zext i64 (-2 + %n) to i65) * (zext i64 (-1 + %n) to i65)) /u 2) to i64)) + (3 * %n))

In addition to being much more complicated, it involves i65 arithmetic,
which is very inefficient when expanded into code.

//===---------------------------------------------------------------------===//

In formatValue in test/CodeGen/X86/lsr-delayed-fold.ll, ScalarEvolution is
forming this expression:

((trunc i64 (-1 * %arg5) to i32) + (trunc i64 %arg5 to i32) + (-1 * (trunc i64 undef to i32)))

This could be folded to

(-1 * (trunc i64 undef to i32))

//===---------------------------------------------------------------------===//
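A quick numeric check of the first note above, assuming the usual chrec semantics ({A,+,B,+,C} evaluated at iteration i equals A + B*i + C*i*(i-1)/2) and that the loop exits on iteration i = n - 1, which is what the i65 expression shown corresponds to:

#include <cassert>
#include <cstdint>

int main() {
  for (uint64_t n = 1; n < 1000; ++n) {
    // Value of {1,+,3,+,2} on the exiting iteration i = n - 1.
    uint64_t i = n - 1;
    uint64_t chrec = 1 + 3 * i + 2 * (i * (i - 1) / 2);
    assert(chrec == n * n); // matches the simpler (%n * %n) form
  }
  return 0;
}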
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
//===- ScalarEvolutionAliasAnalysis.cpp - SCEV-based Alias Analysis -------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the ScalarEvolutionAliasAnalysis pass, which implements a // simple alias analysis implemented in terms of ScalarEvolution queries. // // This differs from traditional loop dependence analysis in that it tests // for dependencies within a single iteration of a loop, rather than // dependencies between different iterations. // // ScalarEvolution has a more complete understanding of pointer arithmetic // than BasicAliasAnalysis' collection of ad-hoc analyses. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/Passes.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/ScalarEvolutionExpressions.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" using namespace llvm; namespace { /// ScalarEvolutionAliasAnalysis - This is a simple alias analysis /// implementation that uses ScalarEvolution to answer queries. class ScalarEvolutionAliasAnalysis : public FunctionPass, public AliasAnalysis { ScalarEvolution *SE; public: static char ID; // Class identification, replacement for typeinfo ScalarEvolutionAliasAnalysis() : FunctionPass(ID), SE(nullptr) { initializeScalarEvolutionAliasAnalysisPass( *PassRegistry::getPassRegistry()); } /// getAdjustedAnalysisPointer - This method is used when a pass implements /// an analysis interface through multiple inheritance. If needed, it /// should override this to adjust the this pointer as needed for the /// specified pass info. void *getAdjustedAnalysisPointer(AnalysisID PI) override { if (PI == &AliasAnalysis::ID) return (AliasAnalysis*)this; return this; } private: void getAnalysisUsage(AnalysisUsage &AU) const override; bool runOnFunction(Function &F) override; AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) override; Value *GetBaseValue(const SCEV *S); }; } // End of anonymous namespace // Register this pass... char ScalarEvolutionAliasAnalysis::ID = 0; INITIALIZE_AG_PASS_BEGIN(ScalarEvolutionAliasAnalysis, AliasAnalysis, "scev-aa", "ScalarEvolution-based Alias Analysis", false, true, false) INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) INITIALIZE_AG_PASS_END(ScalarEvolutionAliasAnalysis, AliasAnalysis, "scev-aa", "ScalarEvolution-based Alias Analysis", false, true, false) FunctionPass *llvm::createScalarEvolutionAliasAnalysisPass() { return new ScalarEvolutionAliasAnalysis(); } void ScalarEvolutionAliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const { AU.addRequiredTransitive<ScalarEvolution>(); AU.setPreservesAll(); AliasAnalysis::getAnalysisUsage(AU); } bool ScalarEvolutionAliasAnalysis::runOnFunction(Function &F) { InitializeAliasAnalysis(this, &F.getParent()->getDataLayout()); SE = &getAnalysis<ScalarEvolution>(); return false; } /// GetBaseValue - Given an expression, try to find a /// base value. Return null is none was found. Value * ScalarEvolutionAliasAnalysis::GetBaseValue(const SCEV *S) { if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { // In an addrec, assume that the base will be in the start, rather // than the step. return GetBaseValue(AR->getStart()); } else if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { // If there's a pointer operand, it'll be sorted at the end of the list. 
const SCEV *Last = A->getOperand(A->getNumOperands()-1); if (Last->getType()->isPointerTy()) return GetBaseValue(Last); } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { // This is a leaf node. return U->getValue(); } // No Identified object found. return nullptr; } AliasResult ScalarEvolutionAliasAnalysis::alias(const MemoryLocation &LocA, const MemoryLocation &LocB) { // If either of the memory references is empty, it doesn't matter what the // pointer values are. This allows the code below to ignore this special // case. if (LocA.Size == 0 || LocB.Size == 0) return NoAlias; // This is ScalarEvolutionAliasAnalysis. Get the SCEVs! const SCEV *AS = SE->getSCEV(const_cast<Value *>(LocA.Ptr)); const SCEV *BS = SE->getSCEV(const_cast<Value *>(LocB.Ptr)); // If they evaluate to the same expression, it's a MustAlias. if (AS == BS) return MustAlias; // If something is known about the difference between the two addresses, // see if it's enough to prove a NoAlias. if (SE->getEffectiveSCEVType(AS->getType()) == SE->getEffectiveSCEVType(BS->getType())) { unsigned BitWidth = SE->getTypeSizeInBits(AS->getType()); APInt ASizeInt(BitWidth, LocA.Size); APInt BSizeInt(BitWidth, LocB.Size); // Compute the difference between the two pointers. const SCEV *BA = SE->getMinusSCEV(BS, AS); // Test whether the difference is known to be great enough that memory of // the given sizes don't overlap. This assumes that ASizeInt and BSizeInt // are non-zero, which is special-cased above. if (ASizeInt.ule(SE->getUnsignedRange(BA).getUnsignedMin()) && (-BSizeInt).uge(SE->getUnsignedRange(BA).getUnsignedMax())) return NoAlias; // Folding the subtraction while preserving range information can be tricky // (because of INT_MIN, etc.); if the prior test failed, swap AS and BS // and try again to see if things fold better that way. // Compute the difference between the two pointers. const SCEV *AB = SE->getMinusSCEV(AS, BS); // Test whether the difference is known to be great enough that memory of // the given sizes don't overlap. This assumes that ASizeInt and BSizeInt // are non-zero, which is special-cased above. if (BSizeInt.ule(SE->getUnsignedRange(AB).getUnsignedMin()) && (-ASizeInt).uge(SE->getUnsignedRange(AB).getUnsignedMax())) return NoAlias; } // If ScalarEvolution can find an underlying object, form a new query. // The correctness of this depends on ScalarEvolution not recognizing // inttoptr and ptrtoint operators. Value *AO = GetBaseValue(AS); Value *BO = GetBaseValue(BS); if ((AO && AO != LocA.Ptr) || (BO && BO != LocB.Ptr)) if (alias(MemoryLocation(AO ? AO : LocA.Ptr, AO ? +MemoryLocation::UnknownSize : LocA.Size, AO ? AAMDNodes() : LocA.AATags), MemoryLocation(BO ? BO : LocB.Ptr, BO ? +MemoryLocation::UnknownSize : LocB.Size, BO ? AAMDNodes() : LocB.AATags)) == NoAlias) return NoAlias; // Forward the query to the next analysis. return AliasAnalysis::alias(LocA, LocB); }
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/regioninfo.cpp
//===- RegionInfo.cpp - SESE region detection analysis --------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // Detects single entry single exit regions in the control flow graph. //===----------------------------------------------------------------------===// #include "llvm/Analysis/RegionInfo.h" #include "llvm/ADT/PostOrderIterator.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/RegionInfoImpl.h" #include "llvm/Analysis/RegionIterator.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include <algorithm> #include <iterator> #include <set> using namespace llvm; #define DEBUG_TYPE "region" namespace llvm { template class RegionBase<RegionTraits<Function>>; template class RegionNodeBase<RegionTraits<Function>>; template class RegionInfoBase<RegionTraits<Function>>; } // namespace llvm STATISTIC(numRegions, "The # of regions"); STATISTIC(numSimpleRegions, "The # of simple regions"); // Always verify if expensive checking is enabled. #if 0 // HLSL Change Starts - option pending static cl::opt<bool,true> VerifyRegionInfoX( "verify-region-info", cl::location(RegionInfoBase<RegionTraits<Function>>::VerifyRegionInfo), cl::desc("Verify region info (time consuming)")); static cl::opt<Region::PrintStyle, true> printStyleX("print-region-style", cl::location(RegionInfo::printStyle), cl::Hidden, cl::desc("style of printing regions"), cl::values( clEnumValN(Region::PrintNone, "none", "print no details"), clEnumValN(Region::PrintBB, "bb", "print regions in detail with block_iterator"), clEnumValN(Region::PrintRN, "rn", "print regions in detail with element_iterator"), clEnumValEnd)); #else #endif // HLSL Change Ends //===----------------------------------------------------------------------===// // Region implementation // Region::Region(BasicBlock *Entry, BasicBlock *Exit, RegionInfo *RI, DominatorTree *DT, Region *Parent) : RegionBase<RegionTraits<Function>>(Entry, Exit, RI, DT, Parent) {} Region::~Region() {} //===----------------------------------------------------------------------===// // RegionInfo implementation // RegionInfo::RegionInfo() : RegionInfoBase<RegionTraits<Function>>() {} RegionInfo::~RegionInfo() {} void RegionInfo::updateStatistics(Region *R) { ++numRegions; // TODO: Slow. Should only be enabled if -stats is used. 
if (R->isSimple()) ++numSimpleRegions; } void RegionInfo::recalculate(Function &F, DominatorTree *DT_, PostDominatorTree *PDT_, DominanceFrontier *DF_) { DT = DT_; PDT = PDT_; DF = DF_; TopLevelRegion = new Region(&F.getEntryBlock(), nullptr, this, DT, nullptr); updateStatistics(TopLevelRegion); calculate(F); } //===----------------------------------------------------------------------===// // RegionInfoPass implementation // RegionInfoPass::RegionInfoPass() : FunctionPass(ID) { initializeRegionInfoPassPass(*PassRegistry::getPassRegistry()); } RegionInfoPass::~RegionInfoPass() {} bool RegionInfoPass::runOnFunction(Function &F) { releaseMemory(); auto DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); auto PDT = &getAnalysis<PostDominatorTree>(); auto DF = &getAnalysis<DominanceFrontier>(); RI.recalculate(F, DT, PDT, DF); return false; } void RegionInfoPass::releaseMemory() { RI.releaseMemory(); } void RegionInfoPass::verifyAnalysis() const { RI.verifyAnalysis(); } void RegionInfoPass::getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesAll(); AU.addRequiredTransitive<DominatorTreeWrapperPass>(); AU.addRequired<PostDominatorTree>(); AU.addRequired<DominanceFrontier>(); } void RegionInfoPass::print(raw_ostream &OS, const Module *) const { RI.print(OS); } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void RegionInfoPass::dump() const { RI.dump(); } #endif char RegionInfoPass::ID = 0; INITIALIZE_PASS_BEGIN(RegionInfoPass, "regions", "Detect single entry single exit regions", true, true) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(PostDominatorTree) INITIALIZE_PASS_DEPENDENCY(DominanceFrontier) INITIALIZE_PASS_END(RegionInfoPass, "regions", "Detect single entry single exit regions", true, true) // Create methods available outside of this file, to use them // "include/llvm/LinkAllPasses.h". Otherwise the pass would be deleted by // the link time optimization. namespace llvm { FunctionPass *createRegionInfoPass() { return new RegionInfoPass(); } } // namespace llvm
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/DxilConstantFolding.cpp
//===-- DxilConstantFolding.cpp - Fold dxil intrinsics into constants -----===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // // Copyright (C) Microsoft Corporation. All rights reserved. // //===----------------------------------------------------------------------===// // // //===----------------------------------------------------------------------===// #include "llvm/Analysis/DxilConstantFolding.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" #include "llvm/Analysis/ConstantFolding.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/Config/config.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Function.h" #include "llvm/IR/GetElementPtrTypeIterator.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/Operator.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" #include <algorithm> #include <cerrno> #include <cmath> #include <functional> #include "dxc/DXIL/DXIL.h" #include "dxc/HLSL/DxilConvergentName.h" using namespace llvm; using namespace hlsl; namespace { bool IsConvergentMarker(const Function *F) { return F->getName().startswith(kConvergentFunctionPrefix); } bool IsConvergentMarker(const char *Name) { StringRef RName = Name; return RName.startswith(kConvergentFunctionPrefix); } } // namespace // Check if the given function is a dxil intrinsic and if so extract the // opcode for the instrinsic being called. static bool GetDxilOpcode(StringRef Name, ArrayRef<Constant *> Operands, OP::OpCode &out) { if (!OP::IsDxilOpFuncName(Name)) return false; if (!Operands.size()) return false; if (ConstantInt *ci = dyn_cast<ConstantInt>(Operands[0])) { uint64_t opcode = ci->getLimitedValue(); if (opcode < static_cast<uint64_t>(OP::OpCode::NumOpCodes)) { out = static_cast<OP::OpCode>(opcode); return true; } } return false; } // Typedefs for passing function pointers to evaluate float constants. typedef double(__cdecl *NativeFPUnaryOp)(double); typedef std::function<APFloat::opStatus(APFloat &)> APFloatUnaryOp; /// Currently APFloat versions of these functions do not exist, so we use /// the host native double versions. Float versions are not called /// directly but for all these it is true (float)(f((double)arg)) == /// f(arg). Long double not supported yet. /// /// Calls out to the llvm constant folding function to do the real work. static Constant *DxilConstantFoldFP(NativeFPUnaryOp NativeFP, ConstantFP *C, Type *Ty) { double V = llvm::getValueAsDouble(C); return llvm::ConstantFoldFP(NativeFP, V, Ty); } // Constant fold using the provided function on APFloats. static Constant *HLSLConstantFoldAPFloat(APFloatUnaryOp NativeFP, ConstantFP *C, Type *Ty) { APFloat APF = C->getValueAPF(); if (NativeFP(APF) != APFloat::opStatus::opOK) return nullptr; return ConstantFP::get(Ty->getContext(), APF); } // Constant fold a round dxil intrinsic. static Constant *HLSLConstantFoldRound(APFloat::roundingMode roundingMode, ConstantFP *C, Type *Ty) { APFloatUnaryOp f = [roundingMode](APFloat &x) { return x.roundToIntegral(roundingMode); }; return HLSLConstantFoldAPFloat(f, C, Ty); } namespace { // Wrapper for call operands that "shifts past" the hlsl intrinsic opcode. 
// Also provides accessors that dyn_cast the operand to a constant type. class DxilIntrinsicOperands { public: DxilIntrinsicOperands(ArrayRef<Constant *> RawCallOperands) : m_RawCallOperands(RawCallOperands) {} Constant *const &operator[](size_t index) const { return m_RawCallOperands[index + 1]; } ConstantInt *GetConstantInt(size_t index) const { return dyn_cast<ConstantInt>(this->operator[](index)); } ConstantFP *GetConstantFloat(size_t index) const { return dyn_cast<ConstantFP>(this->operator[](index)); } size_t Size() const { return m_RawCallOperands.size() - 1; } private: ArrayRef<Constant *> m_RawCallOperands; }; } // namespace /// We only fold functions with finite arguments. Folding NaN and inf is /// likely to be aborted with an exception anyway, and some host libms /// have known errors raising exceptions. static bool IsFinite(ConstantFP *C) { if (C->getValueAPF().isNaN() || C->getValueAPF().isInfinity()) return false; return true; } // Check that the op is non-null and finite. static bool IsValidOp(ConstantFP *C) { if (!C || !IsFinite(C)) return false; return true; } // Check that all ops are valid. static bool AllValidOps(ArrayRef<ConstantFP *> Ops) { return std::all_of(Ops.begin(), Ops.end(), IsValidOp); } // Constant fold unary floating point intrinsics. static Constant *ConstantFoldUnaryFPIntrinsic(OP::OpCode opcode, Type *Ty, ConstantFP *Op) { switch (opcode) { default: break; case OP::OpCode::FAbs: return DxilConstantFoldFP(fabs, Op, Ty); case OP::OpCode::Saturate: { NativeFPUnaryOp f = [](double x) { return std::max(std::min(x, 1.0), 0.0); }; return DxilConstantFoldFP(f, Op, Ty); } case OP::OpCode::Cos: return DxilConstantFoldFP(cos, Op, Ty); case OP::OpCode::Sin: return DxilConstantFoldFP(sin, Op, Ty); case OP::OpCode::Tan: return DxilConstantFoldFP(tan, Op, Ty); case OP::OpCode::Acos: return DxilConstantFoldFP(acos, Op, Ty); case OP::OpCode::Asin: return DxilConstantFoldFP(asin, Op, Ty); case OP::OpCode::Atan: return DxilConstantFoldFP(atan, Op, Ty); case OP::OpCode::Hcos: return DxilConstantFoldFP(cosh, Op, Ty); case OP::OpCode::Hsin: return DxilConstantFoldFP(sinh, Op, Ty); case OP::OpCode::Htan: return DxilConstantFoldFP(tanh, Op, Ty); case OP::OpCode::Exp: return DxilConstantFoldFP(exp2, Op, Ty); case OP::OpCode::Frc: { NativeFPUnaryOp f = [](double x) { double unused; return fabs(modf(x, &unused)); }; return DxilConstantFoldFP(f, Op, Ty); } case OP::OpCode::Log: return DxilConstantFoldFP(log2, Op, Ty); case OP::OpCode::Sqrt: return DxilConstantFoldFP(sqrt, Op, Ty); case OP::OpCode::Rsqrt: { NativeFPUnaryOp f = [](double x) { return 1.0 / sqrt(x); }; return DxilConstantFoldFP(f, Op, Ty); } case OP::OpCode::Round_ne: return HLSLConstantFoldRound(APFloat::roundingMode::rmNearestTiesToEven, Op, Ty); case OP::OpCode::Round_ni: return HLSLConstantFoldRound(APFloat::roundingMode::rmTowardNegative, Op, Ty); case OP::OpCode::Round_pi: return HLSLConstantFoldRound(APFloat::roundingMode::rmTowardPositive, Op, Ty); case OP::OpCode::Round_z: return HLSLConstantFoldRound(APFloat::roundingMode::rmTowardZero, Op, Ty); } return nullptr; } // Constant fold binary floating point intrinsics. 
static Constant *ConstantFoldBinaryFPIntrinsic(OP::OpCode opcode, Type *Ty, ConstantFP *Op1, ConstantFP *Op2) { const APFloat &C1 = Op1->getValueAPF(); const APFloat &C2 = Op2->getValueAPF(); switch (opcode) { default: break; case OP::OpCode::FMax: return ConstantFP::get(Ty->getContext(), maxnum(C1, C2)); case OP::OpCode::FMin: return ConstantFP::get(Ty->getContext(), minnum(C1, C2)); } return nullptr; } // Constant fold ternary floating point intrinsics. static Constant *ConstantFoldTernaryFPIntrinsic(OP::OpCode opcode, Type *Ty, ConstantFP *Op1, ConstantFP *Op2, ConstantFP *Op3) { const APFloat &C1 = Op1->getValueAPF(); const APFloat &C2 = Op2->getValueAPF(); const APFloat &C3 = Op3->getValueAPF(); APFloat::roundingMode roundingMode = APFloat::rmNearestTiesToEven; switch (opcode) { default: break; case OP::OpCode::FMad: { APFloat result(C1); result.multiply(C2, roundingMode); result.add(C3, roundingMode); return ConstantFP::get(Ty->getContext(), result); } case OP::OpCode::Fma: { APFloat result(C1); result.fusedMultiplyAdd(C2, C3, roundingMode); return ConstantFP::get(Ty->getContext(), result); } } return nullptr; } // Compute dot product for arbitrary sized vectors. static Constant *ComputeDot(Type *Ty, ArrayRef<ConstantFP *> A, ArrayRef<ConstantFP *> B) { if (A.size() != B.size() || !A.size()) { assert(false && "invalid call to compute dot"); return nullptr; } if (!AllValidOps(A) || !AllValidOps(B)) return nullptr; APFloat::roundingMode roundingMode = APFloat::roundingMode::rmNearestTiesToEven; APFloat sum = APFloat::getZero(A[0]->getValueAPF().getSemantics()); for (int i = 0, e = A.size(); i != e; ++i) { APFloat val(A[i]->getValueAPF()); val.multiply(B[i]->getValueAPF(), roundingMode); sum.add(val, roundingMode); } return ConstantFP::get(Ty->getContext(), sum); } // Constant folding for dot2, dot3, and dot4. static Constant *ConstantFoldDot(OP::OpCode opcode, Type *Ty, const DxilIntrinsicOperands &operands) { switch (opcode) { default: break; case OP::OpCode::Dot2: { ConstantFP *Ax = operands.GetConstantFloat(0); ConstantFP *Ay = operands.GetConstantFloat(1); ConstantFP *Bx = operands.GetConstantFloat(2); ConstantFP *By = operands.GetConstantFloat(3); return ComputeDot(Ty, {Ax, Ay}, {Bx, By}); } case OP::OpCode::Dot3: { ConstantFP *Ax = operands.GetConstantFloat(0); ConstantFP *Ay = operands.GetConstantFloat(1); ConstantFP *Az = operands.GetConstantFloat(2); ConstantFP *Bx = operands.GetConstantFloat(3); ConstantFP *By = operands.GetConstantFloat(4); ConstantFP *Bz = operands.GetConstantFloat(5); return ComputeDot(Ty, {Ax, Ay, Az}, {Bx, By, Bz}); } case OP::OpCode::Dot4: { ConstantFP *Ax = operands.GetConstantFloat(0); ConstantFP *Ay = operands.GetConstantFloat(1); ConstantFP *Az = operands.GetConstantFloat(2); ConstantFP *Aw = operands.GetConstantFloat(3); ConstantFP *Bx = operands.GetConstantFloat(4); ConstantFP *By = operands.GetConstantFloat(5); ConstantFP *Bz = operands.GetConstantFloat(6); ConstantFP *Bw = operands.GetConstantFloat(7); return ComputeDot(Ty, {Ax, Ay, Az, Aw}, {Bx, By, Bz, Bw}); } } return nullptr; } // Constant fold a Bfrev dxil intrinsic. 
static Constant *HLSLConstantFoldBfrev(ConstantInt *C, Type *Ty) {
  APInt API = C->getValue();
  uint64_t result = 0;
  if (Ty == Type::getInt32Ty(Ty->getContext())) {
    uint32_t val = static_cast<uint32_t>(API.getLimitedValue());
    result = llvm::reverseBits(val);
  } else if (Ty == Type::getInt16Ty(Ty->getContext())) {
    uint16_t val = static_cast<uint16_t>(API.getLimitedValue());
    result = llvm::reverseBits(val);
  } else if (Ty == Type::getInt64Ty(Ty->getContext())) {
    uint64_t val = static_cast<uint64_t>(API.getLimitedValue());
    result = llvm::reverseBits(val);
  } else {
    return nullptr;
  }
  return ConstantInt::get(Ty, result);
}

// Handle special case for findfirst* bit functions.
// When the position is equal to the bitwidth, the value was not found
// and we need to return a result of -1.
static Constant *HLSLConstantFoldFindBit(Type *Ty, unsigned position,
                                         unsigned bitwidth) {
  if (position == bitwidth)
    return ConstantInt::get(Ty,
                            APInt::getAllOnesValue(Ty->getScalarSizeInBits()));
  return ConstantInt::get(Ty, position);
}

// Constant fold unary integer intrinsics.
static Constant *ConstantFoldUnaryIntIntrinsic(OP::OpCode opcode, Type *Ty,
                                               ConstantInt *Op) {
  APInt API = Op->getValue();
  switch (opcode) {
  default:
    break;
  case OP::OpCode::Bfrev:
    return HLSLConstantFoldBfrev(Op, Ty);
  case OP::OpCode::Countbits:
    return ConstantInt::get(Ty, API.countPopulation());
  case OP::OpCode::FirstbitLo:
    return HLSLConstantFoldFindBit(Ty, API.countTrailingZeros(),
                                   API.getBitWidth());
  case OP::OpCode::FirstbitHi:
    return HLSLConstantFoldFindBit(Ty, API.countLeadingZeros(),
                                   API.getBitWidth());
  case OP::OpCode::FirstbitSHi: {
    if (API.isNegative())
      return HLSLConstantFoldFindBit(Ty, API.countLeadingOnes(),
                                     API.getBitWidth());
    else
      return HLSLConstantFoldFindBit(Ty, API.countLeadingZeros(),
                                     API.getBitWidth());
  }
  }
  return nullptr;
}

// Constant fold binary integer intrinsics.
static Constant *ConstantFoldBinaryIntIntrinsic(OP::OpCode opcode, Type *Ty,
                                                ConstantInt *Op1,
                                                ConstantInt *Op2) {
  APInt C1 = Op1->getValue();
  APInt C2 = Op2->getValue();
  switch (opcode) {
  default:
    break;
  case OP::OpCode::IMin: {
    APInt minVal = C1.slt(C2) ? C1 : C2;
    return ConstantInt::get(Ty, minVal);
  }
  case OP::OpCode::IMax: {
    APInt maxVal = C1.sgt(C2) ? C1 : C2;
    return ConstantInt::get(Ty, maxVal);
  }
  case OP::OpCode::UMin: {
    APInt minVal = C1.ult(C2) ? C1 : C2;
    return ConstantInt::get(Ty, minVal);
  }
  case OP::OpCode::UMax: {
    APInt maxVal = C1.ugt(C2) ? C1 : C2;
    return ConstantInt::get(Ty, maxVal);
  }
  }
  return nullptr;
}

// Constant fold MakeDouble
static Constant *
ConstantFoldMakeDouble(Type *Ty,
                       const DxilIntrinsicOperands &IntrinsicOperands) {
  assert(IntrinsicOperands.Size() == 2);
  ConstantInt *Op1 = IntrinsicOperands.GetConstantInt(0);
  ConstantInt *Op2 = IntrinsicOperands.GetConstantInt(1);
  if (!Op1 || !Op2)
    return nullptr;
  uint64_t C1 = Op1->getZExtValue();
  uint64_t C2 = Op2->getZExtValue();
  uint64_t dbits = C2 << 32 | C1;
  double dval = *(double *)&dbits;
  return ConstantFP::get(Ty, dval);
}

// Compute bit field extract for ibfe and ubfe.
// The computation for ibfe and ubfe is the same except for the right shift,
// which is an arithmetic shift for ibfe and a logical shift for ubfe.
// (A small standalone sketch of the width/offset clamping is appended at the
// end of this file.)
// ubfe:
// https://msdn.microsoft.com/en-us/library/windows/desktop/hh447243(v=vs.85).aspx
// ibfe:
// https://msdn.microsoft.com/en-us/library/windows/desktop/hh447243(v=vs.85).aspx
static Constant *ComputeBFE(Type *Ty, APInt width, APInt offset, APInt val,
                            std::function<APInt(APInt, APInt)> shr) {
  const APInt bitwidth(width.getBitWidth(), width.getBitWidth());
  // Limit width and offset to the bitwidth of the value.
  width = width.And(bitwidth - 1);
  offset = offset.And(bitwidth - 1);
  if (width == 0) {
    return ConstantInt::get(Ty, 0);
  } else if ((width + offset).ult(bitwidth)) {
    APInt dest = val.shl(bitwidth - (width + offset));
    dest = shr(dest, bitwidth - width);
    return ConstantInt::get(Ty, dest);
  } else {
    APInt dest = shr(val, offset);
    return ConstantInt::get(Ty, dest);
  }
}

// Constant fold ternary integer intrinsic.
static Constant *ConstantFoldTernaryIntIntrinsic(OP::OpCode opcode, Type *Ty,
                                                 ConstantInt *Op1,
                                                 ConstantInt *Op2,
                                                 ConstantInt *Op3) {
  APInt C1 = Op1->getValue();
  APInt C2 = Op2->getValue();
  APInt C3 = Op3->getValue();
  switch (opcode) {
  default:
    break;
  case OP::OpCode::IMad:
  case OP::OpCode::UMad: {
    // Result is same for signed/unsigned since this is twos complement and we
    // only keep the lower half of the multiply.
    APInt result = C1 * C2 + C3;
    return ConstantInt::get(Ty, result);
  }
  case OP::OpCode::Ubfe:
    return ComputeBFE(Ty, C1, C2, C3,
                      [](APInt val, APInt amt) { return val.lshr(amt); });
  case OP::OpCode::Ibfe:
    return ComputeBFE(Ty, C1, C2, C3,
                      [](APInt val, APInt amt) { return val.ashr(amt); });
  }
  return nullptr;
}

// Constant fold quaternary integer intrinsic.
//
// Currently we only have one quaternary intrinsic: Bfi.
// The Bfi computation is described here:
// https://msdn.microsoft.com/en-us/library/windows/desktop/hh446837(v=vs.85).aspx
static Constant *ConstantFoldQuaternaryIntInstrinsic(OP::OpCode opcode,
                                                     Type *Ty, ConstantInt *Op1,
                                                     ConstantInt *Op2,
                                                     ConstantInt *Op3,
                                                     ConstantInt *Op4) {
  if (opcode != OP::OpCode::Bfi)
    return nullptr;
  APInt bitwidth(Op1->getValue().getBitWidth(), Op1->getValue().getBitWidth());
  APInt width = Op1->getValue().And(bitwidth - 1);
  APInt offset = Op2->getValue().And(bitwidth - 1);
  APInt src = Op3->getValue();
  APInt dst = Op4->getValue();
  APInt one(bitwidth.getBitWidth(), 1);
  APInt allOnes = APInt::getAllOnesValue(bitwidth.getBitWidth());
  // bitmask = (((1 << width)-1) << offset) & 0xffffffff
  // dest = ((src2 << offset) & bitmask) | (src3 & ~bitmask)
  APInt bitmask = (one.shl(width) - 1).shl(offset).And(allOnes);
  APInt result = (src.shl(offset).And(bitmask)).Or(dst.And(~bitmask));
  return ConstantInt::get(Ty, result);
}

// Top level function to constant fold floating point intrinsics.
static Constant * ConstantFoldFPIntrinsic(OP::OpCode opcode, Type *Ty, const DxilIntrinsicOperands &IntrinsicOperands) { if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy()) return nullptr; OP::OpCodeClass opClass = OP::GetOpCodeClass(opcode); switch (opClass) { default: break; case OP::OpCodeClass::Unary: { assert(IntrinsicOperands.Size() == 1); ConstantFP *Op = IntrinsicOperands.GetConstantFloat(0); if (!IsValidOp(Op)) return nullptr; return ConstantFoldUnaryFPIntrinsic(opcode, Ty, Op); } case OP::OpCodeClass::Binary: { assert(IntrinsicOperands.Size() == 2); ConstantFP *Op1 = IntrinsicOperands.GetConstantFloat(0); ConstantFP *Op2 = IntrinsicOperands.GetConstantFloat(1); if (!IsValidOp(Op1) || !IsValidOp(Op2)) return nullptr; return ConstantFoldBinaryFPIntrinsic(opcode, Ty, Op1, Op2); } case OP::OpCodeClass::Tertiary: { assert(IntrinsicOperands.Size() == 3); ConstantFP *Op1 = IntrinsicOperands.GetConstantFloat(0); ConstantFP *Op2 = IntrinsicOperands.GetConstantFloat(1); ConstantFP *Op3 = IntrinsicOperands.GetConstantFloat(2); if (!IsValidOp(Op1) || !IsValidOp(Op2) || !IsValidOp(Op3)) return nullptr; return ConstantFoldTernaryFPIntrinsic(opcode, Ty, Op1, Op2, Op3); } case OP::OpCodeClass::Dot2: case OP::OpCodeClass::Dot3: case OP::OpCodeClass::Dot4: return ConstantFoldDot(opcode, Ty, IntrinsicOperands); case OP::OpCodeClass::MakeDouble: return ConstantFoldMakeDouble(Ty, IntrinsicOperands); } return nullptr; } // Top level function to constant fold integer intrinsics. static Constant * ConstantFoldIntIntrinsic(OP::OpCode opcode, Type *Ty, const DxilIntrinsicOperands &IntrinsicOperands) { if (Ty->getScalarSizeInBits() > (sizeof(int64_t) * CHAR_BIT)) return nullptr; OP::OpCodeClass opClass = OP::GetOpCodeClass(opcode); switch (opClass) { default: break; case OP::OpCodeClass::Unary: case OP::OpCodeClass::UnaryBits: { assert(IntrinsicOperands.Size() == 1); ConstantInt *Op = IntrinsicOperands.GetConstantInt(0); if (!Op) return nullptr; return ConstantFoldUnaryIntIntrinsic(opcode, Ty, Op); } case OP::OpCodeClass::Binary: { assert(IntrinsicOperands.Size() == 2); ConstantInt *Op1 = IntrinsicOperands.GetConstantInt(0); ConstantInt *Op2 = IntrinsicOperands.GetConstantInt(1); if (!Op1 || !Op2) return nullptr; return ConstantFoldBinaryIntIntrinsic(opcode, Ty, Op1, Op2); } case OP::OpCodeClass::Tertiary: { assert(IntrinsicOperands.Size() == 3); ConstantInt *Op1 = IntrinsicOperands.GetConstantInt(0); ConstantInt *Op2 = IntrinsicOperands.GetConstantInt(1); ConstantInt *Op3 = IntrinsicOperands.GetConstantInt(2); if (!Op1 || !Op2 || !Op3) return nullptr; return ConstantFoldTernaryIntIntrinsic(opcode, Ty, Op1, Op2, Op3); } case OP::OpCodeClass::Quaternary: { assert(IntrinsicOperands.Size() == 4); ConstantInt *Op1 = IntrinsicOperands.GetConstantInt(0); ConstantInt *Op2 = IntrinsicOperands.GetConstantInt(1); ConstantInt *Op3 = IntrinsicOperands.GetConstantInt(2); ConstantInt *Op4 = IntrinsicOperands.GetConstantInt(3); if (!Op1 || !Op2 || !Op3 || !Op4) return nullptr; return ConstantFoldQuaternaryIntInstrinsic(opcode, Ty, Op1, Op2, Op3, Op4); } case OP::OpCodeClass::IsHelperLane: return ConstantInt::get(Ty, (uint64_t)0); } return nullptr; } // External entry point to constant fold dxil intrinsics. // Called from the llvm constant folding routine. 
Constant *hlsl::ConstantFoldScalarCall(StringRef Name, Type *Ty, ArrayRef<Constant *> RawOperands) { OP::OpCode opcode; if (GetDxilOpcode(Name, RawOperands, opcode)) { DxilIntrinsicOperands IntrinsicOperands(RawOperands); if (Ty->isFloatingPointTy()) { return ConstantFoldFPIntrinsic(opcode, Ty, IntrinsicOperands); } else if (Ty->isIntegerTy()) { return ConstantFoldIntIntrinsic(opcode, Ty, IntrinsicOperands); } } else if (IsConvergentMarker(Name.data())) { assert(RawOperands.size() == 1); if (ConstantInt *C = dyn_cast<ConstantInt>(RawOperands[0])) return C; if (ConstantFP *C = dyn_cast<ConstantFP>(RawOperands[0])) return C; } return hlsl::ConstantFoldScalarCallExt(Name, Ty, RawOperands); } // External entry point to determine if we can constant fold calls to // the given function. We have to overestimate the set of functions because // we only have the function value here instead of the call. We need the // actual call to get the opcode for the intrinsic. bool hlsl::CanConstantFoldCallTo(const Function *F) { // Only constant fold dxil functions when we have a valid dxil module. if (!F->getParent()->HasDxilModule()) { assert(!OP::IsDxilOpFunc(F) && "dx.op function with no dxil module?"); return false; } if (IsConvergentMarker(F)) return true; // Lookup opcode class in dxil module. Set default value to invalid class. OP::OpCodeClass opClass = OP::OpCodeClass::NumOpClasses; const bool found = F->getParent()->GetDxilModule().GetOP()->GetOpCodeClass(F, opClass); // Return true for those dxil operation classes we can constant fold. if (found) { switch (opClass) { default: break; case OP::OpCodeClass::Unary: case OP::OpCodeClass::UnaryBits: case OP::OpCodeClass::Binary: case OP::OpCodeClass::Tertiary: case OP::OpCodeClass::Quaternary: case OP::OpCodeClass::Dot2: case OP::OpCodeClass::Dot3: case OP::OpCodeClass::Dot4: case OP::OpCodeClass::MakeDouble: return true; case OP::OpCodeClass::IsHelperLane: { const hlsl::ShaderModel *pSM = F->getParent()->GetDxilModule().GetShaderModel(); return !pSM->IsPS() && !pSM->IsLib(); } } } return hlsl::CanConstantFoldCallToExt(F); }
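// The ComputeBFE clamping rules above are easiest to see with concrete
// numbers. The sketch below is a minimal, self-contained illustration that
// mirrors them with plain 32-bit arithmetic instead of APInt; the name
// Ubfe32Sketch is hypothetical and nothing in the folding code above uses it.
namespace {
inline uint32_t Ubfe32Sketch(uint32_t width, uint32_t offset, uint32_t val) {
  width &= 31;  // width and offset are taken modulo the bit width (32),
  offset &= 31; // exactly as ComputeBFE does with APInt::And.
  if (width == 0)
    return 0; // a zero-width field always extracts 0
  if (width + offset < 32) {
    // Shift the field up to the top bits, then logically shift it back down.
    return (val << (32 - (width + offset))) >> (32 - width);
  }
  // The field runs off the top of the value: shift the remaining bits down.
  return val >> offset;
}
// Worked example: Ubfe32Sketch(8, 4, 0xABCD1234) extracts bits [4..11] and
// returns 0x23. Ibfe differs only in using an arithmetic right shift, which
// sign-extends the extracted field.
} // namespace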
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/CaptureTracking.cpp
//===--- CaptureTracking.cpp - Determine whether a pointer is captured ----===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains routines that help determine which pointers are captured. // A pointer value is captured if the function makes a copy of any part of the // pointer that outlives the call. Not being captured means, more or less, that // the pointer is only dereferenced and not stored in a global. Returning part // of the pointer as the function return value may or may not count as capturing // the pointer, depending on the context. // //===----------------------------------------------------------------------===// #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/CFG.h" #include "llvm/Analysis/CaptureTracking.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Instructions.h" using namespace llvm; CaptureTracker::~CaptureTracker() {} bool CaptureTracker::shouldExplore(const Use *U) { return true; } namespace { struct SimpleCaptureTracker : public CaptureTracker { explicit SimpleCaptureTracker(bool ReturnCaptures) : ReturnCaptures(ReturnCaptures), Captured(false) {} void tooManyUses() override { Captured = true; } bool captured(const Use *U) override { if (isa<ReturnInst>(U->getUser()) && !ReturnCaptures) return false; Captured = true; return true; } bool ReturnCaptures; bool Captured; }; struct NumberedInstCache { SmallDenseMap<const Instruction *, unsigned, 32> NumberedInsts; BasicBlock::const_iterator LastInstFound; unsigned LastInstPos; const BasicBlock *BB; NumberedInstCache(const BasicBlock *BasicB) : LastInstPos(0), BB(BasicB) { LastInstFound = BB->end(); } /// \brief Find the first instruction 'A' or 'B' in 'BB'. Number out /// instruction while walking 'BB'. const Instruction *find(const Instruction *A, const Instruction *B) { const Instruction *Inst = nullptr; assert(!(LastInstFound == BB->end() && LastInstPos != 0) && "Instruction supposed to be in NumberedInsts"); // Start the search with the instruction found in the last lookup round. auto II = BB->begin(); auto IE = BB->end(); if (LastInstFound != IE) II = std::next(LastInstFound); // Number all instructions up to the point where we find 'A' or 'B'. for (++LastInstPos; II != IE; ++II, ++LastInstPos) { Inst = cast<Instruction>(II); NumberedInsts[Inst] = LastInstPos; if (Inst == A || Inst == B) break; } assert(II != IE && "Instruction not found?"); LastInstFound = II; return Inst; } /// \brief Find out whether 'A' dominates 'B', meaning whether 'A' /// comes before 'B' in 'BB'. This is a simplification that considers /// cached instruction positions and ignores other basic blocks, being /// only relevant to compare relative instructions positions inside 'BB'. bool dominates(const Instruction *A, const Instruction *B) { assert(A->getParent() == B->getParent() && "Instructions must be in the same basic block!"); unsigned NA = NumberedInsts.lookup(A); unsigned NB = NumberedInsts.lookup(B); if (NA && NB) return NA < NB; if (NA) return true; if (NB) return false; return A == find(A, B); } }; /// Only find pointer captures which happen before the given instruction. Uses /// the dominator tree to determine whether one instruction is before another. 
/// Only support the case where the Value is defined in the same basic block
/// as the given instruction and the use.
struct CapturesBefore : public CaptureTracker {
  CapturesBefore(bool ReturnCaptures, const Instruction *I, DominatorTree *DT,
                 bool IncludeI)
      : LocalInstCache(I->getParent()), BeforeHere(I), DT(DT),
        ReturnCaptures(ReturnCaptures), IncludeI(IncludeI), Captured(false) {}

  void tooManyUses() override { Captured = true; }

  bool isSafeToPrune(Instruction *I) {
    BasicBlock *BB = I->getParent();
    // We explore this usage only if the usage can reach "BeforeHere".
    // If use is not reachable from entry, there is no need to explore.
    if (BeforeHere != I && !DT->isReachableFromEntry(BB))
      return true;

    // Compute the case where both instructions are inside the same basic
    // block. Since instructions in the same BB as BeforeHere are numbered in
    // 'LocalInstCache', avoid using 'dominates' and 'isPotentiallyReachable'
    // which are very expensive for large basic blocks.
    if (BB == BeforeHere->getParent()) {
      // 'I' dominates 'BeforeHere' => not safe to prune.
      //
      // The value defined by an invoke dominates an instruction only if it
      // dominates every instruction in UseBB. A PHI is dominated only if
      // the instruction dominates every possible use in the UseBB. Since
      // UseBB == BB, avoid pruning.
      if (isa<InvokeInst>(BeforeHere) || isa<PHINode>(I) || I == BeforeHere)
        return false;
      if (!LocalInstCache.dominates(BeforeHere, I))
        return false;

      // 'BeforeHere' comes before 'I', it's safe to prune if we also
      // guarantee that 'I' never reaches 'BeforeHere' through a back-edge or
      // by its successors, i.e., prune if:
      //
      //  (1) BB is the entry block or has no successors.
      //  (2) There's no path coming back through BB's successors.
      if (BB == &BB->getParent()->getEntryBlock() ||
          !BB->getTerminator()->getNumSuccessors())
        return true;

      SmallVector<BasicBlock*, 32> Worklist;
      Worklist.append(succ_begin(BB), succ_end(BB));
      if (!isPotentiallyReachableFromMany(Worklist, BB, DT))
        return true;

      return false;
    }

    // If the value is defined in the same basic block as use and BeforeHere,
    // there is no need to explore the use if BeforeHere dominates use.
    // Check whether there is a path from I to BeforeHere.
    if (BeforeHere != I && DT->dominates(BeforeHere, I) &&
        !isPotentiallyReachable(I, BeforeHere, DT))
      return true;

    return false;
  }

  bool shouldExplore(const Use *U) override {
    Instruction *I = cast<Instruction>(U->getUser());

    if (BeforeHere == I && !IncludeI)
      return false;

    if (isSafeToPrune(I))
      return false;

    return true;
  }

  bool captured(const Use *U) override {
    if (isa<ReturnInst>(U->getUser()) && !ReturnCaptures)
      return false;

    if (!shouldExplore(U))
      return false;

    Captured = true;
    return true;
  }

  NumberedInstCache LocalInstCache;
  const Instruction *BeforeHere;
  DominatorTree *DT;

  bool ReturnCaptures;
  bool IncludeI;

  bool Captured;
};
}

/// PointerMayBeCaptured - Return true if this pointer value may be captured
/// by the enclosing function (which is required to exist). This routine can
/// be expensive, so consider caching the results. The boolean ReturnCaptures
/// specifies whether returning the value (or part of it) from the function
/// counts as capturing it or not. The boolean StoreCaptures specifies whether
/// storing the value (or part of it) into memory anywhere automatically
/// counts as capturing it or not.
/// (A minimal sketch of a client-defined CaptureTracker is appended at the
/// end of this file.)
bool llvm::PointerMayBeCaptured(const Value *V, bool ReturnCaptures, bool StoreCaptures) { assert(!isa<GlobalValue>(V) && "It doesn't make sense to ask whether a global is captured."); // TODO: If StoreCaptures is not true, we could do Fancy analysis // to determine whether this store is not actually an escape point. // In that case, BasicAliasAnalysis should be updated as well to // take advantage of this. (void)StoreCaptures; SimpleCaptureTracker SCT(ReturnCaptures); PointerMayBeCaptured(V, &SCT); return SCT.Captured; } /// PointerMayBeCapturedBefore - Return true if this pointer value may be /// captured by the enclosing function (which is required to exist). If a /// DominatorTree is provided, only captures which happen before the given /// instruction are considered. This routine can be expensive, so consider /// caching the results. The boolean ReturnCaptures specifies whether /// returning the value (or part of it) from the function counts as capturing /// it or not. The boolean StoreCaptures specified whether storing the value /// (or part of it) into memory anywhere automatically counts as capturing it /// or not. bool llvm::PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, bool StoreCaptures, const Instruction *I, DominatorTree *DT, bool IncludeI) { assert(!isa<GlobalValue>(V) && "It doesn't make sense to ask whether a global is captured."); if (!DT) return PointerMayBeCaptured(V, ReturnCaptures, StoreCaptures); // TODO: See comment in PointerMayBeCaptured regarding what could be done // with StoreCaptures. CapturesBefore CB(ReturnCaptures, I, DT, IncludeI); PointerMayBeCaptured(V, &CB); return CB.Captured; } /// TODO: Write a new FunctionPass AliasAnalysis so that it can keep /// a cache. Then we can move the code from BasicAliasAnalysis into /// that path, and remove this threshold. static int const Threshold = 20; void llvm::PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker) { assert(V->getType()->isPointerTy() && "Capture is for pointers only!"); SmallVector<const Use *, Threshold> Worklist; SmallSet<const Use *, Threshold> Visited; int Count = 0; for (const Use &U : V->uses()) { // If there are lots of uses, conservatively say that the value // is captured to avoid taking too much compile time. if (Count++ >= Threshold) return Tracker->tooManyUses(); if (!Tracker->shouldExplore(&U)) continue; Visited.insert(&U); Worklist.push_back(&U); } while (!Worklist.empty()) { const Use *U = Worklist.pop_back_val(); Instruction *I = cast<Instruction>(U->getUser()); V = U->get(); switch (I->getOpcode()) { case Instruction::Call: case Instruction::Invoke: { CallSite CS(I); // Not captured if the callee is readonly, doesn't return a copy through // its return value and doesn't unwind (a readonly function can leak bits // by throwing an exception or not depending on the input value). if (CS.onlyReadsMemory() && CS.doesNotThrow() && I->getType()->isVoidTy()) break; // Not captured if only passed via 'nocapture' arguments. Note that // calling a function pointer does not in itself cause the pointer to // be captured. This is a subtle point considering that (for example) // the callee might return its own address. It is analogous to saying // that loading a value from a pointer does not cause the pointer to be // captured, even though the loaded value might be the pointer itself // (think of self-referential objects). 
CallSite::arg_iterator B = CS.arg_begin(), E = CS.arg_end(); for (CallSite::arg_iterator A = B; A != E; ++A) if (A->get() == V && !CS.doesNotCapture(A - B)) // The parameter is not marked 'nocapture' - captured. if (Tracker->captured(U)) return; break; } case Instruction::Load: // Loading from a pointer does not cause it to be captured. break; case Instruction::VAArg: // "va-arg" from a pointer does not cause it to be captured. break; case Instruction::Store: if (V == I->getOperand(0)) // Stored the pointer - conservatively assume it may be captured. if (Tracker->captured(U)) return; // Storing to the pointee does not cause the pointer to be captured. break; case Instruction::BitCast: case Instruction::GetElementPtr: case Instruction::PHI: case Instruction::Select: case Instruction::AddrSpaceCast: // The original value is not captured via this if the new value isn't. Count = 0; for (Use &UU : I->uses()) { // If there are lots of uses, conservatively say that the value // is captured to avoid taking too much compile time. if (Count++ >= Threshold) return Tracker->tooManyUses(); if (Visited.insert(&UU).second) if (Tracker->shouldExplore(&UU)) Worklist.push_back(&UU); } break; case Instruction::ICmp: // Don't count comparisons of a no-alias return value against null as // captures. This allows us to ignore comparisons of malloc results // with null, for example. if (ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(I->getOperand(1))) if (CPN->getType()->getAddressSpace() == 0) if (isNoAliasCall(V->stripPointerCasts())) break; // Otherwise, be conservative. There are crazy ways to capture pointers // using comparisons. if (Tracker->captured(U)) return; break; default: // Something else - be conservative and say it is captured. if (Tracker->captured(U)) return; break; } } // All uses examined. }
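// A minimal sketch of a client-defined tracker, modeled on the
// SimpleCaptureTracker above: it records the first capturing use instead of a
// plain flag. The name FirstCaptureTracker is hypothetical; nothing in LLVM
// depends on it, and it exists only to illustrate the CaptureTracker callback
// interface used throughout this file.
namespace {
struct FirstCaptureTracker : public CaptureTracker {
  FirstCaptureTracker() : FirstCapture(nullptr), TooManyUsesSeen(false) {}

  // Called when the walk gives up because the value has too many uses.
  void tooManyUses() override { TooManyUsesSeen = true; }

  // Called for each capturing use; returning true stops the traversal.
  bool captured(const Use *U) override {
    FirstCapture = U;
    return true;
  }

  const Use *FirstCapture;
  bool TooManyUsesSeen;
};
}

// Intended use (V must be a pointer-typed, non-global Value, as asserted by
// PointerMayBeCaptured):
//
//   FirstCaptureTracker FCT;
//   PointerMayBeCaptured(V, &FCT);
//   bool MayBeCaptured = FCT.TooManyUsesSeen || FCT.FirstCapture != nullptr;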
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/CFLAliasAnalysis.cpp
//===- CFLAliasAnalysis.cpp - CFL-Based Alias Analysis Implementation ------==// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements a CFL-based context-insensitive alias analysis // algorithm. It does not depend on types. The algorithm is a mixture of the one // described in "Demand-driven alias analysis for C" by Xin Zheng and Radu // Rugina, and "Fast algorithms for Dyck-CFL-reachability with applications to // Alias Analysis" by Zhang Q, Lyu M R, Yuan H, and Su Z. -- to summarize the // papers, we build a graph of the uses of a variable, where each node is a // memory location, and each edge is an action that happened on that memory // location. The "actions" can be one of Dereference, Reference, or Assign. // // Two variables are considered as aliasing iff you can reach one value's node // from the other value's node and the language formed by concatenating all of // the edge labels (actions) conforms to a context-free grammar. // // Because this algorithm requires a graph search on each query, we execute the // algorithm outlined in "Fast algorithms..." (mentioned above) // in order to transform the graph into sets of variables that may alias in // ~nlogn time (n = number of variables.), which makes queries take constant // time. //===----------------------------------------------------------------------===// #include "StratifiedSets.h" #include "llvm/ADT/BitVector.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/None.h" #include "llvm/ADT/Optional.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/Passes.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Function.h" #include "llvm/IR/InstVisitor.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/ValueHandle.h" #include "llvm/Pass.h" #include "llvm/Support/Allocator.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" #include <algorithm> #include <cassert> #include <forward_list> #include <memory> #include <tuple> using namespace llvm; #define DEBUG_TYPE "cfl-aa" // Try to go from a Value* to a Function*. Never returns nullptr. static Optional<Function *> parentFunctionOfValue(Value *); // Returns possible functions called by the Inst* into the given // SmallVectorImpl. Returns true if targets found, false otherwise. // This is templated because InvokeInst/CallInst give us the same // set of functions that we care about, and I don't like repeating // myself. template <typename Inst> static bool getPossibleTargets(Inst *, SmallVectorImpl<Function *> &); // Some instructions need to have their users tracked. Instructions like // `add` require you to get the users of the Instruction* itself, other // instructions like `store` require you to get the users of the first // operand. This function gets the "proper" value to track for each // type of instruction we support. static Optional<Value *> getTargetValue(Instruction *); // There are certain instructions (i.e. FenceInst, etc.) that we ignore. // This notes that we should ignore those. static bool hasUsefulEdges(Instruction *); const StratifiedIndex StratifiedLink::SetSentinel = std::numeric_limits<StratifiedIndex>::max(); namespace { // StratifiedInfo Attribute things. 
typedef unsigned StratifiedAttr; LLVM_CONSTEXPR unsigned MaxStratifiedAttrIndex = NumStratifiedAttrs; LLVM_CONSTEXPR unsigned AttrAllIndex = 0; LLVM_CONSTEXPR unsigned AttrGlobalIndex = 1; LLVM_CONSTEXPR unsigned AttrUnknownIndex = 2; LLVM_CONSTEXPR unsigned AttrFirstArgIndex = 3; LLVM_CONSTEXPR unsigned AttrLastArgIndex = MaxStratifiedAttrIndex; LLVM_CONSTEXPR unsigned AttrMaxNumArgs = AttrLastArgIndex - AttrFirstArgIndex; LLVM_CONSTEXPR StratifiedAttr AttrNone = 0; LLVM_CONSTEXPR StratifiedAttr AttrUnknown = 1 << AttrUnknownIndex; LLVM_CONSTEXPR StratifiedAttr AttrAll = ~AttrNone; // \brief StratifiedSets call for knowledge of "direction", so this is how we // represent that locally. enum class Level { Same, Above, Below }; // \brief Edges can be one of four "weights" -- each weight must have an inverse // weight (Assign has Assign; Reference has Dereference). enum class EdgeType { // The weight assigned when assigning from or to a value. For example, in: // %b = getelementptr %a, 0 // ...The relationships are %b assign %a, and %a assign %b. This used to be // two edges, but having a distinction bought us nothing. Assign, // The edge used when we have an edge going from some handle to a Value. // Examples of this include: // %b = load %a (%b Dereference %a) // %b = extractelement %a, 0 (%a Dereference %b) Dereference, // The edge used when our edge goes from a value to a handle that may have // contained it at some point. Examples: // %b = load %a (%a Reference %b) // %b = extractelement %a, 0 (%b Reference %a) Reference }; // \brief Encodes the notion of a "use" struct Edge { // \brief Which value the edge is coming from Value *From; // \brief Which value the edge is pointing to Value *To; // \brief Edge weight EdgeType Weight; // \brief Whether we aliased any external values along the way that may be // invisible to the analysis (i.e. landingpad for exceptions, calls for // interprocedural analysis, etc.) StratifiedAttrs AdditionalAttrs; Edge(Value *From, Value *To, EdgeType W, StratifiedAttrs A) : From(From), To(To), Weight(W), AdditionalAttrs(A) {} }; // \brief Information we have about a function and would like to keep around struct FunctionInfo { StratifiedSets<Value *> Sets; // Lots of functions have < 4 returns. Adjust as necessary. SmallVector<Value *, 4> ReturnedValues; FunctionInfo(StratifiedSets<Value *> &&S, SmallVector<Value *, 4> &&RV) : Sets(std::move(S)), ReturnedValues(std::move(RV)) {} }; struct CFLAliasAnalysis; struct FunctionHandle : public CallbackVH { FunctionHandle(Function *Fn, CFLAliasAnalysis *CFLAA) : CallbackVH(Fn), CFLAA(CFLAA) { assert(Fn != nullptr); assert(CFLAA != nullptr); } ~FunctionHandle() override {} void deleted() override { removeSelfFromCache(); } void allUsesReplacedWith(Value *) override { removeSelfFromCache(); } private: CFLAliasAnalysis *CFLAA; void removeSelfFromCache(); }; struct CFLAliasAnalysis : public ImmutablePass, public AliasAnalysis { private: /// \brief Cached mapping of Functions to their StratifiedSets. /// If a function's sets are currently being built, it is marked /// in the cache as an Optional without a value. This way, if we /// have any kind of recursion, it is discernable from a function /// that simply has empty sets. 
DenseMap<Function *, Optional<FunctionInfo>> Cache; std::forward_list<FunctionHandle> Handles; public: static char ID; CFLAliasAnalysis() : ImmutablePass(ID) { initializeCFLAliasAnalysisPass(*PassRegistry::getPassRegistry()); } ~CFLAliasAnalysis() override {} void getAnalysisUsage(AnalysisUsage &AU) const override { AliasAnalysis::getAnalysisUsage(AU); } void *getAdjustedAnalysisPointer(const void *ID) override { if (ID == &AliasAnalysis::ID) return (AliasAnalysis *)this; return this; } /// \brief Inserts the given Function into the cache. void scan(Function *Fn); void evict(Function *Fn) { Cache.erase(Fn); } /// \brief Ensures that the given function is available in the cache. /// Returns the appropriate entry from the cache. const Optional<FunctionInfo> &ensureCached(Function *Fn) { auto Iter = Cache.find(Fn); if (Iter == Cache.end()) { scan(Fn); Iter = Cache.find(Fn); assert(Iter != Cache.end()); assert(Iter->second.hasValue()); } return Iter->second; } AliasResult query(const MemoryLocation &LocA, const MemoryLocation &LocB); AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) override { if (LocA.Ptr == LocB.Ptr) { if (LocA.Size == LocB.Size) { return MustAlias; } else { return PartialAlias; } } // Comparisons between global variables and other constants should be // handled by BasicAA. // TODO: ConstantExpr handling -- CFLAA may report NoAlias when comparing // a GlobalValue and ConstantExpr, but every query needs to have at least // one Value tied to a Function, and neither GlobalValues nor ConstantExprs // are. if (isa<Constant>(LocA.Ptr) && isa<Constant>(LocB.Ptr)) { return AliasAnalysis::alias(LocA, LocB); } AliasResult QueryResult = query(LocA, LocB); if (QueryResult == MayAlias) return AliasAnalysis::alias(LocA, LocB); return QueryResult; } bool doInitialization(Module &M) override; }; void FunctionHandle::removeSelfFromCache() { assert(CFLAA != nullptr); auto *Val = getValPtr(); CFLAA->evict(cast<Function>(Val)); setValPtr(nullptr); } // \brief Gets the edges our graph should have, based on an Instruction* class GetEdgesVisitor : public InstVisitor<GetEdgesVisitor, void> { CFLAliasAnalysis &AA; SmallVectorImpl<Edge> &Output; public: GetEdgesVisitor(CFLAliasAnalysis &AA, SmallVectorImpl<Edge> &Output) : AA(AA), Output(Output) {} void visitInstruction(Instruction &) { llvm_unreachable("Unsupported instruction encountered"); } void visitPtrToIntInst(PtrToIntInst &Inst) { auto *Ptr = Inst.getOperand(0); Output.push_back(Edge(Ptr, Ptr, EdgeType::Assign, AttrUnknown)); } void visitIntToPtrInst(IntToPtrInst &Inst) { auto *Ptr = &Inst; Output.push_back(Edge(Ptr, Ptr, EdgeType::Assign, AttrUnknown)); } void visitCastInst(CastInst &Inst) { Output.push_back( Edge(&Inst, Inst.getOperand(0), EdgeType::Assign, AttrNone)); } void visitBinaryOperator(BinaryOperator &Inst) { auto *Op1 = Inst.getOperand(0); auto *Op2 = Inst.getOperand(1); Output.push_back(Edge(&Inst, Op1, EdgeType::Assign, AttrNone)); Output.push_back(Edge(&Inst, Op2, EdgeType::Assign, AttrNone)); } void visitAtomicCmpXchgInst(AtomicCmpXchgInst &Inst) { auto *Ptr = Inst.getPointerOperand(); auto *Val = Inst.getNewValOperand(); Output.push_back(Edge(Ptr, Val, EdgeType::Dereference, AttrNone)); } void visitAtomicRMWInst(AtomicRMWInst &Inst) { auto *Ptr = Inst.getPointerOperand(); auto *Val = Inst.getValOperand(); Output.push_back(Edge(Ptr, Val, EdgeType::Dereference, AttrNone)); } void visitPHINode(PHINode &Inst) { for (Value *Val : Inst.incoming_values()) { Output.push_back(Edge(&Inst, Val, EdgeType::Assign, 
AttrNone)); } } void visitGetElementPtrInst(GetElementPtrInst &Inst) { auto *Op = Inst.getPointerOperand(); Output.push_back(Edge(&Inst, Op, EdgeType::Assign, AttrNone)); for (auto I = Inst.idx_begin(), E = Inst.idx_end(); I != E; ++I) Output.push_back(Edge(&Inst, *I, EdgeType::Assign, AttrNone)); } void visitSelectInst(SelectInst &Inst) { // Condition is not processed here (The actual statement producing // the condition result is processed elsewhere). For select, the // condition is evaluated, but not loaded, stored, or assigned // simply as a result of being the condition of a select. auto *TrueVal = Inst.getTrueValue(); Output.push_back(Edge(&Inst, TrueVal, EdgeType::Assign, AttrNone)); auto *FalseVal = Inst.getFalseValue(); Output.push_back(Edge(&Inst, FalseVal, EdgeType::Assign, AttrNone)); } void visitAllocaInst(AllocaInst &) {} void visitLoadInst(LoadInst &Inst) { auto *Ptr = Inst.getPointerOperand(); auto *Val = &Inst; Output.push_back(Edge(Val, Ptr, EdgeType::Reference, AttrNone)); } void visitStoreInst(StoreInst &Inst) { auto *Ptr = Inst.getPointerOperand(); auto *Val = Inst.getValueOperand(); Output.push_back(Edge(Ptr, Val, EdgeType::Dereference, AttrNone)); } void visitVAArgInst(VAArgInst &Inst) { // We can't fully model va_arg here. For *Ptr = Inst.getOperand(0), it does // two things: // 1. Loads a value from *((T*)*Ptr). // 2. Increments (stores to) *Ptr by some target-specific amount. // For now, we'll handle this like a landingpad instruction (by placing the // result in its own group, and having that group alias externals). auto *Val = &Inst; Output.push_back(Edge(Val, Val, EdgeType::Assign, AttrAll)); } static bool isFunctionExternal(Function *Fn) { return Fn->isDeclaration() || !Fn->hasLocalLinkage(); } // Gets whether the sets at Index1 above, below, or equal to the sets at // Index2. Returns None if they are not in the same set chain. static Optional<Level> getIndexRelation(const StratifiedSets<Value *> &Sets, StratifiedIndex Index1, StratifiedIndex Index2) { if (Index1 == Index2) return Level::Same; const auto *Current = &Sets.getLink(Index1); while (Current->hasBelow()) { if (Current->Below == Index2) return Level::Below; Current = &Sets.getLink(Current->Below); } Current = &Sets.getLink(Index1); while (Current->hasAbove()) { if (Current->Above == Index2) return Level::Above; Current = &Sets.getLink(Current->Above); } return NoneType(); } bool tryInterproceduralAnalysis(const SmallVectorImpl<Function *> &Fns, Value *FuncValue, const iterator_range<User::op_iterator> &Args) { const unsigned ExpectedMaxArgs = 8; const unsigned MaxSupportedArgs = 50; assert(Fns.size() > 0); // I put this here to give us an upper bound on time taken by IPA. Is it // really (realistically) needed? Keep in mind that we do have an n^2 algo. if (std::distance(Args.begin(), Args.end()) > (int)MaxSupportedArgs) return false; // Exit early if we'll fail anyway for (auto *Fn : Fns) { if (isFunctionExternal(Fn) || Fn->isVarArg()) return false; auto &MaybeInfo = AA.ensureCached(Fn); if (!MaybeInfo.hasValue()) return false; } SmallVector<Value *, ExpectedMaxArgs> Arguments(Args.begin(), Args.end()); SmallVector<StratifiedInfo, ExpectedMaxArgs> Parameters; for (auto *Fn : Fns) { auto &Info = *AA.ensureCached(Fn); auto &Sets = Info.Sets; auto &RetVals = Info.ReturnedValues; Parameters.clear(); for (auto &Param : Fn->args()) { auto MaybeInfo = Sets.find(&Param); // Did a new parameter somehow get added to the function/slip by? 
if (!MaybeInfo.hasValue()) return false; Parameters.push_back(*MaybeInfo); } // Adding an edge from argument -> return value for each parameter that // may alias the return value for (unsigned I = 0, E = Parameters.size(); I != E; ++I) { auto &ParamInfo = Parameters[I]; auto &ArgVal = Arguments[I]; bool AddEdge = false; StratifiedAttrs Externals; for (unsigned X = 0, XE = RetVals.size(); X != XE; ++X) { auto MaybeInfo = Sets.find(RetVals[X]); if (!MaybeInfo.hasValue()) return false; auto &RetInfo = *MaybeInfo; auto RetAttrs = Sets.getLink(RetInfo.Index).Attrs; auto ParamAttrs = Sets.getLink(ParamInfo.Index).Attrs; auto MaybeRelation = getIndexRelation(Sets, ParamInfo.Index, RetInfo.Index); if (MaybeRelation.hasValue()) { AddEdge = true; Externals |= RetAttrs | ParamAttrs; } } if (AddEdge) Output.push_back(Edge(FuncValue, ArgVal, EdgeType::Assign, StratifiedAttrs().flip())); } if (Parameters.size() != Arguments.size()) return false; // Adding edges between arguments for arguments that may end up aliasing // each other. This is necessary for functions such as // void foo(int** a, int** b) { *a = *b; } // (Technically, the proper sets for this would be those below // Arguments[I] and Arguments[X], but our algorithm will produce // extremely similar, and equally correct, results either way) for (unsigned I = 0, E = Arguments.size(); I != E; ++I) { auto &MainVal = Arguments[I]; auto &MainInfo = Parameters[I]; auto &MainAttrs = Sets.getLink(MainInfo.Index).Attrs; for (unsigned X = I + 1; X != E; ++X) { auto &SubInfo = Parameters[X]; auto &SubVal = Arguments[X]; auto &SubAttrs = Sets.getLink(SubInfo.Index).Attrs; auto MaybeRelation = getIndexRelation(Sets, MainInfo.Index, SubInfo.Index); if (!MaybeRelation.hasValue()) continue; auto NewAttrs = SubAttrs | MainAttrs; Output.push_back(Edge(MainVal, SubVal, EdgeType::Assign, NewAttrs)); } } } return true; } template <typename InstT> void visitCallLikeInst(InstT &Inst) { SmallVector<Function *, 4> Targets; if (getPossibleTargets(&Inst, Targets)) { if (tryInterproceduralAnalysis(Targets, &Inst, Inst.arg_operands())) return; // Cleanup from interprocedural analysis Output.clear(); } for (Value *V : Inst.arg_operands()) Output.push_back(Edge(&Inst, V, EdgeType::Assign, AttrAll)); } void visitCallInst(CallInst &Inst) { visitCallLikeInst(Inst); } void visitInvokeInst(InvokeInst &Inst) { visitCallLikeInst(Inst); } // Because vectors/aggregates are immutable and unaddressable, // there's nothing we can do to coax a value out of them, other // than calling Extract{Element,Value}. We can effectively treat // them as pointers to arbitrary memory locations we can store in // and load from. void visitExtractElementInst(ExtractElementInst &Inst) { auto *Ptr = Inst.getVectorOperand(); auto *Val = &Inst; Output.push_back(Edge(Val, Ptr, EdgeType::Reference, AttrNone)); } void visitInsertElementInst(InsertElementInst &Inst) { auto *Vec = Inst.getOperand(0); auto *Val = Inst.getOperand(1); Output.push_back(Edge(&Inst, Vec, EdgeType::Assign, AttrNone)); Output.push_back(Edge(&Inst, Val, EdgeType::Dereference, AttrNone)); } void visitLandingPadInst(LandingPadInst &Inst) { // Exceptions come from "nowhere", from our analysis' perspective. 
// So we place the instruction its own group, noting that said group may // alias externals Output.push_back(Edge(&Inst, &Inst, EdgeType::Assign, AttrAll)); } void visitInsertValueInst(InsertValueInst &Inst) { auto *Agg = Inst.getOperand(0); auto *Val = Inst.getOperand(1); Output.push_back(Edge(&Inst, Agg, EdgeType::Assign, AttrNone)); Output.push_back(Edge(&Inst, Val, EdgeType::Dereference, AttrNone)); } void visitExtractValueInst(ExtractValueInst &Inst) { auto *Ptr = Inst.getAggregateOperand(); Output.push_back(Edge(&Inst, Ptr, EdgeType::Reference, AttrNone)); } void visitShuffleVectorInst(ShuffleVectorInst &Inst) { auto *From1 = Inst.getOperand(0); auto *From2 = Inst.getOperand(1); Output.push_back(Edge(&Inst, From1, EdgeType::Assign, AttrNone)); Output.push_back(Edge(&Inst, From2, EdgeType::Assign, AttrNone)); } void visitConstantExpr(ConstantExpr *CE) { switch (CE->getOpcode()) { default: llvm_unreachable("Unknown instruction type encountered!"); // Build the switch statement using the Instruction.def file. #define HANDLE_INST(NUM, OPCODE, CLASS) \ case Instruction::OPCODE: \ visit##OPCODE(*(CLASS *)CE); \ break; #include "llvm/IR/Instruction.def" } } }; // For a given instruction, we need to know which Value* to get the // users of in order to build our graph. In some cases (i.e. add), // we simply need the Instruction*. In other cases (i.e. store), // finding the users of the Instruction* is useless; we need to find // the users of the first operand. This handles determining which // value to follow for us. // // Note: we *need* to keep this in sync with GetEdgesVisitor. Add // something to GetEdgesVisitor, add it here -- remove something from // GetEdgesVisitor, remove it here. class GetTargetValueVisitor : public InstVisitor<GetTargetValueVisitor, Value *> { public: Value *visitInstruction(Instruction &Inst) { return &Inst; } Value *visitStoreInst(StoreInst &Inst) { return Inst.getPointerOperand(); } Value *visitAtomicCmpXchgInst(AtomicCmpXchgInst &Inst) { return Inst.getPointerOperand(); } Value *visitAtomicRMWInst(AtomicRMWInst &Inst) { return Inst.getPointerOperand(); } Value *visitInsertElementInst(InsertElementInst &Inst) { return Inst.getOperand(0); } Value *visitInsertValueInst(InsertValueInst &Inst) { return Inst.getAggregateOperand(); } }; // Set building requires a weighted bidirectional graph. template <typename EdgeTypeT> class WeightedBidirectionalGraph { public: typedef std::size_t Node; private: const static Node StartNode = Node(0); struct Edge { EdgeTypeT Weight; Node Other; Edge(const EdgeTypeT &W, const Node &N) : Weight(W), Other(N) {} bool operator==(const Edge &E) const { return Weight == E.Weight && Other == E.Other; } bool operator!=(const Edge &E) const { return !operator==(E); } }; struct NodeImpl { std::vector<Edge> Edges; }; std::vector<NodeImpl> NodeImpls; bool inbounds(Node NodeIndex) const { return NodeIndex < NodeImpls.size(); } const NodeImpl &getNode(Node N) const { return NodeImpls[N]; } NodeImpl &getNode(Node N) { return NodeImpls[N]; } public: // ----- Various Edge iterators for the graph ----- // // \brief Iterator for edges. Because this graph is bidirected, we don't // allow modificaiton of the edges using this iterator. Additionally, the // iterator becomes invalid if you add edges to or from the node you're // getting the edges of. 
struct EdgeIterator { using iterator_category = std::forward_iterator_tag; using value_type = std::tuple<EdgeTypeT, Node *>; using difference_type = std::ptrdiff_t; using pointer = value_type *; using reference = value_type &; EdgeIterator(const typename std::vector<Edge>::const_iterator &Iter) : Current(Iter) {} EdgeIterator(NodeImpl &Impl) : Current(Impl.begin()) {} EdgeIterator &operator++() { ++Current; return *this; } EdgeIterator operator++(int) { EdgeIterator Copy(Current); operator++(); return Copy; } std::tuple<EdgeTypeT, Node> &operator*() { Store = std::make_tuple(Current->Weight, Current->Other); return Store; } bool operator==(const EdgeIterator &Other) const { return Current == Other.Current; } bool operator!=(const EdgeIterator &Other) const { return !operator==(Other); } private: typename std::vector<Edge>::const_iterator Current; std::tuple<EdgeTypeT, Node> Store; }; // Wrapper for EdgeIterator with begin()/end() calls. struct EdgeIterable { EdgeIterable(const std::vector<Edge> &Edges) : BeginIter(Edges.begin()), EndIter(Edges.end()) {} EdgeIterator begin() { return EdgeIterator(BeginIter); } EdgeIterator end() { return EdgeIterator(EndIter); } private: typename std::vector<Edge>::const_iterator BeginIter; typename std::vector<Edge>::const_iterator EndIter; }; // ----- Actual graph-related things ----- // WeightedBidirectionalGraph() {} WeightedBidirectionalGraph(WeightedBidirectionalGraph<EdgeTypeT> &&Other) : NodeImpls(std::move(Other.NodeImpls)) {} WeightedBidirectionalGraph<EdgeTypeT> & operator=(WeightedBidirectionalGraph<EdgeTypeT> &&Other) { NodeImpls = std::move(Other.NodeImpls); return *this; } Node addNode() { auto Index = NodeImpls.size(); auto NewNode = Node(Index); NodeImpls.push_back(NodeImpl()); return NewNode; } void addEdge(Node From, Node To, const EdgeTypeT &Weight, const EdgeTypeT &ReverseWeight) { assert(inbounds(From)); assert(inbounds(To)); auto &FromNode = getNode(From); auto &ToNode = getNode(To); FromNode.Edges.push_back(Edge(Weight, To)); ToNode.Edges.push_back(Edge(ReverseWeight, From)); } EdgeIterable edgesFor(const Node &N) const { const auto &Node = getNode(N); return EdgeIterable(Node.Edges); } bool empty() const { return NodeImpls.empty(); } std::size_t size() const { return NodeImpls.size(); } // \brief Gets an arbitrary node in the graph as a starting point for // traversal. Node getEntryNode() { assert(inbounds(StartNode)); return StartNode; } }; typedef WeightedBidirectionalGraph<std::pair<EdgeType, StratifiedAttrs>> GraphT; typedef DenseMap<Value *, GraphT::Node> NodeMapT; } // -- Setting up/registering CFLAA pass -- // char CFLAliasAnalysis::ID = 0; INITIALIZE_AG_PASS(CFLAliasAnalysis, AliasAnalysis, "cfl-aa", "CFL-Based AA implementation", false, true, false) ImmutablePass *llvm::createCFLAliasAnalysisPass() { return new CFLAliasAnalysis(); } //===----------------------------------------------------------------------===// // Function declarations that require types defined in the namespace above //===----------------------------------------------------------------------===// // Given an argument number, returns the appropriate Attr index to set. static StratifiedAttr argNumberToAttrIndex(StratifiedAttr); // Given a Value, potentially return which AttrIndex it maps to. static Optional<StratifiedAttr> valueToAttrIndex(Value *Val); // Gets the inverse of a given EdgeType. static EdgeType flipWeight(EdgeType); // Gets edges of the given Instruction*, writing them to the SmallVector*. 
static void argsToEdges(CFLAliasAnalysis &, Instruction *, SmallVectorImpl<Edge> &); // Gets edges of the given ConstantExpr*, writing them to the SmallVector*. static void argsToEdges(CFLAliasAnalysis &, ConstantExpr *, SmallVectorImpl<Edge> &); // Gets the "Level" that one should travel in StratifiedSets // given an EdgeType. static Level directionOfEdgeType(EdgeType); // Builds the graph needed for constructing the StratifiedSets for the // given function static void buildGraphFrom(CFLAliasAnalysis &, Function *, SmallVectorImpl<Value *> &, NodeMapT &, GraphT &); // Gets the edges of a ConstantExpr as if it was an Instruction. This // function also acts on any nested ConstantExprs, adding the edges // of those to the given SmallVector as well. static void constexprToEdges(CFLAliasAnalysis &, ConstantExpr &, SmallVectorImpl<Edge> &); // Given an Instruction, this will add it to the graph, along with any // Instructions that are potentially only available from said Instruction // For example, given the following line: // %0 = load i16* getelementptr ([1 x i16]* @a, 0, 0), align 2 // addInstructionToGraph would add both the `load` and `getelementptr` // instructions to the graph appropriately. static void addInstructionToGraph(CFLAliasAnalysis &, Instruction &, SmallVectorImpl<Value *> &, NodeMapT &, GraphT &); // Notes whether it would be pointless to add the given Value to our sets. static bool canSkipAddingToSets(Value *Val); // Builds the graph + StratifiedSets for a function. static FunctionInfo buildSetsFrom(CFLAliasAnalysis &, Function *); static Optional<Function *> parentFunctionOfValue(Value *Val) { if (auto *Inst = dyn_cast<Instruction>(Val)) { auto *Bb = Inst->getParent(); return Bb->getParent(); } if (auto *Arg = dyn_cast<Argument>(Val)) return Arg->getParent(); return NoneType(); } template <typename Inst> static bool getPossibleTargets(Inst *Call, SmallVectorImpl<Function *> &Output) { if (auto *Fn = Call->getCalledFunction()) { Output.push_back(Fn); return true; } // TODO: If the call is indirect, we might be able to enumerate all potential // targets of the call and return them, rather than just failing. return false; } static Optional<Value *> getTargetValue(Instruction *Inst) { GetTargetValueVisitor V; return V.visit(Inst); } static bool hasUsefulEdges(Instruction *Inst) { bool IsNonInvokeTerminator = isa<TerminatorInst>(Inst) && !isa<InvokeInst>(Inst); return !isa<CmpInst>(Inst) && !isa<FenceInst>(Inst) && !IsNonInvokeTerminator; } static bool hasUsefulEdges(ConstantExpr *CE) { // ConstantExpr doens't have terminators, invokes, or fences, so only needs // to check for compares. return CE->getOpcode() != Instruction::ICmp && CE->getOpcode() != Instruction::FCmp; } static Optional<StratifiedAttr> valueToAttrIndex(Value *Val) { if (isa<GlobalValue>(Val)) return AttrGlobalIndex; if (auto *Arg = dyn_cast<Argument>(Val)) // Only pointer arguments should have the argument attribute, // because things can't escape through scalars without us seeing a // cast, and thus, interaction with them doesn't matter. 
if (!Arg->hasNoAliasAttr() && Arg->getType()->isPointerTy()) return argNumberToAttrIndex(Arg->getArgNo()); return NoneType(); } static StratifiedAttr argNumberToAttrIndex(unsigned ArgNum) { if (ArgNum >= AttrMaxNumArgs) return AttrAllIndex; return ArgNum + AttrFirstArgIndex; } static EdgeType flipWeight(EdgeType Initial) { switch (Initial) { case EdgeType::Assign: return EdgeType::Assign; case EdgeType::Dereference: return EdgeType::Reference; case EdgeType::Reference: return EdgeType::Dereference; } llvm_unreachable("Incomplete coverage of EdgeType enum"); } static void argsToEdges(CFLAliasAnalysis &Analysis, Instruction *Inst, SmallVectorImpl<Edge> &Output) { assert(hasUsefulEdges(Inst) && "Expected instructions to have 'useful' edges"); GetEdgesVisitor v(Analysis, Output); v.visit(Inst); } static void argsToEdges(CFLAliasAnalysis &Analysis, ConstantExpr *CE, SmallVectorImpl<Edge> &Output) { assert(hasUsefulEdges(CE) && "Expected constant expr to have 'useful' edges"); GetEdgesVisitor v(Analysis, Output); v.visitConstantExpr(CE); } static Level directionOfEdgeType(EdgeType Weight) { switch (Weight) { case EdgeType::Reference: return Level::Above; case EdgeType::Dereference: return Level::Below; case EdgeType::Assign: return Level::Same; } llvm_unreachable("Incomplete switch coverage"); } static void constexprToEdges(CFLAliasAnalysis &Analysis, ConstantExpr &CExprToCollapse, SmallVectorImpl<Edge> &Results) { SmallVector<ConstantExpr *, 4> Worklist; Worklist.push_back(&CExprToCollapse); SmallVector<Edge, 8> ConstexprEdges; SmallPtrSet<ConstantExpr *, 4> Visited; while (!Worklist.empty()) { auto *CExpr = Worklist.pop_back_val(); if (!hasUsefulEdges(CExpr)) continue; ConstexprEdges.clear(); argsToEdges(Analysis, CExpr, ConstexprEdges); for (auto &Edge : ConstexprEdges) { if (auto *Nested = dyn_cast<ConstantExpr>(Edge.From)) if (Visited.insert(Nested).second) Worklist.push_back(Nested); if (auto *Nested = dyn_cast<ConstantExpr>(Edge.To)) if (Visited.insert(Nested).second) Worklist.push_back(Nested); } Results.append(ConstexprEdges.begin(), ConstexprEdges.end()); } } static void addInstructionToGraph(CFLAliasAnalysis &Analysis, Instruction &Inst, SmallVectorImpl<Value *> &ReturnedValues, NodeMapT &Map, GraphT &Graph) { const auto findOrInsertNode = [&Map, &Graph](Value *Val) { auto Pair = Map.insert(std::make_pair(Val, GraphT::Node())); auto &Iter = Pair.first; if (Pair.second) { auto NewNode = Graph.addNode(); Iter->second = NewNode; } return Iter->second; }; // We don't want the edges of most "return" instructions, but we *do* want // to know what can be returned. if (isa<ReturnInst>(&Inst)) ReturnedValues.push_back(&Inst); if (!hasUsefulEdges(&Inst)) return; SmallVector<Edge, 8> Edges; argsToEdges(Analysis, &Inst, Edges); // In the case of an unused alloca (or similar), edges may be empty. Note // that it exists so we can potentially answer NoAlias. 
if (Edges.empty()) { auto MaybeVal = getTargetValue(&Inst); assert(MaybeVal.hasValue()); auto *Target = *MaybeVal; findOrInsertNode(Target); return; } const auto addEdgeToGraph = [&Graph, &findOrInsertNode](const Edge &E) { auto To = findOrInsertNode(E.To); auto From = findOrInsertNode(E.From); auto FlippedWeight = flipWeight(E.Weight); auto Attrs = E.AdditionalAttrs; Graph.addEdge(From, To, std::make_pair(E.Weight, Attrs), std::make_pair(FlippedWeight, Attrs)); }; SmallVector<ConstantExpr *, 4> ConstantExprs; for (const Edge &E : Edges) { addEdgeToGraph(E); if (auto *Constexpr = dyn_cast<ConstantExpr>(E.To)) ConstantExprs.push_back(Constexpr); if (auto *Constexpr = dyn_cast<ConstantExpr>(E.From)) ConstantExprs.push_back(Constexpr); } for (ConstantExpr *CE : ConstantExprs) { Edges.clear(); constexprToEdges(Analysis, *CE, Edges); std::for_each(Edges.begin(), Edges.end(), addEdgeToGraph); } } // Aside: We may remove graph construction entirely, because it doesn't really // buy us much that we don't already have. I'd like to add interprocedural // analysis prior to this however, in case that somehow requires the graph // produced by this for efficient execution static void buildGraphFrom(CFLAliasAnalysis &Analysis, Function *Fn, SmallVectorImpl<Value *> &ReturnedValues, NodeMapT &Map, GraphT &Graph) { for (auto &Bb : Fn->getBasicBlockList()) for (auto &Inst : Bb.getInstList()) addInstructionToGraph(Analysis, Inst, ReturnedValues, Map, Graph); } static bool canSkipAddingToSets(Value *Val) { // Constants can share instances, which may falsely unify multiple // sets, e.g. in // store i32* null, i32** %ptr1 // store i32* null, i32** %ptr2 // clearly ptr1 and ptr2 should not be unified into the same set, so // we should filter out the (potentially shared) instance to // i32* null. if (isa<Constant>(Val)) { bool Container = isa<ConstantVector>(Val) || isa<ConstantArray>(Val) || isa<ConstantStruct>(Val); // TODO: Because all of these things are constant, we can determine whether // the data is *actually* mutable at graph building time. This will probably // come for free/cheap with offset awareness. 
bool CanStoreMutableData = isa<GlobalValue>(Val) || isa<ConstantExpr>(Val) || Container; return !CanStoreMutableData; } return false; } static FunctionInfo buildSetsFrom(CFLAliasAnalysis &Analysis, Function *Fn) { NodeMapT Map; GraphT Graph; SmallVector<Value *, 4> ReturnedValues; buildGraphFrom(Analysis, Fn, ReturnedValues, Map, Graph); DenseMap<GraphT::Node, Value *> NodeValueMap; NodeValueMap.resize(Map.size()); for (const auto &Pair : Map) NodeValueMap.insert(std::make_pair(Pair.second, Pair.first)); const auto findValueOrDie = [&NodeValueMap](GraphT::Node Node) { auto ValIter = NodeValueMap.find(Node); assert(ValIter != NodeValueMap.end()); return ValIter->second; }; StratifiedSetsBuilder<Value *> Builder; SmallVector<GraphT::Node, 16> Worklist; for (auto &Pair : Map) { Worklist.clear(); auto *Value = Pair.first; Builder.add(Value); auto InitialNode = Pair.second; Worklist.push_back(InitialNode); while (!Worklist.empty()) { auto Node = Worklist.pop_back_val(); auto *CurValue = findValueOrDie(Node); if (canSkipAddingToSets(CurValue)) continue; for (const auto &EdgeTuple : Graph.edgesFor(Node)) { auto Weight = std::get<0>(EdgeTuple); auto Label = Weight.first; auto &OtherNode = std::get<1>(EdgeTuple); auto *OtherValue = findValueOrDie(OtherNode); if (canSkipAddingToSets(OtherValue)) continue; bool Added; switch (directionOfEdgeType(Label)) { case Level::Above: Added = Builder.addAbove(CurValue, OtherValue); break; case Level::Below: Added = Builder.addBelow(CurValue, OtherValue); break; case Level::Same: Added = Builder.addWith(CurValue, OtherValue); break; } auto Aliasing = Weight.second; if (auto MaybeCurIndex = valueToAttrIndex(CurValue)) Aliasing.set(*MaybeCurIndex); if (auto MaybeOtherIndex = valueToAttrIndex(OtherValue)) Aliasing.set(*MaybeOtherIndex); Builder.noteAttributes(CurValue, Aliasing); Builder.noteAttributes(OtherValue, Aliasing); if (Added) Worklist.push_back(OtherNode); } } } // There are times when we end up with parameters not in our graph (i.e. if // it's only used as the condition of a branch). Other bits of code depend on // things that were present during construction being present in the graph. // So, we add all present arguments here. 
for (auto &Arg : Fn->args()) { if (!Builder.add(&Arg)) continue; auto Attrs = valueToAttrIndex(&Arg); if (Attrs.hasValue()) Builder.noteAttributes(&Arg, *Attrs); } return FunctionInfo(Builder.build(), std::move(ReturnedValues)); } void CFLAliasAnalysis::scan(Function *Fn) { auto InsertPair = Cache.insert(std::make_pair(Fn, Optional<FunctionInfo>())); (void)InsertPair; assert(InsertPair.second && "Trying to scan a function that has already been cached"); FunctionInfo Info(buildSetsFrom(*this, Fn)); Cache[Fn] = std::move(Info); Handles.push_front(FunctionHandle(Fn, this)); } AliasResult CFLAliasAnalysis::query(const MemoryLocation &LocA, const MemoryLocation &LocB) { auto *ValA = const_cast<Value *>(LocA.Ptr); auto *ValB = const_cast<Value *>(LocB.Ptr); Function *Fn = nullptr; auto MaybeFnA = parentFunctionOfValue(ValA); auto MaybeFnB = parentFunctionOfValue(ValB); if (!MaybeFnA.hasValue() && !MaybeFnB.hasValue()) { // The only times this is known to happen are when globals + InlineAsm // are involved DEBUG(dbgs() << "CFLAA: could not extract parent function information.\n"); return MayAlias; } if (MaybeFnA.hasValue()) { Fn = *MaybeFnA; assert((!MaybeFnB.hasValue() || *MaybeFnB == *MaybeFnA) && "Interprocedural queries not supported"); } else { Fn = *MaybeFnB; } assert(Fn != nullptr); auto &MaybeInfo = ensureCached(Fn); assert(MaybeInfo.hasValue()); auto &Sets = MaybeInfo->Sets; auto MaybeA = Sets.find(ValA); if (!MaybeA.hasValue()) return MayAlias; auto MaybeB = Sets.find(ValB); if (!MaybeB.hasValue()) return MayAlias; auto SetA = *MaybeA; auto SetB = *MaybeB; auto AttrsA = Sets.getLink(SetA.Index).Attrs; auto AttrsB = Sets.getLink(SetB.Index).Attrs; // Stratified set attributes are used as markets to signify whether a member // of a StratifiedSet (or a member of a set above the current set) has // interacted with either arguments or globals. "Interacted with" meaning // its value may be different depending on the value of an argument or // global. The thought behind this is that, because arguments and globals // may alias each other, if AttrsA and AttrsB have touched args/globals, // we must conservatively say that they alias. However, if at least one of // the sets has no values that could legally be altered by changing the value // of an argument or global, then we don't have to be as conservative. if (AttrsA.any() && AttrsB.any()) return MayAlias; // We currently unify things even if the accesses to them may not be in // bounds, so we can't return partial alias here because we don't // know whether the pointer is really within the object or not. // IE Given an out of bounds GEP and an alloca'd pointer, we may // unify the two. We can't return partial alias for this case. // Since we do not currently track enough information to // differentiate if (SetA.Index == SetB.Index) return MayAlias; return NoAlias; } bool CFLAliasAnalysis::doInitialization(Module &M) { InitializeAliasAnalysis(this, &M.getDataLayout()); return true; }
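The tail of query() above reduces to a small decision rule over stratified-set indices and attribute bits. The standalone sketch below restates that rule with hypothetical types (SketchSetInfo and classifyPair are not part of the CFL-AA implementation); it exists only to make the MayAlias/NoAlias logic explicit.

#include <bitset>

enum class SketchAliasResult { NoAlias, MayAlias };

struct SketchSetInfo {
  unsigned SetIndex;     // index of the stratified set the value was placed in
  std::bitset<32> Attrs; // attribute bits noted on that set's link
};

static SketchAliasResult classifyPair(const SketchSetInfo &A, const SketchSetInfo &B) {
  // Both values may have interacted with arguments/globals: stay conservative.
  if (A.Attrs.any() && B.Attrs.any())
    return SketchAliasResult::MayAlias;
  // Values unified into the same set may alias; partial alias is never reported
  // because unification ignores whether the accesses were in bounds.
  if (A.SetIndex == B.SetIndex)
    return SketchAliasResult::MayAlias;
  return SketchAliasResult::NoAlias;
}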
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/LoopInfo.cpp
//===- LoopInfo.cpp - Natural Loop Calculator -----------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the LoopInfo class that is used to identify natural loops // and determine the loop depth of various nodes of the CFG. Note that the // loops identified may actually be several natural loops that share the same // header node... not just a single natural loop. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/LoopInfo.h" #include "llvm/ADT/DepthFirstIterator.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/Analysis/LoopInfoImpl.h" #include "llvm/Analysis/LoopIterator.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/CFG.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Metadata.h" #include "llvm/IR/PassManager.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include <algorithm> using namespace llvm; // Explicitly instantiate methods in LoopInfoImpl.h for IR-level Loops. template class llvm::LoopBase<BasicBlock, Loop>; template class llvm::LoopInfoBase<BasicBlock, Loop>; // Always verify loopinfo if expensive checking is enabled. #ifdef XDEBUG static bool VerifyLoopInfo = true; #else static bool VerifyLoopInfo = false; #endif #if 0 // HLSL Change Starts - option pending static cl::opt<bool,true> VerifyLoopInfoX("verify-loop-info", cl::location(VerifyLoopInfo), cl::desc("Verify loop info (time consuming)")); #else #endif // HLSL Change Ends // Loop identifier metadata name. static const char *const LoopMDName = "llvm.loop"; //===----------------------------------------------------------------------===// // Loop implementation // /// isLoopInvariant - Return true if the specified value is loop invariant /// bool Loop::isLoopInvariant(const Value *V) const { if (const Instruction *I = dyn_cast<Instruction>(V)) return !contains(I); return true; // All non-instructions are loop invariant } /// hasLoopInvariantOperands - Return true if all the operands of the /// specified instruction are loop invariant. bool Loop::hasLoopInvariantOperands(const Instruction *I) const { return all_of(I->operands(), [this](Value *V) { return isLoopInvariant(V); }); } /// makeLoopInvariant - If the given value is an instruciton inside of the /// loop and it can be hoisted, do so to make it trivially loop-invariant. /// Return true if the value after any hoisting is loop invariant. This /// function can be used as a slightly more aggressive replacement for /// isLoopInvariant. /// /// If InsertPt is specified, it is the point to hoist instructions to. /// If null, the terminator of the loop preheader is used. /// bool Loop::makeLoopInvariant(Value *V, bool &Changed, Instruction *InsertPt) const { if (Instruction *I = dyn_cast<Instruction>(V)) return makeLoopInvariant(I, Changed, InsertPt); return true; // All non-instructions are loop-invariant. } /// makeLoopInvariant - If the given instruction is inside of the /// loop and it can be hoisted, do so to make it trivially loop-invariant. /// Return true if the instruction after any hoisting is loop invariant. 
This /// function can be used as a slightly more aggressive replacement for /// isLoopInvariant. /// /// If InsertPt is specified, it is the point to hoist instructions to. /// If null, the terminator of the loop preheader is used. /// bool Loop::makeLoopInvariant(Instruction *I, bool &Changed, Instruction *InsertPt) const { // Test if the value is already loop-invariant. if (isLoopInvariant(I)) return true; if (!isSafeToSpeculativelyExecute(I)) return false; if (I->mayReadFromMemory()) return false; // The landingpad instruction is immobile. if (isa<LandingPadInst>(I)) return false; // Determine the insertion point, unless one was given. if (!InsertPt) { BasicBlock *Preheader = getLoopPreheader(); // Without a preheader, hoisting is not feasible. if (!Preheader) return false; InsertPt = Preheader->getTerminator(); } // Don't hoist instructions with loop-variant operands. for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) if (!makeLoopInvariant(I->getOperand(i), Changed, InsertPt)) return false; // Hoist. I->moveBefore(InsertPt); Changed = true; return true; } /// getCanonicalInductionVariable - Check to see if the loop has a canonical /// induction variable: an integer recurrence that starts at 0 and increments /// by one each time through the loop. If so, return the phi node that /// corresponds to it. /// /// The IndVarSimplify pass transforms loops to have a canonical induction /// variable. /// PHINode *Loop::getCanonicalInductionVariable() const { BasicBlock *H = getHeader(); BasicBlock *Incoming = nullptr, *Backedge = nullptr; pred_iterator PI = pred_begin(H); assert(PI != pred_end(H) && "Loop must have at least one backedge!"); Backedge = *PI++; if (PI == pred_end(H)) return nullptr; // dead loop Incoming = *PI++; if (PI != pred_end(H)) return nullptr; // multiple backedges? if (contains(Incoming)) { if (contains(Backedge)) return nullptr; std::swap(Incoming, Backedge); } else if (!contains(Backedge)) return nullptr; // Loop over all of the PHI nodes, looking for a canonical indvar. for (BasicBlock::iterator I = H->begin(); isa<PHINode>(I); ++I) { PHINode *PN = cast<PHINode>(I); if (ConstantInt *CI = dyn_cast<ConstantInt>(PN->getIncomingValueForBlock(Incoming))) if (CI->isNullValue()) if (Instruction *Inc = dyn_cast<Instruction>(PN->getIncomingValueForBlock(Backedge))) if (Inc->getOpcode() == Instruction::Add && Inc->getOperand(0) == PN) if (ConstantInt *CI = dyn_cast<ConstantInt>(Inc->getOperand(1))) if (CI->equalsInt(1)) return PN; } return nullptr; } /// isLCSSAForm - Return true if the Loop is in LCSSA form bool Loop::isLCSSAForm(DominatorTree &DT) const { for (block_iterator BI = block_begin(), E = block_end(); BI != E; ++BI) { BasicBlock *BB = *BI; for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;++I) for (Use &U : I->uses()) { Instruction *UI = cast<Instruction>(U.getUser()); BasicBlock *UserBB = UI->getParent(); if (PHINode *P = dyn_cast<PHINode>(UI)) UserBB = P->getIncomingBlock(U); // Check the current block, as a fast-path, before checking whether // the use is anywhere in the loop. Most values are used in the same // block they are defined in. Also, blocks not reachable from the // entry are special; uses in them don't need to go through PHIs. if (UserBB != BB && !contains(UserBB) && DT.isReachableFromEntry(UserBB)) return false; } } return true; } /// isLoopSimplifyForm - Return true if the Loop is in the form that /// the LoopSimplify form transforms loops to, which is sometimes called /// normal form. 
bool Loop::isLoopSimplifyForm() const { // Normal-form loops have a preheader, a single backedge, and all of their // exits have all their predecessors inside the loop. return getLoopPreheader() && getLoopLatch() && hasDedicatedExits(); } /// isSafeToClone - Return true if the loop body is safe to clone in practice. /// Routines that reform the loop CFG and split edges often fail on indirectbr. bool Loop::isSafeToClone() const { // Return false if any loop blocks contain indirectbrs, or there are any calls // to noduplicate functions. for (Loop::block_iterator I = block_begin(), E = block_end(); I != E; ++I) { if (isa<IndirectBrInst>((*I)->getTerminator())) return false; if (const InvokeInst *II = dyn_cast<InvokeInst>((*I)->getTerminator())) if (II->cannotDuplicate()) return false; for (BasicBlock::iterator BI = (*I)->begin(), BE = (*I)->end(); BI != BE; ++BI) { if (const CallInst *CI = dyn_cast<CallInst>(BI)) { if (CI->cannotDuplicate()) return false; } } } return true; } MDNode *Loop::getLoopID() const { MDNode *LoopID = nullptr; if (isLoopSimplifyForm()) { LoopID = getLoopLatch()->getTerminator()->getMetadata(LoopMDName); } else { // Go through each predecessor of the loop header and check the // terminator for the metadata. BasicBlock *H = getHeader(); for (block_iterator I = block_begin(), IE = block_end(); I != IE; ++I) { TerminatorInst *TI = (*I)->getTerminator(); MDNode *MD = nullptr; // Check if this terminator branches to the loop header. for (unsigned i = 0, ie = TI->getNumSuccessors(); i != ie; ++i) { if (TI->getSuccessor(i) == H) { MD = TI->getMetadata(LoopMDName); break; } } if (!MD) return nullptr; if (!LoopID) LoopID = MD; else if (MD != LoopID) return nullptr; } } if (!LoopID || LoopID->getNumOperands() == 0 || LoopID->getOperand(0) != LoopID) return nullptr; return LoopID; } void Loop::setLoopID(MDNode *LoopID) const { assert(LoopID && "Loop ID should not be null"); assert(LoopID->getNumOperands() > 0 && "Loop ID needs at least one operand"); assert(LoopID->getOperand(0) == LoopID && "Loop ID should refer to itself"); if (isLoopSimplifyForm()) { getLoopLatch()->getTerminator()->setMetadata(LoopMDName, LoopID); return; } BasicBlock *H = getHeader(); for (block_iterator I = block_begin(), IE = block_end(); I != IE; ++I) { TerminatorInst *TI = (*I)->getTerminator(); for (unsigned i = 0, ie = TI->getNumSuccessors(); i != ie; ++i) { if (TI->getSuccessor(i) == H) TI->setMetadata(LoopMDName, LoopID); } } } bool Loop::isAnnotatedParallel() const { MDNode *desiredLoopIdMetadata = getLoopID(); if (!desiredLoopIdMetadata) return false; // The loop branch contains the parallel loop metadata. In order to ensure // that any parallel-loop-unaware optimization pass hasn't added loop-carried // dependencies (thus converted the loop back to a sequential loop), check // that all the memory instructions in the loop contain parallelism metadata // that point to the same unique "loop id metadata" the loop branch does. for (block_iterator BB = block_begin(), BE = block_end(); BB != BE; ++BB) { for (BasicBlock::iterator II = (*BB)->begin(), EE = (*BB)->end(); II != EE; II++) { if (!II->mayReadOrWriteMemory()) continue; // The memory instruction can refer to the loop identifier metadata // directly or indirectly through another list metadata (in case of // nested parallel loops). The loop identifier metadata refers to // itself so we can check both cases with the same routine. 
MDNode *loopIdMD = II->getMetadata(LLVMContext::MD_mem_parallel_loop_access); if (!loopIdMD) return false; bool loopIdMDFound = false; for (unsigned i = 0, e = loopIdMD->getNumOperands(); i < e; ++i) { if (loopIdMD->getOperand(i) == desiredLoopIdMetadata) { loopIdMDFound = true; break; } } if (!loopIdMDFound) return false; } } return true; } /// hasDedicatedExits - Return true if no exit block for the loop /// has a predecessor that is outside the loop. bool Loop::hasDedicatedExits() const { // Each predecessor of each exit block of a normal loop is contained // within the loop. SmallVector<BasicBlock *, 4> ExitBlocks; getExitBlocks(ExitBlocks); for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) for (pred_iterator PI = pred_begin(ExitBlocks[i]), PE = pred_end(ExitBlocks[i]); PI != PE; ++PI) if (!contains(*PI)) return false; // All the requirements are met. return true; } /// getUniqueExitBlocks - Return all unique successor blocks of this loop. /// These are the blocks _outside of the current loop_ which are branched to. /// This assumes that loop exits are in canonical form. /// void Loop::getUniqueExitBlocks(SmallVectorImpl<BasicBlock *> &ExitBlocks) const { assert(hasDedicatedExits() && "getUniqueExitBlocks assumes the loop has canonical form exits!"); SmallVector<BasicBlock *, 32> switchExitBlocks; for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI) { BasicBlock *current = *BI; switchExitBlocks.clear(); for (succ_iterator I = succ_begin(*BI), E = succ_end(*BI); I != E; ++I) { // If block is inside the loop then it is not a exit block. if (contains(*I)) continue; pred_iterator PI = pred_begin(*I); BasicBlock *firstPred = *PI; // If current basic block is this exit block's first predecessor // then only insert exit block in to the output ExitBlocks vector. // This ensures that same exit block is not inserted twice into // ExitBlocks vector. if (current != firstPred) continue; // If a terminator has more then two successors, for example SwitchInst, // then it is possible that there are multiple edges from current block // to one exit block. if (std::distance(succ_begin(current), succ_end(current)) <= 2) { ExitBlocks.push_back(*I); continue; } // In case of multiple edges from current block to exit block, collect // only one edge in ExitBlocks. Use switchExitBlocks to keep track of // duplicate edges. if (std::find(switchExitBlocks.begin(), switchExitBlocks.end(), *I) == switchExitBlocks.end()) { switchExitBlocks.push_back(*I); ExitBlocks.push_back(*I); } } } } /// getUniqueExitBlock - If getUniqueExitBlocks would return exactly one /// block, return that block. Otherwise return null. BasicBlock *Loop::getUniqueExitBlock() const { SmallVector<BasicBlock *, 8> UniqueExitBlocks; getUniqueExitBlocks(UniqueExitBlocks); if (UniqueExitBlocks.size() == 1) return UniqueExitBlocks[0]; return nullptr; } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void Loop::dump() const { print(dbgs()); } #endif //===----------------------------------------------------------------------===// // UnloopUpdater implementation // namespace { /// Find the new parent loop for all blocks within the "unloop" whose last /// backedges has just been removed. class UnloopUpdater { Loop *Unloop; LoopInfo *LI; LoopBlocksDFS DFS; // Map unloop's immediate subloops to their nearest reachable parents. Nested // loops within these subloops will not change parents. 
However, an immediate // subloop's new parent will be the nearest loop reachable from either its own // exits *or* any of its nested loop's exits. DenseMap<Loop*, Loop*> SubloopParents; // Flag the presence of an irreducible backedge whose destination is a block // directly contained by the original unloop. bool FoundIB; public: UnloopUpdater(Loop *UL, LoopInfo *LInfo) : Unloop(UL), LI(LInfo), DFS(UL), FoundIB(false) {} void updateBlockParents(); void removeBlocksFromAncestors(); void updateSubloopParents(); protected: Loop *getNearestLoop(BasicBlock *BB, Loop *BBLoop); }; } // end anonymous namespace /// updateBlockParents - Update the parent loop for all blocks that are directly /// contained within the original "unloop". void UnloopUpdater::updateBlockParents() { if (Unloop->getNumBlocks()) { // Perform a post order CFG traversal of all blocks within this loop, // propagating the nearest loop from sucessors to predecessors. LoopBlocksTraversal Traversal(DFS, LI); for (LoopBlocksTraversal::POTIterator POI = Traversal.begin(), POE = Traversal.end(); POI != POE; ++POI) { Loop *L = LI->getLoopFor(*POI); Loop *NL = getNearestLoop(*POI, L); if (NL != L) { // For reducible loops, NL is now an ancestor of Unloop. assert((NL != Unloop && (!NL || NL->contains(Unloop))) && "uninitialized successor"); LI->changeLoopFor(*POI, NL); } else { // Or the current block is part of a subloop, in which case its parent // is unchanged. assert((FoundIB || Unloop->contains(L)) && "uninitialized successor"); } } } // Each irreducible loop within the unloop induces a round of iteration using // the DFS result cached by Traversal. bool Changed = FoundIB; for (unsigned NIters = 0; Changed; ++NIters) { assert(NIters < Unloop->getNumBlocks() && "runaway iterative algorithm"); // Iterate over the postorder list of blocks, propagating the nearest loop // from successors to predecessors as before. Changed = false; for (LoopBlocksDFS::POIterator POI = DFS.beginPostorder(), POE = DFS.endPostorder(); POI != POE; ++POI) { Loop *L = LI->getLoopFor(*POI); Loop *NL = getNearestLoop(*POI, L); if (NL != L) { assert(NL != Unloop && (!NL || NL->contains(Unloop)) && "uninitialized successor"); LI->changeLoopFor(*POI, NL); Changed = true; } } } } /// removeBlocksFromAncestors - Remove unloop's blocks from all ancestors below /// their new parents. void UnloopUpdater::removeBlocksFromAncestors() { // Remove all unloop's blocks (including those in nested subloops) from // ancestors below the new parent loop. for (Loop::block_iterator BI = Unloop->block_begin(), BE = Unloop->block_end(); BI != BE; ++BI) { Loop *OuterParent = LI->getLoopFor(*BI); if (Unloop->contains(OuterParent)) { while (OuterParent->getParentLoop() != Unloop) OuterParent = OuterParent->getParentLoop(); OuterParent = SubloopParents[OuterParent]; } // Remove blocks from former Ancestors except Unloop itself which will be // deleted. for (Loop *OldParent = Unloop->getParentLoop(); OldParent != OuterParent; OldParent = OldParent->getParentLoop()) { assert(OldParent && "new loop is not an ancestor of the original"); OldParent->removeBlockFromLoop(*BI); } } } /// updateSubloopParents - Update the parent loop for all subloops directly /// nested within unloop. 
void UnloopUpdater::updateSubloopParents() { while (!Unloop->empty()) { Loop *Subloop = *std::prev(Unloop->end()); Unloop->removeChildLoop(std::prev(Unloop->end())); assert(SubloopParents.count(Subloop) && "DFS failed to visit subloop"); if (Loop *Parent = SubloopParents[Subloop]) Parent->addChildLoop(Subloop); else LI->addTopLevelLoop(Subloop); } } /// getNearestLoop - Return the nearest parent loop among this block's /// successors. If a successor is a subloop header, consider its parent to be /// the nearest parent of the subloop's exits. /// /// For subloop blocks, simply update SubloopParents and return NULL. Loop *UnloopUpdater::getNearestLoop(BasicBlock *BB, Loop *BBLoop) { // Initially for blocks directly contained by Unloop, NearLoop == Unloop and // is considered uninitialized. Loop *NearLoop = BBLoop; Loop *Subloop = nullptr; if (NearLoop != Unloop && Unloop->contains(NearLoop)) { Subloop = NearLoop; // Find the subloop ancestor that is directly contained within Unloop. while (Subloop->getParentLoop() != Unloop) { Subloop = Subloop->getParentLoop(); assert(Subloop && "subloop is not an ancestor of the original loop"); } // Get the current nearest parent of the Subloop exits, initially Unloop. NearLoop = SubloopParents.insert(std::make_pair(Subloop, Unloop)).first->second; } succ_iterator I = succ_begin(BB), E = succ_end(BB); if (I == E) { assert(!Subloop && "subloop blocks must have a successor"); NearLoop = nullptr; // unloop blocks may now exit the function. } for (; I != E; ++I) { if (*I == BB) continue; // self loops are uninteresting Loop *L = LI->getLoopFor(*I); if (L == Unloop) { // This successor has not been processed. This path must lead to an // irreducible backedge. assert((FoundIB || !DFS.hasPostorder(*I)) && "should have seen IB"); FoundIB = true; } if (L != Unloop && Unloop->contains(L)) { // Successor is in a subloop. if (Subloop) continue; // Branching within subloops. Ignore it. // BB branches from the original into a subloop header. assert(L->getParentLoop() == Unloop && "cannot skip into nested loops"); // Get the current nearest parent of the Subloop's exits. L = SubloopParents[L]; // L could be Unloop if the only exit was an irreducible backedge. } if (L == Unloop) { continue; } // Handle critical edges from Unloop into a sibling loop. if (L && !L->contains(Unloop)) { L = L->getParentLoop(); } // Remember the nearest parent loop among successors or subloop exits. if (NearLoop == Unloop || !NearLoop || NearLoop->contains(L)) NearLoop = L; } if (Subloop) { SubloopParents[Subloop] = NearLoop; return BBLoop; } return NearLoop; } /// updateUnloop - The last backedge has been removed from a loop--now the /// "unloop". Find a new parent for the blocks contained within unloop and /// update the loop tree. We don't necessarily have valid dominators at this /// point, but LoopInfo is still valid except for the removal of this loop. /// /// Note that Unloop may now be an empty loop. Calling Loop::getHeader without /// checking first is illegal. void LoopInfo::updateUnloop(Loop *Unloop) { // First handle the special case of no parent loop to simplify the algorithm. if (!Unloop->getParentLoop()) { // Since BBLoop had no parent, Unloop blocks are no longer in a loop. for (Loop::block_iterator I = Unloop->block_begin(), E = Unloop->block_end(); I != E; ++I) { // Don't reparent blocks in subloops. if (getLoopFor(*I) != Unloop) continue; // Blocks no longer have a parent but are still referenced by Unloop until // the Unloop object is deleted. 
changeLoopFor(*I, nullptr); } // Remove the loop from the top-level LoopInfo object. for (iterator I = begin();; ++I) { assert(I != end() && "Couldn't find loop"); if (*I == Unloop) { removeLoop(I); break; } } // Move all of the subloops to the top-level. while (!Unloop->empty()) addTopLevelLoop(Unloop->removeChildLoop(std::prev(Unloop->end()))); return; } // Update the parent loop for all blocks within the loop. Blocks within // subloops will not change parents. UnloopUpdater Updater(Unloop, this); Updater.updateBlockParents(); // Remove blocks from former ancestor loops. Updater.removeBlocksFromAncestors(); // Add direct subloops as children in their new parent loop. Updater.updateSubloopParents(); // Remove unloop from its parent loop. Loop *ParentLoop = Unloop->getParentLoop(); for (Loop::iterator I = ParentLoop->begin();; ++I) { assert(I != ParentLoop->end() && "Couldn't find loop"); if (*I == Unloop) { ParentLoop->removeChildLoop(I); break; } } } char LoopAnalysis::PassID; LoopInfo LoopAnalysis::run(Function &F, AnalysisManager<Function> *AM) { // FIXME: Currently we create a LoopInfo from scratch for every function. // This may prove to be too wasteful due to deallocating and re-allocating // memory each time for the underlying map and vector datastructures. At some // point it may prove worthwhile to use a freelist and recycle LoopInfo // objects. I don't want to add that kind of complexity until the scope of // the problem is better understood. LoopInfo LI; LI.Analyze(AM->getResult<DominatorTreeAnalysis>(F)); return LI; } PreservedAnalyses LoopPrinterPass::run(Function &F, AnalysisManager<Function> *AM) { AM->getResult<LoopAnalysis>(F).print(OS); return PreservedAnalyses::all(); } //===----------------------------------------------------------------------===// // LoopInfo implementation // char LoopInfoWrapperPass::ID = 0; INITIALIZE_PASS_BEGIN(LoopInfoWrapperPass, "loops", "Natural Loop Information", true, true) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_END(LoopInfoWrapperPass, "loops", "Natural Loop Information", true, true) bool LoopInfoWrapperPass::runOnFunction(Function &) { releaseMemory(); LI.Analyze(getAnalysis<DominatorTreeWrapperPass>().getDomTree()); return false; } void LoopInfoWrapperPass::verifyAnalysis() const { // LoopInfoWrapperPass is a FunctionPass, but verifying every loop in the // function each time verifyAnalysis is called is very expensive. The // -verify-loop-info option can enable this. In order to perform some // checking by default, LoopPass has been taught to call verifyLoop manually // during loop pass sequences. if (VerifyLoopInfo) LI.verify(); } void LoopInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesAll(); AU.addRequired<DominatorTreeWrapperPass>(); } void LoopInfoWrapperPass::print(raw_ostream &OS, const Module *) const { LI.print(OS); } //===----------------------------------------------------------------------===// // LoopBlocksDFS implementation // /// Traverse the loop blocks and store the DFS result. /// Useful for clients that just want the final DFS result and don't need to /// visit blocks during the initial traversal. void LoopBlocksDFS::perform(LoopInfo *LI) { LoopBlocksTraversal Traversal(*this, LI); for (LoopBlocksTraversal::POTIterator POI = Traversal.begin(), POE = Traversal.end(); POI != POE; ++POI) ; }
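As a usage note, a client holding a LoopInfo can walk the loop forest and apply the queries implemented above. The sketch below is not part of LoopInfo.cpp; it assumes only the public Loop/LoopInfo API declared in llvm/Analysis/LoopInfo.h (getSubLoops, getLoopDepth, getHeader, isLoopSimplifyForm).

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Support/raw_ostream.h"

static void describeLoop(const llvm::Loop *L, llvm::raw_ostream &OS) {
  OS << "loop at depth " << L->getLoopDepth()
     << ", header = " << L->getHeader()->getName()
     << (L->isLoopSimplifyForm() ? " (simplify form)" : "") << "\n";
  for (const llvm::Loop *SubL : L->getSubLoops()) // recurse into nested loops
    describeLoop(SubL, OS);
}

static void describeLoops(const llvm::LoopInfo &LI, llvm::raw_ostream &OS) {
  for (const llvm::Loop *TopLevel : LI) // LoopInfo iterates top-level loops only
    describeLoop(TopLevel, OS);
}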
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/IteratedDominanceFrontier.cpp
//===- IteratedDominanceFrontier.cpp - Compute IDF ------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // /// \brief Compute iterated dominance frontiers using a linear time algorithm. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/IteratedDominanceFrontier.h" #include "llvm/IR/CFG.h" #include "llvm/IR/Dominators.h" #include <queue> using namespace llvm; void IDFCalculator::calculate(SmallVectorImpl<BasicBlock *> &PHIBlocks) { // If we haven't computed dominator tree levels, do so now. if (DomLevels.empty()) { for (auto DFI = df_begin(DT.getRootNode()), DFE = df_end(DT.getRootNode()); DFI != DFE; ++DFI) { DomLevels[*DFI] = DFI.getPathLength() - 1; } } // Use a priority queue keyed on dominator tree level so that inserted nodes // are handled from the bottom of the dominator tree upwards. typedef std::pair<DomTreeNode *, unsigned> DomTreeNodePair; typedef std::priority_queue<DomTreeNodePair, SmallVector<DomTreeNodePair, 32>, less_second> IDFPriorityQueue; IDFPriorityQueue PQ; for (BasicBlock *BB : *DefBlocks) { if (DomTreeNode *Node = DT.getNode(BB)) PQ.push(std::make_pair(Node, DomLevels.lookup(Node))); } SmallVector<DomTreeNode *, 32> Worklist; SmallPtrSet<DomTreeNode *, 32> VisitedPQ; SmallPtrSet<DomTreeNode *, 32> VisitedWorklist; while (!PQ.empty()) { DomTreeNodePair RootPair = PQ.top(); PQ.pop(); DomTreeNode *Root = RootPair.first; unsigned RootLevel = RootPair.second; // Walk all dominator tree children of Root, inspecting their CFG edges with // targets elsewhere on the dominator tree. Only targets whose level is at // most Root's level are added to the iterated dominance frontier of the // definition set. Worklist.clear(); Worklist.push_back(Root); VisitedWorklist.insert(Root); while (!Worklist.empty()) { DomTreeNode *Node = Worklist.pop_back_val(); BasicBlock *BB = Node->getBlock(); for (auto Succ : successors(BB)) { DomTreeNode *SuccNode = DT.getNode(Succ); // Quickly skip all CFG edges that are also dominator tree edges instead // of catching them below. if (SuccNode->getIDom() == Node) continue; unsigned SuccLevel = DomLevels.lookup(SuccNode); if (SuccLevel > RootLevel) continue; if (!VisitedPQ.insert(SuccNode).second) continue; BasicBlock *SuccBB = SuccNode->getBlock(); if (useLiveIn && !LiveInBlocks->count(SuccBB)) continue; PHIBlocks.emplace_back(SuccBB); if (!DefBlocks->count(SuccBB)) PQ.push(std::make_pair(SuccNode, SuccLevel)); } for (auto DomChild : *Node) { if (VisitedWorklist.insert(DomChild).second) Worklist.push_back(DomChild); } } } }
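A typical client of the calculator above seeds it with the set of defining blocks and reads back the blocks that need PHI nodes (the classic mem2reg pattern). The setter names in the sketch come from IteratedDominanceFrontier.h rather than from this file, so treat them as assumptions.

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/IR/Dominators.h"

static void computePHIBlocks(llvm::DominatorTree &DT,
                             const llvm::SmallPtrSetImpl<llvm::BasicBlock *> &DefBlocks,
                             llvm::SmallVectorImpl<llvm::BasicBlock *> &PHIBlocks) {
  llvm::IDFCalculator IDF(DT);
  IDF.setDefiningBlocks(DefBlocks); // blocks that contain a definition of the value
  IDF.calculate(PHIBlocks);         // filled with the iterated dominance frontier
}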
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/ReducibilityAnalysis.cpp
/////////////////////////////////////////////////////////////////////////////// // // // ReducibilityAnalysis.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // /////////////////////////////////////////////////////////////////////////////// #include "llvm/Analysis/ReducibilityAnalysis.h" #include "dxc/Support/Global.h" #include "llvm/IR/CFG.h" #include "llvm/IR/Function.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/LegacyPassManager.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include <algorithm> #include <unordered_map> #include <unordered_set> #include <vector> using namespace llvm; using llvm::legacy::FunctionPassManager; using llvm::legacy::PassManager; using std::unordered_map; using std::unordered_set; using std::vector; #define DEBUG_TYPE "reducibility" //===----------------------------------------------------------------------===// // Reducibility Analysis Pass // // The pass implements T1-T2 graph reducibility test. // The algorithm can be found in "Engineering a Compiler" text by // Keith Cooper and Linda Torczon. // //===----------------------------------------------------------------------===// namespace ReducibilityAnalysisNS { class ReducibilityAnalysis : public FunctionPass { public: static char ID; ReducibilityAnalysis() : FunctionPass(ID), m_Action(IrreducibilityAction::ThrowException), m_bReducible(true) {} explicit ReducibilityAnalysis(IrreducibilityAction Action) : FunctionPass(ID), m_Action(Action), m_bReducible(true) {} virtual bool runOnFunction(Function &F); virtual void getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesAll(); } bool IsReducible() const { return m_bReducible; } private: IrreducibilityAction m_Action; bool m_bReducible; }; char ReducibilityAnalysis::ID = 0; struct Node { typedef unordered_set<unsigned> IdxSet; IdxSet m_Succ; IdxSet m_Pred; }; class NodeWorklist { public: NodeWorklist(size_t MaxSize) : m_Size(0) { m_Data.resize(MaxSize); } size_t Size() const { return m_Size; } unsigned Get(size_t idx) const { return m_Data[idx]; } void PushBack(unsigned Val) { m_Data[m_Size++] = Val; } void Clear() { m_Size = 0; } private: size_t m_Size; vector<unsigned> m_Data; }; static bool IsEntryNode(size_t NodeIdx) { return NodeIdx == 0; } bool ReducibilityAnalysis::runOnFunction(Function &F) { m_bReducible = true; if (F.empty()) return false; IFTBOOL(F.size() < UINT32_MAX, DXC_E_DATA_TOO_LARGE); vector<Node> Nodes(F.size()); unordered_map<BasicBlock *, unsigned> BasicBlockToNodeIdxMap; // // Initialize. // unsigned iNode = 0; for (BasicBlock &BB : F) { BasicBlockToNodeIdxMap[&BB] = iNode++; } for (BasicBlock &BB : F) { BasicBlock *pBB = &BB; unsigned N = BasicBlockToNodeIdxMap[pBB]; for (succ_iterator itSucc = succ_begin(pBB), endSucc = succ_end(pBB); itSucc != endSucc; ++itSucc) { BasicBlock *pSuccBB = *itSucc; unsigned SuccNode = BasicBlockToNodeIdxMap[pSuccBB]; Nodes[N].m_Succ.insert(SuccNode); } for (pred_iterator itPred = pred_begin(pBB), endPred = pred_end(pBB); itPred != endPred; ++itPred) { BasicBlock *pPredBB = *itPred; unsigned PredNode = BasicBlockToNodeIdxMap[pPredBB]; Nodes[N].m_Pred.insert(PredNode); } } // // Reduce. 
// NodeWorklist Q1(Nodes.size()), Q2(Nodes.size()); NodeWorklist *pReady = &Q1, *pWaiting = &Q2; for (unsigned i = 0; i < Nodes.size(); i++) { pReady->PushBack(i); } for (;;) { bool bChanged = false; pWaiting->Clear(); for (unsigned iNode = 0; iNode < pReady->Size(); iNode++) { unsigned N = pReady->Get(iNode); Node *pNode = &Nodes[N]; // T1: self-edge. auto itSucc = pNode->m_Succ.find(N); if (itSucc != pNode->m_Succ.end()) { pWaiting->PushBack(N); pNode->m_Succ.erase(itSucc); auto s1 = pNode->m_Pred.erase(N); DXASSERT_LOCALVAR(s1, s1 == 1, "otherwise check Pred/Succ sets"); bChanged = true; continue; } // T2: single predecessor. if (pNode->m_Pred.size() == 1) { unsigned PredNode = *pNode->m_Pred.begin(); Node *pPredNode = &Nodes[PredNode]; auto s1 = pPredNode->m_Succ.erase(N); DXASSERT_LOCALVAR(s1, s1 == 1, "otherwise check Pred/Succ sets"); // Do not update N's sets, as N is discarded and never looked at again. for (auto itSucc = pNode->m_Succ.begin(), endSucc = pNode->m_Succ.end(); itSucc != endSucc; ++itSucc) { unsigned SuccNode = *itSucc; Node *pSuccNode = &Nodes[SuccNode]; auto s2 = pSuccNode->m_Pred.erase(N); DXASSERT_LOCALVAR(s2, s2, "otherwise check Pred/Succ sets"); pPredNode->m_Succ.insert(SuccNode); pSuccNode->m_Pred.insert(PredNode); } bChanged = true; continue; } // Unreachable. if (pNode->m_Pred.size() == 0 && !IsEntryNode(N)) { for (auto itSucc = pNode->m_Succ.begin(), endSucc = pNode->m_Succ.end(); itSucc != endSucc; ++itSucc) { unsigned SuccNode = *itSucc; Node *pSuccNode = &Nodes[SuccNode]; auto s1 = pSuccNode->m_Pred.erase(N); DXASSERT_LOCALVAR(s1, s1, "otherwise check Pred/Succ sets"); } bChanged = true; continue; } // Could not reduce. pWaiting->PushBack(N); } if (pWaiting->Size() == 1) { break; } if (!bChanged) { m_bReducible = false; break; } std::swap(pReady, pWaiting); } if (!IsReducible()) { switch (m_Action) { case IrreducibilityAction::ThrowException: DEBUG(dbgs() << "Function '" << F.getName() << "' is irreducible. Aborting compilation.\n"); IFT(DXC_E_IRREDUCIBLE_CFG); break; case IrreducibilityAction::PrintLog: DEBUG(dbgs() << "Function '" << F.getName() << "' is irreducible\n"); break; case IrreducibilityAction::Ignore: break; default: DXASSERT(false, "otherwise incorrect action passed to the constructor"); } } return false; } } // namespace ReducibilityAnalysisNS using namespace ReducibilityAnalysisNS; // Publicly exposed interface to pass... char &llvm::ReducibilityAnalysisID = ReducibilityAnalysis::ID; INITIALIZE_PASS_BEGIN(ReducibilityAnalysis, "red", "Reducibility Analysis", true, true) INITIALIZE_PASS_END(ReducibilityAnalysis, "red", "Reducibility Analysis", true, true) namespace llvm { FunctionPass *createReducibilityAnalysisPass(IrreducibilityAction Action) { return new ReducibilityAnalysis(Action); } bool IsReducible(const Module &M, IrreducibilityAction Action) { PassManager PM; ReducibilityAnalysis *pRA = new ReducibilityAnalysis(Action); PM.add(pRA); PM.run(const_cast<Module &>(M)); return pRA->IsReducible(); } bool IsReducible(const Function &f, IrreducibilityAction Action) { Function &F = const_cast<Function &>(f); DXASSERT(!F.isDeclaration(), "otherwise the caller is asking to check an external function"); FunctionPassManager FPM(F.getParent()); ReducibilityAnalysis *pRA = new ReducibilityAnalysis(Action); FPM.add(pRA); FPM.doInitialization(); FPM.run(F); return pRA->IsReducible(); } } // namespace llvm
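A short usage sketch for the public entry points defined above; it assumes their declarations live in llvm/Analysis/ReducibilityAnalysis.h (the header this file includes) and passes IrreducibilityAction::Ignore so the query itself never throws.

#include "llvm/Analysis/ReducibilityAnalysis.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static void reportReducibility(const Function &F) {
  if (F.isDeclaration())
    return; // the Function overload asserts on declarations
  bool Reducible = IsReducible(F, IrreducibilityAction::Ignore);
  errs() << F.getName() << ": " << (Reducible ? "reducible" : "irreducible") << "\n";
}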
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/SparsePropagation.cpp
//===- SparsePropagation.cpp - Sparse Conditional Property Propagation ----===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements an abstract sparse conditional propagation algorithm, // modeled after SCCP, but with a customizable lattice function. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/SparsePropagation.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Function.h" #include "llvm/IR/Instructions.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; #define DEBUG_TYPE "sparseprop" //===----------------------------------------------------------------------===// // AbstractLatticeFunction Implementation //===----------------------------------------------------------------------===// AbstractLatticeFunction::~AbstractLatticeFunction() {} /// PrintValue - Render the specified lattice value to the specified stream. void AbstractLatticeFunction::PrintValue(LatticeVal V, raw_ostream &OS) { if (V == UndefVal) OS << "undefined"; else if (V == OverdefinedVal) OS << "overdefined"; else if (V == UntrackedVal) OS << "untracked"; else OS << "unknown lattice value"; } //===----------------------------------------------------------------------===// // SparseSolver Implementation //===----------------------------------------------------------------------===// /// getOrInitValueState - Return the LatticeVal object that corresponds to the /// value, initializing the value's state if it hasn't been entered into the /// map yet. This function is necessary because not all values should start /// out in the underdefined state... Arguments should be overdefined, and /// constants should be marked as constants. /// SparseSolver::LatticeVal SparseSolver::getOrInitValueState(Value *V) { DenseMap<Value*, LatticeVal>::iterator I = ValueState.find(V); if (I != ValueState.end()) return I->second; // Common case, in the map LatticeVal LV; if (LatticeFunc->IsUntrackedValue(V)) return LatticeFunc->getUntrackedVal(); else if (Constant *C = dyn_cast<Constant>(V)) LV = LatticeFunc->ComputeConstant(C); else if (Argument *A = dyn_cast<Argument>(V)) LV = LatticeFunc->ComputeArgument(A); else if (!isa<Instruction>(V)) // All other non-instructions are overdefined. LV = LatticeFunc->getOverdefinedVal(); else // All instructions are underdefined by default. LV = LatticeFunc->getUndefVal(); // If this value is untracked, don't add it to the map. if (LV == LatticeFunc->getUntrackedVal()) return LV; return ValueState[V] = LV; } /// UpdateState - When the state for some instruction is potentially updated, /// this function notices and adds I to the worklist if needed. void SparseSolver::UpdateState(Instruction &Inst, LatticeVal V) { DenseMap<Value*, LatticeVal>::iterator I = ValueState.find(&Inst); if (I != ValueState.end() && I->second == V) return; // No change. // An update. Visit uses of I. ValueState[&Inst] = V; InstWorkList.push_back(&Inst); } /// MarkBlockExecutable - This method can be used by clients to mark all of /// the blocks that are known to be intrinsically live in the processed unit. void SparseSolver::MarkBlockExecutable(BasicBlock *BB) { DEBUG(dbgs() << "Marking Block Executable: " << BB->getName() << "\n"); BBExecutable.insert(BB); // Basic block is executable! 
BBWorkList.push_back(BB); // Add the block to the work list! } /// markEdgeExecutable - Mark a basic block as executable, adding it to the BB /// work list if it is not already executable... void SparseSolver::markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest) { if (!KnownFeasibleEdges.insert(Edge(Source, Dest)).second) return; // This edge is already known to be executable! DEBUG(dbgs() << "Marking Edge Executable: " << Source->getName() << " -> " << Dest->getName() << "\n"); if (BBExecutable.count(Dest)) { // The destination is already executable, but we just made an edge // feasible that wasn't before. Revisit the PHI nodes in the block // because they have potentially new operands. for (BasicBlock::iterator I = Dest->begin(); isa<PHINode>(I); ++I) visitPHINode(*cast<PHINode>(I)); } else { MarkBlockExecutable(Dest); } } /// getFeasibleSuccessors - Return a vector of booleans to indicate which /// successors are reachable from a given terminator instruction. void SparseSolver::getFeasibleSuccessors(TerminatorInst &TI, SmallVectorImpl<bool> &Succs, bool AggressiveUndef) { Succs.resize(TI.getNumSuccessors()); if (TI.getNumSuccessors() == 0) return; if (BranchInst *BI = dyn_cast<BranchInst>(&TI)) { if (BI->isUnconditional()) { Succs[0] = true; return; } LatticeVal BCValue; if (AggressiveUndef) BCValue = getOrInitValueState(BI->getCondition()); else BCValue = getLatticeState(BI->getCondition()); if (BCValue == LatticeFunc->getOverdefinedVal() || BCValue == LatticeFunc->getUntrackedVal()) { // Overdefined condition variables can branch either way. Succs[0] = Succs[1] = true; return; } // If undefined, neither is feasible yet. if (BCValue == LatticeFunc->getUndefVal()) return; Constant *C = LatticeFunc->GetConstant(BCValue, BI->getCondition(), *this); if (!C || !isa<ConstantInt>(C)) { // Non-constant values can go either way. Succs[0] = Succs[1] = true; return; } // Constant condition variables mean the branch can only go a single way Succs[C->isNullValue()] = true; return; } if (isa<InvokeInst>(TI)) { // Invoke instructions successors are always executable. // TODO: Could ask the lattice function if the value can throw. Succs[0] = Succs[1] = true; return; } if (isa<IndirectBrInst>(TI)) { Succs.assign(Succs.size(), true); return; } SwitchInst &SI = cast<SwitchInst>(TI); LatticeVal SCValue; if (AggressiveUndef) SCValue = getOrInitValueState(SI.getCondition()); else SCValue = getLatticeState(SI.getCondition()); if (SCValue == LatticeFunc->getOverdefinedVal() || SCValue == LatticeFunc->getUntrackedVal()) { // All destinations are executable! Succs.assign(TI.getNumSuccessors(), true); return; } // If undefined, neither is feasible yet. if (SCValue == LatticeFunc->getUndefVal()) return; Constant *C = LatticeFunc->GetConstant(SCValue, SI.getCondition(), *this); if (!C || !isa<ConstantInt>(C)) { // All destinations are executable! Succs.assign(TI.getNumSuccessors(), true); return; } SwitchInst::CaseIt Case = SI.findCaseValue(cast<ConstantInt>(C)); Succs[Case.getSuccessorIndex()] = true; } /// isEdgeFeasible - Return true if the control flow edge from the 'From' /// basic block to the 'To' basic block is currently feasible... 
bool SparseSolver::isEdgeFeasible(BasicBlock *From, BasicBlock *To, bool AggressiveUndef) { SmallVector<bool, 16> SuccFeasible; TerminatorInst *TI = From->getTerminator(); getFeasibleSuccessors(*TI, SuccFeasible, AggressiveUndef); for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) if (TI->getSuccessor(i) == To && SuccFeasible[i]) return true; return false; } void SparseSolver::visitTerminatorInst(TerminatorInst &TI) { SmallVector<bool, 16> SuccFeasible; getFeasibleSuccessors(TI, SuccFeasible, true); BasicBlock *BB = TI.getParent(); // Mark all feasible successors executable... for (unsigned i = 0, e = SuccFeasible.size(); i != e; ++i) if (SuccFeasible[i]) markEdgeExecutable(BB, TI.getSuccessor(i)); } void SparseSolver::visitPHINode(PHINode &PN) { // The lattice function may store more information on a PHINode than could be // computed from its incoming values. For example, SSI form stores its sigma // functions as PHINodes with a single incoming value. if (LatticeFunc->IsSpecialCasedPHI(&PN)) { LatticeVal IV = LatticeFunc->ComputeInstructionState(PN, *this); if (IV != LatticeFunc->getUntrackedVal()) UpdateState(PN, IV); return; } LatticeVal PNIV = getOrInitValueState(&PN); LatticeVal Overdefined = LatticeFunc->getOverdefinedVal(); // If this value is already overdefined (common) just return. if (PNIV == Overdefined || PNIV == LatticeFunc->getUntrackedVal()) return; // Quick exit // Super-extra-high-degree PHI nodes are unlikely to ever be interesting, // and slow us down a lot. Just mark them overdefined. if (PN.getNumIncomingValues() > 64) { UpdateState(PN, Overdefined); return; } // Look at all of the executable operands of the PHI node. If any of them // are overdefined, the PHI becomes overdefined as well. Otherwise, ask the // transfer function to give us the merge of the incoming values. for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) { // If the edge is not yet known to be feasible, it doesn't impact the PHI. if (!isEdgeFeasible(PN.getIncomingBlock(i), PN.getParent(), true)) continue; // Merge in this value. LatticeVal OpVal = getOrInitValueState(PN.getIncomingValue(i)); if (OpVal != PNIV) PNIV = LatticeFunc->MergeValues(PNIV, OpVal); if (PNIV == Overdefined) break; // Rest of input values don't matter. } // Update the PHI with the compute value, which is the merge of the inputs. UpdateState(PN, PNIV); } void SparseSolver::visitInst(Instruction &I) { // PHIs are handled by the propagation logic, they are never passed into the // transfer functions. if (PHINode *PN = dyn_cast<PHINode>(&I)) return visitPHINode(*PN); // Otherwise, ask the transfer function what the result is. If this is // something that we care about, remember it. LatticeVal IV = LatticeFunc->ComputeInstructionState(I, *this); if (IV != LatticeFunc->getUntrackedVal()) UpdateState(I, IV); if (TerminatorInst *TI = dyn_cast<TerminatorInst>(&I)) visitTerminatorInst(*TI); } void SparseSolver::Solve(Function &F) { MarkBlockExecutable(&F.getEntryBlock()); // Process the work lists until they are empty! while (!BBWorkList.empty() || !InstWorkList.empty()) { // Process the instruction work list. while (!InstWorkList.empty()) { Instruction *I = InstWorkList.back(); InstWorkList.pop_back(); DEBUG(dbgs() << "\nPopped off I-WL: " << *I << "\n"); // "I" got into the work list because it made a transition. See if any // users are both live and in need of updating. for (User *U : I->users()) { Instruction *UI = cast<Instruction>(U); if (BBExecutable.count(UI->getParent())) // Inst is executable? 
visitInst(*UI); } } // Process the basic block work list. while (!BBWorkList.empty()) { BasicBlock *BB = BBWorkList.back(); BBWorkList.pop_back(); DEBUG(dbgs() << "\nPopped off BBWL: " << *BB); // Notify all instructions in this basic block that they are newly // executable. for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) visitInst(*I); } } } void SparseSolver::Print(Function &F, raw_ostream &OS) const { OS << "\nFUNCTION: " << F.getName() << "\n"; for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) { if (!BBExecutable.count(BB)) OS << "INFEASIBLE: "; OS << "\t"; if (BB->hasName()) OS << BB->getName() << ":\n"; else OS << "; anon bb\n"; for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) { LatticeFunc->PrintValue(getLatticeState(I), OS); OS << *I << "\n"; } OS << "\n"; } }
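Driving the solver looks roughly like the sketch below. The lattice argument stands in for a hypothetical AbstractLatticeFunction subclass whose transfer functions are not shown, and the SparseSolver constructor signature plus the lattice object's ownership rules come from SparsePropagation.h, not from this file, so both are assumptions.

#include "llvm/Analysis/SparsePropagation.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/raw_ostream.h"

void runSparseSolver(llvm::Function &F, llvm::AbstractLatticeFunction *Lattice) {
  // Lattice's lifetime is governed by SparsePropagation.h and is not restated here.
  llvm::SparseSolver Solver(Lattice);
  Solver.Solve(F);               // propagate until both worklists drain
  Solver.Print(F, llvm::errs()); // dump the lattice state computed per instruction
}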
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/CGSCCPassManager.cpp
//===- CGSCCPassManager.cpp - Managing & running CGSCC passes -------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/CGSCCPassManager.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" using namespace llvm; char CGSCCAnalysisManagerModuleProxy::PassID; CGSCCAnalysisManagerModuleProxy::Result CGSCCAnalysisManagerModuleProxy::run(Module &M) { assert(CGAM->empty() && "CGSCC analyses ran prior to the module proxy!"); return Result(*CGAM); } CGSCCAnalysisManagerModuleProxy::Result::~Result() { // Clear out the analysis manager if we're being destroyed -- it means we // didn't even see an invalidate call when we got invalidated. CGAM->clear(); } bool CGSCCAnalysisManagerModuleProxy::Result::invalidate( Module &M, const PreservedAnalyses &PA) { // If this proxy isn't marked as preserved, then we can't even invalidate // individual CGSCC analyses, there may be an invalid set of SCC objects in // the cache making it impossible to incrementally preserve them. // Just clear the entire manager. if (!PA.preserved(ID())) CGAM->clear(); // Return false to indicate that this result is still a valid proxy. return false; } char ModuleAnalysisManagerCGSCCProxy::PassID; char FunctionAnalysisManagerCGSCCProxy::PassID; FunctionAnalysisManagerCGSCCProxy::Result FunctionAnalysisManagerCGSCCProxy::run(LazyCallGraph::SCC &C) { assert(FAM->empty() && "Function analyses ran prior to the CGSCC proxy!"); return Result(*FAM); } FunctionAnalysisManagerCGSCCProxy::Result::~Result() { // Clear out the analysis manager if we're being destroyed -- it means we // didn't even see an invalidate call when we got invalidated. FAM->clear(); } bool FunctionAnalysisManagerCGSCCProxy::Result::invalidate( LazyCallGraph::SCC &C, const PreservedAnalyses &PA) { // If this proxy isn't marked as preserved, then we can't even invalidate // individual function analyses, there may be an invalid set of Function // objects in the cache making it impossible to incrementally preserve them. // Just clear the entire manager. if (!PA.preserved(ID())) FAM->clear(); // Return false to indicate that this result is still a valid proxy. return false; } char CGSCCAnalysisManagerFunctionProxy::PassID;
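Both Result::invalidate overloads above implement the same contract. The template below is a conceptual restatement with hypothetical names (it is not part of the pass-manager API), kept only to make that contract explicit.

template <typename InnerAnalysisManagerT, typename PreservedAnalysesT>
bool invalidateProxyResult(InnerAnalysisManagerT &InnerAM,
                           const PreservedAnalysesT &PA, void *ProxyPassID) {
  // Without an explicit preservation of the proxy, cached inner results may refer to
  // deleted IR units, so the only safe response is to clear the whole inner manager.
  if (!PA.preserved(ProxyPassID))
    InnerAM.clear();
  // The proxy result itself stays valid, exactly as the code above returns false.
  return false;
}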
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/CFGPrinter.cpp
//===- CFGPrinter.cpp - DOT printer for the control flow graph ------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines a '-dot-cfg' analysis pass, which emits the // cfg.<fnname>.dot file for each function in the program, with a graph of the // CFG for that function. // // The other main feature of this file is that it implements the // Function::viewCFG method, which is useful for debugging passes which operate // on the CFG. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/CFGPrinter.h" #include "llvm/Pass.h" #include "llvm/Support/FileSystem.h" using namespace llvm; namespace { struct CFGViewer : public FunctionPass { static char ID; // Pass identifcation, replacement for typeid CFGViewer() : FunctionPass(ID) { // initializeCFGOnlyViewerPass(*PassRegistry::getPassRegistry()); // HLSL Change - initialize up front } bool runOnFunction(Function &F) override { // HLSL Change Starts if (OSOverride != nullptr) { *OSOverride << "\ngraph: " << "cfg" << F.getName() << ".dot\n"; llvm::WriteGraph(*OSOverride, (const Function*)&F, false, F.getName()); return false; } // HLSL Change Ends F.viewCFG(); return false; } void print(raw_ostream &OS, const Module* = nullptr) const override {} void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesAll(); } }; } char CFGViewer::ID = 0; INITIALIZE_PASS(CFGViewer, "view-cfg", "View CFG of function", false, true) namespace { struct CFGOnlyViewer : public FunctionPass { static char ID; // Pass identifcation, replacement for typeid CFGOnlyViewer() : FunctionPass(ID) { // initializeCFGOnlyViewerPass(*PassRegistry::getPassRegistry()); // HLSL Change - initialize up front } bool runOnFunction(Function &F) override { // HLSL Change Starts if (OSOverride != nullptr) { *OSOverride << "\ngraph: " << "cfg" << F.getName() << ".dot\n"; llvm::WriteGraph(*OSOverride, (const Function*)&F, true, F.getName()); return false; } // HLSL Change Ends F.viewCFGOnly(); return false; } void print(raw_ostream &OS, const Module* = nullptr) const override {} void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesAll(); } }; } char CFGOnlyViewer::ID = 0; INITIALIZE_PASS(CFGOnlyViewer, "view-cfg-only", "View CFG of function (with no function bodies)", false, true) namespace { struct CFGPrinter : public FunctionPass { static char ID; // Pass identification, replacement for typeid CFGPrinter() : FunctionPass(ID) { // initializeCFGPrinterPass(*PassRegistry::getPassRegistry()); // HLSL Change - initialize up front } bool runOnFunction(Function &F) override { // HLSL Change Starts if (OSOverride != nullptr) { *OSOverride << "\ngraph: " << "cfg." << F.getName() << ".dot\n"; llvm::WriteGraph(*OSOverride, (const Function*)&F, false, F.getName()); return false; } // HLSL Change Ends std::string Filename = ("cfg." 
+ F.getName() + ".dot").str(); errs() << "Writing '" << Filename << "'..."; std::error_code EC; raw_fd_ostream File(Filename, EC, sys::fs::F_Text); if (!EC) WriteGraph(File, (const Function*)&F); else errs() << " error opening file for writing!"; errs() << "\n"; return false; } void print(raw_ostream &OS, const Module* = nullptr) const override {} void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesAll(); } }; } char CFGPrinter::ID = 0; INITIALIZE_PASS(CFGPrinter, "dot-cfg", "Print CFG of function to 'dot' file", false, true) namespace { struct CFGOnlyPrinter : public FunctionPass { static char ID; // Pass identification, replacement for typeid CFGOnlyPrinter() : FunctionPass(ID) { // initializeCFGOnlyPrinterPass(*PassRegistry::getPassRegistry()); // HLSL Change - initialize up front } bool runOnFunction(Function &F) override { // HLSL Change Starts if (OSOverride != nullptr) { *OSOverride << "\ngraph: " << "cfg." << F.getName() << ".dot\n"; llvm::WriteGraph(*OSOverride, (const Function*)&F, true, F.getName()); return false; } // HLSL Change Ends std::string Filename = ("cfg." + F.getName() + ".dot").str(); errs() << "Writing '" << Filename << "'..."; std::error_code EC; raw_fd_ostream File(Filename, EC, sys::fs::F_Text); if (!EC) WriteGraph(File, (const Function*)&F, true); else errs() << " error opening file for writing!"; errs() << "\n"; return false; } void print(raw_ostream &OS, const Module* = nullptr) const override {} void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesAll(); } }; } char CFGOnlyPrinter::ID = 0; INITIALIZE_PASS(CFGOnlyPrinter, "dot-cfg-only", "Print CFG of function to 'dot' file (with no function bodies)", false, true) /// viewCFG - This function is meant for use from the debugger. You can just /// say 'call F->viewCFG()' and a ghostview window should pop up from the /// program, displaying the CFG of the current function. This depends on there /// being a 'dot' and 'gv' program in your path. /// void Function::viewCFG() const { ViewGraph(this, "cfg" + getName()); } /// viewCFGOnly - This function is meant for use from the debugger. It works /// just like viewCFG, but it does not include the contents of basic blocks /// into the nodes, just the label. If you are only interested in the CFG /// this can make the graph smaller. /// void Function::viewCFGOnly() const { ViewGraph(this, "cfg" + getName(), true); } FunctionPass *llvm::createCFGPrinterPass () { return new CFGPrinter(); } FunctionPass *llvm::createCFGOnlyPrinterPass () { return new CFGOnlyPrinter(); } // HLSL Change Starts void llvm::initializeCFGPrinterPasses(PassRegistry &Registry) { initializeCFGPrinterPass(Registry); initializeCFGOnlyPrinterPass(Registry); initializeCFGViewerPass(Registry); initializeCFGOnlyViewerPass(Registry); } // HLSL Change Ends
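For programmatic use, the factory functions defined above can be scheduled with the legacy pass manager. The sketch assumes their declarations are reachable through llvm/Analysis/CFGPrinter.h and has the same effect as running opt with -dot-cfg / -dot-cfg-only.

#include "llvm/Analysis/CFGPrinter.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"

void emitCFGDotFiles(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  PM.add(llvm::createCFGPrinterPass());     // writes cfg.<function>.dot per function
  PM.add(llvm::createCFGOnlyPrinterPass()); // same graphs without basic-block bodies
  PM.run(M);
}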
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/AliasDebugger.cpp
//===- AliasDebugger.cpp - Simple Alias Analysis Use Checker --------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This simple pass checks alias analysis users to ensure that if they // create a new value, they do not query AA without informing it of the value. // It acts as a shim over any other AA pass you want. // // Yes keeping track of every value in the program is expensive, but this is // a debugging pass. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/Passes.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include <set> using namespace llvm; namespace { class AliasDebugger : public ModulePass, public AliasAnalysis { //What we do is simple. Keep track of every value the AA could //know about, and verify that queries are one of those. //A query to a value that didn't exist when the AA was created //means someone forgot to update the AA when creating new values std::set<const Value*> Vals; public: static char ID; // Class identification, replacement for typeinfo AliasDebugger() : ModulePass(ID) { initializeAliasDebuggerPass(*PassRegistry::getPassRegistry()); } bool runOnModule(Module &M) override { InitializeAliasAnalysis(this, &M.getDataLayout()); // set up super class for(Module::global_iterator I = M.global_begin(), E = M.global_end(); I != E; ++I) { Vals.insert(&*I); for (User::const_op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) Vals.insert(*OI); } for(Module::iterator I = M.begin(), E = M.end(); I != E; ++I){ Vals.insert(&*I); if(!I->isDeclaration()) { for (Function::arg_iterator AI = I->arg_begin(), AE = I->arg_end(); AI != AE; ++AI) Vals.insert(&*AI); for (Function::const_iterator FI = I->begin(), FE = I->end(); FI != FE; ++FI) for (BasicBlock::const_iterator BI = FI->begin(), BE = FI->end(); BI != BE; ++BI) { Vals.insert(&*BI); for (User::const_op_iterator OI = BI->op_begin(), OE = BI->op_end(); OI != OE; ++OI) Vals.insert(*OI); } } } return false; } void getAnalysisUsage(AnalysisUsage &AU) const override { AliasAnalysis::getAnalysisUsage(AU); AU.setPreservesAll(); // Does not transform code } /// getAdjustedAnalysisPointer - This method is used when a pass implements /// an analysis interface through multiple inheritance. If needed, it /// should override this to adjust the this pointer as needed for the /// specified pass info. 
void *getAdjustedAnalysisPointer(AnalysisID PI) override { if (PI == &AliasAnalysis::ID) return (AliasAnalysis*)this; return this; } //------------------------------------------------ // Implement the AliasAnalysis API // AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) override { assert(Vals.find(LocA.Ptr) != Vals.end() && "Never seen value in AA before"); assert(Vals.find(LocB.Ptr) != Vals.end() && "Never seen value in AA before"); return AliasAnalysis::alias(LocA, LocB); } ModRefResult getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) override { assert(Vals.find(Loc.Ptr) != Vals.end() && "Never seen value in AA before"); return AliasAnalysis::getModRefInfo(CS, Loc); } ModRefResult getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) override { return AliasAnalysis::getModRefInfo(CS1,CS2); } bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal) override { assert(Vals.find(Loc.Ptr) != Vals.end() && "Never seen value in AA before"); return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal); } void deleteValue(Value *V) override { assert(Vals.find(V) != Vals.end() && "Never seen value in AA before"); AliasAnalysis::deleteValue(V); } }; } char AliasDebugger::ID = 0; INITIALIZE_AG_PASS(AliasDebugger, AliasAnalysis, "debug-aa", "AA use debugger", false, true, false) Pass *llvm::createAliasDebugger() { return new AliasDebugger(); }
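// Illustrative sketch (not part of the original file): the shim is meant to be
// stacked between a concrete alias analysis and a pass that queries
// AliasAnalysis, so that queries on values the AA was never told about hit the
// asserts above. The helper runWithAliasDebugger and the AAConsumer parameter
// are hypothetical; createBasicAliasAnalysisPass is assumed to be declared in
// llvm/Analysis/Passes.h as in stock LLVM of this vintage.
#include "llvm/Analysis/Passes.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"

static void runWithAliasDebugger(llvm::Module &M, llvm::Pass *AAConsumer) {
  llvm::legacy::PassManager PM;
  PM.add(llvm::createBasicAliasAnalysisPass()); // concrete AA implementation
  PM.add(llvm::createAliasDebugger());          // validating shim defined above
  PM.add(AAConsumer);                           // pass that issues AA queries
  PM.run(M);
}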
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/DxilSimplify.cpp
//===-- DxilSimplify.cpp - Fold dxil intrinsics into constants -----===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // // Copyright (C) Microsoft Corporation. All rights reserved. // //===----------------------------------------------------------------------===// // // //===----------------------------------------------------------------------===// // simplify dxil op like mad 0, a, b->b. #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Module.h" #include "dxc/DXIL/DxilModule.h" #include "dxc/DXIL/DxilOperations.h" #include "llvm/Analysis/DxilConstantFolding.h" #include "llvm/Analysis/DxilSimplify.h" using namespace llvm; using namespace hlsl; namespace { DXIL::OpCode GetOpcode(Value *opArg) { if (ConstantInt *ci = dyn_cast<ConstantInt>(opArg)) { uint64_t opcode = ci->getLimitedValue(); if (opcode < static_cast<uint64_t>(OP::OpCode::NumOpCodes)) { return static_cast<OP::OpCode>(opcode); } } return DXIL::OpCode::NumOpCodes; } } // namespace namespace hlsl { bool CanSimplify(const llvm::Function *F) { // Only simplify dxil functions when we have a valid dxil module. if (!F->getParent()->HasDxilModule()) { assert(!OP::IsDxilOpFunc(F) && "dx.op function with no dxil module?"); return false; } if (CanConstantFoldCallTo(F)) return true; // Lookup opcode class in dxil module. Set default value to invalid class. OP::OpCodeClass opClass = OP::OpCodeClass::NumOpClasses; const bool found = F->getParent()->GetDxilModule().GetOP()->GetOpCodeClass(F, opClass); // Return true for those dxil operation classes we can simplify. if (found) { switch (opClass) { default: break; case OP::OpCodeClass::Tertiary: return true; } } return false; } /// \brief Given a function and set of arguments, see if we can fold the /// result as dxil operation. /// /// If this call could not be simplified returns null. Value *SimplifyDxilCall(llvm::Function *F, ArrayRef<Value *> Args, llvm::Instruction *I, bool MayInsert) { if (!F->getParent()->HasDxilModule()) { assert(!OP::IsDxilOpFunc(F) && "dx.op function with no dxil module?"); return nullptr; } DxilModule &DM = F->getParent()->GetDxilModule(); // Skip precise. if (DM.IsPrecise(I)) return nullptr; // Lookup opcode class in dxil module. Set default value to invalid class. 
OP::OpCodeClass opClass = OP::OpCodeClass::NumOpClasses; const bool found = DM.GetOP()->GetOpCodeClass(F, opClass); if (!found) return nullptr; DXIL::OpCode opcode = GetOpcode(Args[0]); if (opcode == DXIL::OpCode::NumOpCodes) return nullptr; if (CanConstantFoldCallTo(F)) { bool bAllConstant = true; SmallVector<Constant *, 4> ConstantArgs; ConstantArgs.reserve(Args.size()); for (Value *V : Args) { Constant *C = dyn_cast<Constant>(V); if (!C) { bAllConstant = false; break; } ConstantArgs.push_back(C); } if (bAllConstant) return hlsl::ConstantFoldScalarCall(F->getName(), F->getReturnType(), ConstantArgs); } switch (opcode) { default: return nullptr; case DXIL::OpCode::FMad: { Value *op0 = Args[DXIL::OperandIndex::kTrinarySrc0OpIdx]; Value *op2 = Args[DXIL::OperandIndex::kTrinarySrc2OpIdx]; Constant *zero = ConstantFP::get(op0->getType(), 0); if (op0 == zero) return op2; Value *op1 = Args[DXIL::OperandIndex::kTrinarySrc1OpIdx]; if (op1 == zero) return op2; if (MayInsert) { Constant *one = ConstantFP::get(op0->getType(), 1); if (op0 == one) { IRBuilder<> Builder(I); llvm::FastMathFlags FMF; FMF.setUnsafeAlgebraHLSL(); Builder.SetFastMathFlags(FMF); return Builder.CreateFAdd(op1, op2); } if (op1 == one) { IRBuilder<> Builder(I); llvm::FastMathFlags FMF; FMF.setUnsafeAlgebraHLSL(); Builder.SetFastMathFlags(FMF); return Builder.CreateFAdd(op0, op2); } } return nullptr; } break; case DXIL::OpCode::IMad: case DXIL::OpCode::UMad: { Value *op0 = Args[DXIL::OperandIndex::kTrinarySrc0OpIdx]; Value *op2 = Args[DXIL::OperandIndex::kTrinarySrc2OpIdx]; Constant *zero = ConstantInt::get(op0->getType(), 0); if (op0 == zero) return op2; Value *op1 = Args[DXIL::OperandIndex::kTrinarySrc1OpIdx]; if (op1 == zero) return op2; if (MayInsert) { Constant *one = ConstantInt::get(op0->getType(), 1); if (op0 == one) { IRBuilder<> Builder(I); return Builder.CreateAdd(op1, op2); } if (op1 == one) { IRBuilder<> Builder(I); return Builder.CreateAdd(op0, op2); } } return nullptr; } break; case DXIL::OpCode::UMax: { Value *op0 = Args[DXIL::OperandIndex::kBinarySrc0OpIdx]; Value *op1 = Args[DXIL::OperandIndex::kBinarySrc1OpIdx]; Constant *zero = ConstantInt::get(op0->getType(), 0); if (op0 == zero) return op1; if (op1 == zero) return op0; return nullptr; } break; } } } // namespace hlsl
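// Illustrative sketch (not part of the original file): how a caller walking
// instructions might use CanSimplify/SimplifyDxilCall. The helper foldDxilCall
// and the surrounding traversal are hypothetical; only the two hlsl:: entry
// points come from this file.
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/DxilSimplify.h"
#include "llvm/IR/Instructions.h"

static bool foldDxilCall(llvm::CallInst *CI) {
  llvm::Function *Callee = CI->getCalledFunction();
  if (!Callee || !hlsl::CanSimplify(Callee))
    return false;
  llvm::SmallVector<llvm::Value *, 8> Args(CI->arg_operands().begin(),
                                           CI->arg_operands().end());
  if (llvm::Value *V =
          hlsl::SimplifyDxilCall(Callee, Args, CI, /*MayInsert=*/true)) {
    CI->replaceAllUsesWith(V); // e.g. FMad(1.0, a, b) becomes a fast-math fadd
    CI->eraseFromParent();
    return true;
  }
  return false;
}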
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/TargetLibraryInfo.cpp
//===-- TargetLibraryInfo.cpp - Runtime library information ----------------==// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the TargetLibraryInfo class. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/ADT/Triple.h" #include "llvm/Support/CommandLine.h" using namespace llvm; #if 0 // HLSL Change Starts - option pending static cl::opt<TargetLibraryInfoImpl::VectorLibrary> ClVectorLibrary( "vector-library", cl::Hidden, cl::desc("Vector functions library"), cl::init(TargetLibraryInfoImpl::NoLibrary), cl::values(clEnumValN(TargetLibraryInfoImpl::NoLibrary, "none", "No vector functions library"), clEnumValN(TargetLibraryInfoImpl::Accelerate, "Accelerate", "Accelerate framework"), clEnumValEnd)); #else static const TargetLibraryInfoImpl::VectorLibrary ClVectorLibrary = TargetLibraryInfoImpl::NoLibrary; #endif // HLSL Change Ends const char *const TargetLibraryInfoImpl::StandardNames[LibFunc::NumLibFuncs] = { #define TLI_DEFINE_STRING #include "llvm/Analysis/TargetLibraryInfo.def" }; static bool hasSinCosPiStret(const Triple &T) { // Only Darwin variants have _stret versions of combined trig functions. if (!T.isOSDarwin()) return false; // The ABI is rather complicated on x86, so don't do anything special there. if (T.getArch() == Triple::x86) return false; if (T.isMacOSX() && T.isMacOSXVersionLT(10, 9)) return false; if (T.isiOS() && T.isOSVersionLT(7, 0)) return false; return true; } /// initialize - Initialize the set of available library functions based on the /// specified target triple. This should be carefully written so that a missing /// target triple gets a sane set of defaults. static void initialize(TargetLibraryInfoImpl &TLI, const Triple &T, const char *const *StandardNames) { #ifndef NDEBUG // Verify that the StandardNames array is in alphabetical order. for (unsigned F = 1; F < LibFunc::NumLibFuncs; ++F) { if (strcmp(StandardNames[F-1], StandardNames[F]) >= 0) llvm_unreachable("TargetLibraryInfoImpl function names must be sorted"); } #endif // !NDEBUG // There are no library implementations of mempcy and memset for AMD gpus and // these can be difficult to lower in the backend. if (T.getArch() == Triple::r600 || T.getArch() == Triple::dxil || // HLSL Change T.getArch() == Triple::dxil64 || // HLSL Change T.getArch() == Triple::amdgcn) { TLI.setUnavailable(LibFunc::memcpy); TLI.setUnavailable(LibFunc::memset); TLI.setUnavailable(LibFunc::memset_pattern16); return; } // memset_pattern16 is only available on iOS 3.0 and Mac OS X 10.5 and later. 
if (T.isMacOSX()) { if (T.isMacOSXVersionLT(10, 5)) TLI.setUnavailable(LibFunc::memset_pattern16); } else if (T.isiOS()) { if (T.isOSVersionLT(3, 0)) TLI.setUnavailable(LibFunc::memset_pattern16); } else { TLI.setUnavailable(LibFunc::memset_pattern16); } if (!hasSinCosPiStret(T)) { TLI.setUnavailable(LibFunc::sinpi); TLI.setUnavailable(LibFunc::sinpif); TLI.setUnavailable(LibFunc::cospi); TLI.setUnavailable(LibFunc::cospif); TLI.setUnavailable(LibFunc::sincospi_stret); TLI.setUnavailable(LibFunc::sincospif_stret); } if (T.isMacOSX() && T.getArch() == Triple::x86 && !T.isMacOSXVersionLT(10, 7)) { // x86-32 OSX has a scheme where fwrite and fputs (and some other functions // we don't care about) have two versions; on recent OSX, the one we want // has a $UNIX2003 suffix. The two implementations are identical except // for the return value in some edge cases. However, we don't want to // generate code that depends on the old symbols. TLI.setAvailableWithName(LibFunc::fwrite, "fwrite$UNIX2003"); TLI.setAvailableWithName(LibFunc::fputs, "fputs$UNIX2003"); } // iprintf and friends are only available on XCore and TCE. if (T.getArch() != Triple::xcore && T.getArch() != Triple::tce) { TLI.setUnavailable(LibFunc::iprintf); TLI.setUnavailable(LibFunc::siprintf); TLI.setUnavailable(LibFunc::fiprintf); } if (T.isOSWindows() && !T.isOSCygMing()) { // Win32 does not support long double TLI.setUnavailable(LibFunc::acosl); TLI.setUnavailable(LibFunc::asinl); TLI.setUnavailable(LibFunc::atanl); TLI.setUnavailable(LibFunc::atan2l); TLI.setUnavailable(LibFunc::ceill); TLI.setUnavailable(LibFunc::copysignl); TLI.setUnavailable(LibFunc::cosl); TLI.setUnavailable(LibFunc::coshl); TLI.setUnavailable(LibFunc::expl); TLI.setUnavailable(LibFunc::fabsf); // Win32 and Win64 both lack fabsf TLI.setUnavailable(LibFunc::fabsl); TLI.setUnavailable(LibFunc::floorl); TLI.setUnavailable(LibFunc::fmaxl); TLI.setUnavailable(LibFunc::fminl); TLI.setUnavailable(LibFunc::fmodl); TLI.setUnavailable(LibFunc::frexpl); TLI.setUnavailable(LibFunc::ldexpf); TLI.setUnavailable(LibFunc::ldexpl); TLI.setUnavailable(LibFunc::logl); TLI.setUnavailable(LibFunc::modfl); TLI.setUnavailable(LibFunc::powl); TLI.setUnavailable(LibFunc::sinl); TLI.setUnavailable(LibFunc::sinhl); TLI.setUnavailable(LibFunc::sqrtl); TLI.setUnavailable(LibFunc::tanl); TLI.setUnavailable(LibFunc::tanhl); // Win32 only has C89 math TLI.setUnavailable(LibFunc::acosh); TLI.setUnavailable(LibFunc::acoshf); TLI.setUnavailable(LibFunc::acoshl); TLI.setUnavailable(LibFunc::asinh); TLI.setUnavailable(LibFunc::asinhf); TLI.setUnavailable(LibFunc::asinhl); TLI.setUnavailable(LibFunc::atanh); TLI.setUnavailable(LibFunc::atanhf); TLI.setUnavailable(LibFunc::atanhl); TLI.setUnavailable(LibFunc::cbrt); TLI.setUnavailable(LibFunc::cbrtf); TLI.setUnavailable(LibFunc::cbrtl); TLI.setUnavailable(LibFunc::exp2); TLI.setUnavailable(LibFunc::exp2f); TLI.setUnavailable(LibFunc::exp2l); TLI.setUnavailable(LibFunc::expm1); TLI.setUnavailable(LibFunc::expm1f); TLI.setUnavailable(LibFunc::expm1l); TLI.setUnavailable(LibFunc::log2); TLI.setUnavailable(LibFunc::log2f); TLI.setUnavailable(LibFunc::log2l); TLI.setUnavailable(LibFunc::log1p); TLI.setUnavailable(LibFunc::log1pf); TLI.setUnavailable(LibFunc::log1pl); TLI.setUnavailable(LibFunc::logb); TLI.setUnavailable(LibFunc::logbf); TLI.setUnavailable(LibFunc::logbl); TLI.setUnavailable(LibFunc::nearbyint); TLI.setUnavailable(LibFunc::nearbyintf); TLI.setUnavailable(LibFunc::nearbyintl); TLI.setUnavailable(LibFunc::rint); 
TLI.setUnavailable(LibFunc::rintf); TLI.setUnavailable(LibFunc::rintl); TLI.setUnavailable(LibFunc::round); TLI.setUnavailable(LibFunc::roundf); TLI.setUnavailable(LibFunc::roundl); TLI.setUnavailable(LibFunc::trunc); TLI.setUnavailable(LibFunc::truncf); TLI.setUnavailable(LibFunc::truncl); // Win32 provides some C99 math with mangled names TLI.setAvailableWithName(LibFunc::copysign, "_copysign"); if (T.getArch() == Triple::x86) { // Win32 on x86 implements single-precision math functions as macros TLI.setUnavailable(LibFunc::acosf); TLI.setUnavailable(LibFunc::asinf); TLI.setUnavailable(LibFunc::atanf); TLI.setUnavailable(LibFunc::atan2f); TLI.setUnavailable(LibFunc::ceilf); TLI.setUnavailable(LibFunc::copysignf); TLI.setUnavailable(LibFunc::cosf); TLI.setUnavailable(LibFunc::coshf); TLI.setUnavailable(LibFunc::expf); TLI.setUnavailable(LibFunc::floorf); TLI.setUnavailable(LibFunc::fminf); TLI.setUnavailable(LibFunc::fmaxf); TLI.setUnavailable(LibFunc::fmodf); TLI.setUnavailable(LibFunc::logf); TLI.setUnavailable(LibFunc::powf); TLI.setUnavailable(LibFunc::sinf); TLI.setUnavailable(LibFunc::sinhf); TLI.setUnavailable(LibFunc::sqrtf); TLI.setUnavailable(LibFunc::tanf); TLI.setUnavailable(LibFunc::tanhf); } // Win32 does *not* provide provide these functions, but they are // generally available on POSIX-compliant systems: TLI.setUnavailable(LibFunc::access); TLI.setUnavailable(LibFunc::bcmp); TLI.setUnavailable(LibFunc::bcopy); TLI.setUnavailable(LibFunc::bzero); TLI.setUnavailable(LibFunc::chmod); TLI.setUnavailable(LibFunc::chown); TLI.setUnavailable(LibFunc::closedir); TLI.setUnavailable(LibFunc::ctermid); TLI.setUnavailable(LibFunc::fdopen); TLI.setUnavailable(LibFunc::ffs); TLI.setUnavailable(LibFunc::fileno); TLI.setUnavailable(LibFunc::flockfile); TLI.setUnavailable(LibFunc::fseeko); TLI.setUnavailable(LibFunc::fstat); TLI.setUnavailable(LibFunc::fstatvfs); TLI.setUnavailable(LibFunc::ftello); TLI.setUnavailable(LibFunc::ftrylockfile); TLI.setUnavailable(LibFunc::funlockfile); TLI.setUnavailable(LibFunc::getc_unlocked); TLI.setUnavailable(LibFunc::getitimer); TLI.setUnavailable(LibFunc::getlogin_r); TLI.setUnavailable(LibFunc::getpwnam); TLI.setUnavailable(LibFunc::gettimeofday); TLI.setUnavailable(LibFunc::htonl); TLI.setUnavailable(LibFunc::htons); TLI.setUnavailable(LibFunc::lchown); TLI.setUnavailable(LibFunc::lstat); TLI.setUnavailable(LibFunc::memccpy); TLI.setUnavailable(LibFunc::mkdir); TLI.setUnavailable(LibFunc::ntohl); TLI.setUnavailable(LibFunc::ntohs); TLI.setUnavailable(LibFunc::open); TLI.setUnavailable(LibFunc::opendir); TLI.setUnavailable(LibFunc::pclose); TLI.setUnavailable(LibFunc::popen); TLI.setUnavailable(LibFunc::pread); TLI.setUnavailable(LibFunc::pwrite); TLI.setUnavailable(LibFunc::read); TLI.setUnavailable(LibFunc::readlink); TLI.setUnavailable(LibFunc::realpath); TLI.setUnavailable(LibFunc::rmdir); TLI.setUnavailable(LibFunc::setitimer); TLI.setUnavailable(LibFunc::stat); TLI.setUnavailable(LibFunc::statvfs); TLI.setUnavailable(LibFunc::stpcpy); TLI.setUnavailable(LibFunc::stpncpy); TLI.setUnavailable(LibFunc::strcasecmp); TLI.setUnavailable(LibFunc::strncasecmp); TLI.setUnavailable(LibFunc::times); TLI.setUnavailable(LibFunc::uname); TLI.setUnavailable(LibFunc::unlink); TLI.setUnavailable(LibFunc::unsetenv); TLI.setUnavailable(LibFunc::utime); TLI.setUnavailable(LibFunc::utimes); TLI.setUnavailable(LibFunc::write); // Win32 does *not* provide provide these functions, but they are // specified by C99: TLI.setUnavailable(LibFunc::atoll); 
TLI.setUnavailable(LibFunc::frexpf); TLI.setUnavailable(LibFunc::llabs); } switch (T.getOS()) { case Triple::MacOSX: // exp10 and exp10f are not available on OS X until 10.9 and iOS until 7.0 // and their names are __exp10 and __exp10f. exp10l is not available on // OS X or iOS. TLI.setUnavailable(LibFunc::exp10l); if (T.isMacOSXVersionLT(10, 9)) { TLI.setUnavailable(LibFunc::exp10); TLI.setUnavailable(LibFunc::exp10f); } else { TLI.setAvailableWithName(LibFunc::exp10, "__exp10"); TLI.setAvailableWithName(LibFunc::exp10f, "__exp10f"); } break; case Triple::IOS: TLI.setUnavailable(LibFunc::exp10l); if (T.isOSVersionLT(7, 0)) { TLI.setUnavailable(LibFunc::exp10); TLI.setUnavailable(LibFunc::exp10f); } else { TLI.setAvailableWithName(LibFunc::exp10, "__exp10"); TLI.setAvailableWithName(LibFunc::exp10f, "__exp10f"); } break; case Triple::Linux: // exp10, exp10f, exp10l is available on Linux (GLIBC) but are extremely // buggy prior to glibc version 2.18. Until this version is widely deployed // or we have a reasonable detection strategy, we cannot use exp10 reliably // on Linux. // // Fall through to disable all of them. default: TLI.setUnavailable(LibFunc::exp10); TLI.setUnavailable(LibFunc::exp10f); TLI.setUnavailable(LibFunc::exp10l); } // ffsl is available on at least Darwin, Mac OS X, iOS, FreeBSD, and // Linux (GLIBC): // http://developer.apple.com/library/mac/#documentation/Darwin/Reference/ManPages/man3/ffsl.3.html // http://svn.freebsd.org/base/user/eri/pf45/head/lib/libc/string/ffsl.c // http://www.gnu.org/software/gnulib/manual/html_node/ffsl.html switch (T.getOS()) { case Triple::Darwin: case Triple::MacOSX: case Triple::IOS: case Triple::FreeBSD: case Triple::Linux: break; default: TLI.setUnavailable(LibFunc::ffsl); } // ffsll is available on at least FreeBSD and Linux (GLIBC): // http://svn.freebsd.org/base/user/eri/pf45/head/lib/libc/string/ffsll.c // http://www.gnu.org/software/gnulib/manual/html_node/ffsll.html switch (T.getOS()) { case Triple::FreeBSD: case Triple::Linux: break; default: TLI.setUnavailable(LibFunc::ffsll); } // The following functions are available on at least Linux: if (!T.isOSLinux()) { TLI.setUnavailable(LibFunc::dunder_strdup); TLI.setUnavailable(LibFunc::dunder_strtok_r); TLI.setUnavailable(LibFunc::dunder_isoc99_scanf); TLI.setUnavailable(LibFunc::dunder_isoc99_sscanf); TLI.setUnavailable(LibFunc::under_IO_getc); TLI.setUnavailable(LibFunc::under_IO_putc); TLI.setUnavailable(LibFunc::memalign); //TLI.setUnavailable(LibFunc::fopen64); // HLSL Change - duplicate 64bit versions //TLI.setUnavailable(LibFunc::fseeko64); // HLSL Change - duplicate 64bit versions TLI.setUnavailable(LibFunc::fstat64); TLI.setUnavailable(LibFunc::fstatvfs64); //TLI.setUnavailable(LibFunc::ftello64); // HLSL Change - duplicate 64bit versions TLI.setUnavailable(LibFunc::lstat64); TLI.setUnavailable(LibFunc::open64); TLI.setUnavailable(LibFunc::stat64); TLI.setUnavailable(LibFunc::statvfs64); //TLI.setUnavailable(LibFunc::tmpfile64); // HLSL Change - duplicate 64bit versions } TLI.addVectorizableFunctionsFromVecLib(ClVectorLibrary); } TargetLibraryInfoImpl::TargetLibraryInfoImpl() { // Default to everything being available. memset(AvailableArray, -1, sizeof(AvailableArray)); initialize(*this, Triple(), StandardNames); } TargetLibraryInfoImpl::TargetLibraryInfoImpl(const Triple &T) { // Default to everything being available. 
memset(AvailableArray, -1, sizeof(AvailableArray)); initialize(*this, T, StandardNames); } TargetLibraryInfoImpl::TargetLibraryInfoImpl(const TargetLibraryInfoImpl &TLI) : CustomNames(TLI.CustomNames) { memcpy(AvailableArray, TLI.AvailableArray, sizeof(AvailableArray)); VectorDescs = TLI.VectorDescs; ScalarDescs = TLI.ScalarDescs; } TargetLibraryInfoImpl::TargetLibraryInfoImpl(TargetLibraryInfoImpl &&TLI) : CustomNames(std::move(TLI.CustomNames)) { std::move(std::begin(TLI.AvailableArray), std::end(TLI.AvailableArray), AvailableArray); VectorDescs = TLI.VectorDescs; ScalarDescs = TLI.ScalarDescs; } TargetLibraryInfoImpl &TargetLibraryInfoImpl::operator=(const TargetLibraryInfoImpl &TLI) { CustomNames = TLI.CustomNames; memcpy(AvailableArray, TLI.AvailableArray, sizeof(AvailableArray)); return *this; } TargetLibraryInfoImpl &TargetLibraryInfoImpl::operator=(TargetLibraryInfoImpl &&TLI) { CustomNames = std::move(TLI.CustomNames); std::move(std::begin(TLI.AvailableArray), std::end(TLI.AvailableArray), AvailableArray); return *this; } static StringRef sanitizeFunctionName(StringRef funcName) { // Filter out empty names and names containing null bytes, those can't be in // our table. if (funcName.empty() || funcName.find('\0') != StringRef::npos) return StringRef(); // Check for \01 prefix that is used to mangle __asm declarations and // strip it if present. return GlobalValue::getRealLinkageName(funcName); } bool TargetLibraryInfoImpl::getLibFunc(StringRef funcName, LibFunc::Func &F) const { const char *const *Start = &StandardNames[0]; const char *const *End = &StandardNames[LibFunc::NumLibFuncs]; funcName = sanitizeFunctionName(funcName); if (funcName.empty()) return false; const char *const *I = std::lower_bound( Start, End, funcName, [](const char *LHS, StringRef RHS) { return std::strncmp(LHS, RHS.data(), RHS.size()) < 0; }); if (I != End && *I == funcName) { F = (LibFunc::Func)(I - Start); return true; } return false; } void TargetLibraryInfoImpl::disableAllFunctions() { memset(AvailableArray, 0, sizeof(AvailableArray)); } static bool compareByScalarFnName(const VecDesc &LHS, const VecDesc &RHS) { return std::strncmp(LHS.ScalarFnName, RHS.ScalarFnName, std::strlen(RHS.ScalarFnName)) < 0; } static bool compareByVectorFnName(const VecDesc &LHS, const VecDesc &RHS) { return std::strncmp(LHS.VectorFnName, RHS.VectorFnName, std::strlen(RHS.VectorFnName)) < 0; } static bool compareWithScalarFnName(const VecDesc &LHS, StringRef S) { return std::strncmp(LHS.ScalarFnName, S.data(), S.size()) < 0; } static bool compareWithVectorFnName(const VecDesc &LHS, StringRef S) { return std::strncmp(LHS.VectorFnName, S.data(), S.size()) < 0; } void TargetLibraryInfoImpl::addVectorizableFunctions(ArrayRef<VecDesc> Fns) { VectorDescs.insert(VectorDescs.end(), Fns.begin(), Fns.end()); std::sort(VectorDescs.begin(), VectorDescs.end(), compareByScalarFnName); ScalarDescs.insert(ScalarDescs.end(), Fns.begin(), Fns.end()); std::sort(ScalarDescs.begin(), ScalarDescs.end(), compareByVectorFnName); } void TargetLibraryInfoImpl::addVectorizableFunctionsFromVecLib( enum VectorLibrary VecLib) { switch (VecLib) { case Accelerate: { const VecDesc VecFuncs[] = { // Floating-Point Arithmetic and Auxiliary Functions {"ceilf", "vceilf", 4}, {"fabsf", "vfabsf", 4}, {"llvm.fabs.f32", "vfabsf", 4}, {"floorf", "vfloorf", 4}, {"sqrtf", "vsqrtf", 4}, {"llvm.sqrt.f32", "vsqrtf", 4}, // Exponential and Logarithmic Functions {"expf", "vexpf", 4}, {"llvm.exp.f32", "vexpf", 4}, {"expm1f", "vexpm1f", 4}, {"logf", "vlogf", 4}, 
{"llvm.log.f32", "vlogf", 4}, {"log1pf", "vlog1pf", 4}, {"log10f", "vlog10f", 4}, {"llvm.log10.f32", "vlog10f", 4}, {"logbf", "vlogbf", 4}, // Trigonometric Functions {"sinf", "vsinf", 4}, {"llvm.sin.f32", "vsinf", 4}, {"cosf", "vcosf", 4}, {"llvm.cos.f32", "vcosf", 4}, {"tanf", "vtanf", 4}, {"asinf", "vasinf", 4}, {"acosf", "vacosf", 4}, {"atanf", "vatanf", 4}, // Hyperbolic Functions {"sinhf", "vsinhf", 4}, {"coshf", "vcoshf", 4}, {"tanhf", "vtanhf", 4}, {"asinhf", "vasinhf", 4}, {"acoshf", "vacoshf", 4}, {"atanhf", "vatanhf", 4}, }; addVectorizableFunctions(VecFuncs); break; } case NoLibrary: break; } } bool TargetLibraryInfoImpl::isFunctionVectorizable(StringRef funcName) const { funcName = sanitizeFunctionName(funcName); if (funcName.empty()) return false; std::vector<VecDesc>::const_iterator I = std::lower_bound( VectorDescs.begin(), VectorDescs.end(), funcName, compareWithScalarFnName); return I != VectorDescs.end() && StringRef(I->ScalarFnName) == funcName; } StringRef TargetLibraryInfoImpl::getVectorizedFunction(StringRef F, unsigned VF) const { F = sanitizeFunctionName(F); if (F.empty()) return F; std::vector<VecDesc>::const_iterator I = std::lower_bound( VectorDescs.begin(), VectorDescs.end(), F, compareWithScalarFnName); while (I != VectorDescs.end() && StringRef(I->ScalarFnName) == F) { if (I->VectorizationFactor == VF) return I->VectorFnName; ++I; } return StringRef(); } StringRef TargetLibraryInfoImpl::getScalarizedFunction(StringRef F, unsigned &VF) const { F = sanitizeFunctionName(F); if (F.empty()) return F; std::vector<VecDesc>::const_iterator I = std::lower_bound( ScalarDescs.begin(), ScalarDescs.end(), F, compareWithVectorFnName); if (I == VectorDescs.end() || StringRef(I->VectorFnName) != F) return StringRef(); VF = I->VectorizationFactor; return I->ScalarFnName; } TargetLibraryInfo TargetLibraryAnalysis::run(Module &M) { if (PresetInfoImpl) return TargetLibraryInfo(*PresetInfoImpl); return TargetLibraryInfo(lookupInfoImpl(Triple(M.getTargetTriple()))); } TargetLibraryInfo TargetLibraryAnalysis::run(Function &F) { if (PresetInfoImpl) return TargetLibraryInfo(*PresetInfoImpl); return TargetLibraryInfo( lookupInfoImpl(Triple(F.getParent()->getTargetTriple()))); } TargetLibraryInfoImpl &TargetLibraryAnalysis::lookupInfoImpl(Triple T) { std::unique_ptr<TargetLibraryInfoImpl> &Impl = Impls[T.normalize()]; if (!Impl) Impl.reset(new TargetLibraryInfoImpl(T)); return *Impl; } TargetLibraryInfoWrapperPass::TargetLibraryInfoWrapperPass() : ImmutablePass(ID), TLIImpl(), TLI(TLIImpl) { initializeTargetLibraryInfoWrapperPassPass(*PassRegistry::getPassRegistry()); } TargetLibraryInfoWrapperPass::TargetLibraryInfoWrapperPass(const Triple &T) : ImmutablePass(ID), TLIImpl(T), TLI(TLIImpl) { initializeTargetLibraryInfoWrapperPassPass(*PassRegistry::getPassRegistry()); } TargetLibraryInfoWrapperPass::TargetLibraryInfoWrapperPass( const TargetLibraryInfoImpl &TLIImpl) : ImmutablePass(ID), TLIImpl(TLIImpl), TLI(this->TLIImpl) { initializeTargetLibraryInfoWrapperPassPass(*PassRegistry::getPassRegistry()); } char TargetLibraryAnalysis::PassID; // Register the basic pass. INITIALIZE_PASS(TargetLibraryInfoWrapperPass, "targetlibinfo", "Target Library Information", false, true) char TargetLibraryInfoWrapperPass::ID = 0; void TargetLibraryInfoWrapperPass::anchor() {}
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/DependenceAnalysis.cpp
//===-- DependenceAnalysis.cpp - DA Implementation --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // DependenceAnalysis is an LLVM pass that analyses dependences between memory // accesses. Currently, it is an (incomplete) implementation of the approach // described in // // Practical Dependence Testing // Goff, Kennedy, Tseng // PLDI 1991 // // There's a single entry point that analyzes the dependence between a pair // of memory references in a function, returning either NULL, for no dependence, // or a more-or-less detailed description of the dependence between them. // // Currently, the implementation cannot propagate constraints between // coupled RDIV subscripts and lacks a multi-subscript MIV test. // Both of these are conservative weaknesses; // that is, not a source of correctness problems. // // The implementation depends on the GEP instruction to differentiate // subscripts. Since Clang linearizes some array subscripts, the dependence // analysis is using SCEV->delinearize to recover the representation of multiple // subscripts, and thus avoid the more expensive and less precise MIV tests. The // delinearization is controlled by the flag -da-delinearize. // // We should pay some careful attention to the possibility of integer overflow // in the implementation of the various tests. This could happen with Add, // Subtract, or Multiply, with both APInt's and SCEV's. // // Some non-linear subscript pairs can be handled by the GCD test // (and perhaps other tests). // Should explore how often these things occur. // // Finally, it seems like certain test cases expose weaknesses in the SCEV // simplification, especially in the handling of sign and zero extensions. // It could be useful to spend time exploring these. // // Please note that this is work in progress and the interface is subject to // change. 
// //===----------------------------------------------------------------------===// // // // In memory of Ken Kennedy, 1945 - 2007 // // // //===----------------------------------------------------------------------===// #include "llvm/Analysis/DependenceAnalysis.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/ScalarEvolutionExpressions.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/InstIterator.h" #include "llvm/IR/Module.h" #include "llvm/IR/Operator.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; #define DEBUG_TYPE "da" //===----------------------------------------------------------------------===// // statistics STATISTIC(TotalArrayPairs, "Array pairs tested"); STATISTIC(SeparableSubscriptPairs, "Separable subscript pairs"); STATISTIC(CoupledSubscriptPairs, "Coupled subscript pairs"); STATISTIC(NonlinearSubscriptPairs, "Nonlinear subscript pairs"); STATISTIC(ZIVapplications, "ZIV applications"); STATISTIC(ZIVindependence, "ZIV independence"); STATISTIC(StrongSIVapplications, "Strong SIV applications"); STATISTIC(StrongSIVsuccesses, "Strong SIV successes"); STATISTIC(StrongSIVindependence, "Strong SIV independence"); STATISTIC(WeakCrossingSIVapplications, "Weak-Crossing SIV applications"); STATISTIC(WeakCrossingSIVsuccesses, "Weak-Crossing SIV successes"); STATISTIC(WeakCrossingSIVindependence, "Weak-Crossing SIV independence"); STATISTIC(ExactSIVapplications, "Exact SIV applications"); STATISTIC(ExactSIVsuccesses, "Exact SIV successes"); STATISTIC(ExactSIVindependence, "Exact SIV independence"); STATISTIC(WeakZeroSIVapplications, "Weak-Zero SIV applications"); STATISTIC(WeakZeroSIVsuccesses, "Weak-Zero SIV successes"); STATISTIC(WeakZeroSIVindependence, "Weak-Zero SIV independence"); STATISTIC(ExactRDIVapplications, "Exact RDIV applications"); STATISTIC(ExactRDIVindependence, "Exact RDIV independence"); STATISTIC(SymbolicRDIVapplications, "Symbolic RDIV applications"); STATISTIC(SymbolicRDIVindependence, "Symbolic RDIV independence"); STATISTIC(DeltaApplications, "Delta applications"); STATISTIC(DeltaSuccesses, "Delta successes"); STATISTIC(DeltaIndependence, "Delta independence"); STATISTIC(DeltaPropagations, "Delta propagations"); STATISTIC(GCDapplications, "GCD applications"); STATISTIC(GCDsuccesses, "GCD successes"); STATISTIC(GCDindependence, "GCD independence"); STATISTIC(BanerjeeApplications, "Banerjee applications"); STATISTIC(BanerjeeIndependence, "Banerjee independence"); STATISTIC(BanerjeeSuccesses, "Banerjee successes"); #if 0 // HLSL Change Starts - option pending static cl::opt<bool> Delinearize("da-delinearize", cl::init(false), cl::Hidden, cl::ZeroOrMore, cl::desc("Try to delinearize array references.")); #else static const bool Delinearize = false; #endif // HLSL Change Ends //===----------------------------------------------------------------------===// // basics INITIALIZE_PASS_BEGIN(DependenceAnalysis, "da", "Dependence Analysis", true, true) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_END(DependenceAnalysis, "da", "Dependence Analysis", true, true) char DependenceAnalysis::ID = 0; FunctionPass *llvm::createDependenceAnalysisPass() { return new DependenceAnalysis(); 
} bool DependenceAnalysis::runOnFunction(Function &F) { this->F = &F; AA = &getAnalysis<AliasAnalysis>(); SE = &getAnalysis<ScalarEvolution>(); LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); return false; } void DependenceAnalysis::releaseMemory() { } void DependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesAll(); AU.addRequiredTransitive<AliasAnalysis>(); AU.addRequiredTransitive<ScalarEvolution>(); AU.addRequiredTransitive<LoopInfoWrapperPass>(); } // Used to test the dependence analyzer. // Looks through the function, noting loads and stores. // Calls depends() on every possible pair and prints out the result. // Ignores all other instructions. static void dumpExampleDependence(raw_ostream &OS, Function *F, DependenceAnalysis *DA) { for (inst_iterator SrcI = inst_begin(F), SrcE = inst_end(F); SrcI != SrcE; ++SrcI) { if (isa<StoreInst>(*SrcI) || isa<LoadInst>(*SrcI)) { for (inst_iterator DstI = SrcI, DstE = inst_end(F); DstI != DstE; ++DstI) { if (isa<StoreInst>(*DstI) || isa<LoadInst>(*DstI)) { OS << "da analyze - "; if (auto D = DA->depends(&*SrcI, &*DstI, true)) { D->dump(OS); for (unsigned Level = 1; Level <= D->getLevels(); Level++) { if (D->isSplitable(Level)) { OS << "da analyze - split level = " << Level; OS << ", iteration = " << *DA->getSplitIteration(*D, Level); OS << "!\n"; } } } else OS << "none!\n"; } } } } } void DependenceAnalysis::print(raw_ostream &OS, const Module*) const { dumpExampleDependence(OS, F, const_cast<DependenceAnalysis *>(this)); } //===----------------------------------------------------------------------===// // Dependence methods // Returns true if this is an input dependence. bool Dependence::isInput() const { return Src->mayReadFromMemory() && Dst->mayReadFromMemory(); } // Returns true if this is an output dependence. bool Dependence::isOutput() const { return Src->mayWriteToMemory() && Dst->mayWriteToMemory(); } // Returns true if this is an flow (aka true) dependence. bool Dependence::isFlow() const { return Src->mayWriteToMemory() && Dst->mayReadFromMemory(); } // Returns true if this is an anti dependence. bool Dependence::isAnti() const { return Src->mayReadFromMemory() && Dst->mayWriteToMemory(); } // Returns true if a particular level is scalar; that is, // if no subscript in the source or destination mention the induction // variable associated with the loop at this level. // Leave this out of line, so it will serve as a virtual method anchor bool Dependence::isScalar(unsigned level) const { return false; } //===----------------------------------------------------------------------===// // FullDependence methods FullDependence::FullDependence(Instruction *Source, Instruction *Destination, bool PossiblyLoopIndependent, unsigned CommonLevels) : Dependence(Source, Destination), Levels(CommonLevels), LoopIndependent(PossiblyLoopIndependent) { Consistent = true; DV = CommonLevels ? new DVEntry[CommonLevels] : nullptr; } // The rest are simple getters that hide the implementation. // getDirection - Returns the direction associated with a particular level. unsigned FullDependence::getDirection(unsigned Level) const { assert(0 < Level && Level <= Levels && "Level out of range"); assert(0 < Level && Level <= Levels); // HLSL Change - TVS return DV[Level - 1].Direction; } // Returns the distance (or NULL) associated with a particular level. 
const SCEV *FullDependence::getDistance(unsigned Level) const { assert(0 < Level && Level <= Levels && "Level out of range"); assert(0 < Level && Level <= Levels); // HLSL Change - TVS return DV[Level - 1].Distance; } // Returns true if a particular level is scalar; that is, // if no subscript in the source or destination mention the induction // variable associated with the loop at this level. bool FullDependence::isScalar(unsigned Level) const { assert(0 < Level && Level <= Levels && "Level out of range"); assert(0 < Level && Level <= Levels); // HLSL Change - TVS return DV[Level - 1].Scalar; } // Returns true if peeling the first iteration from this loop // will break this dependence. bool FullDependence::isPeelFirst(unsigned Level) const { assert(0 < Level && Level <= Levels && "Level out of range"); assert(0 < Level && Level <= Levels); // HLSL Change - TVS return DV[Level - 1].PeelFirst; } // Returns true if peeling the last iteration from this loop // will break this dependence. bool FullDependence::isPeelLast(unsigned Level) const { assert(0 < Level && Level <= Levels && "Level out of range"); assert(0 < Level && Level <= Levels); // HLSL Change - TVS return DV[Level - 1].PeelLast; } // Returns true if splitting this loop will break the dependence. bool FullDependence::isSplitable(unsigned Level) const { assert(0 < Level && Level <= Levels && "Level out of range"); assert(0 < Level && Level <= Levels); // HLSL Change - TVS return DV[Level - 1].Splitable; } //===----------------------------------------------------------------------===// // DependenceAnalysis::Constraint methods // If constraint is a point <X, Y>, returns X. // Otherwise assert. const SCEV *DependenceAnalysis::Constraint::getX() const { assert(Kind == Point && "Kind should be Point"); return A; } // If constraint is a point <X, Y>, returns Y. // Otherwise assert. const SCEV *DependenceAnalysis::Constraint::getY() const { assert(Kind == Point && "Kind should be Point"); return B; } // If constraint is a line AX + BY = C, returns A. // Otherwise assert. const SCEV *DependenceAnalysis::Constraint::getA() const { assert((Kind == Line || Kind == Distance) && "Kind should be Line (or Distance)"); return A; } // If constraint is a line AX + BY = C, returns B. // Otherwise assert. const SCEV *DependenceAnalysis::Constraint::getB() const { assert((Kind == Line || Kind == Distance) && "Kind should be Line (or Distance)"); return B; } // If constraint is a line AX + BY = C, returns C. // Otherwise assert. const SCEV *DependenceAnalysis::Constraint::getC() const { assert((Kind == Line || Kind == Distance) && "Kind should be Line (or Distance)"); return C; } // If constraint is a distance, returns D. // Otherwise assert. const SCEV *DependenceAnalysis::Constraint::getD() const { assert(Kind == Distance && "Kind should be Distance"); return SE->getNegativeSCEV(C); } // Returns the loop associated with this constraint. 
const Loop *DependenceAnalysis::Constraint::getAssociatedLoop() const { assert((Kind == Distance || Kind == Line || Kind == Point) && "Kind should be Distance, Line, or Point"); return AssociatedLoop; } void DependenceAnalysis::Constraint::setPoint(const SCEV *X, const SCEV *Y, const Loop *CurLoop) { Kind = Point; A = X; B = Y; AssociatedLoop = CurLoop; } void DependenceAnalysis::Constraint::setLine(const SCEV *AA, const SCEV *BB, const SCEV *CC, const Loop *CurLoop) { Kind = Line; A = AA; B = BB; C = CC; AssociatedLoop = CurLoop; } void DependenceAnalysis::Constraint::setDistance(const SCEV *D, const Loop *CurLoop) { Kind = Distance; A = SE->getConstant(D->getType(), 1); B = SE->getNegativeSCEV(A); C = SE->getNegativeSCEV(D); AssociatedLoop = CurLoop; } void DependenceAnalysis::Constraint::setEmpty() { Kind = Empty; } void DependenceAnalysis::Constraint::setAny(ScalarEvolution *NewSE) { SE = NewSE; Kind = Any; } // For debugging purposes. Dumps the constraint out to OS. void DependenceAnalysis::Constraint::dump(raw_ostream &OS) const { if (isEmpty()) OS << " Empty\n"; else if (isAny()) OS << " Any\n"; else if (isPoint()) OS << " Point is <" << *getX() << ", " << *getY() << ">\n"; else if (isDistance()) OS << " Distance is " << *getD() << " (" << *getA() << "*X + " << *getB() << "*Y = " << *getC() << ")\n"; else if (isLine()) OS << " Line is " << *getA() << "*X + " << *getB() << "*Y = " << *getC() << "\n"; else llvm_unreachable("unknown constraint type in Constraint::dump"); } // Updates X with the intersection // of the Constraints X and Y. Returns true if X has changed. // Corresponds to Figure 4 from the paper // // Practical Dependence Testing // Goff, Kennedy, Tseng // PLDI 1991 bool DependenceAnalysis::intersectConstraints(Constraint *X, const Constraint *Y) { ++DeltaApplications; DEBUG(dbgs() << "\tintersect constraints\n"); DEBUG(dbgs() << "\t X ="; X->dump(dbgs())); DEBUG(dbgs() << "\t Y ="; Y->dump(dbgs())); assert(!Y->isPoint() && "Y must not be a Point"); if (X->isAny()) { if (Y->isAny()) return false; *X = *Y; return true; } if (X->isEmpty()) return false; if (Y->isEmpty()) { X->setEmpty(); return true; } if (X->isDistance() && Y->isDistance()) { DEBUG(dbgs() << "\t intersect 2 distances\n"); if (isKnownPredicate(CmpInst::ICMP_EQ, X->getD(), Y->getD())) return false; if (isKnownPredicate(CmpInst::ICMP_NE, X->getD(), Y->getD())) { X->setEmpty(); ++DeltaSuccesses; return true; } // Hmmm, interesting situation. // I guess if either is constant, keep it and ignore the other. if (isa<SCEVConstant>(Y->getD())) { *X = *Y; return true; } return false; } // At this point, the pseudo-code in Figure 4 of the paper // checks if (X->isPoint() && Y->isPoint()). // This case can't occur in our implementation, // since a Point can only arise as the result of intersecting // two Line constraints, and the right-hand value, Y, is never // the result of an intersection. 
assert(!(X->isPoint() && Y->isPoint()) && "We shouldn't ever see X->isPoint() && Y->isPoint()"); if (X->isLine() && Y->isLine()) { DEBUG(dbgs() << "\t intersect 2 lines\n"); const SCEV *Prod1 = SE->getMulExpr(X->getA(), Y->getB()); const SCEV *Prod2 = SE->getMulExpr(X->getB(), Y->getA()); if (isKnownPredicate(CmpInst::ICMP_EQ, Prod1, Prod2)) { // slopes are equal, so lines are parallel DEBUG(dbgs() << "\t\tsame slope\n"); Prod1 = SE->getMulExpr(X->getC(), Y->getB()); Prod2 = SE->getMulExpr(X->getB(), Y->getC()); if (isKnownPredicate(CmpInst::ICMP_EQ, Prod1, Prod2)) return false; if (isKnownPredicate(CmpInst::ICMP_NE, Prod1, Prod2)) { X->setEmpty(); ++DeltaSuccesses; return true; } return false; } if (isKnownPredicate(CmpInst::ICMP_NE, Prod1, Prod2)) { // slopes differ, so lines intersect DEBUG(dbgs() << "\t\tdifferent slopes\n"); const SCEV *C1B2 = SE->getMulExpr(X->getC(), Y->getB()); const SCEV *C1A2 = SE->getMulExpr(X->getC(), Y->getA()); const SCEV *C2B1 = SE->getMulExpr(Y->getC(), X->getB()); const SCEV *C2A1 = SE->getMulExpr(Y->getC(), X->getA()); const SCEV *A1B2 = SE->getMulExpr(X->getA(), Y->getB()); const SCEV *A2B1 = SE->getMulExpr(Y->getA(), X->getB()); const SCEVConstant *C1A2_C2A1 = dyn_cast<SCEVConstant>(SE->getMinusSCEV(C1A2, C2A1)); const SCEVConstant *C1B2_C2B1 = dyn_cast<SCEVConstant>(SE->getMinusSCEV(C1B2, C2B1)); const SCEVConstant *A1B2_A2B1 = dyn_cast<SCEVConstant>(SE->getMinusSCEV(A1B2, A2B1)); const SCEVConstant *A2B1_A1B2 = dyn_cast<SCEVConstant>(SE->getMinusSCEV(A2B1, A1B2)); if (!C1B2_C2B1 || !C1A2_C2A1 || !A1B2_A2B1 || !A2B1_A1B2) return false; APInt Xtop = C1B2_C2B1->getValue()->getValue(); APInt Xbot = A1B2_A2B1->getValue()->getValue(); APInt Ytop = C1A2_C2A1->getValue()->getValue(); APInt Ybot = A2B1_A1B2->getValue()->getValue(); DEBUG(dbgs() << "\t\tXtop = " << Xtop << "\n"); DEBUG(dbgs() << "\t\tXbot = " << Xbot << "\n"); DEBUG(dbgs() << "\t\tYtop = " << Ytop << "\n"); DEBUG(dbgs() << "\t\tYbot = " << Ybot << "\n"); APInt Xq = Xtop; // these need to be initialized, even APInt Xr = Xtop; // though they're just going to be overwritten APInt::sdivrem(Xtop, Xbot, Xq, Xr); APInt Yq = Ytop; APInt Yr = Ytop; APInt::sdivrem(Ytop, Ybot, Yq, Yr); if (Xr != 0 || Yr != 0) { X->setEmpty(); ++DeltaSuccesses; return true; } DEBUG(dbgs() << "\t\tX = " << Xq << ", Y = " << Yq << "\n"); if (Xq.slt(0) || Yq.slt(0)) { X->setEmpty(); ++DeltaSuccesses; return true; } if (const SCEVConstant *CUB = collectConstantUpperBound(X->getAssociatedLoop(), Prod1->getType())) { APInt UpperBound = CUB->getValue()->getValue(); DEBUG(dbgs() << "\t\tupper bound = " << UpperBound << "\n"); if (Xq.sgt(UpperBound) || Yq.sgt(UpperBound)) { X->setEmpty(); ++DeltaSuccesses; return true; } } X->setPoint(SE->getConstant(Xq), SE->getConstant(Yq), X->getAssociatedLoop()); ++DeltaSuccesses; return true; } return false; } // if (X->isLine() && Y->isPoint()) This case can't occur. 
assert(!(X->isLine() && Y->isPoint()) && "This case should never occur"); if (X->isPoint() && Y->isLine()) { DEBUG(dbgs() << "\t intersect Point and Line\n"); const SCEV *A1X1 = SE->getMulExpr(Y->getA(), X->getX()); const SCEV *B1Y1 = SE->getMulExpr(Y->getB(), X->getY()); const SCEV *Sum = SE->getAddExpr(A1X1, B1Y1); if (isKnownPredicate(CmpInst::ICMP_EQ, Sum, Y->getC())) return false; if (isKnownPredicate(CmpInst::ICMP_NE, Sum, Y->getC())) { X->setEmpty(); ++DeltaSuccesses; return true; } return false; } llvm_unreachable("shouldn't reach the end of Constraint intersection"); return false; } //===----------------------------------------------------------------------===// // DependenceAnalysis methods // For debugging purposes. Dumps a dependence to OS. void Dependence::dump(raw_ostream &OS) const { bool Splitable = false; if (isConfused()) OS << "confused"; else { if (isConsistent()) OS << "consistent "; if (isFlow()) OS << "flow"; else if (isOutput()) OS << "output"; else if (isAnti()) OS << "anti"; else if (isInput()) OS << "input"; unsigned Levels = getLevels(); OS << " ["; for (unsigned II = 1; II <= Levels; ++II) { if (isSplitable(II)) Splitable = true; if (isPeelFirst(II)) OS << 'p'; const SCEV *Distance = getDistance(II); if (Distance) OS << *Distance; else if (isScalar(II)) OS << "S"; else { unsigned Direction = getDirection(II); if (Direction == DVEntry::ALL) OS << "*"; else { if (Direction & DVEntry::LT) OS << "<"; if (Direction & DVEntry::EQ) OS << "="; if (Direction & DVEntry::GT) OS << ">"; } } if (isPeelLast(II)) OS << 'p'; if (II < Levels) OS << " "; } if (isLoopIndependent()) OS << "|<"; OS << "]"; if (Splitable) OS << " splitable"; } OS << "!\n"; } static AliasResult underlyingObjectsAlias(AliasAnalysis *AA, const DataLayout &DL, const Value *A, const Value *B) { const Value *AObj = GetUnderlyingObject(A, DL); const Value *BObj = GetUnderlyingObject(B, DL); return AA->alias(AObj, AA->getTypeStoreSize(AObj->getType()), BObj, AA->getTypeStoreSize(BObj->getType())); } // Returns true if the load or store can be analyzed. Atomic and volatile // operations have properties which this analysis does not understand. static bool isLoadOrStore(const Instruction *I) { if (const LoadInst *LI = dyn_cast<LoadInst>(I)) return LI->isUnordered(); else if (const StoreInst *SI = dyn_cast<StoreInst>(I)) return SI->isUnordered(); return false; } static Value *getPointerOperand(Instruction *I) { if (LoadInst *LI = dyn_cast<LoadInst>(I)) return LI->getPointerOperand(); if (StoreInst *SI = dyn_cast<StoreInst>(I)) return SI->getPointerOperand(); llvm_unreachable("Value is not load or store instruction"); return nullptr; } // Examines the loop nesting of the Src and Dst // instructions and establishes their shared loops. Sets the variables // CommonLevels, SrcLevels, and MaxLevels. // The source and destination instructions needn't be contained in the same // loop. The routine establishNestingLevels finds the level of most deeply // nested loop that contains them both, CommonLevels. An instruction that's // not contained in a loop is at level = 0. MaxLevels is equal to the level // of the source plus the level of the destination, minus CommonLevels. // This lets us allocate vectors MaxLevels in length, with room for every // distinct loop referenced in both the source and destination subscripts. // The variable SrcLevels is the nesting depth of the source instruction. // It's used to help calculate distinct loops referenced by the destination. 
// Here's the map from loops to levels: // 0 - unused // 1 - outermost common loop // ... - other common loops // CommonLevels - innermost common loop // ... - loops containing Src but not Dst // SrcLevels - innermost loop containing Src but not Dst // ... - loops containing Dst but not Src // MaxLevels - innermost loops containing Dst but not Src // Consider the follow code fragment: // for (a = ...) { // for (b = ...) { // for (c = ...) { // for (d = ...) { // A[] = ...; // } // } // for (e = ...) { // for (f = ...) { // for (g = ...) { // ... = A[]; // } // } // } // } // } // If we're looking at the possibility of a dependence between the store // to A (the Src) and the load from A (the Dst), we'll note that they // have 2 loops in common, so CommonLevels will equal 2 and the direction // vector for Result will have 2 entries. SrcLevels = 4 and MaxLevels = 7. // A map from loop names to loop numbers would look like // a - 1 // b - 2 = CommonLevels // c - 3 // d - 4 = SrcLevels // e - 5 // f - 6 // g - 7 = MaxLevels void DependenceAnalysis::establishNestingLevels(const Instruction *Src, const Instruction *Dst) { const BasicBlock *SrcBlock = Src->getParent(); const BasicBlock *DstBlock = Dst->getParent(); unsigned SrcLevel = LI->getLoopDepth(SrcBlock); unsigned DstLevel = LI->getLoopDepth(DstBlock); const Loop *SrcLoop = LI->getLoopFor(SrcBlock); const Loop *DstLoop = LI->getLoopFor(DstBlock); SrcLevels = SrcLevel; MaxLevels = SrcLevel + DstLevel; while (SrcLevel > DstLevel) { SrcLoop = SrcLoop->getParentLoop(); SrcLevel--; } while (DstLevel > SrcLevel) { DstLoop = DstLoop->getParentLoop(); DstLevel--; } while (SrcLoop != DstLoop) { SrcLoop = SrcLoop->getParentLoop(); DstLoop = DstLoop->getParentLoop(); SrcLevel--; } CommonLevels = SrcLevel; MaxLevels -= CommonLevels; } // Given one of the loops containing the source, return // its level index in our numbering scheme. unsigned DependenceAnalysis::mapSrcLoop(const Loop *SrcLoop) const { return SrcLoop->getLoopDepth(); } // Given one of the loops containing the destination, // return its level index in our numbering scheme. unsigned DependenceAnalysis::mapDstLoop(const Loop *DstLoop) const { unsigned D = DstLoop->getLoopDepth(); if (D > CommonLevels) return D - CommonLevels + SrcLevels; else return D; } // Returns true if Expression is loop invariant in LoopNest. bool DependenceAnalysis::isLoopInvariant(const SCEV *Expression, const Loop *LoopNest) const { if (!LoopNest) return true; return SE->isLoopInvariant(Expression, LoopNest) && isLoopInvariant(Expression, LoopNest->getParentLoop()); } // Finds the set of loops from the LoopNest that // have a level <= CommonLevels and are referred to by the SCEV Expression. void DependenceAnalysis::collectCommonLoops(const SCEV *Expression, const Loop *LoopNest, SmallBitVector &Loops) const { while (LoopNest) { unsigned Level = LoopNest->getLoopDepth(); if (Level <= CommonLevels && !SE->isLoopInvariant(Expression, LoopNest)) Loops.set(Level); LoopNest = LoopNest->getParentLoop(); } } void DependenceAnalysis::unifySubscriptType(ArrayRef<Subscript *> Pairs) { unsigned widestWidthSeen = 0; Type *widestType; // Go through each pair and find the widest bit to which we need // to extend all of them. 
for (unsigned i = 0; i < Pairs.size(); i++) { const SCEV *Src = Pairs[i]->Src; const SCEV *Dst = Pairs[i]->Dst; IntegerType *SrcTy = dyn_cast<IntegerType>(Src->getType()); IntegerType *DstTy = dyn_cast<IntegerType>(Dst->getType()); if (SrcTy == nullptr || DstTy == nullptr) { assert(SrcTy == DstTy && "This function only unify integer types and " "expect Src and Dst share the same type " "otherwise."); continue; } if (SrcTy->getBitWidth() > widestWidthSeen) { widestWidthSeen = SrcTy->getBitWidth(); widestType = SrcTy; } if (DstTy->getBitWidth() > widestWidthSeen) { widestWidthSeen = DstTy->getBitWidth(); widestType = DstTy; } } assert(widestWidthSeen > 0); // Now extend each pair to the widest seen. for (unsigned i = 0; i < Pairs.size(); i++) { const SCEV *Src = Pairs[i]->Src; const SCEV *Dst = Pairs[i]->Dst; IntegerType *SrcTy = dyn_cast<IntegerType>(Src->getType()); IntegerType *DstTy = dyn_cast<IntegerType>(Dst->getType()); if (SrcTy == nullptr || DstTy == nullptr) { assert(SrcTy == DstTy && "This function only unify integer types and " "expect Src and Dst share the same type " "otherwise."); continue; } if (SrcTy->getBitWidth() < widestWidthSeen) // Sign-extend Src to widestType Pairs[i]->Src = SE->getSignExtendExpr(Src, widestType); if (DstTy->getBitWidth() < widestWidthSeen) { // Sign-extend Dst to widestType Pairs[i]->Dst = SE->getSignExtendExpr(Dst, widestType); } } } // removeMatchingExtensions - Examines a subscript pair. // If the source and destination are identically sign (or zero) // extended, it strips off the extension in an effect to simplify // the actual analysis. void DependenceAnalysis::removeMatchingExtensions(Subscript *Pair) { const SCEV *Src = Pair->Src; const SCEV *Dst = Pair->Dst; if ((isa<SCEVZeroExtendExpr>(Src) && isa<SCEVZeroExtendExpr>(Dst)) || (isa<SCEVSignExtendExpr>(Src) && isa<SCEVSignExtendExpr>(Dst))) { const SCEVCastExpr *SrcCast = cast<SCEVCastExpr>(Src); const SCEVCastExpr *DstCast = cast<SCEVCastExpr>(Dst); const SCEV *SrcCastOp = SrcCast->getOperand(); const SCEV *DstCastOp = DstCast->getOperand(); if (SrcCastOp->getType() == DstCastOp->getType()) { Pair->Src = SrcCastOp; Pair->Dst = DstCastOp; } } } // Examine the scev and return true iff it's linear. // Collect any loops mentioned in the set of "Loops". bool DependenceAnalysis::checkSrcSubscript(const SCEV *Src, const Loop *LoopNest, SmallBitVector &Loops) { const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Src); if (!AddRec) return isLoopInvariant(Src, LoopNest); const SCEV *Start = AddRec->getStart(); const SCEV *Step = AddRec->getStepRecurrence(*SE); const SCEV *UB = SE->getBackedgeTakenCount(AddRec->getLoop()); if (!isa<SCEVCouldNotCompute>(UB)) { if (SE->getTypeSizeInBits(Start->getType()) < SE->getTypeSizeInBits(UB->getType())) { if (!AddRec->getNoWrapFlags()) return false; } } if (!isLoopInvariant(Step, LoopNest)) return false; Loops.set(mapSrcLoop(AddRec->getLoop())); return checkSrcSubscript(Start, LoopNest, Loops); } // Examine the scev and return true iff it's linear. // Collect any loops mentioned in the set of "Loops". 
bool DependenceAnalysis::checkDstSubscript(const SCEV *Dst, const Loop *LoopNest, SmallBitVector &Loops) { const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Dst); if (!AddRec) return isLoopInvariant(Dst, LoopNest); const SCEV *Start = AddRec->getStart(); const SCEV *Step = AddRec->getStepRecurrence(*SE); const SCEV *UB = SE->getBackedgeTakenCount(AddRec->getLoop()); if (!isa<SCEVCouldNotCompute>(UB)) { if (SE->getTypeSizeInBits(Start->getType()) < SE->getTypeSizeInBits(UB->getType())) { if (!AddRec->getNoWrapFlags()) return false; } } if (!isLoopInvariant(Step, LoopNest)) return false; Loops.set(mapDstLoop(AddRec->getLoop())); return checkDstSubscript(Start, LoopNest, Loops); } // Examines the subscript pair (the Src and Dst SCEVs) // and classifies it as either ZIV, SIV, RDIV, MIV, or Nonlinear. // Collects the associated loops in a set. DependenceAnalysis::Subscript::ClassificationKind DependenceAnalysis::classifyPair(const SCEV *Src, const Loop *SrcLoopNest, const SCEV *Dst, const Loop *DstLoopNest, SmallBitVector &Loops) { SmallBitVector SrcLoops(MaxLevels + 1); SmallBitVector DstLoops(MaxLevels + 1); if (!checkSrcSubscript(Src, SrcLoopNest, SrcLoops)) return Subscript::NonLinear; if (!checkDstSubscript(Dst, DstLoopNest, DstLoops)) return Subscript::NonLinear; Loops = SrcLoops; Loops |= DstLoops; unsigned N = Loops.count(); if (N == 0) return Subscript::ZIV; if (N == 1) return Subscript::SIV; if (N == 2 && (SrcLoops.count() == 0 || DstLoops.count() == 0 || (SrcLoops.count() == 1 && DstLoops.count() == 1))) return Subscript::RDIV; return Subscript::MIV; } // A wrapper around SCEV::isKnownPredicate. // Looks for cases where we're interested in comparing for equality. // If both X and Y have been identically sign or zero extended, // it strips off the (confusing) extensions before invoking // SCEV::isKnownPredicate. Perhaps, someday, the ScalarEvolution package // will be similarly updated. // // If SCEV::isKnownPredicate can't prove the predicate, // we try simple subtraction, which seems to help in some cases // involving symbolics. bool DependenceAnalysis::isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *X, const SCEV *Y) const { if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) { if ((isa<SCEVSignExtendExpr>(X) && isa<SCEVSignExtendExpr>(Y)) || (isa<SCEVZeroExtendExpr>(X) && isa<SCEVZeroExtendExpr>(Y))) { const SCEVCastExpr *CX = cast<SCEVCastExpr>(X); const SCEVCastExpr *CY = cast<SCEVCastExpr>(Y); const SCEV *Xop = CX->getOperand(); const SCEV *Yop = CY->getOperand(); if (Xop->getType() == Yop->getType()) { X = Xop; Y = Yop; } } } if (SE->isKnownPredicate(Pred, X, Y)) return true; // If SE->isKnownPredicate can't prove the condition, // we try the brute-force approach of subtracting // and testing the difference. // By testing with SE->isKnownPredicate first, we avoid // the possibility of overflow when the arguments are constants. const SCEV *Delta = SE->getMinusSCEV(X, Y); switch (Pred) { case CmpInst::ICMP_EQ: return Delta->isZero(); case CmpInst::ICMP_NE: return SE->isKnownNonZero(Delta); case CmpInst::ICMP_SGE: return SE->isKnownNonNegative(Delta); case CmpInst::ICMP_SLE: return SE->isKnownNonPositive(Delta); case CmpInst::ICMP_SGT: return SE->isKnownPositive(Delta); case CmpInst::ICMP_SLT: return SE->isKnownNegative(Delta); default: llvm_unreachable("unexpected predicate in isKnownPredicate"); } } // All subscripts are all the same type. // Loop bound may be smaller (e.g., a char). // Should zero extend loop bound, since it's always >= 0. 
// This routine collects upper bound and extends or truncates if needed. // Truncating is safe when subscripts are known not to wrap. Cases without // nowrap flags should have been rejected earlier. // Return null if no bound available. const SCEV *DependenceAnalysis::collectUpperBound(const Loop *L, Type *T) const { if (SE->hasLoopInvariantBackedgeTakenCount(L)) { const SCEV *UB = SE->getBackedgeTakenCount(L); return SE->getTruncateOrZeroExtend(UB, T); } return nullptr; } // Calls collectUpperBound(), then attempts to cast it to SCEVConstant. // If the cast fails, returns NULL. const SCEVConstant *DependenceAnalysis::collectConstantUpperBound(const Loop *L, Type *T ) const { if (const SCEV *UB = collectUpperBound(L, T)) return dyn_cast<SCEVConstant>(UB); return nullptr; } // testZIV - // When we have a pair of subscripts of the form [c1] and [c2], // where c1 and c2 are both loop invariant, we attack it using // the ZIV test. Basically, we test by comparing the two values, // but there are actually three possible results: // 1) the values are equal, so there's a dependence // 2) the values are different, so there's no dependence // 3) the values might be equal, so we have to assume a dependence. // // Return true if dependence disproved. bool DependenceAnalysis::testZIV(const SCEV *Src, const SCEV *Dst, FullDependence &Result) const { DEBUG(dbgs() << " src = " << *Src << "\n"); DEBUG(dbgs() << " dst = " << *Dst << "\n"); ++ZIVapplications; if (isKnownPredicate(CmpInst::ICMP_EQ, Src, Dst)) { DEBUG(dbgs() << " provably dependent\n"); return false; // provably dependent } if (isKnownPredicate(CmpInst::ICMP_NE, Src, Dst)) { DEBUG(dbgs() << " provably independent\n"); ++ZIVindependence; return true; // provably independent } DEBUG(dbgs() << " possibly dependent\n"); Result.Consistent = false; return false; // possibly dependent } // strongSIVtest - // From the paper, Practical Dependence Testing, Section 4.2.1 // // When we have a pair of subscripts of the form [c1 + a*i] and [c2 + a*i], // where i is an induction variable, c1 and c2 are loop invariant, // and a is a constant, we can solve it exactly using the Strong SIV test. // // Can prove independence. Failing that, can compute distance (and direction). // In the presence of symbolic terms, we can sometimes make progress. // // If there's a dependence, // // c1 + a*i = c2 + a*i' // // The dependence distance is // // d = i' - i = (c1 - c2)/a // // A dependence only exists if d is an integer and abs(d) <= U, where U is the // loop's upper bound. If a dependence exists, the dependence direction is // defined as // // { < if d > 0 // direction = { = if d = 0 // { > if d < 0 // // Return true if dependence disproved. 
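// As a hypothetical illustration (not taken from the paper): for the pair
// [2*i + 4] and [2*i], a = 2 and c1 - c2 = 4, so d = 2; if U >= 2 there is a
// dependence with distance 2 and direction <. If c1 - c2 were 5 instead, the
// remainder test (5 mod 2 != 0) proves independence, and if |c1 - c2| exceeded
// |a|*U, the upper-bound check below would prove independence as well.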
bool DependenceAnalysis::strongSIVtest(const SCEV *Coeff, const SCEV *SrcConst, const SCEV *DstConst, const Loop *CurLoop, unsigned Level, FullDependence &Result, Constraint &NewConstraint) const { DEBUG(dbgs() << "\tStrong SIV test\n"); DEBUG(dbgs() << "\t Coeff = " << *Coeff); DEBUG(dbgs() << ", " << *Coeff->getType() << "\n"); DEBUG(dbgs() << "\t SrcConst = " << *SrcConst); DEBUG(dbgs() << ", " << *SrcConst->getType() << "\n"); DEBUG(dbgs() << "\t DstConst = " << *DstConst); DEBUG(dbgs() << ", " << *DstConst->getType() << "\n"); ++StrongSIVapplications; assert(0 < Level && Level <= CommonLevels && "level out of range"); assert(0 < Level && Level <= CommonLevels); // HLSL Change - TVS assert(Result.Levels == CommonLevels); // HLSL Change - TVS Level--; const SCEV *Delta = SE->getMinusSCEV(SrcConst, DstConst); DEBUG(dbgs() << "\t Delta = " << *Delta); DEBUG(dbgs() << ", " << *Delta->getType() << "\n"); // check that |Delta| < iteration count if (const SCEV *UpperBound = collectUpperBound(CurLoop, Delta->getType())) { DEBUG(dbgs() << "\t UpperBound = " << *UpperBound); DEBUG(dbgs() << ", " << *UpperBound->getType() << "\n"); const SCEV *AbsDelta = SE->isKnownNonNegative(Delta) ? Delta : SE->getNegativeSCEV(Delta); const SCEV *AbsCoeff = SE->isKnownNonNegative(Coeff) ? Coeff : SE->getNegativeSCEV(Coeff); const SCEV *Product = SE->getMulExpr(UpperBound, AbsCoeff); if (isKnownPredicate(CmpInst::ICMP_SGT, AbsDelta, Product)) { // Distance greater than trip count - no dependence ++StrongSIVindependence; ++StrongSIVsuccesses; return true; } } // Can we compute distance? if (isa<SCEVConstant>(Delta) && isa<SCEVConstant>(Coeff)) { APInt ConstDelta = cast<SCEVConstant>(Delta)->getValue()->getValue(); APInt ConstCoeff = cast<SCEVConstant>(Coeff)->getValue()->getValue(); APInt Distance = ConstDelta; // these need to be initialized APInt Remainder = ConstDelta; APInt::sdivrem(ConstDelta, ConstCoeff, Distance, Remainder); DEBUG(dbgs() << "\t Distance = " << Distance << "\n"); DEBUG(dbgs() << "\t Remainder = " << Remainder << "\n"); // Make sure Coeff divides Delta exactly if (Remainder != 0) { // Coeff doesn't divide Distance, no dependence ++StrongSIVindependence; ++StrongSIVsuccesses; return true; } Result.DV[Level].Distance = SE->getConstant(Distance); NewConstraint.setDistance(SE->getConstant(Distance), CurLoop); if (Distance.sgt(0)) Result.DV[Level].Direction &= Dependence::DVEntry::LT; else if (Distance.slt(0)) Result.DV[Level].Direction &= Dependence::DVEntry::GT; else Result.DV[Level].Direction &= Dependence::DVEntry::EQ; ++StrongSIVsuccesses; } else if (Delta->isZero()) { // since 0/X == 0 Result.DV[Level].Distance = Delta; NewConstraint.setDistance(Delta, CurLoop); Result.DV[Level].Direction &= Dependence::DVEntry::EQ; ++StrongSIVsuccesses; } else { if (Coeff->isOne()) { DEBUG(dbgs() << "\t Distance = " << *Delta << "\n"); Result.DV[Level].Distance = Delta; // since X/1 == X NewConstraint.setDistance(Delta, CurLoop); } else { Result.Consistent = false; NewConstraint.setLine(Coeff, SE->getNegativeSCEV(Coeff), SE->getNegativeSCEV(Delta), CurLoop); } // maybe we can get a useful direction bool DeltaMaybeZero = !SE->isKnownNonZero(Delta); bool DeltaMaybePositive = !SE->isKnownNonPositive(Delta); bool DeltaMaybeNegative = !SE->isKnownNonNegative(Delta); bool CoeffMaybePositive = !SE->isKnownNonPositive(Coeff); bool CoeffMaybeNegative = !SE->isKnownNonNegative(Coeff); // The double negatives above are confusing. 
// It helps to read !SE->isKnownNonZero(Delta) // as "Delta might be Zero" unsigned NewDirection = Dependence::DVEntry::NONE; if ((DeltaMaybePositive && CoeffMaybePositive) || (DeltaMaybeNegative && CoeffMaybeNegative)) NewDirection = Dependence::DVEntry::LT; if (DeltaMaybeZero) NewDirection |= Dependence::DVEntry::EQ; if ((DeltaMaybeNegative && CoeffMaybePositive) || (DeltaMaybePositive && CoeffMaybeNegative)) NewDirection |= Dependence::DVEntry::GT; if (NewDirection < Result.DV[Level].Direction) ++StrongSIVsuccesses; Result.DV[Level].Direction &= NewDirection; } return false; } // weakCrossingSIVtest - // From the paper, Practical Dependence Testing, Section 4.2.2 // // When we have a pair of subscripts of the form [c1 + a*i] and [c2 - a*i], // where i is an induction variable, c1 and c2 are loop invariant, // and a is a constant, we can solve it exactly using the // Weak-Crossing SIV test. // // Given c1 + a*i = c2 - a*i', we can look for the intersection of // the two lines, where i = i', yielding // // c1 + a*i = c2 - a*i // 2a*i = c2 - c1 // i = (c2 - c1)/2a // // If i < 0, there is no dependence. // If i > upperbound, there is no dependence. // If i = 0 (i.e., if c1 = c2), there's a dependence with distance = 0. // If i = upperbound, there's a dependence with distance = 0. // If i is integral, there's a dependence (all directions). // If the non-integer part = 1/2, there's a dependence (<> directions). // Otherwise, there's no dependence. // // Can prove independence. Failing that, // can sometimes refine the directions. // Can determine iteration for splitting. // // Return true if dependence disproved. bool DependenceAnalysis::weakCrossingSIVtest(const SCEV *Coeff, const SCEV *SrcConst, const SCEV *DstConst, const Loop *CurLoop, unsigned Level, FullDependence &Result, Constraint &NewConstraint, const SCEV *&SplitIter) const { DEBUG(dbgs() << "\tWeak-Crossing SIV test\n"); DEBUG(dbgs() << "\t Coeff = " << *Coeff << "\n"); DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n"); DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n"); ++WeakCrossingSIVapplications; assert(0 < Level && Level <= CommonLevels && "Level out of range"); assert(0 < Level && Level <= CommonLevels); // HLSL Change - TVS assert(Result.Levels == CommonLevels); // HLSL Change - TVS Level--; Result.Consistent = false; const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst); DEBUG(dbgs() << "\t Delta = " << *Delta << "\n"); NewConstraint.setLine(Coeff, Coeff, Delta, CurLoop); if (Delta->isZero()) { Result.DV[Level].Direction &= unsigned(~Dependence::DVEntry::LT); Result.DV[Level].Direction &= unsigned(~Dependence::DVEntry::GT); ++WeakCrossingSIVsuccesses; if (!Result.DV[Level].Direction) { ++WeakCrossingSIVindependence; return true; } Result.DV[Level].Distance = Delta; // = 0 return false; } const SCEVConstant *ConstCoeff = dyn_cast<SCEVConstant>(Coeff); if (!ConstCoeff) return false; Result.DV[Level].Splitable = true; if (SE->isKnownNegative(ConstCoeff)) { ConstCoeff = dyn_cast<SCEVConstant>(SE->getNegativeSCEV(ConstCoeff)); assert(ConstCoeff && "dynamic cast of negative of ConstCoeff should yield constant"); Delta = SE->getNegativeSCEV(Delta); } assert(SE->isKnownPositive(ConstCoeff) && "ConstCoeff should be positive"); // compute SplitIter for use by DependenceAnalysis::getSplitIteration() SplitIter = SE->getUDivExpr(SE->getSMaxExpr(SE->getConstant(Delta->getType(), 0), Delta), SE->getMulExpr(SE->getConstant(Delta->getType(), 2), ConstCoeff)); DEBUG(dbgs() << "\t Split iter = " << *SplitIter << "\n"); const 
SCEVConstant *ConstDelta = dyn_cast<SCEVConstant>(Delta); if (!ConstDelta) return false; // We're certain that ConstCoeff > 0; therefore, // if Delta < 0, then no dependence. DEBUG(dbgs() << "\t Delta = " << *Delta << "\n"); DEBUG(dbgs() << "\t ConstCoeff = " << *ConstCoeff << "\n"); if (SE->isKnownNegative(Delta)) { // No dependence, Delta < 0 ++WeakCrossingSIVindependence; ++WeakCrossingSIVsuccesses; return true; } // We're certain that Delta > 0 and ConstCoeff > 0. // Check Delta/(2*ConstCoeff) against upper loop bound if (const SCEV *UpperBound = collectUpperBound(CurLoop, Delta->getType())) { DEBUG(dbgs() << "\t UpperBound = " << *UpperBound << "\n"); const SCEV *ConstantTwo = SE->getConstant(UpperBound->getType(), 2); const SCEV *ML = SE->getMulExpr(SE->getMulExpr(ConstCoeff, UpperBound), ConstantTwo); DEBUG(dbgs() << "\t ML = " << *ML << "\n"); if (isKnownPredicate(CmpInst::ICMP_SGT, Delta, ML)) { // Delta too big, no dependence ++WeakCrossingSIVindependence; ++WeakCrossingSIVsuccesses; return true; } if (isKnownPredicate(CmpInst::ICMP_EQ, Delta, ML)) { // i = i' = UB Result.DV[Level].Direction &= unsigned(~Dependence::DVEntry::LT); Result.DV[Level].Direction &= unsigned(~Dependence::DVEntry::GT); ++WeakCrossingSIVsuccesses; if (!Result.DV[Level].Direction) { ++WeakCrossingSIVindependence; return true; } Result.DV[Level].Splitable = false; Result.DV[Level].Distance = SE->getConstant(Delta->getType(), 0); return false; } } // check that Coeff divides Delta APInt APDelta = ConstDelta->getValue()->getValue(); APInt APCoeff = ConstCoeff->getValue()->getValue(); APInt Distance = APDelta; // these need to be initialzed APInt Remainder = APDelta; APInt::sdivrem(APDelta, APCoeff, Distance, Remainder); DEBUG(dbgs() << "\t Remainder = " << Remainder << "\n"); if (Remainder != 0) { // Coeff doesn't divide Delta, no dependence ++WeakCrossingSIVindependence; ++WeakCrossingSIVsuccesses; return true; } DEBUG(dbgs() << "\t Distance = " << Distance << "\n"); // if 2*Coeff doesn't divide Delta, then the equal direction isn't possible APInt Two = APInt(Distance.getBitWidth(), 2, true); Remainder = Distance.srem(Two); DEBUG(dbgs() << "\t Remainder = " << Remainder << "\n"); if (Remainder != 0) { // Equal direction isn't possible Result.DV[Level].Direction &= unsigned(~Dependence::DVEntry::EQ); ++WeakCrossingSIVsuccesses; } return false; } // Kirch's algorithm, from // // Optimizing Supercompilers for Supercomputers // Michael Wolfe // MIT Press, 1989 // // Program 2.1, page 29. // Computes the GCD of AM and BM. // Also finds a solution to the equation ax - by = gcd(a, b). // Returns true if dependence disproved; i.e., gcd does not divide Delta. static bool findGCD(unsigned Bits, APInt AM, APInt BM, APInt Delta, APInt &G, APInt &X, APInt &Y) { APInt A0(Bits, 1, true), A1(Bits, 0, true); APInt B0(Bits, 0, true), B1(Bits, 1, true); APInt G0 = AM.abs(); APInt G1 = BM.abs(); APInt Q = G0; // these need to be initialized APInt R = G0; APInt::sdivrem(G0, G1, Q, R); while (R != 0) { APInt A2 = A0 - Q*A1; A0 = A1; A1 = A2; APInt B2 = B0 - Q*B1; B0 = B1; B1 = B2; G0 = G1; G1 = R; APInt::sdivrem(G0, G1, Q, R); } G = G1; DEBUG(dbgs() << "\t GCD = " << G << "\n"); X = AM.slt(0) ? -A1 : A1; Y = BM.slt(0) ? 
B1 : -B1; // make sure gcd divides Delta R = Delta.srem(G); if (R != 0) return true; // gcd doesn't divide Delta, no dependence Q = Delta.sdiv(G); X *= Q; Y *= Q; return false; } static APInt floorOfQuotient(APInt A, APInt B) { APInt Q = A; // these need to be initialized APInt R = A; APInt::sdivrem(A, B, Q, R); if (R == 0) return Q; if ((A.sgt(0) && B.sgt(0)) || (A.slt(0) && B.slt(0))) return Q; else return Q - 1; } static APInt ceilingOfQuotient(APInt A, APInt B) { APInt Q = A; // these need to be initialized APInt R = A; APInt::sdivrem(A, B, Q, R); if (R == 0) return Q; if ((A.sgt(0) && B.sgt(0)) || (A.slt(0) && B.slt(0))) return Q + 1; else return Q; } static APInt maxAPInt(APInt A, APInt B) { return A.sgt(B) ? A : B; } static APInt minAPInt(APInt A, APInt B) { return A.slt(B) ? A : B; } // exactSIVtest - // When we have a pair of subscripts of the form [c1 + a1*i] and [c2 + a2*i], // where i is an induction variable, c1 and c2 are loop invariant, and a1 // and a2 are constant, we can solve it exactly using an algorithm developed // by Banerjee and Wolfe. See Section 2.5.3 in // // Optimizing Supercompilers for Supercomputers // Michael Wolfe // MIT Press, 1989 // // It's slower than the specialized tests (strong SIV, weak-zero SIV, etc), // so use them if possible. They're also a bit better with symbolics and, // in the case of the strong SIV test, can compute Distances. // // Return true if dependence disproved. bool DependenceAnalysis::exactSIVtest(const SCEV *SrcCoeff, const SCEV *DstCoeff, const SCEV *SrcConst, const SCEV *DstConst, const Loop *CurLoop, unsigned Level, FullDependence &Result, Constraint &NewConstraint) const { DEBUG(dbgs() << "\tExact SIV test\n"); DEBUG(dbgs() << "\t SrcCoeff = " << *SrcCoeff << " = AM\n"); DEBUG(dbgs() << "\t DstCoeff = " << *DstCoeff << " = BM\n"); DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n"); DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n"); ++ExactSIVapplications; assert(0 < Level && Level <= CommonLevels && "Level out of range"); assert(0 < Level && Level <= CommonLevels); // HLSL Change - TVS assert(Result.Levels == CommonLevels); // HLSL Change - TVS Level--; Result.Consistent = false; const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst); DEBUG(dbgs() << "\t Delta = " << *Delta << "\n"); NewConstraint.setLine(SrcCoeff, SE->getNegativeSCEV(DstCoeff), Delta, CurLoop); const SCEVConstant *ConstDelta = dyn_cast<SCEVConstant>(Delta); const SCEVConstant *ConstSrcCoeff = dyn_cast<SCEVConstant>(SrcCoeff); const SCEVConstant *ConstDstCoeff = dyn_cast<SCEVConstant>(DstCoeff); if (!ConstDelta || !ConstSrcCoeff || !ConstDstCoeff) return false; // find gcd APInt G, X, Y; APInt AM = ConstSrcCoeff->getValue()->getValue(); APInt BM = ConstDstCoeff->getValue()->getValue(); unsigned Bits = AM.getBitWidth(); if (findGCD(Bits, AM, BM, ConstDelta->getValue()->getValue(), G, X, Y)) { // gcd doesn't divide Delta, no dependence ++ExactSIVindependence; ++ExactSIVsuccesses; return true; } DEBUG(dbgs() << "\t X = " << X << ", Y = " << Y << "\n"); // since SCEV construction normalizes, LM = 0 APInt UM(Bits, 1, true); bool UMvalid = false; // UM is perhaps unavailable, let's check if (const SCEVConstant *CUB = collectConstantUpperBound(CurLoop, Delta->getType())) { UM = CUB->getValue()->getValue(); DEBUG(dbgs() << "\t UM = " << UM << "\n"); UMvalid = true; } APInt TU(APInt::getSignedMaxValue(Bits)); APInt TL(APInt::getSignedMinValue(Bits)); // test(BM/G, LM-X) and test(-BM/G, X-UM) APInt TMUL = BM.sdiv(G); if (TMUL.sgt(0)) { TL = maxAPInt(TL, 
ceilingOfQuotient(-X, TMUL)); DEBUG(dbgs() << "\t TL = " << TL << "\n"); if (UMvalid) { TU = minAPInt(TU, floorOfQuotient(UM - X, TMUL)); DEBUG(dbgs() << "\t TU = " << TU << "\n"); } } else { TU = minAPInt(TU, floorOfQuotient(-X, TMUL)); DEBUG(dbgs() << "\t TU = " << TU << "\n"); if (UMvalid) { TL = maxAPInt(TL, ceilingOfQuotient(UM - X, TMUL)); DEBUG(dbgs() << "\t TL = " << TL << "\n"); } } // test(AM/G, LM-Y) and test(-AM/G, Y-UM) TMUL = AM.sdiv(G); if (TMUL.sgt(0)) { TL = maxAPInt(TL, ceilingOfQuotient(-Y, TMUL)); DEBUG(dbgs() << "\t TL = " << TL << "\n"); if (UMvalid) { TU = minAPInt(TU, floorOfQuotient(UM - Y, TMUL)); DEBUG(dbgs() << "\t TU = " << TU << "\n"); } } else { TU = minAPInt(TU, floorOfQuotient(-Y, TMUL)); DEBUG(dbgs() << "\t TU = " << TU << "\n"); if (UMvalid) { TL = maxAPInt(TL, ceilingOfQuotient(UM - Y, TMUL)); DEBUG(dbgs() << "\t TL = " << TL << "\n"); } } if (TL.sgt(TU)) { ++ExactSIVindependence; ++ExactSIVsuccesses; return true; } // explore directions unsigned NewDirection = Dependence::DVEntry::NONE; // less than APInt SaveTU(TU); // save these APInt SaveTL(TL); DEBUG(dbgs() << "\t exploring LT direction\n"); TMUL = AM - BM; if (TMUL.sgt(0)) { TL = maxAPInt(TL, ceilingOfQuotient(X - Y + 1, TMUL)); DEBUG(dbgs() << "\t\t TL = " << TL << "\n"); } else { TU = minAPInt(TU, floorOfQuotient(X - Y + 1, TMUL)); DEBUG(dbgs() << "\t\t TU = " << TU << "\n"); } if (TL.sle(TU)) { NewDirection |= Dependence::DVEntry::LT; ++ExactSIVsuccesses; } // equal TU = SaveTU; // restore TL = SaveTL; DEBUG(dbgs() << "\t exploring EQ direction\n"); if (TMUL.sgt(0)) { TL = maxAPInt(TL, ceilingOfQuotient(X - Y, TMUL)); DEBUG(dbgs() << "\t\t TL = " << TL << "\n"); } else { TU = minAPInt(TU, floorOfQuotient(X - Y, TMUL)); DEBUG(dbgs() << "\t\t TU = " << TU << "\n"); } TMUL = BM - AM; if (TMUL.sgt(0)) { TL = maxAPInt(TL, ceilingOfQuotient(Y - X, TMUL)); DEBUG(dbgs() << "\t\t TL = " << TL << "\n"); } else { TU = minAPInt(TU, floorOfQuotient(Y - X, TMUL)); DEBUG(dbgs() << "\t\t TU = " << TU << "\n"); } if (TL.sle(TU)) { NewDirection |= Dependence::DVEntry::EQ; ++ExactSIVsuccesses; } // greater than TU = SaveTU; // restore TL = SaveTL; DEBUG(dbgs() << "\t exploring GT direction\n"); if (TMUL.sgt(0)) { TL = maxAPInt(TL, ceilingOfQuotient(Y - X + 1, TMUL)); DEBUG(dbgs() << "\t\t TL = " << TL << "\n"); } else { TU = minAPInt(TU, floorOfQuotient(Y - X + 1, TMUL)); DEBUG(dbgs() << "\t\t TU = " << TU << "\n"); } if (TL.sle(TU)) { NewDirection |= Dependence::DVEntry::GT; ++ExactSIVsuccesses; } // finished Result.DV[Level].Direction &= NewDirection; if (Result.DV[Level].Direction == Dependence::DVEntry::NONE) ++ExactSIVindependence; return Result.DV[Level].Direction == Dependence::DVEntry::NONE; } // Return true if the divisor evenly divides the dividend. static bool isRemainderZero(const SCEVConstant *Dividend, const SCEVConstant *Divisor) { APInt ConstDividend = Dividend->getValue()->getValue(); APInt ConstDivisor = Divisor->getValue()->getValue(); return ConstDividend.srem(ConstDivisor) == 0; } // weakZeroSrcSIVtest - // From the paper, Practical Dependence Testing, Section 4.2.2 // // When we have a pair of subscripts of the form [c1] and [c2 + a*i], // where i is an induction variable, c1 and c2 are loop invariant, // and a is a constant, we can solve it exactly using the // Weak-Zero SIV test. // // Given // // c1 = c2 + a*i // // we get // // (c1 - c2)/a = i // // If i is not an integer, there's no dependence. // If i < 0 or > UB, there's no dependence. 
// If i = 0, the direction is <= and peeling the // 1st iteration will break the dependence. // If i = UB, the direction is >= and peeling the // last iteration will break the dependence. // Otherwise, the direction is *. // // Can prove independence. Failing that, we can sometimes refine // the directions. Can sometimes show that first or last // iteration carries all the dependences (so worth peeling). // // (see also weakZeroDstSIVtest) // // Return true if dependence disproved. bool DependenceAnalysis::weakZeroSrcSIVtest(const SCEV *DstCoeff, const SCEV *SrcConst, const SCEV *DstConst, const Loop *CurLoop, unsigned Level, FullDependence &Result, Constraint &NewConstraint) const { // For the WeakSIV test, it's possible the loop isn't common to // the Src and Dst loops. If it isn't, then there's no need to // record a direction. DEBUG(dbgs() << "\tWeak-Zero (src) SIV test\n"); DEBUG(dbgs() << "\t DstCoeff = " << *DstCoeff << "\n"); DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n"); DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n"); ++WeakZeroSIVapplications; assert(0 < Level && Level <= MaxLevels && "Level out of range"); assert(0 < Level && Level <= MaxLevels); // HLSL Change - TVS assert(Result.Levels == CommonLevels); // HLSL Change - TVS Level--; Result.Consistent = false; const SCEV *Delta = SE->getMinusSCEV(SrcConst, DstConst); NewConstraint.setLine(SE->getConstant(Delta->getType(), 0), DstCoeff, Delta, CurLoop); DEBUG(dbgs() << "\t Delta = " << *Delta << "\n"); if (isKnownPredicate(CmpInst::ICMP_EQ, SrcConst, DstConst)) { if (Level < CommonLevels) { Result.DV[Level].Direction &= Dependence::DVEntry::LE; Result.DV[Level].PeelFirst = true; ++WeakZeroSIVsuccesses; } return false; // dependences caused by first iteration } const SCEVConstant *ConstCoeff = dyn_cast<SCEVConstant>(DstCoeff); if (!ConstCoeff) return false; const SCEV *AbsCoeff = SE->isKnownNegative(ConstCoeff) ? SE->getNegativeSCEV(ConstCoeff) : ConstCoeff; const SCEV *NewDelta = SE->isKnownNegative(ConstCoeff) ? SE->getNegativeSCEV(Delta) : Delta; // check that Delta/SrcCoeff < iteration count // really check NewDelta < count*AbsCoeff if (const SCEV *UpperBound = collectUpperBound(CurLoop, Delta->getType())) { DEBUG(dbgs() << "\t UpperBound = " << *UpperBound << "\n"); const SCEV *Product = SE->getMulExpr(AbsCoeff, UpperBound); if (isKnownPredicate(CmpInst::ICMP_SGT, NewDelta, Product)) { ++WeakZeroSIVindependence; ++WeakZeroSIVsuccesses; return true; } if (isKnownPredicate(CmpInst::ICMP_EQ, NewDelta, Product)) { // dependences caused by last iteration if (Level < CommonLevels) { Result.DV[Level].Direction &= Dependence::DVEntry::GE; Result.DV[Level].PeelLast = true; ++WeakZeroSIVsuccesses; } return false; } } // check that Delta/SrcCoeff >= 0 // really check that NewDelta >= 0 if (SE->isKnownNegative(NewDelta)) { // No dependence, newDelta < 0 ++WeakZeroSIVindependence; ++WeakZeroSIVsuccesses; return true; } // if SrcCoeff doesn't divide Delta, then no dependence if (isa<SCEVConstant>(Delta) && !isRemainderZero(cast<SCEVConstant>(Delta), ConstCoeff)) { ++WeakZeroSIVindependence; ++WeakZeroSIVsuccesses; return true; } return false; } // weakZeroDstSIVtest - // From the paper, Practical Dependence Testing, Section 4.2.2 // // When we have a pair of subscripts of the form [c1 + a*i] and [c2], // where i is an induction variable, c1 and c2 are loop invariant, // and a is a constant, we can solve it exactly using the // Weak-Zero SIV test. 
// // Given // // c1 + a*i = c2 // // we get // // i = (c2 - c1)/a // // If i is not an integer, there's no dependence. // If i < 0 or > UB, there's no dependence. // If i = 0, the direction is <= and peeling the // 1st iteration will break the dependence. // If i = UB, the direction is >= and peeling the // last iteration will break the dependence. // Otherwise, the direction is *. // // Can prove independence. Failing that, we can sometimes refine // the directions. Can sometimes show that first or last // iteration carries all the dependences (so worth peeling). // // (see also weakZeroSrcSIVtest) // // Return true if dependence disproved. bool DependenceAnalysis::weakZeroDstSIVtest(const SCEV *SrcCoeff, const SCEV *SrcConst, const SCEV *DstConst, const Loop *CurLoop, unsigned Level, FullDependence &Result, Constraint &NewConstraint) const { // For the WeakSIV test, it's possible the loop isn't common to the // Src and Dst loops. If it isn't, then there's no need to record a direction. DEBUG(dbgs() << "\tWeak-Zero (dst) SIV test\n"); DEBUG(dbgs() << "\t SrcCoeff = " << *SrcCoeff << "\n"); DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n"); DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n"); ++WeakZeroSIVapplications; assert(0 < Level && Level <= SrcLevels && "Level out of range"); assert(0 < Level && Level <= SrcLevels); // HLSL Change - TVS assert(Result.Levels == CommonLevels); // HLSL Change - TVS Level--; Result.Consistent = false; const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst); NewConstraint.setLine(SrcCoeff, SE->getConstant(Delta->getType(), 0), Delta, CurLoop); DEBUG(dbgs() << "\t Delta = " << *Delta << "\n"); if (isKnownPredicate(CmpInst::ICMP_EQ, DstConst, SrcConst)) { if (Level < CommonLevels) { Result.DV[Level].Direction &= Dependence::DVEntry::LE; Result.DV[Level].PeelFirst = true; ++WeakZeroSIVsuccesses; } return false; // dependences caused by first iteration } const SCEVConstant *ConstCoeff = dyn_cast<SCEVConstant>(SrcCoeff); if (!ConstCoeff) return false; const SCEV *AbsCoeff = SE->isKnownNegative(ConstCoeff) ? SE->getNegativeSCEV(ConstCoeff) : ConstCoeff; const SCEV *NewDelta = SE->isKnownNegative(ConstCoeff) ? SE->getNegativeSCEV(Delta) : Delta; // check that Delta/SrcCoeff < iteration count // really check NewDelta < count*AbsCoeff if (const SCEV *UpperBound = collectUpperBound(CurLoop, Delta->getType())) { DEBUG(dbgs() << "\t UpperBound = " << *UpperBound << "\n"); const SCEV *Product = SE->getMulExpr(AbsCoeff, UpperBound); if (isKnownPredicate(CmpInst::ICMP_SGT, NewDelta, Product)) { ++WeakZeroSIVindependence; ++WeakZeroSIVsuccesses; return true; } if (isKnownPredicate(CmpInst::ICMP_EQ, NewDelta, Product)) { // dependences caused by last iteration if (Level < CommonLevels) { Result.DV[Level].Direction &= Dependence::DVEntry::GE; Result.DV[Level].PeelLast = true; ++WeakZeroSIVsuccesses; } return false; } } // check that Delta/SrcCoeff >= 0 // really check that NewDelta >= 0 if (SE->isKnownNegative(NewDelta)) { // No dependence, newDelta < 0 ++WeakZeroSIVindependence; ++WeakZeroSIVsuccesses; return true; } // if SrcCoeff doesn't divide Delta, then no dependence if (isa<SCEVConstant>(Delta) && !isRemainderZero(cast<SCEVConstant>(Delta), ConstCoeff)) { ++WeakZeroSIVindependence; ++WeakZeroSIVsuccesses; return true; } return false; } // exactRDIVtest - Tests the RDIV subscript pair for dependence. // Things of the form [c1 + a*i] and [c2 + b*j], // where i and j are induction variable, c1 and c2 are loop invariant, // and a and b are constants. 
// Returns true if any possible dependence is disproved. // Marks the result as inconsistent. // Works in some cases that symbolicRDIVtest doesn't, and vice versa. bool DependenceAnalysis::exactRDIVtest(const SCEV *SrcCoeff, const SCEV *DstCoeff, const SCEV *SrcConst, const SCEV *DstConst, const Loop *SrcLoop, const Loop *DstLoop, FullDependence &Result) const { DEBUG(dbgs() << "\tExact RDIV test\n"); DEBUG(dbgs() << "\t SrcCoeff = " << *SrcCoeff << " = AM\n"); DEBUG(dbgs() << "\t DstCoeff = " << *DstCoeff << " = BM\n"); DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n"); DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n"); ++ExactRDIVapplications; Result.Consistent = false; const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst); DEBUG(dbgs() << "\t Delta = " << *Delta << "\n"); const SCEVConstant *ConstDelta = dyn_cast<SCEVConstant>(Delta); const SCEVConstant *ConstSrcCoeff = dyn_cast<SCEVConstant>(SrcCoeff); const SCEVConstant *ConstDstCoeff = dyn_cast<SCEVConstant>(DstCoeff); if (!ConstDelta || !ConstSrcCoeff || !ConstDstCoeff) return false; // find gcd APInt G, X, Y; APInt AM = ConstSrcCoeff->getValue()->getValue(); APInt BM = ConstDstCoeff->getValue()->getValue(); unsigned Bits = AM.getBitWidth(); if (findGCD(Bits, AM, BM, ConstDelta->getValue()->getValue(), G, X, Y)) { // gcd doesn't divide Delta, no dependence ++ExactRDIVindependence; return true; } DEBUG(dbgs() << "\t X = " << X << ", Y = " << Y << "\n"); // since SCEV construction seems to normalize, LM = 0 APInt SrcUM(Bits, 1, true); bool SrcUMvalid = false; // SrcUM is perhaps unavailable, let's check if (const SCEVConstant *UpperBound = collectConstantUpperBound(SrcLoop, Delta->getType())) { SrcUM = UpperBound->getValue()->getValue(); DEBUG(dbgs() << "\t SrcUM = " << SrcUM << "\n"); SrcUMvalid = true; } APInt DstUM(Bits, 1, true); bool DstUMvalid = false; // UM is perhaps unavailable, let's check if (const SCEVConstant *UpperBound = collectConstantUpperBound(DstLoop, Delta->getType())) { DstUM = UpperBound->getValue()->getValue(); DEBUG(dbgs() << "\t DstUM = " << DstUM << "\n"); DstUMvalid = true; } APInt TU(APInt::getSignedMaxValue(Bits)); APInt TL(APInt::getSignedMinValue(Bits)); // test(BM/G, LM-X) and test(-BM/G, X-UM) APInt TMUL = BM.sdiv(G); if (TMUL.sgt(0)) { TL = maxAPInt(TL, ceilingOfQuotient(-X, TMUL)); DEBUG(dbgs() << "\t TL = " << TL << "\n"); if (SrcUMvalid) { TU = minAPInt(TU, floorOfQuotient(SrcUM - X, TMUL)); DEBUG(dbgs() << "\t TU = " << TU << "\n"); } } else { TU = minAPInt(TU, floorOfQuotient(-X, TMUL)); DEBUG(dbgs() << "\t TU = " << TU << "\n"); if (SrcUMvalid) { TL = maxAPInt(TL, ceilingOfQuotient(SrcUM - X, TMUL)); DEBUG(dbgs() << "\t TL = " << TL << "\n"); } } // test(AM/G, LM-Y) and test(-AM/G, Y-UM) TMUL = AM.sdiv(G); if (TMUL.sgt(0)) { TL = maxAPInt(TL, ceilingOfQuotient(-Y, TMUL)); DEBUG(dbgs() << "\t TL = " << TL << "\n"); if (DstUMvalid) { TU = minAPInt(TU, floorOfQuotient(DstUM - Y, TMUL)); DEBUG(dbgs() << "\t TU = " << TU << "\n"); } } else { TU = minAPInt(TU, floorOfQuotient(-Y, TMUL)); DEBUG(dbgs() << "\t TU = " << TU << "\n"); if (DstUMvalid) { TL = maxAPInt(TL, ceilingOfQuotient(DstUM - Y, TMUL)); DEBUG(dbgs() << "\t TL = " << TL << "\n"); } } if (TL.sgt(TU)) ++ExactRDIVindependence; return TL.sgt(TU); } // symbolicRDIVtest - // In Section 4.5 of the Practical Dependence Testing paper,the authors // introduce a special case of Banerjee's Inequalities (also called the // Extreme-Value Test) that can handle some of the SIV and RDIV cases, // particularly cases with symbolics. 
Since it's only able to disprove // dependence (not compute distances or directions), we'll use it as a // fall back for the other tests. // // When we have a pair of subscripts of the form [c1 + a1*i] and [c2 + a2*j] // where i and j are induction variables and c1 and c2 are loop invariants, // we can use the symbolic tests to disprove some dependences, serving as a // backup for the RDIV test. Note that i and j can be the same variable, // letting this test serve as a backup for the various SIV tests. // // For a dependence to exist, c1 + a1*i must equal c2 + a2*j for some // 0 <= i <= N1 and some 0 <= j <= N2, where N1 and N2 are the (normalized) // loop bounds for the i and j loops, respectively. So, ... // // c1 + a1*i = c2 + a2*j // a1*i - a2*j = c2 - c1 // // To test for a dependence, we compute c2 - c1 and make sure it's in the // range of the maximum and minimum possible values of a1*i - a2*j. // Considering the signs of a1 and a2, we have 4 possible cases: // // 1) If a1 >= 0 and a2 >= 0, then // a1*0 - a2*N2 <= c2 - c1 <= a1*N1 - a2*0 // -a2*N2 <= c2 - c1 <= a1*N1 // // 2) If a1 >= 0 and a2 <= 0, then // a1*0 - a2*0 <= c2 - c1 <= a1*N1 - a2*N2 // 0 <= c2 - c1 <= a1*N1 - a2*N2 // // 3) If a1 <= 0 and a2 >= 0, then // a1*N1 - a2*N2 <= c2 - c1 <= a1*0 - a2*0 // a1*N1 - a2*N2 <= c2 - c1 <= 0 // // 4) If a1 <= 0 and a2 <= 0, then // a1*N1 - a2*0 <= c2 - c1 <= a1*0 - a2*N2 // a1*N1 <= c2 - c1 <= -a2*N2 // // return true if dependence disproved bool DependenceAnalysis::symbolicRDIVtest(const SCEV *A1, const SCEV *A2, const SCEV *C1, const SCEV *C2, const Loop *Loop1, const Loop *Loop2) const { ++SymbolicRDIVapplications; DEBUG(dbgs() << "\ttry symbolic RDIV test\n"); DEBUG(dbgs() << "\t A1 = " << *A1); DEBUG(dbgs() << ", type = " << *A1->getType() << "\n"); DEBUG(dbgs() << "\t A2 = " << *A2 << "\n"); DEBUG(dbgs() << "\t C1 = " << *C1 << "\n"); DEBUG(dbgs() << "\t C2 = " << *C2 << "\n"); const SCEV *N1 = collectUpperBound(Loop1, A1->getType()); const SCEV *N2 = collectUpperBound(Loop2, A1->getType()); DEBUG(if (N1) dbgs() << "\t N1 = " << *N1 << "\n"); DEBUG(if (N2) dbgs() << "\t N2 = " << *N2 << "\n"); const SCEV *C2_C1 = SE->getMinusSCEV(C2, C1); const SCEV *C1_C2 = SE->getMinusSCEV(C1, C2); DEBUG(dbgs() << "\t C2 - C1 = " << *C2_C1 << "\n"); DEBUG(dbgs() << "\t C1 - C2 = " << *C1_C2 << "\n"); if (SE->isKnownNonNegative(A1)) { if (SE->isKnownNonNegative(A2)) { // A1 >= 0 && A2 >= 0 if (N1) { // make sure that c2 - c1 <= a1*N1 const SCEV *A1N1 = SE->getMulExpr(A1, N1); DEBUG(dbgs() << "\t A1*N1 = " << *A1N1 << "\n"); if (isKnownPredicate(CmpInst::ICMP_SGT, C2_C1, A1N1)) { ++SymbolicRDIVindependence; return true; } } if (N2) { // make sure that -a2*N2 <= c2 - c1, or a2*N2 >= c1 - c2 const SCEV *A2N2 = SE->getMulExpr(A2, N2); DEBUG(dbgs() << "\t A2*N2 = " << *A2N2 << "\n"); if (isKnownPredicate(CmpInst::ICMP_SLT, A2N2, C1_C2)) { ++SymbolicRDIVindependence; return true; } } } else if (SE->isKnownNonPositive(A2)) { // a1 >= 0 && a2 <= 0 if (N1 && N2) { // make sure that c2 - c1 <= a1*N1 - a2*N2 const SCEV *A1N1 = SE->getMulExpr(A1, N1); const SCEV *A2N2 = SE->getMulExpr(A2, N2); const SCEV *A1N1_A2N2 = SE->getMinusSCEV(A1N1, A2N2); DEBUG(dbgs() << "\t A1*N1 - A2*N2 = " << *A1N1_A2N2 << "\n"); if (isKnownPredicate(CmpInst::ICMP_SGT, C2_C1, A1N1_A2N2)) { ++SymbolicRDIVindependence; return true; } } // make sure that 0 <= c2 - c1 if (SE->isKnownNegative(C2_C1)) { ++SymbolicRDIVindependence; return true; } } } else if (SE->isKnownNonPositive(A1)) { if (SE->isKnownNonNegative(A2)) { // a1 <= 0 
&& a2 >= 0 if (N1 && N2) { // make sure that a1*N1 - a2*N2 <= c2 - c1 const SCEV *A1N1 = SE->getMulExpr(A1, N1); const SCEV *A2N2 = SE->getMulExpr(A2, N2); const SCEV *A1N1_A2N2 = SE->getMinusSCEV(A1N1, A2N2); DEBUG(dbgs() << "\t A1*N1 - A2*N2 = " << *A1N1_A2N2 << "\n"); if (isKnownPredicate(CmpInst::ICMP_SGT, A1N1_A2N2, C2_C1)) { ++SymbolicRDIVindependence; return true; } } // make sure that c2 - c1 <= 0 if (SE->isKnownPositive(C2_C1)) { ++SymbolicRDIVindependence; return true; } } else if (SE->isKnownNonPositive(A2)) { // a1 <= 0 && a2 <= 0 if (N1) { // make sure that a1*N1 <= c2 - c1 const SCEV *A1N1 = SE->getMulExpr(A1, N1); DEBUG(dbgs() << "\t A1*N1 = " << *A1N1 << "\n"); if (isKnownPredicate(CmpInst::ICMP_SGT, A1N1, C2_C1)) { ++SymbolicRDIVindependence; return true; } } if (N2) { // make sure that c2 - c1 <= -a2*N2, or c1 - c2 >= a2*N2 const SCEV *A2N2 = SE->getMulExpr(A2, N2); DEBUG(dbgs() << "\t A2*N2 = " << *A2N2 << "\n"); if (isKnownPredicate(CmpInst::ICMP_SLT, C1_C2, A2N2)) { ++SymbolicRDIVindependence; return true; } } } } return false; } // testSIV - // When we have a pair of subscripts of the form [c1 + a1*i] and [c2 - a2*i] // where i is an induction variable, c1 and c2 are loop invariant, and a1 and // a2 are constant, we attack it with an SIV test. While they can all be // solved with the Exact SIV test, it's worthwhile to use simpler tests when // they apply; they're cheaper and sometimes more precise. // // Return true if dependence disproved. bool DependenceAnalysis::testSIV(const SCEV *Src, const SCEV *Dst, unsigned &Level, FullDependence &Result, Constraint &NewConstraint, const SCEV *&SplitIter) const { DEBUG(dbgs() << " src = " << *Src << "\n"); DEBUG(dbgs() << " dst = " << *Dst << "\n"); const SCEVAddRecExpr *SrcAddRec = dyn_cast<SCEVAddRecExpr>(Src); const SCEVAddRecExpr *DstAddRec = dyn_cast<SCEVAddRecExpr>(Dst); if (SrcAddRec && DstAddRec) { const SCEV *SrcConst = SrcAddRec->getStart(); const SCEV *DstConst = DstAddRec->getStart(); const SCEV *SrcCoeff = SrcAddRec->getStepRecurrence(*SE); const SCEV *DstCoeff = DstAddRec->getStepRecurrence(*SE); const Loop *CurLoop = SrcAddRec->getLoop(); assert(CurLoop == DstAddRec->getLoop() && "both loops in SIV should be same"); Level = mapSrcLoop(CurLoop); bool disproven; if (SrcCoeff == DstCoeff) disproven = strongSIVtest(SrcCoeff, SrcConst, DstConst, CurLoop, Level, Result, NewConstraint); else if (SrcCoeff == SE->getNegativeSCEV(DstCoeff)) disproven = weakCrossingSIVtest(SrcCoeff, SrcConst, DstConst, CurLoop, Level, Result, NewConstraint, SplitIter); else disproven = exactSIVtest(SrcCoeff, DstCoeff, SrcConst, DstConst, CurLoop, Level, Result, NewConstraint); return disproven || gcdMIVtest(Src, Dst, Result) || symbolicRDIVtest(SrcCoeff, DstCoeff, SrcConst, DstConst, CurLoop, CurLoop); } if (SrcAddRec) { const SCEV *SrcConst = SrcAddRec->getStart(); const SCEV *SrcCoeff = SrcAddRec->getStepRecurrence(*SE); const SCEV *DstConst = Dst; const Loop *CurLoop = SrcAddRec->getLoop(); Level = mapSrcLoop(CurLoop); return weakZeroDstSIVtest(SrcCoeff, SrcConst, DstConst, CurLoop, Level, Result, NewConstraint) || gcdMIVtest(Src, Dst, Result); } if (DstAddRec) { const SCEV *DstConst = DstAddRec->getStart(); const SCEV *DstCoeff = DstAddRec->getStepRecurrence(*SE); const SCEV *SrcConst = Src; const Loop *CurLoop = DstAddRec->getLoop(); Level = mapDstLoop(CurLoop); return weakZeroSrcSIVtest(DstCoeff, SrcConst, DstConst, CurLoop, Level, Result, NewConstraint) || gcdMIVtest(Src, Dst, Result); } llvm_unreachable("SIV test expected at least 
one AddRec"); return false; } // testRDIV - // When we have a pair of subscripts of the form [c1 + a1*i] and [c2 + a2*j] // where i and j are induction variables, c1 and c2 are loop invariant, // and a1 and a2 are constant, we can solve it exactly with an easy adaptation // of the Exact SIV test, the Restricted Double Index Variable (RDIV) test. // It doesn't make sense to talk about distance or direction in this case, // so there's no point in making special versions of the Strong SIV test or // the Weak-crossing SIV test. // // With minor algebra, this test can also be used for things like // [c1 + a1*i + a2*j][c2]. // // Return true if dependence disproved. bool DependenceAnalysis::testRDIV(const SCEV *Src, const SCEV *Dst, FullDependence &Result) const { // we have 3 possible situations here: // 1) [a*i + b] and [c*j + d] // 2) [a*i + c*j + b] and [d] // 3) [b] and [a*i + c*j + d] // We need to find what we've got and get organized const SCEV *SrcConst, *DstConst; const SCEV *SrcCoeff, *DstCoeff; const Loop *SrcLoop, *DstLoop; DEBUG(dbgs() << " src = " << *Src << "\n"); DEBUG(dbgs() << " dst = " << *Dst << "\n"); const SCEVAddRecExpr *SrcAddRec = dyn_cast<SCEVAddRecExpr>(Src); const SCEVAddRecExpr *DstAddRec = dyn_cast<SCEVAddRecExpr>(Dst); if (SrcAddRec && DstAddRec) { SrcConst = SrcAddRec->getStart(); SrcCoeff = SrcAddRec->getStepRecurrence(*SE); SrcLoop = SrcAddRec->getLoop(); DstConst = DstAddRec->getStart(); DstCoeff = DstAddRec->getStepRecurrence(*SE); DstLoop = DstAddRec->getLoop(); } else if (SrcAddRec) { if (const SCEVAddRecExpr *tmpAddRec = dyn_cast<SCEVAddRecExpr>(SrcAddRec->getStart())) { SrcConst = tmpAddRec->getStart(); SrcCoeff = tmpAddRec->getStepRecurrence(*SE); SrcLoop = tmpAddRec->getLoop(); DstConst = Dst; DstCoeff = SE->getNegativeSCEV(SrcAddRec->getStepRecurrence(*SE)); DstLoop = SrcAddRec->getLoop(); } else llvm_unreachable("RDIV reached by surprising SCEVs"); } else if (DstAddRec) { if (const SCEVAddRecExpr *tmpAddRec = dyn_cast<SCEVAddRecExpr>(DstAddRec->getStart())) { DstConst = tmpAddRec->getStart(); DstCoeff = tmpAddRec->getStepRecurrence(*SE); DstLoop = tmpAddRec->getLoop(); SrcConst = Src; SrcCoeff = SE->getNegativeSCEV(DstAddRec->getStepRecurrence(*SE)); SrcLoop = DstAddRec->getLoop(); } else llvm_unreachable("RDIV reached by surprising SCEVs"); } else llvm_unreachable("RDIV expected at least one AddRec"); return exactRDIVtest(SrcCoeff, DstCoeff, SrcConst, DstConst, SrcLoop, DstLoop, Result) || gcdMIVtest(Src, Dst, Result) || symbolicRDIVtest(SrcCoeff, DstCoeff, SrcConst, DstConst, SrcLoop, DstLoop); } // Tests the single-subscript MIV pair (Src and Dst) for dependence. // Return true if dependence disproved. // Can sometimes refine direction vectors. bool DependenceAnalysis::testMIV(const SCEV *Src, const SCEV *Dst, const SmallBitVector &Loops, FullDependence &Result) const { DEBUG(dbgs() << " src = " << *Src << "\n"); DEBUG(dbgs() << " dst = " << *Dst << "\n"); Result.Consistent = false; return gcdMIVtest(Src, Dst, Result) || banerjeeMIVtest(Src, Dst, Loops, Result); } // Given a product, e.g., 10*X*Y, returns the first constant operand, // in this case 10. If there is no constant part, returns NULL. 
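// (Illustrative note: for a purely symbolic product such as N*M there is no
// constant operand, so this returns NULL and gcdMIVtest below gives up on
// that coefficient.)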
static const SCEVConstant *getConstantPart(const SCEVMulExpr *Product) { for (unsigned Op = 0, Ops = Product->getNumOperands(); Op < Ops; Op++) { if (const SCEVConstant *Constant = dyn_cast<SCEVConstant>(Product->getOperand(Op))) return Constant; } return nullptr; } //===----------------------------------------------------------------------===// // gcdMIVtest - // Tests an MIV subscript pair for dependence. // Returns true if any possible dependence is disproved. // Marks the result as inconsistent. // Can sometimes disprove the equal direction for 1 or more loops, // as discussed in Michael Wolfe's book, // High Performance Compilers for Parallel Computing, page 235. // // We spend some effort (code!) to handle cases like // [10*i + 5*N*j + 15*M + 6], where i and j are induction variables, // but M and N are just loop-invariant variables. // This should help us handle linearized subscripts; // also makes this test a useful backup to the various SIV tests. // // It occurs to me that the presence of loop-invariant variables // changes the nature of the test from "greatest common divisor" // to "a common divisor". bool DependenceAnalysis::gcdMIVtest(const SCEV *Src, const SCEV *Dst, FullDependence &Result) const { DEBUG(dbgs() << "starting gcd\n"); ++GCDapplications; unsigned BitWidth = SE->getTypeSizeInBits(Src->getType()); APInt RunningGCD = APInt::getNullValue(BitWidth); // Examine Src coefficients. // Compute running GCD and record source constant. // Because we're looking for the constant at the end of the chain, // we can't quit the loop just because the GCD == 1. const SCEV *Coefficients = Src; while (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Coefficients)) { const SCEV *Coeff = AddRec->getStepRecurrence(*SE); const SCEVConstant *Constant = dyn_cast<SCEVConstant>(Coeff); if (const SCEVMulExpr *Product = dyn_cast<SCEVMulExpr>(Coeff)) // If the coefficient is the product of a constant and other stuff, // we can use the constant in the GCD computation. Constant = getConstantPart(Product); if (!Constant) return false; APInt ConstCoeff = Constant->getValue()->getValue(); RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ConstCoeff.abs()); Coefficients = AddRec->getStart(); } const SCEV *SrcConst = Coefficients; // Examine Dst coefficients. // Compute running GCD and record destination constant. // Because we're looking for the constant at the end of the chain, // we can't quit the loop just because the GCD == 1. Coefficients = Dst; while (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Coefficients)) { const SCEV *Coeff = AddRec->getStepRecurrence(*SE); const SCEVConstant *Constant = dyn_cast<SCEVConstant>(Coeff); if (const SCEVMulExpr *Product = dyn_cast<SCEVMulExpr>(Coeff)) // If the coefficient is the product of a constant and other stuff, // we can use the constant in the GCD computation. Constant = getConstantPart(Product); if (!Constant) return false; APInt ConstCoeff = Constant->getValue()->getValue(); RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ConstCoeff.abs()); Coefficients = AddRec->getStart(); } const SCEV *DstConst = Coefficients; APInt ExtraGCD = APInt::getNullValue(BitWidth); const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst); DEBUG(dbgs() << " Delta = " << *Delta << "\n"); const SCEVConstant *Constant = dyn_cast<SCEVConstant>(Delta); if (const SCEVAddExpr *Sum = dyn_cast<SCEVAddExpr>(Delta)) { // If Delta is a sum of products, we may be able to make further progress. 
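// For example (hypothetical Delta): with Delta = 10*N + 6, the constant 6
// becomes ConstDelta, the 10 from the product feeds ExtraGCD, and the test
// below asks whether gcd(RunningGCD, 10) divides 6.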
for (unsigned Op = 0, Ops = Sum->getNumOperands(); Op < Ops; Op++) { const SCEV *Operand = Sum->getOperand(Op); if (isa<SCEVConstant>(Operand)) { assert(!Constant && "Surprised to find multiple constants"); Constant = cast<SCEVConstant>(Operand); } else if (const SCEVMulExpr *Product = dyn_cast<SCEVMulExpr>(Operand)) { // Search for constant operand to participate in GCD; // If none found; return false. const SCEVConstant *ConstOp = getConstantPart(Product); if (!ConstOp) return false; APInt ConstOpValue = ConstOp->getValue()->getValue(); ExtraGCD = APIntOps::GreatestCommonDivisor(ExtraGCD, ConstOpValue.abs()); } else return false; } } if (!Constant) return false; APInt ConstDelta = cast<SCEVConstant>(Constant)->getValue()->getValue(); DEBUG(dbgs() << " ConstDelta = " << ConstDelta << "\n"); if (ConstDelta == 0) return false; RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ExtraGCD); DEBUG(dbgs() << " RunningGCD = " << RunningGCD << "\n"); APInt Remainder = ConstDelta.srem(RunningGCD); if (Remainder != 0) { ++GCDindependence; return true; } // Try to disprove equal directions. // For example, given a subscript pair [3*i + 2*j] and [i' + 2*j' - 1], // the code above can't disprove the dependence because the GCD = 1. // So we consider what happen if i = i' and what happens if j = j'. // If i = i', we can simplify the subscript to [2*i + 2*j] and [2*j' - 1], // which is infeasible, so we can disallow the = direction for the i level. // Setting j = j' doesn't help matters, so we end up with a direction vector // of [<>, *] // // Given A[5*i + 10*j*M + 9*M*N] and A[15*i + 20*j*M - 21*N*M + 5], // we need to remember that the constant part is 5 and the RunningGCD should // be initialized to ExtraGCD = 30. DEBUG(dbgs() << " ExtraGCD = " << ExtraGCD << '\n'); bool Improved = false; Coefficients = Src; while (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Coefficients)) { Coefficients = AddRec->getStart(); const Loop *CurLoop = AddRec->getLoop(); RunningGCD = ExtraGCD; const SCEV *SrcCoeff = AddRec->getStepRecurrence(*SE); const SCEV *DstCoeff = SE->getMinusSCEV(SrcCoeff, SrcCoeff); const SCEV *Inner = Src; while (RunningGCD != 1 && isa<SCEVAddRecExpr>(Inner)) { AddRec = cast<SCEVAddRecExpr>(Inner); const SCEV *Coeff = AddRec->getStepRecurrence(*SE); if (CurLoop == AddRec->getLoop()) ; // SrcCoeff == Coeff else { if (const SCEVMulExpr *Product = dyn_cast<SCEVMulExpr>(Coeff)) // If the coefficient is the product of a constant and other stuff, // we can use the constant in the GCD computation. Constant = getConstantPart(Product); else Constant = cast<SCEVConstant>(Coeff); APInt ConstCoeff = Constant->getValue()->getValue(); RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ConstCoeff.abs()); } Inner = AddRec->getStart(); } Inner = Dst; while (RunningGCD != 1 && isa<SCEVAddRecExpr>(Inner)) { AddRec = cast<SCEVAddRecExpr>(Inner); const SCEV *Coeff = AddRec->getStepRecurrence(*SE); if (CurLoop == AddRec->getLoop()) DstCoeff = Coeff; else { if (const SCEVMulExpr *Product = dyn_cast<SCEVMulExpr>(Coeff)) // If the coefficient is the product of a constant and other stuff, // we can use the constant in the GCD computation. 
Constant = getConstantPart(Product); else Constant = cast<SCEVConstant>(Coeff); APInt ConstCoeff = Constant->getValue()->getValue(); RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ConstCoeff.abs()); } Inner = AddRec->getStart(); } Delta = SE->getMinusSCEV(SrcCoeff, DstCoeff); if (const SCEVMulExpr *Product = dyn_cast<SCEVMulExpr>(Delta)) // If the coefficient is the product of a constant and other stuff, // we can use the constant in the GCD computation. Constant = getConstantPart(Product); else if (isa<SCEVConstant>(Delta)) Constant = cast<SCEVConstant>(Delta); else { // The difference of the two coefficients might not be a product // or constant, in which case we give up on this direction. continue; } APInt ConstCoeff = Constant->getValue()->getValue(); RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ConstCoeff.abs()); DEBUG(dbgs() << "\tRunningGCD = " << RunningGCD << "\n"); if (RunningGCD != 0) { Remainder = ConstDelta.srem(RunningGCD); DEBUG(dbgs() << "\tRemainder = " << Remainder << "\n"); if (Remainder != 0) { unsigned Level = mapSrcLoop(CurLoop); assert(0 < Level && Level <= Result.Levels && "Level out of range"); assert(0 < Level && Level <= Result.Levels); // HLSL Change - TVS Result.DV[Level - 1].Direction &= unsigned(~Dependence::DVEntry::EQ); Improved = true; } } } if (Improved) ++GCDsuccesses; DEBUG(dbgs() << "all done\n"); return false; } //===----------------------------------------------------------------------===// // banerjeeMIVtest - // Use Banerjee's Inequalities to test an MIV subscript pair. // (Wolfe, in the race-car book, calls this the Extreme Value Test.) // Generally follows the discussion in Section 2.5.2 of // // Optimizing Supercompilers for Supercomputers // Michael Wolfe // // The inequalities given on page 25 are simplified in that loops are // normalized so that the lower bound is always 0 and the stride is always 1. // For example, Wolfe gives // // LB^<_k = (A^-_k - B_k)^- (U_k - L_k - N_k) + (A_k - B_k)L_k - B_k N_k // // where A_k is the coefficient of the kth index in the source subscript, // B_k is the coefficient of the kth index in the destination subscript, // U_k is the upper bound of the kth index, L_k is the lower bound of the Kth // index, and N_k is the stride of the kth index. Since all loops are normalized // by the SCEV package, N_k = 1 and L_k = 0, allowing us to simplify the // equation to // // LB^<_k = (A^-_k - B_k)^- (U_k - 0 - 1) + (A_k - B_k)0 - B_k 1 // = (A^-_k - B_k)^- (U_k - 1) - B_k // // Similar simplifications are possible for the other equations. // // When we can't determine the number of iterations for a loop, // we use NULL as an indicator for the worst case, infinity. // When computing the upper bound, NULL denotes +inf; // for the lower bound, NULL denotes -inf. // // Return true if dependence disproved. bool DependenceAnalysis::banerjeeMIVtest(const SCEV *Src, const SCEV *Dst, const SmallBitVector &Loops, FullDependence &Result) const { DEBUG(dbgs() << "starting Banerjee\n"); ++BanerjeeApplications; DEBUG(dbgs() << " Src = " << *Src << '\n'); const SCEV *A0; CoefficientInfo *A = collectCoeffInfo(Src, true, A0); DEBUG(dbgs() << " Dst = " << *Dst << '\n'); const SCEV *B0; CoefficientInfo *B = collectCoeffInfo(Dst, false, B0); BoundInfo *Bound = new BoundInfo[MaxLevels + 1]; const SCEV *Delta = SE->getMinusSCEV(B0, A0); DEBUG(dbgs() << "\tDelta = " << *Delta << '\n'); // Compute bounds for all the * directions. 
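// (Bound[K].Iterations is taken from the source's CoefficientInfo when it
// recorded an upper bound for loop K, otherwise from the destination's; a
// null value stands for the unknown case, i.e. the infinite bound described
// above.)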
DEBUG(dbgs() << "\tBounds[*]\n"); for (unsigned K = 1; K <= MaxLevels; ++K) { Bound[K].Iterations = A[K].Iterations ? A[K].Iterations : B[K].Iterations; Bound[K].Direction = Dependence::DVEntry::ALL; Bound[K].DirSet = Dependence::DVEntry::NONE; findBoundsALL(A, B, Bound, K); #ifndef NDEBUG DEBUG(dbgs() << "\t " << K << '\t'); if (Bound[K].Lower[Dependence::DVEntry::ALL]) DEBUG(dbgs() << *Bound[K].Lower[Dependence::DVEntry::ALL] << '\t'); else DEBUG(dbgs() << "-inf\t"); if (Bound[K].Upper[Dependence::DVEntry::ALL]) DEBUG(dbgs() << *Bound[K].Upper[Dependence::DVEntry::ALL] << '\n'); else DEBUG(dbgs() << "+inf\n"); #endif } // Test the *, *, *, ... case. bool Disproved = false; if (testBounds(Dependence::DVEntry::ALL, 0, Bound, Delta)) { // Explore the direction vector hierarchy. unsigned DepthExpanded = 0; unsigned NewDeps = exploreDirections(1, A, B, Bound, Loops, DepthExpanded, Delta); if (NewDeps > 0) { bool Improved = false; for (unsigned K = 1; K <= CommonLevels; ++K) { if (Loops[K]) { assert(Result.Levels == CommonLevels); // HLSL Change - TVS assert(K <= MaxLevels); // HLSL Change - TVS unsigned Old = Result.DV[K - 1].Direction; Result.DV[K - 1].Direction = Old & Bound[K].DirSet; Improved |= Old != Result.DV[K - 1].Direction; if (!Result.DV[K - 1].Direction) { Improved = false; Disproved = true; break; } } } if (Improved) ++BanerjeeSuccesses; } else { ++BanerjeeIndependence; Disproved = true; } } else { ++BanerjeeIndependence; Disproved = true; } delete [] Bound; delete [] A; delete [] B; return Disproved; } // Hierarchically expands the direction vector // search space, combining the directions of discovered dependences // in the DirSet field of Bound. Returns the number of distinct // dependences discovered. If the dependence is disproved, // it will return 0. 
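// As a hypothetical illustration for a two-level nest: starting from a
// feasible (*,*) vector, the search tries (<,*), (=,*), and (>,*); each
// prefix whose bounds still admit Delta is refined to (<,<), (<,=), and so
// on, every feasible leaf adds 1 to the returned count, and its directions
// are OR'ed into DirSet.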
unsigned DependenceAnalysis::exploreDirections(unsigned Level, CoefficientInfo *A, CoefficientInfo *B, BoundInfo *Bound, const SmallBitVector &Loops, unsigned &DepthExpanded, const SCEV *Delta) const { if (Level > CommonLevels) { // record result DEBUG(dbgs() << "\t["); for (unsigned K = 1; K <= CommonLevels; ++K) { if (Loops[K]) { Bound[K].DirSet |= Bound[K].Direction; #ifndef NDEBUG switch (Bound[K].Direction) { case Dependence::DVEntry::LT: DEBUG(dbgs() << " <"); break; case Dependence::DVEntry::EQ: DEBUG(dbgs() << " ="); break; case Dependence::DVEntry::GT: DEBUG(dbgs() << " >"); break; case Dependence::DVEntry::ALL: DEBUG(dbgs() << " *"); break; default: llvm_unreachable("unexpected Bound[K].Direction"); } #endif } } DEBUG(dbgs() << " ]\n"); return 1; } if (Loops[Level]) { if (Level > DepthExpanded) { DepthExpanded = Level; // compute bounds for <, =, > at current level findBoundsLT(A, B, Bound, Level); findBoundsGT(A, B, Bound, Level); findBoundsEQ(A, B, Bound, Level); #ifndef NDEBUG DEBUG(dbgs() << "\tBound for level = " << Level << '\n'); DEBUG(dbgs() << "\t <\t"); if (Bound[Level].Lower[Dependence::DVEntry::LT]) DEBUG(dbgs() << *Bound[Level].Lower[Dependence::DVEntry::LT] << '\t'); else DEBUG(dbgs() << "-inf\t"); if (Bound[Level].Upper[Dependence::DVEntry::LT]) DEBUG(dbgs() << *Bound[Level].Upper[Dependence::DVEntry::LT] << '\n'); else DEBUG(dbgs() << "+inf\n"); DEBUG(dbgs() << "\t =\t"); if (Bound[Level].Lower[Dependence::DVEntry::EQ]) DEBUG(dbgs() << *Bound[Level].Lower[Dependence::DVEntry::EQ] << '\t'); else DEBUG(dbgs() << "-inf\t"); if (Bound[Level].Upper[Dependence::DVEntry::EQ]) DEBUG(dbgs() << *Bound[Level].Upper[Dependence::DVEntry::EQ] << '\n'); else DEBUG(dbgs() << "+inf\n"); DEBUG(dbgs() << "\t >\t"); if (Bound[Level].Lower[Dependence::DVEntry::GT]) DEBUG(dbgs() << *Bound[Level].Lower[Dependence::DVEntry::GT] << '\t'); else DEBUG(dbgs() << "-inf\t"); if (Bound[Level].Upper[Dependence::DVEntry::GT]) DEBUG(dbgs() << *Bound[Level].Upper[Dependence::DVEntry::GT] << '\n'); else DEBUG(dbgs() << "+inf\n"); #endif } unsigned NewDeps = 0; // test bounds for <, *, *, ... if (testBounds(Dependence::DVEntry::LT, Level, Bound, Delta)) NewDeps += exploreDirections(Level + 1, A, B, Bound, Loops, DepthExpanded, Delta); // Test bounds for =, *, *, ... if (testBounds(Dependence::DVEntry::EQ, Level, Bound, Delta)) NewDeps += exploreDirections(Level + 1, A, B, Bound, Loops, DepthExpanded, Delta); // test bounds for >, *, *, ... if (testBounds(Dependence::DVEntry::GT, Level, Bound, Delta)) NewDeps += exploreDirections(Level + 1, A, B, Bound, Loops, DepthExpanded, Delta); Bound[Level].Direction = Dependence::DVEntry::ALL; return NewDeps; } else return exploreDirections(Level + 1, A, B, Bound, Loops, DepthExpanded, Delta); } // Returns true iff the current bounds are plausible. bool DependenceAnalysis::testBounds(unsigned char DirKind, unsigned Level, BoundInfo *Bound, const SCEV *Delta) const { Bound[Level].Direction = DirKind; if (const SCEV *LowerBound = getLowerBound(Bound)) if (isKnownPredicate(CmpInst::ICMP_SGT, LowerBound, Delta)) return false; if (const SCEV *UpperBound = getUpperBound(Bound)) if (isKnownPredicate(CmpInst::ICMP_SGT, Delta, UpperBound)) return false; return true; } // Computes the upper and lower bounds for level K // using the * direction. Records them in Bound. 
// Wolfe gives the equations // // LB^*_k = (A^-_k - B^+_k)(U_k - L_k) + (A_k - B_k)L_k // UB^*_k = (A^+_k - B^-_k)(U_k - L_k) + (A_k - B_k)L_k // // Since we normalize loops, we can simplify these equations to // // LB^*_k = (A^-_k - B^+_k)U_k // UB^*_k = (A^+_k - B^-_k)U_k // // We must be careful to handle the case where the upper bound is unknown. // Note that the lower bound is always <= 0 // and the upper bound is always >= 0. void DependenceAnalysis::findBoundsALL(CoefficientInfo *A, CoefficientInfo *B, BoundInfo *Bound, unsigned K) const { Bound[K].Lower[Dependence::DVEntry::ALL] = nullptr; // Default value = -infinity. Bound[K].Upper[Dependence::DVEntry::ALL] = nullptr; // Default value = +infinity. if (Bound[K].Iterations) { Bound[K].Lower[Dependence::DVEntry::ALL] = SE->getMulExpr(SE->getMinusSCEV(A[K].NegPart, B[K].PosPart), Bound[K].Iterations); Bound[K].Upper[Dependence::DVEntry::ALL] = SE->getMulExpr(SE->getMinusSCEV(A[K].PosPart, B[K].NegPart), Bound[K].Iterations); } else { // If the difference is 0, we won't need to know the number of iterations. if (isKnownPredicate(CmpInst::ICMP_EQ, A[K].NegPart, B[K].PosPart)) Bound[K].Lower[Dependence::DVEntry::ALL] = SE->getConstant(A[K].Coeff->getType(), 0); if (isKnownPredicate(CmpInst::ICMP_EQ, A[K].PosPart, B[K].NegPart)) Bound[K].Upper[Dependence::DVEntry::ALL] = SE->getConstant(A[K].Coeff->getType(), 0); } } // Computes the upper and lower bounds for level K // using the = direction. Records them in Bound. // Wolfe gives the equations // // LB^=_k = (A_k - B_k)^- (U_k - L_k) + (A_k - B_k)L_k // UB^=_k = (A_k - B_k)^+ (U_k - L_k) + (A_k - B_k)L_k // // Since we normalize loops, we can simplify these equations to // // LB^=_k = (A_k - B_k)^- U_k // UB^=_k = (A_k - B_k)^+ U_k // // We must be careful to handle the case where the upper bound is unknown. // Note that the lower bound is always <= 0 // and the upper bound is always >= 0. void DependenceAnalysis::findBoundsEQ(CoefficientInfo *A, CoefficientInfo *B, BoundInfo *Bound, unsigned K) const { Bound[K].Lower[Dependence::DVEntry::EQ] = nullptr; // Default value = -infinity. Bound[K].Upper[Dependence::DVEntry::EQ] = nullptr; // Default value = +infinity. if (Bound[K].Iterations) { const SCEV *Delta = SE->getMinusSCEV(A[K].Coeff, B[K].Coeff); const SCEV *NegativePart = getNegativePart(Delta); Bound[K].Lower[Dependence::DVEntry::EQ] = SE->getMulExpr(NegativePart, Bound[K].Iterations); const SCEV *PositivePart = getPositivePart(Delta); Bound[K].Upper[Dependence::DVEntry::EQ] = SE->getMulExpr(PositivePart, Bound[K].Iterations); } else { // If the positive/negative part of the difference is 0, // we won't need to know the number of iterations. const SCEV *Delta = SE->getMinusSCEV(A[K].Coeff, B[K].Coeff); const SCEV *NegativePart = getNegativePart(Delta); if (NegativePart->isZero()) Bound[K].Lower[Dependence::DVEntry::EQ] = NegativePart; // Zero const SCEV *PositivePart = getPositivePart(Delta); if (PositivePart->isZero()) Bound[K].Upper[Dependence::DVEntry::EQ] = PositivePart; // Zero } } // Computes the upper and lower bounds for level K // using the < direction. Records them in Bound. 
// Wolfe gives the equations // // LB^<_k = (A^-_k - B_k)^- (U_k - L_k - N_k) + (A_k - B_k)L_k - B_k N_k // UB^<_k = (A^+_k - B_k)^+ (U_k - L_k - N_k) + (A_k - B_k)L_k - B_k N_k // // Since we normalize loops, we can simplify these equations to // // LB^<_k = (A^-_k - B_k)^- (U_k - 1) - B_k // UB^<_k = (A^+_k - B_k)^+ (U_k - 1) - B_k // // We must be careful to handle the case where the upper bound is unknown. void DependenceAnalysis::findBoundsLT(CoefficientInfo *A, CoefficientInfo *B, BoundInfo *Bound, unsigned K) const { Bound[K].Lower[Dependence::DVEntry::LT] = nullptr; // Default value = -infinity. Bound[K].Upper[Dependence::DVEntry::LT] = nullptr; // Default value = +infinity. if (Bound[K].Iterations) { const SCEV *Iter_1 = SE->getMinusSCEV(Bound[K].Iterations, SE->getConstant(Bound[K].Iterations->getType(), 1)); const SCEV *NegPart = getNegativePart(SE->getMinusSCEV(A[K].NegPart, B[K].Coeff)); Bound[K].Lower[Dependence::DVEntry::LT] = SE->getMinusSCEV(SE->getMulExpr(NegPart, Iter_1), B[K].Coeff); const SCEV *PosPart = getPositivePart(SE->getMinusSCEV(A[K].PosPart, B[K].Coeff)); Bound[K].Upper[Dependence::DVEntry::LT] = SE->getMinusSCEV(SE->getMulExpr(PosPart, Iter_1), B[K].Coeff); } else { // If the positive/negative part of the difference is 0, // we won't need to know the number of iterations. const SCEV *NegPart = getNegativePart(SE->getMinusSCEV(A[K].NegPart, B[K].Coeff)); if (NegPart->isZero()) Bound[K].Lower[Dependence::DVEntry::LT] = SE->getNegativeSCEV(B[K].Coeff); const SCEV *PosPart = getPositivePart(SE->getMinusSCEV(A[K].PosPart, B[K].Coeff)); if (PosPart->isZero()) Bound[K].Upper[Dependence::DVEntry::LT] = SE->getNegativeSCEV(B[K].Coeff); } } // Computes the upper and lower bounds for level K // using the > direction. Records them in Bound. // Wolfe gives the equations // // LB^>_k = (A_k - B^+_k)^- (U_k - L_k - N_k) + (A_k - B_k)L_k + A_k N_k // UB^>_k = (A_k - B^-_k)^+ (U_k - L_k - N_k) + (A_k - B_k)L_k + A_k N_k // // Since we normalize loops, we can simplify these equations to // // LB^>_k = (A_k - B^+_k)^- (U_k - 1) + A_k // UB^>_k = (A_k - B^-_k)^+ (U_k - 1) + A_k // // We must be careful to handle the case where the upper bound is unknown. void DependenceAnalysis::findBoundsGT(CoefficientInfo *A, CoefficientInfo *B, BoundInfo *Bound, unsigned K) const { Bound[K].Lower[Dependence::DVEntry::GT] = nullptr; // Default value = -infinity. Bound[K].Upper[Dependence::DVEntry::GT] = nullptr; // Default value = +infinity. if (Bound[K].Iterations) { const SCEV *Iter_1 = SE->getMinusSCEV(Bound[K].Iterations, SE->getConstant(Bound[K].Iterations->getType(), 1)); const SCEV *NegPart = getNegativePart(SE->getMinusSCEV(A[K].Coeff, B[K].PosPart)); Bound[K].Lower[Dependence::DVEntry::GT] = SE->getAddExpr(SE->getMulExpr(NegPart, Iter_1), A[K].Coeff); const SCEV *PosPart = getPositivePart(SE->getMinusSCEV(A[K].Coeff, B[K].NegPart)); Bound[K].Upper[Dependence::DVEntry::GT] = SE->getAddExpr(SE->getMulExpr(PosPart, Iter_1), A[K].Coeff); } else { // If the positive/negative part of the difference is 0, // we won't need to know the number of iterations. 
const SCEV *NegPart = getNegativePart(SE->getMinusSCEV(A[K].Coeff, B[K].PosPart)); if (NegPart->isZero()) Bound[K].Lower[Dependence::DVEntry::GT] = A[K].Coeff; const SCEV *PosPart = getPositivePart(SE->getMinusSCEV(A[K].Coeff, B[K].NegPart)); if (PosPart->isZero()) Bound[K].Upper[Dependence::DVEntry::GT] = A[K].Coeff; } } // X^+ = max(X, 0) const SCEV *DependenceAnalysis::getPositivePart(const SCEV *X) const { return SE->getSMaxExpr(X, SE->getConstant(X->getType(), 0)); } // X^- = min(X, 0) const SCEV *DependenceAnalysis::getNegativePart(const SCEV *X) const { return SE->getSMinExpr(X, SE->getConstant(X->getType(), 0)); } // Walks through the subscript, // collecting each coefficient, the associated loop bounds, // and recording its positive and negative parts for later use. DependenceAnalysis::CoefficientInfo * DependenceAnalysis::collectCoeffInfo(const SCEV *Subscript, bool SrcFlag, const SCEV *&Constant) const { const SCEV *Zero = SE->getConstant(Subscript->getType(), 0); CoefficientInfo *CI = new CoefficientInfo[MaxLevels + 1]; for (unsigned K = 1; K <= MaxLevels; ++K) { CI[K].Coeff = Zero; CI[K].PosPart = Zero; CI[K].NegPart = Zero; CI[K].Iterations = nullptr; } while (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Subscript)) { const Loop *L = AddRec->getLoop(); unsigned K = SrcFlag ? mapSrcLoop(L) : mapDstLoop(L); CI[K].Coeff = AddRec->getStepRecurrence(*SE); CI[K].PosPart = getPositivePart(CI[K].Coeff); CI[K].NegPart = getNegativePart(CI[K].Coeff); CI[K].Iterations = collectUpperBound(L, Subscript->getType()); Subscript = AddRec->getStart(); } Constant = Subscript; #ifndef NDEBUG DEBUG(dbgs() << "\tCoefficient Info\n"); for (unsigned K = 1; K <= MaxLevels; ++K) { DEBUG(dbgs() << "\t " << K << "\t" << *CI[K].Coeff); DEBUG(dbgs() << "\tPos Part = "); DEBUG(dbgs() << *CI[K].PosPart); DEBUG(dbgs() << "\tNeg Part = "); DEBUG(dbgs() << *CI[K].NegPart); DEBUG(dbgs() << "\tUpper Bound = "); if (CI[K].Iterations) DEBUG(dbgs() << *CI[K].Iterations); else DEBUG(dbgs() << "+inf"); DEBUG(dbgs() << '\n'); } DEBUG(dbgs() << "\t Constant = " << *Subscript << '\n'); #endif return CI; } // Looks through all the bounds info and // computes the lower bound given the current direction settings // at each level. If the lower bound for any level is -inf, // the result is -inf. const SCEV *DependenceAnalysis::getLowerBound(BoundInfo *Bound) const { const SCEV *Sum = Bound[1].Lower[Bound[1].Direction]; for (unsigned K = 2; Sum && K <= MaxLevels; ++K) { if (Bound[K].Lower[Bound[K].Direction]) Sum = SE->getAddExpr(Sum, Bound[K].Lower[Bound[K].Direction]); else Sum = nullptr; } return Sum; } // Looks through all the bounds info and // computes the upper bound given the current direction settings // at each level. If the upper bound at any level is +inf, // the result is +inf. const SCEV *DependenceAnalysis::getUpperBound(BoundInfo *Bound) const { const SCEV *Sum = Bound[1].Upper[Bound[1].Direction]; for (unsigned K = 2; Sum && K <= MaxLevels; ++K) { if (Bound[K].Upper[Bound[K].Direction]) Sum = SE->getAddExpr(Sum, Bound[K].Upper[Bound[K].Direction]); else Sum = nullptr; } return Sum; } //===----------------------------------------------------------------------===// // Constraint manipulation for Delta test. // Given a linear SCEV, // return the coefficient (the step) // corresponding to the specified loop. // If there isn't one, return 0. // For example, given a*i + b*j + c*k, finding the coefficient // corresponding to the j loop would yield b. 
const SCEV *DependenceAnalysis::findCoefficient(const SCEV *Expr, const Loop *TargetLoop) const { const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Expr); if (!AddRec) return SE->getConstant(Expr->getType(), 0); if (AddRec->getLoop() == TargetLoop) return AddRec->getStepRecurrence(*SE); return findCoefficient(AddRec->getStart(), TargetLoop); } // Given a linear SCEV, // return the SCEV given by zeroing out the coefficient // corresponding to the specified loop. // For example, given a*i + b*j + c*k, zeroing the coefficient // corresponding to the j loop would yield a*i + c*k. const SCEV *DependenceAnalysis::zeroCoefficient(const SCEV *Expr, const Loop *TargetLoop) const { const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Expr); if (!AddRec) return Expr; // ignore if (AddRec->getLoop() == TargetLoop) return AddRec->getStart(); return SE->getAddRecExpr(zeroCoefficient(AddRec->getStart(), TargetLoop), AddRec->getStepRecurrence(*SE), AddRec->getLoop(), AddRec->getNoWrapFlags()); } // Given a linear SCEV Expr, // return the SCEV given by adding some Value to the // coefficient corresponding to the specified TargetLoop. // For example, given a*i + b*j + c*k, adding 1 to the coefficient // corresponding to the j loop would yield a*i + (b+1)*j + c*k. const SCEV *DependenceAnalysis::addToCoefficient(const SCEV *Expr, const Loop *TargetLoop, const SCEV *Value) const { const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Expr); if (!AddRec) // create a new addRec return SE->getAddRecExpr(Expr, Value, TargetLoop, SCEV::FlagAnyWrap); // Worst case, with no info. if (AddRec->getLoop() == TargetLoop) { const SCEV *Sum = SE->getAddExpr(AddRec->getStepRecurrence(*SE), Value); if (Sum->isZero()) return AddRec->getStart(); return SE->getAddRecExpr(AddRec->getStart(), Sum, AddRec->getLoop(), AddRec->getNoWrapFlags()); } if (SE->isLoopInvariant(AddRec, TargetLoop)) return SE->getAddRecExpr(AddRec, Value, TargetLoop, SCEV::FlagAnyWrap); return SE->getAddRecExpr( addToCoefficient(AddRec->getStart(), TargetLoop, Value), AddRec->getStepRecurrence(*SE), AddRec->getLoop(), AddRec->getNoWrapFlags()); } // Review the constraints, looking for opportunities // to simplify a subscript pair (Src and Dst). // Return true if some simplification occurs. // If the simplification isn't exact (that is, if it is conservative // in terms of dependence), set consistent to false. // Corresponds to Figure 5 from the paper // // Practical Dependence Testing // Goff, Kennedy, Tseng // PLDI 1991 bool DependenceAnalysis::propagate(const SCEV *&Src, const SCEV *&Dst, SmallBitVector &Loops, SmallVectorImpl<Constraint> &Constraints, bool &Consistent) { bool Result = false; for (int LI = Loops.find_first(); LI >= 0; LI = Loops.find_next(LI)) { DEBUG(dbgs() << "\t Constraint[" << LI << "] is"); DEBUG(Constraints[LI].dump(dbgs())); if (Constraints[LI].isDistance()) Result |= propagateDistance(Src, Dst, Constraints[LI], Consistent); else if (Constraints[LI].isLine()) Result |= propagateLine(Src, Dst, Constraints[LI], Consistent); else if (Constraints[LI].isPoint()) Result |= propagatePoint(Src, Dst, Constraints[LI]); } return Result; } // Attempt to propagate a distance // constraint into a subscript pair (Src and Dst). // Return true if some simplification occurs. // If the simplification isn't exact (that is, if it is conservative // in terms of dependence), set consistent to false. 
bool DependenceAnalysis::propagateDistance(const SCEV *&Src, const SCEV *&Dst, Constraint &CurConstraint, bool &Consistent) { const Loop *CurLoop = CurConstraint.getAssociatedLoop(); DEBUG(dbgs() << "\t\tSrc is " << *Src << "\n"); const SCEV *A_K = findCoefficient(Src, CurLoop); if (A_K->isZero()) return false; const SCEV *DA_K = SE->getMulExpr(A_K, CurConstraint.getD()); Src = SE->getMinusSCEV(Src, DA_K); Src = zeroCoefficient(Src, CurLoop); DEBUG(dbgs() << "\t\tnew Src is " << *Src << "\n"); DEBUG(dbgs() << "\t\tDst is " << *Dst << "\n"); Dst = addToCoefficient(Dst, CurLoop, SE->getNegativeSCEV(A_K)); DEBUG(dbgs() << "\t\tnew Dst is " << *Dst << "\n"); if (!findCoefficient(Dst, CurLoop)->isZero()) Consistent = false; return true; } // Attempt to propagate a line // constraint into a subscript pair (Src and Dst). // Return true if some simplification occurs. // If the simplification isn't exact (that is, if it is conservative // in terms of dependence), set consistent to false. bool DependenceAnalysis::propagateLine(const SCEV *&Src, const SCEV *&Dst, Constraint &CurConstraint, bool &Consistent) { const Loop *CurLoop = CurConstraint.getAssociatedLoop(); const SCEV *A = CurConstraint.getA(); const SCEV *B = CurConstraint.getB(); const SCEV *C = CurConstraint.getC(); DEBUG(dbgs() << "\t\tA = " << *A << ", B = " << *B << ", C = " << *C << "\n"); DEBUG(dbgs() << "\t\tSrc = " << *Src << "\n"); DEBUG(dbgs() << "\t\tDst = " << *Dst << "\n"); if (A->isZero()) { const SCEVConstant *Bconst = dyn_cast<SCEVConstant>(B); const SCEVConstant *Cconst = dyn_cast<SCEVConstant>(C); if (!Bconst || !Cconst) return false; APInt Beta = Bconst->getValue()->getValue(); APInt Charlie = Cconst->getValue()->getValue(); APInt CdivB = Charlie.sdiv(Beta); assert(Charlie.srem(Beta) == 0 && "C should be evenly divisible by B"); const SCEV *AP_K = findCoefficient(Dst, CurLoop); // Src = SE->getAddExpr(Src, SE->getMulExpr(AP_K, SE->getConstant(CdivB))); Src = SE->getMinusSCEV(Src, SE->getMulExpr(AP_K, SE->getConstant(CdivB))); Dst = zeroCoefficient(Dst, CurLoop); if (!findCoefficient(Src, CurLoop)->isZero()) Consistent = false; } else if (B->isZero()) { const SCEVConstant *Aconst = dyn_cast<SCEVConstant>(A); const SCEVConstant *Cconst = dyn_cast<SCEVConstant>(C); if (!Aconst || !Cconst) return false; APInt Alpha = Aconst->getValue()->getValue(); APInt Charlie = Cconst->getValue()->getValue(); APInt CdivA = Charlie.sdiv(Alpha); assert(Charlie.srem(Alpha) == 0 && "C should be evenly divisible by A"); const SCEV *A_K = findCoefficient(Src, CurLoop); Src = SE->getAddExpr(Src, SE->getMulExpr(A_K, SE->getConstant(CdivA))); Src = zeroCoefficient(Src, CurLoop); if (!findCoefficient(Dst, CurLoop)->isZero()) Consistent = false; } else if (isKnownPredicate(CmpInst::ICMP_EQ, A, B)) { const SCEVConstant *Aconst = dyn_cast<SCEVConstant>(A); const SCEVConstant *Cconst = dyn_cast<SCEVConstant>(C); if (!Aconst || !Cconst) return false; APInt Alpha = Aconst->getValue()->getValue(); APInt Charlie = Cconst->getValue()->getValue(); APInt CdivA = Charlie.sdiv(Alpha); assert(Charlie.srem(Alpha) == 0 && "C should be evenly divisible by A"); const SCEV *A_K = findCoefficient(Src, CurLoop); Src = SE->getAddExpr(Src, SE->getMulExpr(A_K, SE->getConstant(CdivA))); Src = zeroCoefficient(Src, CurLoop); Dst = addToCoefficient(Dst, CurLoop, A_K); if (!findCoefficient(Dst, CurLoop)->isZero()) Consistent = false; } else { // paper is incorrect here, or perhaps just misleading const SCEV *A_K = findCoefficient(Src, CurLoop); Src = SE->getMulExpr(Src, A); Dst 
= SE->getMulExpr(Dst, A); Src = SE->getAddExpr(Src, SE->getMulExpr(A_K, C)); Src = zeroCoefficient(Src, CurLoop); Dst = addToCoefficient(Dst, CurLoop, SE->getMulExpr(A_K, B)); if (!findCoefficient(Dst, CurLoop)->isZero()) Consistent = false; } DEBUG(dbgs() << "\t\tnew Src = " << *Src << "\n"); DEBUG(dbgs() << "\t\tnew Dst = " << *Dst << "\n"); return true; } // Attempt to propagate a point // constraint into a subscript pair (Src and Dst). // Return true if some simplification occurs. bool DependenceAnalysis::propagatePoint(const SCEV *&Src, const SCEV *&Dst, Constraint &CurConstraint) { const Loop *CurLoop = CurConstraint.getAssociatedLoop(); const SCEV *A_K = findCoefficient(Src, CurLoop); const SCEV *AP_K = findCoefficient(Dst, CurLoop); const SCEV *XA_K = SE->getMulExpr(A_K, CurConstraint.getX()); const SCEV *YAP_K = SE->getMulExpr(AP_K, CurConstraint.getY()); DEBUG(dbgs() << "\t\tSrc is " << *Src << "\n"); Src = SE->getAddExpr(Src, SE->getMinusSCEV(XA_K, YAP_K)); Src = zeroCoefficient(Src, CurLoop); DEBUG(dbgs() << "\t\tnew Src is " << *Src << "\n"); DEBUG(dbgs() << "\t\tDst is " << *Dst << "\n"); Dst = zeroCoefficient(Dst, CurLoop); DEBUG(dbgs() << "\t\tnew Dst is " << *Dst << "\n"); return true; } // Update direction vector entry based on the current constraint. void DependenceAnalysis::updateDirection(Dependence::DVEntry &Level, const Constraint &CurConstraint ) const { DEBUG(dbgs() << "\tUpdate direction, constraint ="); DEBUG(CurConstraint.dump(dbgs())); if (CurConstraint.isAny()) ; // use defaults else if (CurConstraint.isDistance()) { // this one is consistent, the others aren't Level.Scalar = false; Level.Distance = CurConstraint.getD(); unsigned NewDirection = Dependence::DVEntry::NONE; if (!SE->isKnownNonZero(Level.Distance)) // if may be zero NewDirection = Dependence::DVEntry::EQ; if (!SE->isKnownNonPositive(Level.Distance)) // if may be positive NewDirection |= Dependence::DVEntry::LT; if (!SE->isKnownNonNegative(Level.Distance)) // if may be negative NewDirection |= Dependence::DVEntry::GT; Level.Direction &= NewDirection; } else if (CurConstraint.isLine()) { Level.Scalar = false; Level.Distance = nullptr; // direction should be accurate } else if (CurConstraint.isPoint()) { Level.Scalar = false; Level.Distance = nullptr; unsigned NewDirection = Dependence::DVEntry::NONE; if (!isKnownPredicate(CmpInst::ICMP_NE, CurConstraint.getY(), CurConstraint.getX())) // if X may be = Y NewDirection |= Dependence::DVEntry::EQ; if (!isKnownPredicate(CmpInst::ICMP_SLE, CurConstraint.getY(), CurConstraint.getX())) // if Y may be > X NewDirection |= Dependence::DVEntry::LT; if (!isKnownPredicate(CmpInst::ICMP_SGE, CurConstraint.getY(), CurConstraint.getX())) // if Y may be < X NewDirection |= Dependence::DVEntry::GT; Level.Direction &= NewDirection; } else llvm_unreachable("constraint has unexpected kind"); } /// Check if we can delinearize the subscripts. If the SCEVs representing the /// source and destination array references are recurrences on a nested loop, /// this function flattens the nested recurrences into separate recurrences /// for each loop level. 
bool DependenceAnalysis::tryDelinearize(const SCEV *SrcSCEV, const SCEV *DstSCEV, SmallVectorImpl<Subscript> &Pair, const SCEV *ElementSize) { const SCEVUnknown *SrcBase = dyn_cast<SCEVUnknown>(SE->getPointerBase(SrcSCEV)); const SCEVUnknown *DstBase = dyn_cast<SCEVUnknown>(SE->getPointerBase(DstSCEV)); if (!SrcBase || !DstBase || SrcBase != DstBase) return false; SrcSCEV = SE->getMinusSCEV(SrcSCEV, SrcBase); DstSCEV = SE->getMinusSCEV(DstSCEV, DstBase); const SCEVAddRecExpr *SrcAR = dyn_cast<SCEVAddRecExpr>(SrcSCEV); const SCEVAddRecExpr *DstAR = dyn_cast<SCEVAddRecExpr>(DstSCEV); if (!SrcAR || !DstAR || !SrcAR->isAffine() || !DstAR->isAffine()) return false; // First step: collect parametric terms in both array references. SmallVector<const SCEV *, 4> Terms; SE->collectParametricTerms(SrcAR, Terms); SE->collectParametricTerms(DstAR, Terms); // Second step: find subscript sizes. SmallVector<const SCEV *, 4> Sizes; SE->findArrayDimensions(Terms, Sizes, ElementSize); // Third step: compute the access functions for each subscript. SmallVector<const SCEV *, 4> SrcSubscripts, DstSubscripts; SE->computeAccessFunctions(SrcAR, SrcSubscripts, Sizes); SE->computeAccessFunctions(DstAR, DstSubscripts, Sizes); // Fail when there is only a subscript: that's a linearized access function. if (SrcSubscripts.size() < 2 || DstSubscripts.size() < 2 || SrcSubscripts.size() != DstSubscripts.size()) return false; int size = SrcSubscripts.size(); DEBUG({ dbgs() << "\nSrcSubscripts: "; for (int i = 0; i < size; i++) dbgs() << *SrcSubscripts[i]; dbgs() << "\nDstSubscripts: "; for (int i = 0; i < size; i++) dbgs() << *DstSubscripts[i]; }); // The delinearization transforms a single-subscript MIV dependence test into // a multi-subscript SIV dependence test that is easier to compute. So we // resize Pair to contain as many pairs of subscripts as the delinearization // has found, and then initialize the pairs following the delinearization. Pair.resize(size); for (int i = 0; i < size; ++i) { Pair[i].Src = SrcSubscripts[i]; Pair[i].Dst = DstSubscripts[i]; unifySubscriptType(&Pair[i]); // FIXME: we should record the bounds SrcSizes[i] and DstSizes[i] that the // delinearization has found, and add these constraints to the dependence // check to avoid memory accesses overflow from one dimension into another. // This is related to the problem of determining the existence of data // dependences in array accesses using a different number of subscripts: in // C one can access an array A[100][100]; as A[0][9999], *A[9999], etc. } return true; } //===----------------------------------------------------------------------===// #ifndef NDEBUG // For debugging purposes, dump a small bit vector to dbgs(). static void dumpSmallBitVector(SmallBitVector &BV) { dbgs() << "{"; for (int VI = BV.find_first(); VI >= 0; VI = BV.find_next(VI)) { dbgs() << VI; if (BV.find_next(VI) >= 0) dbgs() << ' '; } dbgs() << "}\n"; } #endif // depends - // Returns NULL if there is no dependence. // Otherwise, return a Dependence with as many details as possible. // Corresponds to Section 3.1 in the paper // // Practical Dependence Testing // Goff, Kennedy, Tseng // PLDI 1991 // // Care is required to keep the routine below, getSplitIteration(), // up to date with respect to this routine. 
std::unique_ptr<Dependence> DependenceAnalysis::depends(Instruction *Src, Instruction *Dst, bool PossiblyLoopIndependent) { if (Src == Dst) PossiblyLoopIndependent = false; if ((!Src->mayReadFromMemory() && !Src->mayWriteToMemory()) || (!Dst->mayReadFromMemory() && !Dst->mayWriteToMemory())) // if both instructions don't reference memory, there's no dependence return nullptr; if (!isLoadOrStore(Src) || !isLoadOrStore(Dst)) { // can only analyze simple loads and stores, i.e., no calls, invokes, etc. DEBUG(dbgs() << "can only handle simple loads and stores\n"); return make_unique<Dependence>(Src, Dst); } Value *SrcPtr = getPointerOperand(Src); Value *DstPtr = getPointerOperand(Dst); switch (underlyingObjectsAlias(AA, F->getParent()->getDataLayout(), DstPtr, SrcPtr)) { case MayAlias: case PartialAlias: // cannot analyse objects if we don't understand their aliasing. DEBUG(dbgs() << "can't analyze may or partial alias\n"); return make_unique<Dependence>(Src, Dst); case NoAlias: // If the objects noalias, they are distinct, accesses are independent. DEBUG(dbgs() << "no alias\n"); return nullptr; case MustAlias: break; // The underlying objects alias; test accesses for dependence. } // establish loop nesting levels establishNestingLevels(Src, Dst); DEBUG(dbgs() << " common nesting levels = " << CommonLevels << "\n"); DEBUG(dbgs() << " maximum nesting levels = " << MaxLevels << "\n"); FullDependence Result(Src, Dst, PossiblyLoopIndependent, CommonLevels); assert(Result.Levels == CommonLevels); // HLSL Change - TVS ++TotalArrayPairs; // See if there are GEPs we can use. bool UsefulGEP = false; GEPOperator *SrcGEP = dyn_cast<GEPOperator>(SrcPtr); GEPOperator *DstGEP = dyn_cast<GEPOperator>(DstPtr); if (SrcGEP && DstGEP && SrcGEP->getPointerOperandType() == DstGEP->getPointerOperandType()) { const SCEV *SrcPtrSCEV = SE->getSCEV(SrcGEP->getPointerOperand()); const SCEV *DstPtrSCEV = SE->getSCEV(DstGEP->getPointerOperand()); DEBUG(dbgs() << " SrcPtrSCEV = " << *SrcPtrSCEV << "\n"); DEBUG(dbgs() << " DstPtrSCEV = " << *DstPtrSCEV << "\n"); UsefulGEP = isLoopInvariant(SrcPtrSCEV, LI->getLoopFor(Src->getParent())) && isLoopInvariant(DstPtrSCEV, LI->getLoopFor(Dst->getParent())) && (SrcGEP->getNumOperands() == DstGEP->getNumOperands()); } unsigned Pairs = UsefulGEP ? 
SrcGEP->idx_end() - SrcGEP->idx_begin() : 1; SmallVector<Subscript, 4> Pair(Pairs); if (UsefulGEP) { DEBUG(dbgs() << " using GEPs\n"); unsigned P = 0; for (GEPOperator::const_op_iterator SrcIdx = SrcGEP->idx_begin(), SrcEnd = SrcGEP->idx_end(), DstIdx = DstGEP->idx_begin(); SrcIdx != SrcEnd; ++SrcIdx, ++DstIdx, ++P) { Pair[P].Src = SE->getSCEV(*SrcIdx); Pair[P].Dst = SE->getSCEV(*DstIdx); unifySubscriptType(&Pair[P]); } } else { DEBUG(dbgs() << " ignoring GEPs\n"); const SCEV *SrcSCEV = SE->getSCEV(SrcPtr); const SCEV *DstSCEV = SE->getSCEV(DstPtr); DEBUG(dbgs() << " SrcSCEV = " << *SrcSCEV << "\n"); DEBUG(dbgs() << " DstSCEV = " << *DstSCEV << "\n"); Pair[0].Src = SrcSCEV; Pair[0].Dst = DstSCEV; } if (Delinearize && Pairs == 1 && CommonLevels > 1 && tryDelinearize(Pair[0].Src, Pair[0].Dst, Pair, SE->getElementSize(Src))) { DEBUG(dbgs() << " delinerized GEP\n"); Pairs = Pair.size(); } for (unsigned P = 0; P < Pairs; ++P) { Pair[P].Loops.resize(MaxLevels + 1); Pair[P].GroupLoops.resize(MaxLevels + 1); Pair[P].Group.resize(Pairs); removeMatchingExtensions(&Pair[P]); Pair[P].Classification = classifyPair(Pair[P].Src, LI->getLoopFor(Src->getParent()), Pair[P].Dst, LI->getLoopFor(Dst->getParent()), Pair[P].Loops); Pair[P].GroupLoops = Pair[P].Loops; Pair[P].Group.set(P); DEBUG(dbgs() << " subscript " << P << "\n"); DEBUG(dbgs() << "\tsrc = " << *Pair[P].Src << "\n"); DEBUG(dbgs() << "\tdst = " << *Pair[P].Dst << "\n"); DEBUG(dbgs() << "\tclass = " << Pair[P].Classification << "\n"); DEBUG(dbgs() << "\tloops = "); DEBUG(dumpSmallBitVector(Pair[P].Loops)); } SmallBitVector Separable(Pairs); SmallBitVector Coupled(Pairs); // Partition subscripts into separable and minimally-coupled groups // Algorithm in paper is algorithmically better; // this may be faster in practice. Check someday. // // Here's an example of how it works. Consider this code: // // for (i = ...) { // for (j = ...) { // for (k = ...) { // for (l = ...) { // for (m = ...) { // A[i][j][k][m] = ...; // ... = A[0][j][l][i + j]; // } // } // } // } // } // // There are 4 subscripts here: // 0 [i] and [0] // 1 [j] and [j] // 2 [k] and [l] // 3 [m] and [i + j] // // We've already classified each subscript pair as ZIV, SIV, etc., // and collected all the loops mentioned by pair P in Pair[P].Loops. // In addition, we've initialized Pair[P].GroupLoops to Pair[P].Loops // and set Pair[P].Group = {P}. // // Src Dst Classification Loops GroupLoops Group // 0 [i] [0] SIV {1} {1} {0} // 1 [j] [j] SIV {2} {2} {1} // 2 [k] [l] RDIV {3,4} {3,4} {2} // 3 [m] [i + j] MIV {1,2,5} {1,2,5} {3} // // For each subscript SI 0 .. 3, we consider each remaining subscript, SJ. // So, 0 is compared against 1, 2, and 3; 1 is compared against 2 and 3, etc. // // We begin by comparing 0 and 1. The intersection of the GroupLoops is empty. // Next, 0 and 2. Again, the intersection of their GroupLoops is empty. // Next 0 and 3. The intersection of their GroupLoop = {1}, not empty, // so Pair[3].Group = {0,3} and Done = false (that is, 0 will not be added // to either Separable or Coupled). // // Next, we consider 1 and 2. The intersection of the GroupLoops is empty. // Next, 1 and 3. The intersectionof their GroupLoops = {2}, not empty, // so Pair[3].Group = {0, 1, 3} and Done = false. // // Next, we compare 2 against 3. The intersection of the GroupLoops is empty. // Since Done remains true, we add 2 to the set of Separable pairs. // // Finally, we consider 3. There's nothing to compare it with, // so Done remains true and we add it to the Coupled set. 
// Pair[3].Group = {0, 1, 3} and GroupLoops = {1, 2, 5}. // // In the end, we've got 1 separable subscript and 1 coupled group. for (unsigned SI = 0; SI < Pairs; ++SI) { if (Pair[SI].Classification == Subscript::NonLinear) { // ignore these, but collect loops for later ++NonlinearSubscriptPairs; collectCommonLoops(Pair[SI].Src, LI->getLoopFor(Src->getParent()), Pair[SI].Loops); collectCommonLoops(Pair[SI].Dst, LI->getLoopFor(Dst->getParent()), Pair[SI].Loops); Result.Consistent = false; } else if (Pair[SI].Classification == Subscript::ZIV) { // always separable Separable.set(SI); } else { // SIV, RDIV, or MIV, so check for coupled group bool Done = true; for (unsigned SJ = SI + 1; SJ < Pairs; ++SJ) { SmallBitVector Intersection = Pair[SI].GroupLoops; Intersection &= Pair[SJ].GroupLoops; if (Intersection.any()) { // accumulate set of all the loops in group Pair[SJ].GroupLoops |= Pair[SI].GroupLoops; // accumulate set of all subscripts in group Pair[SJ].Group |= Pair[SI].Group; Done = false; } } if (Done) { if (Pair[SI].Group.count() == 1) { Separable.set(SI); ++SeparableSubscriptPairs; } else { Coupled.set(SI); ++CoupledSubscriptPairs; } } } } DEBUG(dbgs() << " Separable = "); DEBUG(dumpSmallBitVector(Separable)); DEBUG(dbgs() << " Coupled = "); DEBUG(dumpSmallBitVector(Coupled)); Constraint NewConstraint; NewConstraint.setAny(SE); // test separable subscripts for (int SI = Separable.find_first(); SI >= 0; SI = Separable.find_next(SI)) { DEBUG(dbgs() << "testing subscript " << SI); switch (Pair[SI].Classification) { case Subscript::ZIV: DEBUG(dbgs() << ", ZIV\n"); if (testZIV(Pair[SI].Src, Pair[SI].Dst, Result)) return nullptr; break; case Subscript::SIV: { DEBUG(dbgs() << ", SIV\n"); unsigned Level; const SCEV *SplitIter = nullptr; if (testSIV(Pair[SI].Src, Pair[SI].Dst, Level, Result, NewConstraint, SplitIter)) return nullptr; break; } case Subscript::RDIV: DEBUG(dbgs() << ", RDIV\n"); if (testRDIV(Pair[SI].Src, Pair[SI].Dst, Result)) return nullptr; break; case Subscript::MIV: DEBUG(dbgs() << ", MIV\n"); if (testMIV(Pair[SI].Src, Pair[SI].Dst, Pair[SI].Loops, Result)) return nullptr; break; default: llvm_unreachable("subscript has unexpected classification"); } } if (Coupled.count()) { // test coupled subscript groups DEBUG(dbgs() << "starting on coupled subscripts\n"); DEBUG(dbgs() << "MaxLevels + 1 = " << MaxLevels + 1 << "\n"); SmallVector<Constraint, 4> Constraints(MaxLevels + 1); for (unsigned II = 0; II <= MaxLevels; ++II) Constraints[II].setAny(SE); for (int SI = Coupled.find_first(); SI >= 0; SI = Coupled.find_next(SI)) { DEBUG(dbgs() << "testing subscript group " << SI << " { "); SmallBitVector Group(Pair[SI].Group); SmallBitVector Sivs(Pairs); SmallBitVector Mivs(Pairs); SmallBitVector ConstrainedLevels(MaxLevels + 1); SmallVector<Subscript *, 4> PairsInGroup; for (int SJ = Group.find_first(); SJ >= 0; SJ = Group.find_next(SJ)) { DEBUG(dbgs() << SJ << " "); if (Pair[SJ].Classification == Subscript::SIV) Sivs.set(SJ); else Mivs.set(SJ); PairsInGroup.push_back(&Pair[SJ]); } unifySubscriptType(PairsInGroup); DEBUG(dbgs() << "}\n"); while (Sivs.any()) { bool Changed = false; for (int SJ = Sivs.find_first(); SJ >= 0; SJ = Sivs.find_next(SJ)) { DEBUG(dbgs() << "testing subscript " << SJ << ", SIV\n"); // SJ is an SIV subscript that's part of the current coupled group unsigned Level; const SCEV *SplitIter = nullptr; DEBUG(dbgs() << "SIV\n"); if (testSIV(Pair[SJ].Src, Pair[SJ].Dst, Level, Result, NewConstraint, SplitIter)) return nullptr; ConstrainedLevels.set(Level); if 
(intersectConstraints(&Constraints[Level], &NewConstraint)) { if (Constraints[Level].isEmpty()) { ++DeltaIndependence; return nullptr; } Changed = true; } Sivs.reset(SJ); } if (Changed) { // propagate, possibly creating new SIVs and ZIVs DEBUG(dbgs() << " propagating\n"); DEBUG(dbgs() << "\tMivs = "); DEBUG(dumpSmallBitVector(Mivs)); for (int SJ = Mivs.find_first(); SJ >= 0; SJ = Mivs.find_next(SJ)) { // SJ is an MIV subscript that's part of the current coupled group DEBUG(dbgs() << "\tSJ = " << SJ << "\n"); if (propagate(Pair[SJ].Src, Pair[SJ].Dst, Pair[SJ].Loops, Constraints, Result.Consistent)) { DEBUG(dbgs() << "\t Changed\n"); ++DeltaPropagations; Pair[SJ].Classification = classifyPair(Pair[SJ].Src, LI->getLoopFor(Src->getParent()), Pair[SJ].Dst, LI->getLoopFor(Dst->getParent()), Pair[SJ].Loops); switch (Pair[SJ].Classification) { case Subscript::ZIV: DEBUG(dbgs() << "ZIV\n"); if (testZIV(Pair[SJ].Src, Pair[SJ].Dst, Result)) return nullptr; Mivs.reset(SJ); break; case Subscript::SIV: Sivs.set(SJ); Mivs.reset(SJ); break; case Subscript::RDIV: case Subscript::MIV: break; default: llvm_unreachable("bad subscript classification"); } } } } } // test & propagate remaining RDIVs for (int SJ = Mivs.find_first(); SJ >= 0; SJ = Mivs.find_next(SJ)) { if (Pair[SJ].Classification == Subscript::RDIV) { DEBUG(dbgs() << "RDIV test\n"); if (testRDIV(Pair[SJ].Src, Pair[SJ].Dst, Result)) return nullptr; // I don't yet understand how to propagate RDIV results Mivs.reset(SJ); } } // test remaining MIVs // This code is temporary. // Better to somehow test all remaining subscripts simultaneously. for (int SJ = Mivs.find_first(); SJ >= 0; SJ = Mivs.find_next(SJ)) { if (Pair[SJ].Classification == Subscript::MIV) { DEBUG(dbgs() << "MIV test\n"); if (testMIV(Pair[SJ].Src, Pair[SJ].Dst, Pair[SJ].Loops, Result)) return nullptr; } else llvm_unreachable("expected only MIV subscripts at this point"); } // update Result.DV from constraint vector DEBUG(dbgs() << " updating\n"); for (int SJ = ConstrainedLevels.find_first(); SJ >= 0; SJ = ConstrainedLevels.find_next(SJ)) { if (SJ > (int)CommonLevels) break; updateDirection(Result.DV[SJ - 1], Constraints[SJ]); if (Result.DV[SJ - 1].Direction == Dependence::DVEntry::NONE) return nullptr; } } } // Make sure the Scalar flags are set correctly. SmallBitVector CompleteLoops(MaxLevels + 1); for (unsigned SI = 0; SI < Pairs; ++SI) CompleteLoops |= Pair[SI].Loops; for (unsigned II = 1; II <= CommonLevels; ++II) if (CompleteLoops[II]) Result.DV[II - 1].Scalar = false; if (PossiblyLoopIndependent) { // Make sure the LoopIndependent flag is set correctly. // All directions must include equal, otherwise no // loop-independent dependence is possible. for (unsigned II = 1; II <= CommonLevels; ++II) { if (!(Result.getDirection(II) & Dependence::DVEntry::EQ)) { Result.LoopIndependent = false; break; } } } else { // On the other hand, if all directions are equal and there's no // loop-independent dependence possible, then no dependence exists. bool AllEqual = true; for (unsigned II = 1; II <= CommonLevels; ++II) { if (Result.getDirection(II) != Dependence::DVEntry::EQ) { AllEqual = false; break; } } if (AllEqual) return nullptr; } auto Final = make_unique<FullDependence>(Result); Result.DV = nullptr; return Final; } //===----------------------------------------------------------------------===// // getSplitIteration - // Rather than spend rarely-used space recording the splitting iteration // during the Weak-Crossing SIV test, we re-compute it on demand. 
// The re-computation is basically a repeat of the entire dependence test, // though simplified since we know that the dependence exists. // It's tedious, since we must go through all propagations, etc. // // Care is required to keep this code up to date with respect to the routine // above, depends(). // // Generally, the dependence analyzer will be used to build // a dependence graph for a function (basically a map from instructions // to dependences). Looking for cycles in the graph shows us loops // that cannot be trivially vectorized/parallelized. // // We can try to improve the situation by examining all the dependences // that make up the cycle, looking for ones we can break. // Sometimes, peeling the first or last iteration of a loop will break // dependences, and we've got flags for those possibilities. // Sometimes, splitting a loop at some other iteration will do the trick, // and we've got a flag for that case. Rather than waste the space to // record the exact iteration (since we rarely know), we provide // a method that calculates the iteration. It's a drag that it must work // from scratch, but wonderful in that it's possible. // // Here's an example: // // for (i = 0; i < 10; i++) // A[i] = ... // ... = A[11 - i] // // There's a loop-carried flow dependence from the store to the load, // found by the weak-crossing SIV test. The dependence will have a flag, // indicating that the dependence can be broken by splitting the loop. // Calling getSplitIteration will return 5. // Splitting the loop breaks the dependence, like so: // // for (i = 0; i <= 5; i++) // A[i] = ... // ... = A[11 - i] // for (i = 6; i < 10; i++) // A[i] = ... // ... = A[11 - i] // // breaks the dependence and allows us to vectorize/parallelize // both loops. const SCEV *DependenceAnalysis::getSplitIteration(const Dependence &Dep, unsigned SplitLevel) { assert(Dep.isSplitable(SplitLevel) && "Dep should be splitable at SplitLevel"); Instruction *Src = Dep.getSrc(); Instruction *Dst = Dep.getDst(); assert(Src->mayReadFromMemory() || Src->mayWriteToMemory()); assert(Dst->mayReadFromMemory() || Dst->mayWriteToMemory()); assert(isLoadOrStore(Src)); assert(isLoadOrStore(Dst)); Value *SrcPtr = getPointerOperand(Src); Value *DstPtr = getPointerOperand(Dst); assert(underlyingObjectsAlias(AA, F->getParent()->getDataLayout(), DstPtr, SrcPtr) == MustAlias); // establish loop nesting levels establishNestingLevels(Src, Dst); FullDependence Result(Src, Dst, false, CommonLevels); // See if there are GEPs we can use. bool UsefulGEP = false; GEPOperator *SrcGEP = dyn_cast<GEPOperator>(SrcPtr); GEPOperator *DstGEP = dyn_cast<GEPOperator>(DstPtr); if (SrcGEP && DstGEP && SrcGEP->getPointerOperandType() == DstGEP->getPointerOperandType()) { const SCEV *SrcPtrSCEV = SE->getSCEV(SrcGEP->getPointerOperand()); const SCEV *DstPtrSCEV = SE->getSCEV(DstGEP->getPointerOperand()); UsefulGEP = isLoopInvariant(SrcPtrSCEV, LI->getLoopFor(Src->getParent())) && isLoopInvariant(DstPtrSCEV, LI->getLoopFor(Dst->getParent())) && (SrcGEP->getNumOperands() == DstGEP->getNumOperands()); } unsigned Pairs = UsefulGEP ? 
SrcGEP->idx_end() - SrcGEP->idx_begin() : 1; SmallVector<Subscript, 4> Pair(Pairs); if (UsefulGEP) { unsigned P = 0; for (GEPOperator::const_op_iterator SrcIdx = SrcGEP->idx_begin(), SrcEnd = SrcGEP->idx_end(), DstIdx = DstGEP->idx_begin(); SrcIdx != SrcEnd; ++SrcIdx, ++DstIdx, ++P) { Pair[P].Src = SE->getSCEV(*SrcIdx); Pair[P].Dst = SE->getSCEV(*DstIdx); } } else { const SCEV *SrcSCEV = SE->getSCEV(SrcPtr); const SCEV *DstSCEV = SE->getSCEV(DstPtr); Pair[0].Src = SrcSCEV; Pair[0].Dst = DstSCEV; } if (Delinearize && Pairs == 1 && CommonLevels > 1 && tryDelinearize(Pair[0].Src, Pair[0].Dst, Pair, SE->getElementSize(Src))) { DEBUG(dbgs() << " delinerized GEP\n"); Pairs = Pair.size(); } for (unsigned P = 0; P < Pairs; ++P) { Pair[P].Loops.resize(MaxLevels + 1); Pair[P].GroupLoops.resize(MaxLevels + 1); Pair[P].Group.resize(Pairs); removeMatchingExtensions(&Pair[P]); Pair[P].Classification = classifyPair(Pair[P].Src, LI->getLoopFor(Src->getParent()), Pair[P].Dst, LI->getLoopFor(Dst->getParent()), Pair[P].Loops); Pair[P].GroupLoops = Pair[P].Loops; Pair[P].Group.set(P); } SmallBitVector Separable(Pairs); SmallBitVector Coupled(Pairs); // partition subscripts into separable and minimally-coupled groups for (unsigned SI = 0; SI < Pairs; ++SI) { if (Pair[SI].Classification == Subscript::NonLinear) { // ignore these, but collect loops for later collectCommonLoops(Pair[SI].Src, LI->getLoopFor(Src->getParent()), Pair[SI].Loops); collectCommonLoops(Pair[SI].Dst, LI->getLoopFor(Dst->getParent()), Pair[SI].Loops); Result.Consistent = false; } else if (Pair[SI].Classification == Subscript::ZIV) Separable.set(SI); else { // SIV, RDIV, or MIV, so check for coupled group bool Done = true; for (unsigned SJ = SI + 1; SJ < Pairs; ++SJ) { SmallBitVector Intersection = Pair[SI].GroupLoops; Intersection &= Pair[SJ].GroupLoops; if (Intersection.any()) { // accumulate set of all the loops in group Pair[SJ].GroupLoops |= Pair[SI].GroupLoops; // accumulate set of all subscripts in group Pair[SJ].Group |= Pair[SI].Group; Done = false; } } if (Done) { if (Pair[SI].Group.count() == 1) Separable.set(SI); else Coupled.set(SI); } } } Constraint NewConstraint; NewConstraint.setAny(SE); // test separable subscripts for (int SI = Separable.find_first(); SI >= 0; SI = Separable.find_next(SI)) { switch (Pair[SI].Classification) { case Subscript::SIV: { unsigned Level; const SCEV *SplitIter = nullptr; (void) testSIV(Pair[SI].Src, Pair[SI].Dst, Level, Result, NewConstraint, SplitIter); if (Level == SplitLevel) { assert(SplitIter != nullptr); return SplitIter; } break; } case Subscript::ZIV: case Subscript::RDIV: case Subscript::MIV: break; default: llvm_unreachable("subscript has unexpected classification"); } } if (Coupled.count()) { // test coupled subscript groups SmallVector<Constraint, 4> Constraints(MaxLevels + 1); for (unsigned II = 0; II <= MaxLevels; ++II) Constraints[II].setAny(SE); for (int SI = Coupled.find_first(); SI >= 0; SI = Coupled.find_next(SI)) { SmallBitVector Group(Pair[SI].Group); SmallBitVector Sivs(Pairs); SmallBitVector Mivs(Pairs); SmallBitVector ConstrainedLevels(MaxLevels + 1); for (int SJ = Group.find_first(); SJ >= 0; SJ = Group.find_next(SJ)) { if (Pair[SJ].Classification == Subscript::SIV) Sivs.set(SJ); else Mivs.set(SJ); } while (Sivs.any()) { bool Changed = false; for (int SJ = Sivs.find_first(); SJ >= 0; SJ = Sivs.find_next(SJ)) { // SJ is an SIV subscript that's part of the current coupled group unsigned Level; const SCEV *SplitIter = nullptr; (void) testSIV(Pair[SJ].Src, Pair[SJ].Dst, Level, 
Result, NewConstraint, SplitIter); if (Level == SplitLevel && SplitIter) return SplitIter; ConstrainedLevels.set(Level); if (intersectConstraints(&Constraints[Level], &NewConstraint)) Changed = true; Sivs.reset(SJ); } if (Changed) { // propagate, possibly creating new SIVs and ZIVs for (int SJ = Mivs.find_first(); SJ >= 0; SJ = Mivs.find_next(SJ)) { // SJ is an MIV subscript that's part of the current coupled group if (propagate(Pair[SJ].Src, Pair[SJ].Dst, Pair[SJ].Loops, Constraints, Result.Consistent)) { Pair[SJ].Classification = classifyPair(Pair[SJ].Src, LI->getLoopFor(Src->getParent()), Pair[SJ].Dst, LI->getLoopFor(Dst->getParent()), Pair[SJ].Loops); switch (Pair[SJ].Classification) { case Subscript::ZIV: Mivs.reset(SJ); break; case Subscript::SIV: Sivs.set(SJ); Mivs.reset(SJ); break; case Subscript::RDIV: case Subscript::MIV: break; default: llvm_unreachable("bad subscript classification"); } } } } } } } llvm_unreachable("somehow reached end of routine"); return nullptr; }
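// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the pass itself): how a client might drive
// depends() above and read the resulting direction vector.  It assumes the
// caller already holds a DependenceAnalysis reference (e.g. via
// getAnalysis<DependenceAnalysis>() in a legacy FunctionPass); getLevels(),
// getDirection(), isConfused() and isSplitable() are the public Dependence
// accessors declared in DependenceAnalysis.h.
static void inspectDependencePair(DependenceAnalysis &DA,
                                  Instruction *Src, Instruction *Dst) {
  // Ask for a loop-independent dependence as well as carried ones.
  std::unique_ptr<Dependence> D =
      DA.depends(Src, Dst, /*PossiblyLoopIndependent=*/true);
  if (!D)
    return; // proven independent
  if (D->isConfused())
    return; // a dependence may exist, but no direction/distance info is known
  for (unsigned Level = 1; Level <= D->getLevels(); ++Level) {
    unsigned Dir = D->getDirection(Level);
    // Dir is a bit mask over DVEntry::LT, DVEntry::EQ and DVEntry::GT.
    if (Dir == Dependence::DVEntry::EQ) {
      // Carried only by equal iterations at this level.
    }
    if (D->isSplitable(Level)) {
      // Weak-crossing SIV case: recompute the iteration at which to split,
      // exactly as getSplitIteration() above does.
      (void)DA.getSplitIteration(*D, Level);
    }
  }
}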
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/InstCount.cpp
//===-- InstCount.cpp - Collects the count of all instructions ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass collects the count of all instructions and reports them
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Passes.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "instcount"

STATISTIC(TotalInsts , "Number of instructions (of all types)");
STATISTIC(TotalBlocks, "Number of basic blocks");
STATISTIC(TotalFuncs , "Number of non-external functions");
STATISTIC(TotalMemInst, "Number of memory instructions");

#define HANDLE_INST(N, OPCODE, CLASS) \
  STATISTIC(Num ## OPCODE ## Inst, "Number of " #OPCODE " insts");

#include "llvm/IR/Instruction.def"

namespace {
  class InstCount : public FunctionPass, public InstVisitor<InstCount> {
    friend class InstVisitor<InstCount>;

    void visitFunction  (Function &F) { ++TotalFuncs; }
    void visitBasicBlock(BasicBlock &BB) { ++TotalBlocks; }

#define HANDLE_INST(N, OPCODE, CLASS) \
    void visit##OPCODE(CLASS &) { ++Num##OPCODE##Inst; ++TotalInsts; }

#include "llvm/IR/Instruction.def"

    void visitInstruction(Instruction &I) {
      errs() << "Instruction Count does not know about " << I;
      llvm_unreachable(nullptr);
    }
  public:
    static char ID; // Pass identification, replacement for typeid
    InstCount() : FunctionPass(ID) {
      initializeInstCountPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override;

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesAll();
    }
    void print(raw_ostream &O, const Module *M) const override {}
  };
}

char InstCount::ID = 0;
INITIALIZE_PASS(InstCount, "instcount",
                "Counts the various types of Instructions", false, true)

FunctionPass *llvm::createInstCountPass() { return new InstCount(); }

// InstCount::run - This is the main Analysis entry point for a
// function.
//
bool InstCount::runOnFunction(Function &F) {
  unsigned StartMemInsts =
    NumGetElementPtrInst + NumLoadInst + NumStoreInst + NumCallInst +
    NumInvokeInst + NumAllocaInst;
  visit(F);
  unsigned EndMemInsts =
    NumGetElementPtrInst + NumLoadInst + NumStoreInst + NumCallInst +
    NumInvokeInst + NumAllocaInst;
  TotalMemInst += EndMemInsts-StartMemInsts;
  return false;
}
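// ---------------------------------------------------------------------------
// Illustrative sketch of the InstVisitor pattern the pass above relies on:
// the HANDLE_INST expansion from Instruction.def generates one visit##OPCODE
// overload per opcode.  Written out by hand for two opcodes, the mechanism
// looks like this (hypothetical helper, not part of the pass):
namespace {
struct LoadStoreCounter : public InstVisitor<LoadStoreCounter> {
  unsigned Loads = 0, Stores = 0;
  // InstVisitor dispatches on the opcode and calls the most specific overload
  // that exists; anything without an overload falls back to visitInstruction.
  void visitLoadInst(LoadInst &LI) { ++Loads; }
  void visitStoreInst(StoreInst &SI) { ++Stores; }
};
} // namespace
// Usage: LoadStoreCounter C; C.visit(F); then read C.Loads / C.Stores.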
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/PtrUseVisitor.cpp
//===- PtrUseVisitor.cpp - InstVisitors over a pointers uses --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// Implementation of the pointer use visitors.
///
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/PtrUseVisitor.h"

using namespace llvm;

void detail::PtrUseVisitorBase::enqueueUsers(Instruction &I) {
  for (Use &U : I.uses()) {
    if (VisitedUses.insert(&U).second) {
      UseToVisit NewU = {
        UseToVisit::UseAndIsOffsetKnownPair(&U, IsOffsetKnown),
        Offset
      };
      Worklist.push_back(std::move(NewU));
    }
  }
}

bool detail::PtrUseVisitorBase::adjustOffsetForGEP(GetElementPtrInst &GEPI) {
  if (!IsOffsetKnown)
    return false;

  return GEPI.accumulateConstantOffset(DL, Offset);
}
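// ---------------------------------------------------------------------------
// Illustrative sketch of the traversal enqueueUsers() implements: a worklist
// over the uses of a pointer, visiting each Use at most once.  This is a
// hypothetical standalone helper, shown only to make the worklist/visited-set
// idea concrete; the real visitor also tracks byte offsets per Use.
static void walkPointerUses(Instruction &Root) {
  SmallPtrSet<Use *, 8> Visited;
  SmallVector<Use *, 8> Worklist;
  for (Use &U : Root.uses())
    if (Visited.insert(&U).second)
      Worklist.push_back(&U);
  while (!Worklist.empty()) {
    Use *U = Worklist.pop_back_val();
    Instruction *UserI = cast<Instruction>(U->getUser());
    // A real visitor would classify UserI here (load, store, GEP, ...).  For
    // users that themselves produce a pointer (GEPs, bitcasts, ...), enqueue
    // their uses in turn -- which is what enqueueUsers() does via UseToVisit.
    if (UserI->getType()->isPointerTy())
      for (Use &UU : UserI->uses())
        if (Visited.insert(&UU).second)
          Worklist.push_back(&UU);
  }
}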
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/MemDerefPrinter.cpp
//===- MemDerefPrinter.cpp - Printer for isDereferenceablePointer ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Passes.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

namespace {
  struct MemDerefPrinter : public FunctionPass {
    SmallVector<Value *, 4> Vec;

    static char ID; // Pass identification, replacement for typeid
    MemDerefPrinter() : FunctionPass(ID) {
      initializeMemDerefPrinterPass(*PassRegistry::getPassRegistry());
    }
    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesAll();
    }
    bool runOnFunction(Function &F) override;
    void print(raw_ostream &OS, const Module * = nullptr) const override;
    void releaseMemory() override {
      Vec.clear();
    }
  };
}

char MemDerefPrinter::ID = 0;
INITIALIZE_PASS_BEGIN(MemDerefPrinter, "print-memderefs",
                      "Memory Dereferenceability of pointers in function",
                      false, true)
INITIALIZE_PASS_END(MemDerefPrinter, "print-memderefs",
                    "Memory Dereferenceability of pointers in function",
                    false, true)

FunctionPass *llvm::createMemDerefPrinter() {
  return new MemDerefPrinter();
}

bool MemDerefPrinter::runOnFunction(Function &F) {
  const DataLayout &DL = F.getParent()->getDataLayout();
  for (auto &I: inst_range(F)) {
    if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
      Value *PO = LI->getPointerOperand();
      if (isDereferenceablePointer(PO, DL))
        Vec.push_back(PO);
    }
  }
  return false;
}

void MemDerefPrinter::print(raw_ostream &OS, const Module *M) const {
  OS << "The following are dereferenceable:\n";
  for (auto &V: Vec) {
    V->print(OS);
    OS << "\n\n";
  }
}
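// ---------------------------------------------------------------------------
// Illustrative sketch: isDereferenceablePointer() can be queried for any
// Value, not only load pointer operands as in the pass above.  Hypothetical
// helper, assuming the same includes as the file it follows.
static void collectDereferenceableArgs(Function &F,
                                       SmallVectorImpl<Value *> &Out) {
  const DataLayout &DL = F.getParent()->getDataLayout();
  for (Argument &A : F.args())
    // True for, e.g., arguments marked dereferenceable(N) or byval, and for
    // pointers to objects whose known size covers the pointee type.
    if (A.getType()->isPointerTy() && isDereferenceablePointer(&A, DL))
      Out.push_back(&A);
}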
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/ScalarEvolution.cpp
//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains the implementation of the scalar evolution analysis // engine, which is used primarily to analyze expressions involving induction // variables in loops. // // There are several aspects to this library. First is the representation of // scalar expressions, which are represented as subclasses of the SCEV class. // These classes are used to represent certain types of subexpressions that we // can handle. We only create one SCEV of a particular shape, so // pointer-comparisons for equality are legal. // // One important aspect of the SCEV objects is that they are never cyclic, even // if there is a cycle in the dataflow for an expression (ie, a PHI node). If // the PHI node is one of the idioms that we can represent (e.g., a polynomial // recurrence) then we represent it directly as a recurrence node, otherwise we // represent it as a SCEVUnknown node. // // In addition to being able to represent expressions of various types, we also // have folders that are used to build the *canonical* representation for a // particular expression. These folders are capable of using a variety of // rewrite rules to simplify the expressions. // // Once the folders are defined, we can implement the more interesting // higher-level code, such as the code that recognizes PHI nodes of various // types, computes the execution count of a loop, etc. // // TODO: We should use these routines and value representations to implement // dependence analysis! // //===----------------------------------------------------------------------===// // // There are several good references for the techniques used in this analysis. // // Chains of recurrences -- a method to expedite the evaluation // of closed-form functions // Olaf Bachmann, Paul S. Wang, Eugene V. Zima // // On computational properties of chains of recurrences // Eugene V. Zima // // Symbolic Evaluation of Chains of Recurrences for Loop Optimization // Robert A. van Engelen // // Efficient Symbolic Analysis for Optimizing Compilers // Robert A. 
van Engelen // // Using the chains of recurrences algebra for data dependence testing and // induction variable substitution // MS Thesis, Johnie Birch // //===----------------------------------------------------------------------===// #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/ConstantFolding.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/ScalarEvolutionExpressions.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/ConstantRange.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/GetElementPtrTypeIterator.h" #include "llvm/IR/GlobalAlias.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/InstIterator.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Metadata.h" #include "llvm/IR/Operator.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Analysis/DxilValueCache.h" // HLSL Change #include <algorithm> using namespace llvm; #define DEBUG_TYPE "scalar-evolution" STATISTIC(NumArrayLenItCounts, "Number of trip counts computed with array length"); STATISTIC(NumTripCountsComputed, "Number of loops with predictable loop counts"); STATISTIC(NumTripCountsNotComputed, "Number of loops without predictable loop counts"); STATISTIC(NumBruteForceTripCountsComputed, "Number of loops with trip counts computed by force"); #if 0 // HLSL Change Starts - option pending static cl::opt<unsigned> MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden, cl::desc("Maximum number of iterations SCEV will " "symbolically execute a constant " "derived loop"), cl::init(100)); // FIXME: Enable this with XDEBUG when the test suite is clean. static cl::opt<bool> VerifySCEV("verify-scev", cl::desc("Verify ScalarEvolution's backedge taken counts (slow)")); #else static const unsigned MaxBruteForceIterations = 100; static const bool VerifySCEV = false; #endif // HLSL Change Ends INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution", "Scalar Evolution Analysis", false, true) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(DxilValueCache) // HLSL Change INITIALIZE_PASS_END(ScalarEvolution, "scalar-evolution", "Scalar Evolution Analysis", false, true) char ScalarEvolution::ID = 0; //===----------------------------------------------------------------------===// // SCEV class definitions //===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===// // Implementation of the SCEV class. 
// #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void SCEV::dump() const { print(dbgs()); dbgs() << '\n'; } #endif void SCEV::print(raw_ostream &OS) const { switch (static_cast<SCEVTypes>(getSCEVType())) { case scConstant: cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false); return; case scTruncate: { const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this); const SCEV *Op = Trunc->getOperand(); OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Trunc->getType() << ")"; return; } case scZeroExtend: { const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this); const SCEV *Op = ZExt->getOperand(); OS << "(zext " << *Op->getType() << " " << *Op << " to " << *ZExt->getType() << ")"; return; } case scSignExtend: { const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this); const SCEV *Op = SExt->getOperand(); OS << "(sext " << *Op->getType() << " " << *Op << " to " << *SExt->getType() << ")"; return; } case scAddRecExpr: { const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this); OS << "{" << *AR->getOperand(0); for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i) OS << ",+," << *AR->getOperand(i); OS << "}<"; if (AR->getNoWrapFlags(FlagNUW)) OS << "nuw><"; if (AR->getNoWrapFlags(FlagNSW)) OS << "nsw><"; if (AR->getNoWrapFlags(FlagNW) && !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW))) OS << "nw><"; AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false); OS << ">"; return; } case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr: { const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this); const char *OpStr = nullptr; switch (NAry->getSCEVType()) { case scAddExpr: OpStr = " + "; break; case scMulExpr: OpStr = " * "; break; case scUMaxExpr: OpStr = " umax "; break; case scSMaxExpr: OpStr = " smax "; break; } OS << "("; for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end(); I != E; ++I) { OS << **I; if (std::next(I) != E) OS << OpStr; } OS << ")"; switch (NAry->getSCEVType()) { case scAddExpr: case scMulExpr: if (NAry->getNoWrapFlags(FlagNUW)) OS << "<nuw>"; if (NAry->getNoWrapFlags(FlagNSW)) OS << "<nsw>"; } return; } case scUDivExpr: { const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this); OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")"; return; } case scUnknown: { const SCEVUnknown *U = cast<SCEVUnknown>(this); Type *AllocTy; if (U->isSizeOf(AllocTy)) { OS << "sizeof(" << *AllocTy << ")"; return; } if (U->isAlignOf(AllocTy)) { OS << "alignof(" << *AllocTy << ")"; return; } Type *CTy; Constant *FieldNo; if (U->isOffsetOf(CTy, FieldNo)) { OS << "offsetof(" << *CTy << ", "; FieldNo->printAsOperand(OS, false); OS << ")"; return; } // Otherwise just print it normally. 
U->getValue()->printAsOperand(OS, false); return; } case scCouldNotCompute: OS << "***COULDNOTCOMPUTE***"; return; } llvm_unreachable("Unknown SCEV kind!"); } Type *SCEV::getType() const { switch (static_cast<SCEVTypes>(getSCEVType())) { case scConstant: return cast<SCEVConstant>(this)->getType(); case scTruncate: case scZeroExtend: case scSignExtend: return cast<SCEVCastExpr>(this)->getType(); case scAddRecExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr: return cast<SCEVNAryExpr>(this)->getType(); case scAddExpr: return cast<SCEVAddExpr>(this)->getType(); case scUDivExpr: return cast<SCEVUDivExpr>(this)->getType(); case scUnknown: return cast<SCEVUnknown>(this)->getType(); case scCouldNotCompute: llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); } llvm_unreachable("Unknown SCEV kind!"); } bool SCEV::isZero() const { if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this)) return SC->getValue()->isZero(); return false; } bool SCEV::isOne() const { if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this)) return SC->getValue()->isOne(); return false; } bool SCEV::isAllOnesValue() const { if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this)) return SC->getValue()->isAllOnesValue(); return false; } /// isNonConstantNegative - Return true if the specified scev is negated, but /// not a constant. bool SCEV::isNonConstantNegative() const { const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this); if (!Mul) return false; // If there is a constant factor, it will be first. const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0)); if (!SC) return false; // Return true if the value is negative, this matches things like (-42 * V). return SC->getValue()->getValue().isNegative(); } SCEVCouldNotCompute::SCEVCouldNotCompute() : SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {} bool SCEVCouldNotCompute::classof(const SCEV *S) { return S->getSCEVType() == scCouldNotCompute; } const SCEV *ScalarEvolution::getConstant(ConstantInt *V) { FoldingSetNodeID ID; ID.AddInteger(scConstant); ID.AddPointer(V); void *IP = nullptr; if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V); UniqueSCEVs.InsertNode(S, IP); return S; } const SCEV *ScalarEvolution::getConstant(const APInt &Val) { return getConstant(ConstantInt::get(getContext(), Val)); } const SCEV * ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) { IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty)); return getConstant(ConstantInt::get(ITy, V, isSigned)); } SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, unsigned SCEVTy, const SCEV *op, Type *ty) : SCEV(ID, SCEVTy), Op(op), Ty(ty) {} SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op, Type *ty) : SCEVCastExpr(ID, scTruncate, op, ty) { assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) && (Ty->isIntegerTy() || Ty->isPointerTy()) && "Cannot truncate non-integer value!"); } SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID, const SCEV *op, Type *ty) : SCEVCastExpr(ID, scZeroExtend, op, ty) { assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) && (Ty->isIntegerTy() || Ty->isPointerTy()) && "Cannot zero extend non-integer value!"); } SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID, const SCEV *op, Type *ty) : SCEVCastExpr(ID, scSignExtend, op, ty) { assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) && (Ty->isIntegerTy() || 
Ty->isPointerTy()) && "Cannot sign extend non-integer value!"); } void SCEVUnknown::deleted() { // Clear this SCEVUnknown from various maps. SE->forgetMemoizedResults(this); // Remove this SCEVUnknown from the uniquing map. SE->UniqueSCEVs.RemoveNode(this); // Release the value. setValPtr(nullptr); } void SCEVUnknown::allUsesReplacedWith(Value *New) { // Clear this SCEVUnknown from various maps. SE->forgetMemoizedResults(this); // Remove this SCEVUnknown from the uniquing map. SE->UniqueSCEVs.RemoveNode(this); // Update this SCEVUnknown to point to the new value. This is needed // because there may still be outstanding SCEVs which still point to // this SCEVUnknown. setValPtr(New); } bool SCEVUnknown::isSizeOf(Type *&AllocTy) const { if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue())) if (VCE->getOpcode() == Instruction::PtrToInt) if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0))) if (CE->getOpcode() == Instruction::GetElementPtr && CE->getOperand(0)->isNullValue() && CE->getNumOperands() == 2) if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1))) if (CI->isOne()) { AllocTy = cast<PointerType>(CE->getOperand(0)->getType()) ->getElementType(); return true; } return false; } bool SCEVUnknown::isAlignOf(Type *&AllocTy) const { if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue())) if (VCE->getOpcode() == Instruction::PtrToInt) if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0))) if (CE->getOpcode() == Instruction::GetElementPtr && CE->getOperand(0)->isNullValue()) { Type *Ty = cast<PointerType>(CE->getOperand(0)->getType())->getElementType(); if (StructType *STy = dyn_cast<StructType>(Ty)) if (!STy->isPacked() && CE->getNumOperands() == 3 && CE->getOperand(1)->isNullValue()) { if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2))) if (CI->isOne() && STy->getNumElements() == 2 && STy->getElementType(0)->isIntegerTy(1)) { AllocTy = STy->getElementType(1); return true; } } } return false; } bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const { if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue())) if (VCE->getOpcode() == Instruction::PtrToInt) if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0))) if (CE->getOpcode() == Instruction::GetElementPtr && CE->getNumOperands() == 3 && CE->getOperand(0)->isNullValue() && CE->getOperand(1)->isNullValue()) { Type *Ty = cast<PointerType>(CE->getOperand(0)->getType())->getElementType(); // Ignore vector types here so that ScalarEvolutionExpander doesn't // emit getelementptrs that index into vectors. if (Ty->isStructTy() || Ty->isArrayTy()) { CTy = Ty; FieldNo = CE->getOperand(2); return true; } } return false; } //===----------------------------------------------------------------------===// // SCEV Utilities //===----------------------------------------------------------------------===// namespace { /// SCEVComplexityCompare - Return true if the complexity of the LHS is less /// than the complexity of the RHS. This comparator is used to canonicalize /// expressions. class SCEVComplexityCompare { const LoopInfo *const LI; public: explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {} // Return true or false if LHS is less than, or at least RHS, respectively. bool operator()(const SCEV *LHS, const SCEV *RHS) const { return compare(LHS, RHS) < 0; } // Return negative, zero, or positive, if LHS is less than, equal to, or // greater than RHS, respectively. A three-way result allows recursive // comparisons to be more efficient. 
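// Illustrative sketch, not part of ScalarEvolution: isAlignOf above
// recognizes "ptrtoint(gep {i1, T}* null, 0, 1)", i.e. the offset of T inside
// a two-element struct whose first field is a single bit/byte, which on
// common ABIs is exactly alignof(T). The standalone program below (invented
// names, guarded with #if 0 so this file is unaffected) checks that identity
// with ordinary C++ types.
#if 0
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Offset of Value inside {char, T} equals the alignment requirement of T.
template <typename T> struct AlignProbe { char Pad; T Value; };

int main() {
  std::printf("alignof(uint16_t) = %zu, probe offset = %zu\n",
              alignof(std::uint16_t), offsetof(AlignProbe<std::uint16_t>, Value));
  std::printf("alignof(uint32_t) = %zu, probe offset = %zu\n",
              alignof(std::uint32_t), offsetof(AlignProbe<std::uint32_t>, Value));
  return 0;
}
#endif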
int compare(const SCEV *LHS, const SCEV *RHS) const { // Fast-path: SCEVs are uniqued so we can do a quick equality check. if (LHS == RHS) return 0; // Primarily, sort the SCEVs by their getSCEVType(). unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType(); if (LType != RType) return (int)LType - (int)RType; // Aside from the getSCEVType() ordering, the particular ordering // isn't very important except that it's beneficial to be consistent, // so that (a + b) and (b + a) don't end up as different expressions. switch (static_cast<SCEVTypes>(LType)) { case scUnknown: { const SCEVUnknown *LU = cast<SCEVUnknown>(LHS); const SCEVUnknown *RU = cast<SCEVUnknown>(RHS); // Sort SCEVUnknown values with some loose heuristics. TODO: This is // not as complete as it could be. const Value *LV = LU->getValue(), *RV = RU->getValue(); // Order pointer values after integer values. This helps SCEVExpander // form GEPs. bool LIsPointer = LV->getType()->isPointerTy(), RIsPointer = RV->getType()->isPointerTy(); if (LIsPointer != RIsPointer) return (int)LIsPointer - (int)RIsPointer; // Compare getValueID values. unsigned LID = LV->getValueID(), RID = RV->getValueID(); if (LID != RID) return (int)LID - (int)RID; // Sort arguments by their position. if (const Argument *LA = dyn_cast<Argument>(LV)) { const Argument *RA = cast<Argument>(RV); unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo(); return (int)LArgNo - (int)RArgNo; } // For instructions, compare their loop depth, and their operand // count. This is pretty loose. if (const Instruction *LInst = dyn_cast<Instruction>(LV)) { const Instruction *RInst = cast<Instruction>(RV); // Compare loop depths. const BasicBlock *LParent = LInst->getParent(), *RParent = RInst->getParent(); if (LParent != RParent) { unsigned LDepth = LI->getLoopDepth(LParent), RDepth = LI->getLoopDepth(RParent); if (LDepth != RDepth) return (int)LDepth - (int)RDepth; } // Compare the number of operands. unsigned LNumOps = LInst->getNumOperands(), RNumOps = RInst->getNumOperands(); return (int)LNumOps - (int)RNumOps; } return 0; } case scConstant: { const SCEVConstant *LC = cast<SCEVConstant>(LHS); const SCEVConstant *RC = cast<SCEVConstant>(RHS); // Compare constant values. const APInt &LA = LC->getValue()->getValue(); const APInt &RA = RC->getValue()->getValue(); unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth(); if (LBitWidth != RBitWidth) return (int)LBitWidth - (int)RBitWidth; return LA.ult(RA) ? -1 : 1; } case scAddRecExpr: { const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS); const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS); // Compare addrec loop depths. const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop(); if (LLoop != RLoop) { unsigned LDepth = LLoop->getLoopDepth(), RDepth = RLoop->getLoopDepth(); if (LDepth != RDepth) return (int)LDepth - (int)RDepth; } // Addrec complexity grows with operand count. unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands(); if (LNumOps != RNumOps) return (int)LNumOps - (int)RNumOps; // Lexicographically compare. for (unsigned i = 0; i != LNumOps; ++i) { long X = compare(LA->getOperand(i), RA->getOperand(i)); if (X != 0) return X; } return 0; } case scAddExpr: case scMulExpr: case scSMaxExpr: case scUMaxExpr: { const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS); const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS); // Lexicographically compare n-ary expressions. 
unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands(); if (LNumOps != RNumOps) return (int)LNumOps - (int)RNumOps; for (unsigned i = 0; i != LNumOps; ++i) { if (i >= RNumOps) return 1; long X = compare(LC->getOperand(i), RC->getOperand(i)); if (X != 0) return X; } return (int)LNumOps - (int)RNumOps; } case scUDivExpr: { const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS); const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS); // Lexicographically compare udiv expressions. long X = compare(LC->getLHS(), RC->getLHS()); if (X != 0) return X; return compare(LC->getRHS(), RC->getRHS()); } case scTruncate: case scZeroExtend: case scSignExtend: { const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS); const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS); // Compare cast expressions by operand. return compare(LC->getOperand(), RC->getOperand()); } case scCouldNotCompute: llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); } llvm_unreachable("Unknown SCEV kind!"); } }; } /// GroupByComplexity - Given a list of SCEV objects, order them by their /// complexity, and group objects of the same complexity together by value. /// When this routine is finished, we know that any duplicates in the vector are /// consecutive and that complexity is monotonically increasing. /// /// Note that we go take special precautions to ensure that we get deterministic /// results from this routine. In other words, we don't want the results of /// this to depend on where the addresses of various SCEV objects happened to /// land in memory. /// static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops, LoopInfo *LI) { if (Ops.size() < 2) return; // Noop if (Ops.size() == 2) { // This is the common case, which also happens to be trivially simple. // Special case it. const SCEV *&LHS = Ops[0], *&RHS = Ops[1]; if (SCEVComplexityCompare(LI)(RHS, LHS)) std::swap(LHS, RHS); return; } // Do the rough sort by complexity. std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI)); // Now that we are sorted by complexity, group elements of the same // complexity. Note that this is, at worst, N^2, but the vector is likely to // be extremely short in practice. Note that we take this approach because we // do not want to depend on the addresses of the objects we are grouping. for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) { const SCEV *S = Ops[i]; unsigned Complexity = S->getSCEVType(); // If there are any objects of the same complexity and same value as this // one, group them. for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) { if (Ops[j] == S) { // Found a duplicate. // Move it to immediately after i'th element. std::swap(Ops[i+1], Ops[j]); ++i; // no need to rescan it. if (i == e-2) return; // Done! } } } } namespace { struct FindSCEVSize { int Size; FindSCEVSize() : Size(0) {} bool follow(const SCEV *S) { ++Size; // Keep looking at all operands of S. return true; } bool isDone() const { return false; } }; } // Returns the size of the SCEV S. static inline int sizeOfSCEV(const SCEV *S) { FindSCEVSize F; SCEVTraversal<FindSCEVSize> ST(F); ST.visitAll(S); return F.Size; } namespace { struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> { public: // Computes the Quotient and Remainder of the division of Numerator by // Denominator. 
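// Illustrative sketch of the sort-then-group step performed by
// GroupByComplexity above: the comparator only orders by a coarse complexity
// key (as SCEVComplexityCompare primarily orders by getSCEVType()), and a
// second pass parks identical operands next to each other so later folding
// sees duplicates as neighbours. Toy types and values are invented; the block
// is guarded with #if 0 and can be compiled on its own.
#if 0
#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

using Op = std::pair<int, char>; // (complexity class, identity)

// Same shape as the duplicate-grouping loop above: within a run of equal
// complexity, move operands identical to Ops[i] into the slots right after i.
static void groupDuplicates(std::vector<Op> &Ops) {
  for (unsigned i = 0, e = Ops.size(); i != e - 2; ++i) {
    const Op S = Ops[i];
    for (unsigned j = i + 1; j != e && Ops[j].first == S.first; ++j)
      if (Ops[j] == S) {
        std::swap(Ops[i + 1], Ops[j]);
        ++i;                    // no need to rescan the parked duplicate
        if (i == e - 2) return;
      }
  }
}

int main() {
  std::vector<Op> Ops = {{2, 'x'}, {1, 'c'}, {2, 'y'}, {2, 'x'}, {2, 'y'}};
  std::stable_sort(Ops.begin(), Ops.end(),
                   [](const Op &L, const Op &R) { return L.first < R.first; });
  groupDuplicates(Ops);
  for (const Op &O : Ops)
    std::printf("(%d,%c) ", O.first, O.second); // (1,c) (2,x) (2,x) (2,y) (2,y)
  std::printf("\n");
}
#endif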
static void divide(ScalarEvolution &SE, const SCEV *Numerator, const SCEV *Denominator, const SCEV **Quotient, const SCEV **Remainder) { assert(Numerator && Denominator && "Uninitialized SCEV"); SCEVDivision D(SE, Numerator, Denominator); // Check for the trivial case here to avoid having to check for it in the // rest of the code. if (Numerator == Denominator) { *Quotient = D.One; *Remainder = D.Zero; return; } if (Numerator->isZero()) { *Quotient = D.Zero; *Remainder = D.Zero; return; } // A simple case when N/1. The quotient is N. if (Denominator->isOne()) { *Quotient = Numerator; *Remainder = D.Zero; return; } // Split the Denominator when it is a product. if (const SCEVMulExpr *T = dyn_cast<const SCEVMulExpr>(Denominator)) { const SCEV *Q, *R; *Quotient = Numerator; for (const SCEV *Op : T->operands()) { divide(SE, *Quotient, Op, &Q, &R); *Quotient = Q; // Bail out when the Numerator is not divisible by one of the terms of // the Denominator. if (!R->isZero()) { *Quotient = D.Zero; *Remainder = Numerator; return; } } *Remainder = D.Zero; return; } D.visit(Numerator); *Quotient = D.Quotient; *Remainder = D.Remainder; } // Except in the trivial case described above, we do not know how to divide // Expr by Denominator for the following functions with empty implementation. void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {} void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {} void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {} void visitUDivExpr(const SCEVUDivExpr *Numerator) {} void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {} void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {} void visitUnknown(const SCEVUnknown *Numerator) {} void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {} void visitConstant(const SCEVConstant *Numerator) { if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) { APInt NumeratorVal = Numerator->getValue()->getValue(); APInt DenominatorVal = D->getValue()->getValue(); uint32_t NumeratorBW = NumeratorVal.getBitWidth(); uint32_t DenominatorBW = DenominatorVal.getBitWidth(); if (NumeratorBW > DenominatorBW) DenominatorVal = DenominatorVal.sext(NumeratorBW); else if (NumeratorBW < DenominatorBW) NumeratorVal = NumeratorVal.sext(DenominatorBW); APInt QuotientVal(NumeratorVal.getBitWidth(), 0); APInt RemainderVal(NumeratorVal.getBitWidth(), 0); APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal); Quotient = SE.getConstant(QuotientVal); Remainder = SE.getConstant(RemainderVal); return; } } void visitAddRecExpr(const SCEVAddRecExpr *Numerator) { const SCEV *StartQ, *StartR, *StepQ, *StepR; assert(Numerator->isAffine() && "Numerator should be affine"); divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR); divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR); // Bail out if the types do not match. Type *Ty = Denominator->getType(); if (Ty != StartQ->getType() || Ty != StartR->getType() || Ty != StepQ->getType() || Ty != StepR->getType()) { Quotient = Zero; Remainder = Numerator; return; } Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(), Numerator->getNoWrapFlags()); Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(), Numerator->getNoWrapFlags()); } void visitAddExpr(const SCEVAddExpr *Numerator) { SmallVector<const SCEV *, 2> Qs, Rs; Type *Ty = Denominator->getType(); for (const SCEV *Op : Numerator->operands()) { const SCEV *Q, *R; divide(SE, Op, Denominator, &Q, &R); // Bail out if types do not match. 
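// Illustrative sketch of the divide-and-recombine strategy SCEVDivision uses:
// visitAddExpr divides each add operand separately and re-adds the quotients
// and remainders, visitConstant does a plain signed divrem, and
// visitAddRecExpr divides start and step. The toy representation and names
// below are invented; the block is #if 0 guarded and compiles standalone.
#if 0
#include <cstdio>

// A linear expression X*x + C, standing in for an add of a scaled unknown
// and a constant.
struct Lin { long X; long C; };

// Divide each "operand" of the add by a constant denominator and re-add the
// per-operand quotients and remainders, as visitAddExpr does.
static void divideByConst(Lin N, long D, Lin &Q, Lin &R) {
  Q = { N.X / D, N.C / D };
  R = { N.X % D, N.C % D };
}

int main() {
  Lin Q, R;
  divideByConst({6, 4}, 3, Q, R);                       // (6*x + 4) / 3
  std::printf("quotient  = %ld*x + %ld\n", Q.X, Q.C);   // 2*x + 1
  std::printf("remainder = %ld*x + %ld\n", R.X, R.C);   // 0*x + 1
}
#endif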
if (Ty != Q->getType() || Ty != R->getType()) { Quotient = Zero; Remainder = Numerator; return; } Qs.push_back(Q); Rs.push_back(R); } if (Qs.size() == 1) { Quotient = Qs[0]; Remainder = Rs[0]; return; } Quotient = SE.getAddExpr(Qs); Remainder = SE.getAddExpr(Rs); } void visitMulExpr(const SCEVMulExpr *Numerator) { SmallVector<const SCEV *, 2> Qs; Type *Ty = Denominator->getType(); bool FoundDenominatorTerm = false; for (const SCEV *Op : Numerator->operands()) { // Bail out if types do not match. if (Ty != Op->getType()) { Quotient = Zero; Remainder = Numerator; return; } if (FoundDenominatorTerm) { Qs.push_back(Op); continue; } // Check whether Denominator divides one of the product operands. const SCEV *Q, *R; divide(SE, Op, Denominator, &Q, &R); if (!R->isZero()) { Qs.push_back(Op); continue; } // Bail out if types do not match. if (Ty != Q->getType()) { Quotient = Zero; Remainder = Numerator; return; } FoundDenominatorTerm = true; Qs.push_back(Q); } if (FoundDenominatorTerm) { Remainder = Zero; if (Qs.size() == 1) Quotient = Qs[0]; else Quotient = SE.getMulExpr(Qs); return; } if (!isa<SCEVUnknown>(Denominator)) { Quotient = Zero; Remainder = Numerator; return; } // The Remainder is obtained by replacing Denominator by 0 in Numerator. ValueToValueMap RewriteMap; RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] = cast<SCEVConstant>(Zero)->getValue(); Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true); if (Remainder->isZero()) { // The Quotient is obtained by replacing Denominator by 1 in Numerator. RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] = cast<SCEVConstant>(One)->getValue(); Quotient = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true); return; } // Quotient is (Numerator - Remainder) divided by Denominator. const SCEV *Q, *R; const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder); if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator)) { // This SCEV does not seem to simplify: fail the division here. Quotient = Zero; Remainder = Numerator; return; } divide(SE, Diff, Denominator, &Q, &R); assert(R == Zero && "(Numerator - Remainder) should evenly divide Denominator"); Quotient = Q; } private: SCEVDivision(ScalarEvolution &S, const SCEV *Numerator, const SCEV *Denominator) : SE(S), Denominator(Denominator) { Zero = SE.getConstant(Denominator->getType(), 0); One = SE.getConstant(Denominator->getType(), 1); // By default, we don't know how to divide Expr by Denominator. // Providing the default here simplifies the rest of the code. Quotient = Zero; Remainder = Numerator; } ScalarEvolution &SE; const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One; }; } //===----------------------------------------------------------------------===// // Simple SCEV method implementations //===----------------------------------------------------------------------===// /// BinomialCoefficient - Compute BC(It, K). The result has width W. /// Assume, K > 0. static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K, ScalarEvolution &SE, Type *ResultTy) { // Handle the simplest case efficiently. if (K == 1) return SE.getTruncateOrZeroExtend(It, ResultTy); // We are using the following formula for BC(It, K): // // BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K! // // Suppose, W is the bitwidth of the return value. We must be prepared for // overflow. Hence, we must assure that the result of our computation is // equal to the accurate one modulo 2^W. Unfortunately, division isn't // safe in modular arithmetic. 
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have to
  // do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this is
  // done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate.  Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits.  Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1).  However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
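// Standalone numeric sketch of the scheme described above, specialized to
// W = 32 and small K (helper names are invented; the block is #if 0 guarded
// and can be compiled on its own): strip the powers of two out of K!, form
// the product at more than W + T bits, shift right by T, then multiply by the
// inverse of the odd part of K! modulo 2^32.
#if 0
#include <cassert>
#include <cstdint>
#include <cstdio>

// Inverse of an odd value modulo 2^32 via Newton-Raphson; X*X == 1 (mod 8)
// for odd X, and each step doubles the number of correct low bits.
static std::uint32_t inverseMod2_32(std::uint32_t X) {
  std::uint32_t Inv = X;
  for (int i = 0; i < 5; ++i)
    Inv *= 2u - X * Inv;
  return Inv;
}

// BC(It, K) modulo 2^32, for K >= 2 (K == 1 is handled separately above).
static std::uint32_t binomialMod2_32(std::uint32_t It, unsigned K) {
  assert(K >= 2 && K <= 20 && "small K only; the real code bounds K too");
  std::uint32_t OddFactorial = 1;
  unsigned T = 1;                 // one factor of two comes from i == 2
  for (unsigned i = 3; i <= K; ++i) {
    std::uint32_t Mult = i;
    while ((Mult & 1) == 0) { Mult >>= 1; ++T; }
    OddFactorial *= Mult;
  }
  // It * (It - 1) * ... * (It - K + 1); 64 bits exceeds W + T for these K,
  // and the subtraction is done at the narrow width, as above.
  std::uint64_t Dividend = It;
  for (unsigned i = 1; i != K; ++i)
    Dividend *= (std::uint32_t)(It - i);
  std::uint64_t Shifted = Dividend >> T;      // exact division by 2^T
  return (std::uint32_t)Shifted * inverseMod2_32(OddFactorial);
}

int main() {
  assert(binomialMod2_32(10, 3) == 120);      // 10*9*8 / 3! = 120
  assert(binomialMod2_32(7, 2) == 21);        // 7*6   / 2!  = 21
  std::printf("binomial-coefficient sketch checks out\n");
}
#endif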
APInt Mod = APInt::getSignedMinValue(W+1); APInt MultiplyFactor = OddFactorial.zext(W+1); MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod); MultiplyFactor = MultiplyFactor.trunc(W); // Calculate the product, at width T+W IntegerType *CalculationTy = IntegerType::get(SE.getContext(), CalculationBits); const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy); for (unsigned i = 1; i != K; ++i) { const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i)); Dividend = SE.getMulExpr(Dividend, SE.getTruncateOrZeroExtend(S, CalculationTy)); } // Divide by 2^T const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor)); // Truncate the result, and divide by K! / 2^T. return SE.getMulExpr(SE.getConstant(MultiplyFactor), SE.getTruncateOrZeroExtend(DivResult, ResultTy)); } /// evaluateAtIteration - Return the value of this chain of recurrences at /// the specified iteration number. We can evaluate this recurrence by /// multiplying each element in the chain by the binomial coefficient /// corresponding to it. In other words, we can evaluate {A,+,B,+,C,+,D} as: /// /// A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3) /// /// where BC(It, k) stands for binomial coefficient. /// const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const { const SCEV *Result = getStart(); for (unsigned i = 1, e = getNumOperands(); i != e; ++i) { // The computation is correct in the face of overflow provided that the // multiplication is performed _after_ the evaluation of the binomial // coefficient. const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType()); if (isa<SCEVCouldNotCompute>(Coeff)) return Coeff; Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff)); } return Result; } //===----------------------------------------------------------------------===// // SCEV Expression folder implementations //===----------------------------------------------------------------------===// const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty) { assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) && "This is not a truncating conversion!"); assert(isSCEVable(Ty) && "This is not a conversion to a SCEVable type!"); Ty = getEffectiveSCEVType(Ty); FoldingSetNodeID ID; ID.AddInteger(scTruncate); ID.AddPointer(Op); ID.AddPointer(Ty); void *IP = nullptr; if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; // Fold if the operand is constant. if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) return getConstant( cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty))); // trunc(trunc(x)) --> trunc(x) if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) return getTruncateExpr(ST->getOperand(), Ty); // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) return getTruncateOrSignExtend(SS->getOperand(), Ty); // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) return getTruncateOrZeroExtend(SZ->getOperand(), Ty); // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can // eliminate all the truncates, or we replace other casts with truncates. 
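// Toy numeric check (invented constants, #if 0 guarded) of the binomial
// evaluation rule documented at evaluateAtIteration above: {A,+,B,+,C} at
// iteration It equals A*BC(It,0) + B*BC(It,1) + C*BC(It,2).
#if 0
#include <cassert>
#include <cstdint>
#include <cstdio>

// Direct simulation of {A,+,B,+,C}: each backedge adds the current step to
// the value and adds C to the step.
static std::int64_t simulate(std::int64_t A, std::int64_t B, std::int64_t C,
                             unsigned It) {
  std::int64_t Val = A, Step = B;
  for (unsigned i = 0; i < It; ++i) { Val += Step; Step += C; }
  return Val;
}

int main() {
  const std::int64_t A = 5, B = 3, C = 2;
  for (unsigned It = 0; It <= 10; ++It) {
    // A*BC(It,0) + B*BC(It,1) + C*BC(It,2), with BC(n,2) = n*(n-1)/2.
    std::int64_t N = It;
    std::int64_t Closed = A + B * N + C * N * (N - 1) / 2;
    assert(Closed == simulate(A, B, C, It));
  }
  std::printf("{5,+,3,+,2} matches its binomial closed form for It = 0..10\n");
}
#endif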
if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) { SmallVector<const SCEV *, 4> Operands; bool hasTrunc = false; for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) { const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty); if (!isa<SCEVCastExpr>(SA->getOperand(i))) hasTrunc = isa<SCEVTruncateExpr>(S); Operands.push_back(S); } if (!hasTrunc) return getAddExpr(Operands); UniqueSCEVs.FindNodeOrInsertPos(ID, IP); // Mutates IP, returns NULL. } // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can // eliminate all the truncates, or we replace other casts with truncates. if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) { SmallVector<const SCEV *, 4> Operands; bool hasTrunc = false; for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) { const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty); if (!isa<SCEVCastExpr>(SM->getOperand(i))) hasTrunc = isa<SCEVTruncateExpr>(S); Operands.push_back(S); } if (!hasTrunc) return getMulExpr(Operands); UniqueSCEVs.FindNodeOrInsertPos(ID, IP); // Mutates IP, returns NULL. } // If the input value is a chrec scev, truncate the chrec's operands. if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) { SmallVector<const SCEV *, 4> Operands; for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty)); return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap); } // The cast wasn't folded; create an explicit cast node. We can reuse // the existing insert position since if we get here, we won't have // made any changes which would invalidate it. SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty); UniqueSCEVs.InsertNode(S, IP); return S; } // Get the limit of a recurrence such that incrementing by Step cannot cause // signed overflow as long as the value of the recurrence within the // loop does not exceed this limit before incrementing. static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE) { unsigned BitWidth = SE->getTypeSizeInBits(Step->getType()); if (SE->isKnownPositive(Step)) { *Pred = ICmpInst::ICMP_SLT; return SE->getConstant(APInt::getSignedMinValue(BitWidth) - SE->getSignedRange(Step).getSignedMax()); } if (SE->isKnownNegative(Step)) { *Pred = ICmpInst::ICMP_SGT; return SE->getConstant(APInt::getSignedMaxValue(BitWidth) - SE->getSignedRange(Step).getSignedMin()); } return nullptr; } // Get the limit of a recurrence such that incrementing by Step cannot cause // unsigned overflow as long as the value of the recurrence within the loop does // not exceed this limit before incrementing. static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE) { unsigned BitWidth = SE->getTypeSizeInBits(Step->getType()); *Pred = ICmpInst::ICMP_ULT; return SE->getConstant(APInt::getMinValue(BitWidth) - SE->getUnsignedRange(Step).getUnsignedMax()); } namespace { struct ExtendOpTraitsBase { typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *); }; // Used to make code generic over signed and unsigned overflow. 
template <typename ExtendOp> struct ExtendOpTraits { // Members present: // // static const SCEV::NoWrapFlags WrapType; // // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr; // // static const SCEV *getOverflowLimitForStep(const SCEV *Step, // ICmpInst::Predicate *Pred, // ScalarEvolution *SE); }; template <> struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase { static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW; static const GetExtendExprTy GetExtendExpr; static const SCEV *getOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE) { return getSignedOverflowLimitForStep(Step, Pred, SE); } }; const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits< SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr; template <> struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase { static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW; static const GetExtendExprTy GetExtendExpr; static const SCEV *getOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE) { return getUnsignedOverflowLimitForStep(Step, Pred, SE); } }; const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits< SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr; } // The recurrence AR has been shown to have no signed/unsigned wrap or something // close to it. Typically, if we can prove NSW/NUW for AR, then we can just as // easily prove NSW/NUW for its preincrement or postincrement sibling. This // allows normalizing a sign/zero extended AddRec as such: {sext/zext(Step + // Start),+,Step} => {(Step + sext/zext(Start),+,Step} As a result, the // expression "Step + sext/zext(PreIncAR)" is congruent with // "sext/zext(PostIncAR)" template <typename ExtendOpTy> static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE) { auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType; auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr; const Loop *L = AR->getLoop(); const SCEV *Start = AR->getStart(); const SCEV *Step = AR->getStepRecurrence(*SE); // Check for a simple looking step prior to loop entry. const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start); if (!SA) return nullptr; // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV // subtraction is expensive. For this purpose, perform a quick and dirty // difference, by checking for Step in the operand list. SmallVector<const SCEV *, 4> DiffOps; for (const SCEV *Op : SA->operands()) if (Op != Step) DiffOps.push_back(Op); if (DiffOps.size() == SA->getNumOperands()) return nullptr; // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` + // `Step`: // 1. NSW/NUW flags on the step increment. const SCEV *PreStart = SE->getAddExpr(DiffOps, SA->getNoWrapFlags()); const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>( SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap)); // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies // "S+X does not sign/unsign-overflow". // const SCEV *BECount = SE->getBackedgeTakenCount(L); if (PreAR && PreAR->getNoWrapFlags(WrapType) && !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount)) return PreStart; // 2. Direct overflow check on the step operation's expression. 
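// Toy numeric check of the congruence getPreStartForExtend exploits, at i8
// widened to i32 and with constants chosen so nothing overflows (that is the
// no-wrap premise): sext({PreStart+Step,+,Step}) == Step + sext({PreStart,+,Step}).
// Constants are invented; the block is #if 0 guarded and compiles standalone.
#if 0
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  const int PreStart = 5, Step = 3;            // Start = PreStart + Step = 8
  for (int n = 0; n <= 30; ++n) {
    std::int32_t LHS = (std::int8_t)(PreStart + Step + Step * n); // sext({Start,+,Step})
    std::int32_t RHS = Step + (std::int8_t)(PreStart + Step * n); // Step + sext({PreStart,+,Step})
    assert(LHS == RHS);
  }
  std::printf("pre-start normalization holds for 0..30 iterations\n");
}
#endif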
unsigned BitWidth = SE->getTypeSizeInBits(AR->getType()); Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2); const SCEV *OperandExtendedStart = SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy), (SE->*GetExtendExpr)(Step, WideTy)); if ((SE->*GetExtendExpr)(Start, WideTy) == OperandExtendedStart) { if (PreAR && AR->getNoWrapFlags(WrapType)) { // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact. const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType); } return PreStart; } // 3. Loop precondition. ICmpInst::Predicate Pred; const SCEV *OverflowLimit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE); if (OverflowLimit && SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit)) { return PreStart; } return nullptr; } // Get the normalized zero or sign extended expression for this AddRec's Start. template <typename ExtendOpTy> static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE) { auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr; const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE); if (!PreStart) return (SE->*GetExtendExpr)(AR->getStart(), Ty); return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty), (SE->*GetExtendExpr)(PreStart, Ty)); } // Try to prove away overflow by looking at "nearby" add recurrences. A // motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it // does not itself wrap then we can conclude that `{1,+,4}` is `nuw`. // // Formally: // // {S,+,X} == {S-T,+,X} + T // => Ext({S,+,X}) == Ext({S-T,+,X} + T) // // If ({S-T,+,X} + T) does not overflow ... (1) // // RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T) // // If {S-T,+,X} does not overflow ... (2) // // RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T) // == {Ext(S-T)+Ext(T),+,Ext(X)} // // If (S-T)+T does not overflow ... (3) // // RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)} // == {Ext(S),+,Ext(X)} == LHS // // Thus, if (1), (2) and (3) are true for some T, then // Ext({S,+,X}) == {Ext(S),+,Ext(X)} // // (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T) // does not overflow" restricted to the 0th iteration. Therefore we only need // to check for (1) and (2). // // In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T // is `Delta` (defined below). // template <typename ExtendOpTy> bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start, const SCEV *Step, const Loop *L) { auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType; // We restrict `Start` to a constant to prevent SCEV from spending too much // time here. It is correct (but more expensive) to continue with a // non-constant `Start` and do a general SCEV subtraction to compute // `PreStart` below. // const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start); if (!StartC) return false; APInt StartAI = StartC->getValue()->getValue(); for (unsigned Delta : {-2, -1, 1, 2}) { const SCEV *PreStart = getConstant(StartAI - Delta); // Give up if we don't already have the add recurrence we need because // actually constructing an add recurrence is relatively expensive. 
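// Brute-force check at i8 of the algebra in the comment above; the constants
// are chosen so premises (1) and (2) hold and nothing wraps, in which case
// zext({S,+,X}) equals zext({S-T,+,X}) + T at every iteration. Invented
// constants; the block is #if 0 guarded and compiles standalone.
#if 0
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  const std::uint8_t S = 3, X = 4;
  for (unsigned T = 1; T <= 2; ++T) {
    for (unsigned It = 0; It <= 60; ++It) {     // 3 + 4*60 = 243, no i8 wrap
      std::uint32_t Narrow = (std::uint8_t)(S + X * It);        // {S,+,X}
      std::uint32_t Shifted = (std::uint8_t)(S - T + X * It);   // {S-T,+,X}
      assert(Narrow == Shifted + T);
    }
  }
  std::printf("{3,+,4} agrees with {3-T,+,4} + T for T in {1,2}\n");
}
#endif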
const SCEVAddRecExpr *PreAR = [&]() { FoldingSetNodeID ID; ID.AddInteger(scAddRecExpr); ID.AddPointer(PreStart); ID.AddPointer(Step); ID.AddPointer(L); void *IP = nullptr; return static_cast<SCEVAddRecExpr *>( this->UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); }(); if (PreAR && PreAR->getNoWrapFlags(WrapType)) { // proves (2) const SCEV *DeltaS = getConstant(StartC->getType(), Delta); ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE; const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep( DeltaS, &Pred, this); if (Limit && isKnownPredicate(Pred, PreAR, Limit)) // proves (1) return true; } } return false; } const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty) { assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && "This is not an extending conversion!"); assert(isSCEVable(Ty) && "This is not a conversion to a SCEVable type!"); Ty = getEffectiveSCEVType(Ty); // Fold if the operand is constant. if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) return getConstant( cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty))); // zext(zext(x)) --> zext(x) if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) return getZeroExtendExpr(SZ->getOperand(), Ty); // Before doing any expensive analysis, check to see if we've already // computed a SCEV for this Op and Ty. FoldingSetNodeID ID; ID.AddInteger(scZeroExtend); ID.AddPointer(Op); ID.AddPointer(Ty); void *IP = nullptr; if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; // zext(trunc(x)) --> zext(x) or x or trunc(x) if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { // It's possible the bits taken off by the truncate were all zero bits. If // so, we should be able to simplify this further. const SCEV *X = ST->getOperand(); ConstantRange CR = getUnsignedRange(X); unsigned TruncBits = getTypeSizeInBits(ST->getType()); unsigned NewBits = getTypeSizeInBits(Ty); if (CR.truncate(TruncBits).zeroExtend(NewBits).contains( CR.zextOrTrunc(NewBits))) return getTruncateOrZeroExtend(X, Ty); } // If the input value is a chrec scev, and we can prove that the value // did not overflow the old, smaller, value, we can zero extend all of the // operands (often constants). This allows analysis of something like // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; } if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) if (AR->isAffine()) { const SCEV *Start = AR->getStart(); const SCEV *Step = AR->getStepRecurrence(*this); unsigned BitWidth = getTypeSizeInBits(AR->getType()); const Loop *L = AR->getLoop(); // If we have special knowledge that this addrec won't overflow, // we don't need to do any further analysis. if (AR->getNoWrapFlags(SCEV::FlagNUW)) return getAddRecExpr( getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this), getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); // Check whether the backedge-taken count is SCEVCouldNotCompute. // Note that this serves two purposes: It filters out loops that are // simply not analyzable, and it covers the case where this code is // being called from within backedge-taken count analysis, such that // attempting to ask for the backedge-taken count would likely result // in infinite recursion. In the later case, the analysis code will // cope with a conservative value, and it will take care to purge // that value once it has finished. 
const SCEV *MaxBECount = getMaxBackedgeTakenCount(L); if (!isa<SCEVCouldNotCompute>(MaxBECount)) { // Manually compute the final value for AR, checking for // overflow. // Check whether the backedge-taken count can be losslessly casted to // the addrec's type. The count is always unsigned. const SCEV *CastedMaxBECount = getTruncateOrZeroExtend(MaxBECount, Start->getType()); const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType()); if (MaxBECount == RecastedMaxBECount) { Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); // Check whether Start+Step*MaxBECount has no unsigned overflow. const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step); const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul), WideTy); const SCEV *WideStart = getZeroExtendExpr(Start, WideTy); const SCEV *WideMaxBECount = getZeroExtendExpr(CastedMaxBECount, WideTy); const SCEV *OperandExtendedAdd = getAddExpr(WideStart, getMulExpr(WideMaxBECount, getZeroExtendExpr(Step, WideTy))); if (ZAdd == OperandExtendedAdd) { // Cache knowledge of AR NUW, which is propagated to this AddRec. const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); // Return the expression with the addrec on the outside. return getAddRecExpr( getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this), getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); } // Similar to above, only this time treat the step value as signed. // This covers loops that count down. OperandExtendedAdd = getAddExpr(WideStart, getMulExpr(WideMaxBECount, getSignExtendExpr(Step, WideTy))); if (ZAdd == OperandExtendedAdd) { // Cache knowledge of AR NW, which is propagated to this AddRec. // Negative step causes unsigned wrap, but it still can't self-wrap. const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); // Return the expression with the addrec on the outside. return getAddRecExpr( getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this), getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); } } // If the backedge is guarded by a comparison with the pre-inc value // the addrec is safe. Also, if the entry is guarded by a comparison // with the start value and the backedge is guarded by a comparison // with the post-inc value, the addrec is safe. if (isKnownPositive(Step)) { const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - getUnsignedRange(Step).getUnsignedMax()); if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) && isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR->getPostIncExpr(*this), N))) { // Cache knowledge of AR NUW, which is propagated to this AddRec. const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); // Return the expression with the addrec on the outside. return getAddRecExpr( getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this), getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); } } else if (isKnownNegative(Step)) { const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - getSignedRange(Step).getSignedMin()); if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) || (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) && isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR->getPostIncExpr(*this), N))) { // Cache knowledge of AR NW, which is propagated to this AddRec. // Negative step causes unsigned wrap, but it still can't self-wrap. const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); // Return the expression with the addrec on the outside. 
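// The motivating unsigned-char loop mentioned above, checked numerically
// (#if 0 guarded): with at most 100 iterations, 0 + 1*99 still fits in i8, so
// evaluating the recurrence at i8 and zero-extending matches evaluating it
// directly at i32, i.e. zext({0,+,1}<i8>) == {0,+,1}<i32> for every reachable
// iteration.
#if 0
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  for (unsigned It = 0; It < 100; ++It) {
    std::uint8_t NarrowX = (std::uint8_t)(0 + 1 * It); // i8 recurrence value
    std::uint32_t WideX = 0 + 1 * It;                  // i32 recurrence value
    assert((std::uint32_t)NarrowX == WideX);
  }
  std::printf("zext({0,+,1}<i8>) == {0,+,1}<i32> for 100 iterations\n");
}
#endif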
return getAddRecExpr( getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this), getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); } } } if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) { const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); return getAddRecExpr( getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this), getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); } } // The cast wasn't folded; create an explicit cast node. // Recompute the insert position, as it may have been invalidated. if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), Op, Ty); UniqueSCEVs.InsertNode(S, IP); return S; } const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty) { assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && "This is not an extending conversion!"); assert(isSCEVable(Ty) && "This is not a conversion to a SCEVable type!"); Ty = getEffectiveSCEVType(Ty); // Fold if the operand is constant. if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) return getConstant( cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); // sext(sext(x)) --> sext(x) if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) return getSignExtendExpr(SS->getOperand(), Ty); // sext(zext(x)) --> zext(x) if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) return getZeroExtendExpr(SZ->getOperand(), Ty); // Before doing any expensive analysis, check to see if we've already // computed a SCEV for this Op and Ty. FoldingSetNodeID ID; ID.AddInteger(scSignExtend); ID.AddPointer(Op); ID.AddPointer(Ty); void *IP = nullptr; if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; // If the input value is provably positive, build a zext instead. if (isKnownNonNegative(Op)) return getZeroExtendExpr(Op, Ty); // sext(trunc(x)) --> sext(x) or x or trunc(x) if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { // It's possible the bits taken off by the truncate were all sign bits. If // so, we should be able to simplify this further. const SCEV *X = ST->getOperand(); ConstantRange CR = getSignedRange(X); unsigned TruncBits = getTypeSizeInBits(ST->getType()); unsigned NewBits = getTypeSizeInBits(Ty); if (CR.truncate(TruncBits).signExtend(NewBits).contains( CR.sextOrTrunc(NewBits))) return getTruncateOrSignExtend(X, Ty); } // sext(C1 + (C2 * x)) --> C1 + sext(C2 * x) if C1 < C2 if (auto SA = dyn_cast<SCEVAddExpr>(Op)) { if (SA->getNumOperands() == 2) { auto SC1 = dyn_cast<SCEVConstant>(SA->getOperand(0)); auto SMul = dyn_cast<SCEVMulExpr>(SA->getOperand(1)); if (SMul && SC1) { if (auto SC2 = dyn_cast<SCEVConstant>(SMul->getOperand(0))) { const APInt &C1 = SC1->getValue()->getValue(); const APInt &C2 = SC2->getValue()->getValue(); if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && C2.ugt(C1) && C2.isPowerOf2()) return getAddExpr(getSignExtendExpr(SC1, Ty), getSignExtendExpr(SMul, Ty)); } } } } // If the input value is a chrec scev, and we can prove that the value // did not overflow the old, smaller, value, we can sign extend all of the // operands (often constants). 
This allows analysis of something like // this: for (signed char X = 0; X < 100; ++X) { int Y = X; } if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) if (AR->isAffine()) { const SCEV *Start = AR->getStart(); const SCEV *Step = AR->getStepRecurrence(*this); unsigned BitWidth = getTypeSizeInBits(AR->getType()); const Loop *L = AR->getLoop(); // If we have special knowledge that this addrec won't overflow, // we don't need to do any further analysis. if (AR->getNoWrapFlags(SCEV::FlagNSW)) return getAddRecExpr( getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this), getSignExtendExpr(Step, Ty), L, SCEV::FlagNSW); // Check whether the backedge-taken count is SCEVCouldNotCompute. // Note that this serves two purposes: It filters out loops that are // simply not analyzable, and it covers the case where this code is // being called from within backedge-taken count analysis, such that // attempting to ask for the backedge-taken count would likely result // in infinite recursion. In the later case, the analysis code will // cope with a conservative value, and it will take care to purge // that value once it has finished. const SCEV *MaxBECount = getMaxBackedgeTakenCount(L); if (!isa<SCEVCouldNotCompute>(MaxBECount)) { // Manually compute the final value for AR, checking for // overflow. // Check whether the backedge-taken count can be losslessly casted to // the addrec's type. The count is always unsigned. const SCEV *CastedMaxBECount = getTruncateOrZeroExtend(MaxBECount, Start->getType()); const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType()); if (MaxBECount == RecastedMaxBECount) { Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); // Check whether Start+Step*MaxBECount has no signed overflow. const SCEV *SMul = getMulExpr(CastedMaxBECount, Step); const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul), WideTy); const SCEV *WideStart = getSignExtendExpr(Start, WideTy); const SCEV *WideMaxBECount = getZeroExtendExpr(CastedMaxBECount, WideTy); const SCEV *OperandExtendedAdd = getAddExpr(WideStart, getMulExpr(WideMaxBECount, getSignExtendExpr(Step, WideTy))); if (SAdd == OperandExtendedAdd) { // Cache knowledge of AR NSW, which is propagated to this AddRec. const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); // Return the expression with the addrec on the outside. return getAddRecExpr( getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this), getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); } // Similar to above, only this time treat the step value as unsigned. // This covers loops that count up with an unsigned step. OperandExtendedAdd = getAddExpr(WideStart, getMulExpr(WideMaxBECount, getZeroExtendExpr(Step, WideTy))); if (SAdd == OperandExtendedAdd) { // If AR wraps around then // // abs(Step) * MaxBECount > unsigned-max(AR->getType()) // => SAdd != OperandExtendedAdd // // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=> // (SAdd == OperandExtendedAdd => AR is NW) const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); // Return the expression with the addrec on the outside. return getAddRecExpr( getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this), getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); } } // If the backedge is guarded by a comparison with the pre-inc value // the addrec is safe. Also, if the entry is guarded by a comparison // with the start value and the backedge is guarded by a comparison // with the post-inc value, the addrec is safe. 
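// Numeric sketch for the signed case (#if 0 guarded, invented constants): a
// down-counting i8 recurrence {90,+,-7} never sign-overflows within 30
// iterations, so evaluating it at i8 and sign-extending matches the i32
// recurrence, which is the property the widening checks above establish
// symbolically.
#if 0
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  for (int It = 0; It <= 30; ++It) {
    std::int8_t NarrowX = (std::int8_t)(90 - 7 * It);  // i8 value, in range
    std::int32_t WideX = 90 - 7 * It;                  // i32 recurrence value
    assert((std::int32_t)NarrowX == WideX);
  }
  std::printf("sext({90,+,-7}<i8>) == {90,+,-7}<i32> for 31 iterations\n");
}
#endif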
ICmpInst::Predicate Pred; const SCEV *OverflowLimit = getSignedOverflowLimitForStep(Step, &Pred, this); if (OverflowLimit && (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) || (isLoopEntryGuardedByCond(L, Pred, Start, OverflowLimit) && isLoopBackedgeGuardedByCond(L, Pred, AR->getPostIncExpr(*this), OverflowLimit)))) { // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec. const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); return getAddRecExpr( getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this), getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); } } // If Start and Step are constants, check if we can apply this // transformation: // sext{C1,+,C2} --> C1 + sext{0,+,C2} if C1 < C2 auto SC1 = dyn_cast<SCEVConstant>(Start); auto SC2 = dyn_cast<SCEVConstant>(Step); if (SC1 && SC2) { const APInt &C1 = SC1->getValue()->getValue(); const APInt &C2 = SC2->getValue()->getValue(); if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && C2.ugt(C1) && C2.isPowerOf2()) { Start = getSignExtendExpr(Start, Ty); const SCEV *NewAR = getAddRecExpr(getConstant(AR->getType(), 0), Step, L, AR->getNoWrapFlags()); return getAddExpr(Start, getSignExtendExpr(NewAR, Ty)); } } if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) { const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); return getAddRecExpr( getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this), getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); } } // The cast wasn't folded; create an explicit cast node. // Recompute the insert position, as it may have been invalidated. if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), Op, Ty); UniqueSCEVs.InsertNode(S, IP); return S; } /// getAnyExtendExpr - Return a SCEV for the given operand extended with /// unspecified bits out to the given type. /// const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op, Type *Ty) { assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && "This is not an extending conversion!"); assert(isSCEVable(Ty) && "This is not a conversion to a SCEVable type!"); Ty = getEffectiveSCEVType(Ty); // Sign-extend negative constants. if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) if (SC->getValue()->getValue().isNegative()) return getSignExtendExpr(Op, Ty); // Peel off a truncate cast. if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) { const SCEV *NewOp = T->getOperand(); if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty)) return getAnyExtendExpr(NewOp, Ty); return getTruncateOrNoop(NewOp, Ty); } // Next try a zext cast. If the cast is folded, use it. const SCEV *ZExt = getZeroExtendExpr(Op, Ty); if (!isa<SCEVZeroExtendExpr>(ZExt)) return ZExt; // Next try a sext cast. If the cast is folded, use it. const SCEV *SExt = getSignExtendExpr(Op, Ty); if (!isa<SCEVSignExtendExpr>(SExt)) return SExt; // Force the cast to be folded into the operands of an addrec. if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) { SmallVector<const SCEV *, 4> Ops; for (const SCEV *Op : AR->operands()) Ops.push_back(getAnyExtendExpr(Op, Ty)); return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW); } // If the expression is obviously signed, use the sext cast value. if (isa<SCEVSMaxExpr>(Op)) return SExt; // Absent any other information, use the zext cast value. 
return ZExt; } /// CollectAddOperandsWithScales - Process the given Ops list, which is /// a list of operands to be added under the given scale, update the given /// map. This is a helper function for getAddRecExpr. As an example of /// what it does, given a sequence of operands that would form an add /// expression like this: /// /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r) /// /// where A and B are constants, update the map with these values: /// /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0) /// /// and add 13 + A*B*29 to AccumulatedConstant. /// This will allow getAddRecExpr to produce this: /// /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B) /// /// This form often exposes folding opportunities that are hidden in /// the original operand list. /// /// Return true iff it appears that any interesting folding opportunities /// may be exposed. This helps getAddRecExpr short-circuit extra work in /// the common case where no interesting opportunities are present, and /// is also used as a check to avoid infinite recursion. /// static bool CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M, SmallVectorImpl<const SCEV *> &NewOps, APInt &AccumulatedConstant, const SCEV *const *Ops, size_t NumOperands, const APInt &Scale, ScalarEvolution &SE) { bool Interesting = false; // Iterate over the add operands. They are sorted, with constants first. unsigned i = 0; while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { ++i; // Pull a buried constant out to the outside. if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero()) Interesting = true; AccumulatedConstant += Scale * C->getValue()->getValue(); } // Next comes everything else. We're especially interested in multiplies // here, but they're in the middle, so just visit the rest with one loop. for (; i != NumOperands; ++i) { const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]); if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) { APInt NewScale = Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue(); if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) { // A multiplication of a constant with another add; recurse. const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1)); Interesting |= CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, Add->op_begin(), Add->getNumOperands(), NewScale, SE); } else { // A multiplication of a constant with some other value. Update // the map. SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end()); const SCEV *Key = SE.getMulExpr(MulOps); std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = M.insert(std::make_pair(Key, NewScale)); if (Pair.second) { NewOps.push_back(Pair.first->first); } else { Pair.first->second += NewScale; // The map already had an entry for this value, which may indicate // a folding opportunity. Interesting = true; } } } else { // An ordinary operand. Update the map. std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = M.insert(std::make_pair(Ops[i], Scale)); if (Pair.second) { NewOps.push_back(Pair.first->first); } else { Pair.first->second += Scale; // The map already had an entry for this value, which may indicate // a folding opportunity. Interesting = true; } } } return Interesting; } namespace { struct APIntCompare { bool operator()(const APInt &LHS, const APInt &RHS) const { return LHS.ult(RHS); } }; } // We're trying to construct a SCEV of type `Type' with `Ops' as operands and // `OldFlags' as can't-wrap behavior. 
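// Numeric sanity check (#if 0 guarded, arbitrary constants) of the worked
// example in the CollectAddOperandsWithScales comment above: the nested
// expression and the flattened "operand * scale" form it is rewritten into
// agree for any operand values.
#if 0
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  const std::int64_t A = 3, B = 5;
  const std::int64_t m = 7, n = 11, o = 13, p = 17, q = 19, r = 23;

  std::int64_t Original =
      m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r);

  std::int64_t Accumulated = 13 + A * B * 29;
  std::int64_t Flattened = Accumulated + n + m * (1 + A * B) + (o + p) * A +
                           q * (A * B) + r * 0;

  assert(Original == Flattened);
  std::printf("original = %lld, flattened = %lld\n", (long long)Original,
              (long long)Flattened);
}
#endif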
Infer a more aggressive set of // can't-overflow flags for the operation if possible. static SCEV::NoWrapFlags StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, const SmallVectorImpl<const SCEV *> &Ops, SCEV::NoWrapFlags OldFlags) { using namespace std::placeholders; bool CanAnalyze = Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr; (void)CanAnalyze; assert(CanAnalyze && "don't call from other places!"); int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW; SCEV::NoWrapFlags SignOrUnsignWrap = ScalarEvolution::maskFlags(OldFlags, SignOrUnsignMask); // If FlagNSW is true and all the operands are non-negative, infer FlagNUW. auto IsKnownNonNegative = std::bind(std::mem_fn(&ScalarEvolution::isKnownNonNegative), SE, _1); if (SignOrUnsignWrap == SCEV::FlagNSW && std::all_of(Ops.begin(), Ops.end(), IsKnownNonNegative)) return ScalarEvolution::setFlags(OldFlags, (SCEV::NoWrapFlags)SignOrUnsignMask); return OldFlags; } /// getAddExpr - Get a canonical add expression, or something simpler if /// possible. const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops, SCEV::NoWrapFlags Flags) { assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) && "only nuw or nsw allowed"); assert(!Ops.empty() && "Cannot get empty add!"); if (Ops.size() == 1) return Ops[0]; #ifndef NDEBUG Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); for (unsigned i = 1, e = Ops.size(); i != e; ++i) assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && "SCEVAddExpr operand types don't match!"); #endif Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags); // Sort by complexity, this groups all similar expression types together. GroupByComplexity(Ops, LI); // If there are any constants, fold them together. unsigned Idx = 0; if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { ++Idx; assert(Idx < Ops.size()); while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { // We found two constants, fold them together! Ops[0] = getConstant(LHSC->getValue()->getValue() + RHSC->getValue()->getValue()); if (Ops.size() == 2) return Ops[0]; Ops.erase(Ops.begin()+1); // Erase the folded element LHSC = cast<SCEVConstant>(Ops[0]); } // If we are left with a constant zero being added, strip it off. if (LHSC->getValue()->isZero()) { Ops.erase(Ops.begin()); --Idx; } if (Ops.size() == 1) return Ops[0]; } // Okay, check to see if the same value occurs in the operand list more than // once. If so, merge them together into an multiply expression. Since we // sorted the list, these values are required to be adjacent. Type *Ty = Ops[0]->getType(); bool FoundMatch = false; for (unsigned i = 0, e = Ops.size(); i != e-1; ++i) if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2 // Scan ahead to count how many equal operands there are. unsigned Count = 2; while (i+Count != e && Ops[i+Count] == Ops[i]) ++Count; // Merge the values into a multiply. const SCEV *Scale = getConstant(Ty, Count); const SCEV *Mul = getMulExpr(Scale, Ops[i]); if (Ops.size() == Count) return Mul; Ops[i] = Mul; Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count); --i; e -= Count - 1; FoundMatch = true; } if (FoundMatch) return getAddExpr(Ops, Flags); // Check for truncates. If all the operands are truncated from the same // type, see if factoring out the truncate would permit the result to be // folded. eg., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n) // if the contents of the resulting outer trunc fold to something simple. 
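// Exhaustive 8-bit check (#if 0 guarded) of the inference
// StrengthenNoWrapFlags makes above: when every operand is non-negative,
// "no signed wrap" on the addition implies "no unsigned wrap" as well.
#if 0
#include <cassert>
#include <cstdio>

int main() {
  for (int A = 0; A <= 127; ++A)
    for (int B = 0; B <= 127; ++B) {
      bool SignedNoWrap = (A + B) <= 127;                 // i8 nsw holds
      bool UnsignedNoWrap = (unsigned)(A + B) <= 255u;    // i8 nuw holds
      if (SignedNoWrap)
        assert(UnsignedNoWrap);
    }
  std::printf("nsw with non-negative operands implies nuw at i8\n");
}
#endif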
for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) { const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]); Type *DstType = Trunc->getType(); Type *SrcType = Trunc->getOperand()->getType(); SmallVector<const SCEV *, 8> LargeOps; bool Ok = true; // Check all the operands to see if they can be represented in the // source type of the truncate. for (unsigned i = 0, e = Ops.size(); i != e; ++i) { if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) { if (T->getOperand()->getType() != SrcType) { Ok = false; break; } LargeOps.push_back(T->getOperand()); } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { LargeOps.push_back(getAnyExtendExpr(C, SrcType)); } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) { SmallVector<const SCEV *, 8> LargeMulOps; for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) { if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) { if (T->getOperand()->getType() != SrcType) { Ok = false; break; } LargeMulOps.push_back(T->getOperand()); } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(j))) { LargeMulOps.push_back(getAnyExtendExpr(C, SrcType)); } else { Ok = false; break; } } if (Ok) LargeOps.push_back(getMulExpr(LargeMulOps)); } else { Ok = false; break; } } if (Ok) { // Evaluate the expression in the larger type. const SCEV *Fold = getAddExpr(LargeOps, Flags); // If it folds to something simple, use it. Otherwise, don't. if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold)) return getTruncateExpr(Fold, DstType); } } // Skip past any other cast SCEVs. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr) ++Idx; // If there are add operands they would be next. if (Idx < Ops.size()) { bool DeletedAdd = false; while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) { // If we have an add, expand the add operands onto the end of the operands // list. Ops.erase(Ops.begin()+Idx); Ops.append(Add->op_begin(), Add->op_end()); DeletedAdd = true; } // If we deleted at least one add, we added operands to the end of the list, // and they are not necessarily sorted. Recurse to resort and resimplify // any operands we just acquired. if (DeletedAdd) return getAddExpr(Ops); } // Skip over the add expression until we get to a multiply. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) ++Idx; // Check to see if there are any folding opportunities present with // operands multiplied by constant values. if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) { uint64_t BitWidth = getTypeSizeInBits(Ty); DenseMap<const SCEV *, APInt> M; SmallVector<const SCEV *, 8> NewOps; APInt AccumulatedConstant(BitWidth, 0); if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, Ops.data(), Ops.size(), APInt(BitWidth, 1), *this)) { // Some interesting folding opportunity is present, so its worthwhile to // re-generate the operands list. Group the operands by constant scale, // to avoid multiplying by the same constant scale multiple times. std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists; for (SmallVectorImpl<const SCEV *>::const_iterator I = NewOps.begin(), E = NewOps.end(); I != E; ++I) MulOpLists[M.find(*I)->second].push_back(*I); // Re-generate the operands list. 
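      // Illustrative example (hypothetical values): for 2*x + 3*y + 2*(z + 5),
      // the collection above yields AccumulatedConstant = 10 and the scale map
      // {x: 2, y: 3, z: 2}, so the list is rebuilt below as
      // 10 + 2*(x + z) + 3*y.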
Ops.clear(); if (AccumulatedConstant != 0) Ops.push_back(getConstant(AccumulatedConstant)); for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I) if (I->first != 0) Ops.push_back(getMulExpr(getConstant(I->first), getAddExpr(I->second))); if (Ops.empty()) return getConstant(Ty, 0); if (Ops.size() == 1) return Ops[0]; return getAddExpr(Ops); } } // If we are adding something to a multiply expression, make sure the // something is not already an operand of the multiply. If so, merge it into // the multiply. for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) { const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]); for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) { const SCEV *MulOpSCEV = Mul->getOperand(MulOp); if (isa<SCEVConstant>(MulOpSCEV)) continue; for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp) if (MulOpSCEV == Ops[AddOp]) { // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1)) const SCEV *InnerMul = Mul->getOperand(MulOp == 0); if (Mul->getNumOperands() != 2) { // If the multiply has more than two operands, we must get the // Y*Z term. SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), Mul->op_begin()+MulOp); MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); InnerMul = getMulExpr(MulOps); } const SCEV *One = getConstant(Ty, 1); const SCEV *AddOne = getAddExpr(One, InnerMul); const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV); if (Ops.size() == 2) return OuterMul; if (AddOp < Idx) { Ops.erase(Ops.begin()+AddOp); Ops.erase(Ops.begin()+Idx-1); } else { Ops.erase(Ops.begin()+Idx); Ops.erase(Ops.begin()+AddOp-1); } Ops.push_back(OuterMul); return getAddExpr(Ops); } // Check this multiply against other multiplies being added together. for (unsigned OtherMulIdx = Idx+1; OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); ++OtherMulIdx) { const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); // If MulOp occurs in OtherMul, we can fold the two multiplies // together. for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); OMulOp != e; ++OMulOp) if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); if (Mul->getNumOperands() != 2) { SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), Mul->op_begin()+MulOp); MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); InnerMul1 = getMulExpr(MulOps); } const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); if (OtherMul->getNumOperands() != 2) { SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), OtherMul->op_begin()+OMulOp); MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); InnerMul2 = getMulExpr(MulOps); } const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2); const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum); if (Ops.size() == 2) return OuterMul; Ops.erase(Ops.begin()+Idx); Ops.erase(Ops.begin()+OtherMulIdx-1); Ops.push_back(OuterMul); return getAddExpr(Ops); } } } } // If there are any add recurrences in the operands list, see if any other // added values are loop invariant. If so, we can fold them into the // recurrence. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) ++Idx; // Scan over all recurrences, trying to fold loop invariants into them. for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { // Scan all of the other operands to this add and add them to the vector if // they are loop invariant w.r.t. the recurrence. 
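    // Illustrative example (hypothetical values): if n is invariant in L, then
    // n + {0,+,1}<L> is folded below into {n,+,1}<L>, and any remaining
    // non-invariant addends are re-added on top of the new recurrence.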
SmallVector<const SCEV *, 8> LIOps; const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); const Loop *AddRecLoop = AddRec->getLoop(); for (unsigned i = 0, e = Ops.size(); i != e; ++i) if (isLoopInvariant(Ops[i], AddRecLoop)) { LIOps.push_back(Ops[i]); Ops.erase(Ops.begin()+i); --i; --e; } // If we found some loop invariants, fold them into the recurrence. if (!LIOps.empty()) { // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} LIOps.push_back(AddRec->getStart()); SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), AddRec->op_end()); AddRecOps[0] = getAddExpr(LIOps); // Build the new addrec. Propagate the NUW and NSW flags if both the // outer add and the inner addrec are guaranteed to have no overflow. // Always propagate NW. Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW)); const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags); // If all of the other operands were loop invariant, we are done. if (Ops.size() == 1) return NewRec; // Otherwise, add the folded AddRec by the non-invariant parts. for (unsigned i = 0;; ++i) if (Ops[i] == AddRec) { Ops[i] = NewRec; break; } return getAddExpr(Ops); } // Okay, if there weren't any loop invariants to be folded, check to see if // there are multiple AddRec's with the same loop induction variable being // added together. If so, we can fold them. for (unsigned OtherIdx = Idx+1; OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); ++OtherIdx) if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) { // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L> SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), AddRec->op_end()); for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); ++OtherIdx) if (const SCEVAddRecExpr *OtherAddRec = dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx])) if (OtherAddRec->getLoop() == AddRecLoop) { for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) { if (i >= AddRecOps.size()) { AddRecOps.append(OtherAddRec->op_begin()+i, OtherAddRec->op_end()); break; } AddRecOps[i] = getAddExpr(AddRecOps[i], OtherAddRec->getOperand(i)); } Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; } // Step size has changed, so we cannot guarantee no self-wraparound. Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap); return getAddExpr(Ops); } // Otherwise couldn't fold anything into this recurrence. Move onto the // next one. } // Okay, it looks like we really DO need an add expr. Check to see if we // already have one, otherwise create a new one. FoldingSetNodeID ID; ID.AddInteger(scAddExpr); for (unsigned i = 0, e = Ops.size(); i != e; ++i) ID.AddPointer(Ops[i]); void *IP = nullptr; SCEVAddExpr *S = static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); if (!S) { const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); std::uninitialized_copy(Ops.begin(), Ops.end(), O); S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size()); UniqueSCEVs.InsertNode(S, IP); } S->setNoWrapFlags(Flags); return S; } static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) { uint64_t k = i*j; if (j > 1 && k / j != i) Overflow = true; return k; } /// Compute the result of "n choose k", the binomial coefficient. If an /// intermediate computation overflows, Overflow will be set and the return will /// be garbage. Overflow is not cleared on absence of overflow. static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) { // We use the multiplicative formula: // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 . 
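  // For example (illustrative), Choose(6, 2) = (6*5)/(2*1) = 15, computed here
  // without ever forming the full factorials.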
// At each iteration, we take the n-th term of the numeral and divide by the // (k-n)th term of the denominator. This division will always produce an // integral result, and helps reduce the chance of overflow in the // intermediate computations. However, we can still overflow even when the // final result would fit. if (n == 0 || n == k) return 1; if (k > n) return 0; if (k > n/2) k = n-k; uint64_t r = 1; for (uint64_t i = 1; i <= k; ++i) { r = umul_ov(r, n-(i-1), Overflow); r /= i; } return r; } /// Determine if any of the operands in this SCEV are a constant or if /// any of the add or multiply expressions in this SCEV contain a constant. static bool containsConstantSomewhere(const SCEV *StartExpr) { SmallVector<const SCEV *, 4> Ops; Ops.push_back(StartExpr); while (!Ops.empty()) { const SCEV *CurrentExpr = Ops.pop_back_val(); if (isa<SCEVConstant>(*CurrentExpr)) return true; if (isa<SCEVAddExpr>(*CurrentExpr) || isa<SCEVMulExpr>(*CurrentExpr)) { const auto *CurrentNAry = cast<SCEVNAryExpr>(CurrentExpr); Ops.append(CurrentNAry->op_begin(), CurrentNAry->op_end()); } } return false; } /// getMulExpr - Get a canonical multiply expression, or something simpler if /// possible. const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops, SCEV::NoWrapFlags Flags) { assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) && "only nuw or nsw allowed"); assert(!Ops.empty() && "Cannot get empty mul!"); if (Ops.size() == 1) return Ops[0]; #ifndef NDEBUG Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); for (unsigned i = 1, e = Ops.size(); i != e; ++i) assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && "SCEVMulExpr operand types don't match!"); #endif Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags); // Sort by complexity, this groups all similar expression types together. GroupByComplexity(Ops, LI); // If there are any constants, fold them together. unsigned Idx = 0; if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { // C1*(C2+V) -> C1*C2 + C1*V if (Ops.size() == 2) if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) // If any of Add's ops are Adds or Muls with a constant, // apply this transformation as well. if (Add->getNumOperands() == 2) if (containsConstantSomewhere(Add)) return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)), getMulExpr(LHSC, Add->getOperand(1))); ++Idx; while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { // We found two constants, fold them together! ConstantInt *Fold = ConstantInt::get(getContext(), LHSC->getValue()->getValue() * RHSC->getValue()->getValue()); Ops[0] = getConstant(Fold); Ops.erase(Ops.begin()+1); // Erase the folded element if (Ops.size() == 1) return Ops[0]; LHSC = cast<SCEVConstant>(Ops[0]); } // If we are left with a constant one being multiplied, strip it off. if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) { Ops.erase(Ops.begin()); --Idx; } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) { // If we have a multiply of zero, it will always be zero. return Ops[0]; } else if (Ops[0]->isAllOnesValue()) { // If we have a mul by -1 of an add, try distributing the -1 among the // add operands. 
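      // Illustrative example (hypothetical values): (-1) * (x + 5) becomes
      // (-1 * x) + (-5); the rewrite below is only kept when at least one
      // distributed term simplifies, so the expression does not just grow.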
if (Ops.size() == 2) { if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) { SmallVector<const SCEV *, 4> NewOps; bool AnyFolded = false; for (SCEVAddRecExpr::op_iterator I = Add->op_begin(), E = Add->op_end(); I != E; ++I) { const SCEV *Mul = getMulExpr(Ops[0], *I); if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true; NewOps.push_back(Mul); } if (AnyFolded) return getAddExpr(NewOps); } else if (const SCEVAddRecExpr * AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) { // Negation preserves a recurrence's no self-wrap property. SmallVector<const SCEV *, 4> Operands; for (SCEVAddRecExpr::op_iterator I = AddRec->op_begin(), E = AddRec->op_end(); I != E; ++I) { Operands.push_back(getMulExpr(Ops[0], *I)); } return getAddRecExpr(Operands, AddRec->getLoop(), AddRec->getNoWrapFlags(SCEV::FlagNW)); } } } if (Ops.size() == 1) return Ops[0]; } // Skip over the add expression until we get to a multiply. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) ++Idx; // If there are mul operands inline them all into this expression. if (Idx < Ops.size()) { bool DeletedMul = false; while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { // If we have an mul, expand the mul operands onto the end of the operands // list. Ops.erase(Ops.begin()+Idx); Ops.append(Mul->op_begin(), Mul->op_end()); DeletedMul = true; } // If we deleted at least one mul, we added operands to the end of the list, // and they are not necessarily sorted. Recurse to resort and resimplify // any operands we just acquired. if (DeletedMul) return getMulExpr(Ops); } // If there are any add recurrences in the operands list, see if any other // added values are loop invariant. If so, we can fold them into the // recurrence. while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) ++Idx; // Scan over all recurrences, trying to fold loop invariants into them. for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { // Scan all of the other operands to this mul and add them to the vector if // they are loop invariant w.r.t. the recurrence. SmallVector<const SCEV *, 8> LIOps; const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); const Loop *AddRecLoop = AddRec->getLoop(); for (unsigned i = 0, e = Ops.size(); i != e; ++i) if (isLoopInvariant(Ops[i], AddRecLoop)) { LIOps.push_back(Ops[i]); Ops.erase(Ops.begin()+i); --i; --e; } // If we found some loop invariants, fold them into the recurrence. if (!LIOps.empty()) { // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} SmallVector<const SCEV *, 4> NewOps; NewOps.reserve(AddRec->getNumOperands()); const SCEV *Scale = getMulExpr(LIOps); for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i))); // Build the new addrec. Propagate the NUW and NSW flags if both the // outer mul and the inner addrec are guaranteed to have no overflow. // // No self-wrap cannot be guaranteed after changing the step size, but // will be inferred if either NUW or NSW is true. Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW)); const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags); // If all of the other operands were loop invariant, we are done. if (Ops.size() == 1) return NewRec; // Otherwise, multiply the folded AddRec by the non-invariant parts. 
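      // Illustrative example (hypothetical values): for m * n * {x,+,2}<L>,
      // where n and x are invariant in L but m is not, NewRec is
      // {n*x,+,n*2}<L>, and the loop below splices it back in place of the
      // original AddRec before rebuilding the multiply.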
for (unsigned i = 0;; ++i) if (Ops[i] == AddRec) { Ops[i] = NewRec; break; } return getMulExpr(Ops); } // Okay, if there weren't any loop invariants to be folded, check to see if // there are multiple AddRec's with the same loop induction variable being // multiplied together. If so, we can fold them. // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L> // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [ // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z // ]]],+,...up to x=2n}. // Note that the arguments to choose() are always integers with values // known at compile time, never SCEV objects. // // The implementation avoids pointless extra computations when the two // addrec's are of different length (mathematically, it's equivalent to // an infinite stream of zeros on the right). bool OpsModified = false; for (unsigned OtherIdx = Idx+1; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); ++OtherIdx) { const SCEVAddRecExpr *OtherAddRec = dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]); if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop) continue; bool Overflow = false; Type *Ty = AddRec->getType(); bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64; SmallVector<const SCEV*, 7> AddRecOps; for (int x = 0, xe = AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) { const SCEV *Term = getConstant(Ty, 0); for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) { uint64_t Coeff1 = Choose(x, 2*x - y, Overflow); for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1), ze = std::min(x+1, (int)OtherAddRec->getNumOperands()); z < ze && !Overflow; ++z) { uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow); uint64_t Coeff; if (LargerThan64Bits) Coeff = umul_ov(Coeff1, Coeff2, Overflow); else Coeff = Coeff1*Coeff2; const SCEV *CoeffTerm = getConstant(Ty, Coeff); const SCEV *Term1 = AddRec->getOperand(y-z); const SCEV *Term2 = OtherAddRec->getOperand(z); Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1,Term2)); } } AddRecOps.push_back(Term); } if (!Overflow) { const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(), SCEV::FlagAnyWrap); if (Ops.size() == 2) return NewAddRec; Ops[Idx] = NewAddRec; Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; OpsModified = true; AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec); if (!AddRec) break; } } if (OpsModified) return getMulExpr(Ops); // Otherwise couldn't fold anything into this recurrence. Move onto the // next one. } // Okay, it looks like we really DO need an mul expr. Check to see if we // already have one, otherwise create a new one. FoldingSetNodeID ID; ID.AddInteger(scMulExpr); for (unsigned i = 0, e = Ops.size(); i != e; ++i) ID.AddPointer(Ops[i]); void *IP = nullptr; SCEVMulExpr *S = static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); if (!S) { const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); std::uninitialized_copy(Ops.begin(), Ops.end(), O); S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator), O, Ops.size()); UniqueSCEVs.InsertNode(S, IP); } S->setNoWrapFlags(Flags); return S; } /// getUDivExpr - Get a canonical unsigned division expression, or something /// simpler if possible. 
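// Illustrative examples (hypothetical values) of the folds attempted below:
// {0,+,4}<L> /u 2 can become {0,+,2}<L> when the zero-extended forms agree,
// and (8 * x) /u 4 can become 2 * x when no unsigned overflow is lost.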
const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS, const SCEV *RHS) { assert(getEffectiveSCEVType(LHS->getType()) == getEffectiveSCEVType(RHS->getType()) && "SCEVUDivExpr operand types don't match!"); if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { if (RHSC->getValue()->equalsInt(1)) return LHS; // X udiv 1 --> x // If the denominator is zero, the result of the udiv is undefined. Don't // try to analyze it, because the resolution chosen here may differ from // the resolution chosen in other parts of the compiler. if (!RHSC->getValue()->isZero()) { // Determine if the division can be folded into the operands of // its operands. // TODO: Generalize this to non-constants by using known-bits information. Type *Ty = LHS->getType(); unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros(); unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1; // For non-power-of-two values, effectively round the value up to the // nearest power of two. if (!RHSC->getValue()->getValue().isPowerOf2()) ++MaxShiftAmt; IntegerType *ExtTy = IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt); if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) if (const SCEVConstant *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) { // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded. const APInt &StepInt = Step->getValue()->getValue(); const APInt &DivInt = RHSC->getValue()->getValue(); if (!StepInt.urem(DivInt) && getZeroExtendExpr(AR, ExtTy) == getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), getZeroExtendExpr(Step, ExtTy), AR->getLoop(), SCEV::FlagAnyWrap)) { SmallVector<const SCEV *, 4> Operands; for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i) Operands.push_back(getUDivExpr(AR->getOperand(i), RHS)); return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW); } /// Get a canonical UDivExpr for a recurrence. /// {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0. // We can currently only fold X%N if X is constant. const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart()); if (StartC && !DivInt.urem(StepInt) && getZeroExtendExpr(AR, ExtTy) == getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), getZeroExtendExpr(Step, ExtTy), AR->getLoop(), SCEV::FlagAnyWrap)) { const APInt &StartInt = StartC->getValue()->getValue(); const APInt &StartRem = StartInt.urem(StepInt); if (StartRem != 0) LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step, AR->getLoop(), SCEV::FlagNW); } } // (A*B)/C --> A*(B/C) if safe and B/C can be folded. if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { SmallVector<const SCEV *, 4> Operands; for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy)); if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) // Find an operand that's safely divisible. for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { const SCEV *Op = M->getOperand(i); const SCEV *Div = getUDivExpr(Op, RHSC); if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { Operands = SmallVector<const SCEV *, 4>(M->op_begin(), M->op_end()); Operands[i] = Div; return getMulExpr(Operands); } } } // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 
if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { SmallVector<const SCEV *, 4> Operands; for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy)); if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { Operands.clear(); for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); if (isa<SCEVUDivExpr>(Op) || getMulExpr(Op, RHS) != A->getOperand(i)) break; Operands.push_back(Op); } if (Operands.size() == A->getNumOperands()) return getAddExpr(Operands); } } // Fold if both operands are constant. if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { Constant *LHSCV = LHSC->getValue(); Constant *RHSCV = RHSC->getValue(); return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, RHSCV))); } } } FoldingSetNodeID ID; ID.AddInteger(scUDivExpr); ID.AddPointer(LHS); ID.AddPointer(RHS); void *IP = nullptr; if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), LHS, RHS); UniqueSCEVs.InsertNode(S, IP); return S; } static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { APInt A = C1->getValue()->getValue().abs(); APInt B = C2->getValue()->getValue().abs(); uint32_t ABW = A.getBitWidth(); uint32_t BBW = B.getBitWidth(); if (ABW > BBW) B = B.zext(ABW); else if (ABW < BBW) A = A.zext(BBW); return APIntOps::GreatestCommonDivisor(A, B); } /// getUDivExactExpr - Get a canonical unsigned division expression, or /// something simpler if possible. There is no representation for an exact udiv /// in SCEV IR, but we can attempt to remove factors from the LHS and RHS. /// We can't do this when it's not exact because the udiv may be clearing bits. const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, const SCEV *RHS) { // TODO: we could try to find factors in all sorts of things, but for now we // just deal with u/exact (multiply, constant). See SCEVDivision towards the // end of this file for inspiration. const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS); if (!Mul) return getUDivExpr(LHS, RHS); if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) { // If the mulexpr multiplies by a constant, then that constant must be the // first element of the mulexpr. if (const SCEVConstant *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) { if (LHSCst == RHSCst) { SmallVector<const SCEV *, 2> Operands; Operands.append(Mul->op_begin() + 1, Mul->op_end()); return getMulExpr(Operands); } // We can't just assume that LHSCst divides RHSCst cleanly, it could be // that there's a factor provided by one of the other terms. We need to // check. 
APInt Factor = gcd(LHSCst, RHSCst); if (!Factor.isIntN(1)) { LHSCst = cast<SCEVConstant>( getConstant(LHSCst->getValue()->getValue().udiv(Factor))); RHSCst = cast<SCEVConstant>( getConstant(RHSCst->getValue()->getValue().udiv(Factor))); SmallVector<const SCEV *, 2> Operands; Operands.push_back(LHSCst); Operands.append(Mul->op_begin() + 1, Mul->op_end()); LHS = getMulExpr(Operands); RHS = RHSCst; Mul = dyn_cast<SCEVMulExpr>(LHS); if (!Mul) return getUDivExactExpr(LHS, RHS); } } } for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) { if (Mul->getOperand(i) == RHS) { SmallVector<const SCEV *, 2> Operands; Operands.append(Mul->op_begin(), Mul->op_begin() + i); Operands.append(Mul->op_begin() + i + 1, Mul->op_end()); return getMulExpr(Operands); } } return getUDivExpr(LHS, RHS); } /// getAddRecExpr - Get an add recurrence expression for the specified loop. /// Simplify the expression as much as possible. const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step, const Loop *L, SCEV::NoWrapFlags Flags) { SmallVector<const SCEV *, 4> Operands; Operands.push_back(Start); if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step)) if (StepChrec->getLoop() == L) { Operands.append(StepChrec->op_begin(), StepChrec->op_end()); return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW)); } Operands.push_back(Step); return getAddRecExpr(Operands, L, Flags); } /// getAddRecExpr - Get an add recurrence expression for the specified loop. /// Simplify the expression as much as possible. const SCEV * ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands, const Loop *L, SCEV::NoWrapFlags Flags) { if (Operands.size() == 1) return Operands[0]; #ifndef NDEBUG Type *ETy = getEffectiveSCEVType(Operands[0]->getType()); for (unsigned i = 1, e = Operands.size(); i != e; ++i) assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy && "SCEVAddRecExpr operand types don't match!"); for (unsigned i = 0, e = Operands.size(); i != e; ++i) assert(isLoopInvariant(Operands[i], L) && "SCEVAddRecExpr operand is not loop-invariant!"); #endif if (Operands.back()->isZero()) { Operands.pop_back(); return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X } // It's tempting to want to call getMaxBackedgeTakenCount count here and // use that information to infer NUW and NSW flags. However, computing a // BE count requires calling getAddRecExpr, so we may not yet have a // meaningful BE count at this point (and if we don't, we'd be stuck // with a SCEVCouldNotCompute as the cached BE count). Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags); // Canonicalize nested AddRecs in by nesting them in order of loop depth. if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) { const Loop *NestedLoop = NestedAR->getLoop(); if (L->contains(NestedLoop) ? (L->getLoopDepth() < NestedLoop->getLoopDepth()) : (!NestedLoop->contains(L) && DT->dominates(L->getHeader(), NestedLoop->getHeader()))) { SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(), NestedAR->op_end()); Operands[0] = NestedAR->getStart(); // AddRecs require their operands be loop-invariant with respect to their // loops. Don't perform this transformation if it would break this // requirement. bool AllInvariant = true; for (unsigned i = 0, e = Operands.size(); i != e; ++i) if (!isLoopInvariant(Operands[i], L)) { AllInvariant = false; break; } if (AllInvariant) { // Create a recurrence for the outer loop with the same step size. 
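        // Illustrative example (hypothetical values): {{a,+,b}<L2>,+,c}<L1>,
        // with L1 the outer loop, is rebuilt as {{a,+,c}<L1>,+,b}<L2> so that
        // recurrences end up nested in loop-depth order.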
// // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the // inner recurrence has the same property. SCEV::NoWrapFlags OuterFlags = maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags()); NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags); AllInvariant = true; for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i) if (!isLoopInvariant(NestedOperands[i], NestedLoop)) { AllInvariant = false; break; } if (AllInvariant) { // Ok, both add recurrences are valid after the transformation. // // The inner recurrence keeps its NW flag but only keeps NUW/NSW if // the outer recurrence has the same property. SCEV::NoWrapFlags InnerFlags = maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags); return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags); } } // Reset Operands to its original state. Operands[0] = NestedAR; } } // Okay, it looks like we really DO need an addrec expr. Check to see if we // already have one, otherwise create a new one. FoldingSetNodeID ID; ID.AddInteger(scAddRecExpr); for (unsigned i = 0, e = Operands.size(); i != e; ++i) ID.AddPointer(Operands[i]); ID.AddPointer(L); void *IP = nullptr; SCEVAddRecExpr *S = static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); if (!S) { const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size()); std::uninitialized_copy(Operands.begin(), Operands.end(), O); S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Operands.size(), L); UniqueSCEVs.InsertNode(S, IP); } S->setNoWrapFlags(Flags); return S; } const SCEV * ScalarEvolution::getGEPExpr(Type *PointeeType, const SCEV *BaseExpr, const SmallVectorImpl<const SCEV *> &IndexExprs, bool InBounds) { // getSCEV(Base)->getType() has the same address space as Base->getType() // because SCEV::getType() preserves the address space. Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType()); // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP // instruction to its SCEV, because the Instruction may be guarded by control // flow and the no-overflow bits may not be valid for the expression in any // context. SCEV::NoWrapFlags Wrap = InBounds ? SCEV::FlagNSW : SCEV::FlagAnyWrap; const SCEV *TotalOffset = getConstant(IntPtrTy, 0); // The address space is unimportant. The first thing we do on CurTy is getting // its element type. Type *CurTy = PointerType::getUnqual(PointeeType); for (const SCEV *IndexExpr : IndexExprs) { // Compute the (potentially symbolic) offset in bytes for this index. if (StructType *STy = dyn_cast<StructType>(CurTy)) { // For a struct, add the member offset. ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); unsigned FieldNo = Index->getZExtValue(); const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo); // Add the field offset to the running total offset. TotalOffset = getAddExpr(TotalOffset, FieldOffset); // Update CurTy to the type of the field at Index. CurTy = STy->getTypeAtIndex(Index); } else { // Update CurTy to its element type. CurTy = cast<SequentialType>(CurTy)->getElementType(); // For an array, add the element offset, explicitly scaled. const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy); // Getelementptr indices are signed. IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy); // Multiply the index by the element size to compute the element offset. const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap); // Add the element offset to the running total offset. 
TotalOffset = getAddExpr(TotalOffset, LocalOffset); } } // Add the total offset from all the GEP indices to the base. return getAddExpr(BaseExpr, TotalOffset, Wrap); } const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) { SmallVector<const SCEV *, 2> Ops; Ops.push_back(LHS); Ops.push_back(RHS); return getSMaxExpr(Ops); } const SCEV * ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { assert(!Ops.empty() && "Cannot get empty smax!"); if (Ops.size() == 1) return Ops[0]; #ifndef NDEBUG Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); for (unsigned i = 1, e = Ops.size(); i != e; ++i) assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && "SCEVSMaxExpr operand types don't match!"); #endif // Sort by complexity, this groups all similar expression types together. GroupByComplexity(Ops, LI); // If there are any constants, fold them together. unsigned Idx = 0; if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { ++Idx; assert(Idx < Ops.size()); while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { // We found two constants, fold them together! ConstantInt *Fold = ConstantInt::get(getContext(), APIntOps::smax(LHSC->getValue()->getValue(), RHSC->getValue()->getValue())); Ops[0] = getConstant(Fold); Ops.erase(Ops.begin()+1); // Erase the folded element if (Ops.size() == 1) return Ops[0]; LHSC = cast<SCEVConstant>(Ops[0]); } // If we are left with a constant minimum-int, strip it off. if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) { Ops.erase(Ops.begin()); --Idx; } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) { // If we have an smax with a constant maximum-int, it will always be // maximum-int. return Ops[0]; } if (Ops.size() == 1) return Ops[0]; } // Find the first SMax while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr) ++Idx; // Check to see if one of the operands is an SMax. If so, expand its operands // onto our operand list, and recurse to simplify. if (Idx < Ops.size()) { bool DeletedSMax = false; while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) { Ops.erase(Ops.begin()+Idx); Ops.append(SMax->op_begin(), SMax->op_end()); DeletedSMax = true; } if (DeletedSMax) return getSMaxExpr(Ops); } // Okay, check to see if the same value occurs in the operand list twice. If // so, delete one. Since we sorted the list, these values are required to // be adjacent. for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) // X smax Y smax Y --> X smax Y // X smax Y --> X, if X is always greater than Y if (Ops[i] == Ops[i+1] || isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) { Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2); --i; --e; } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) { Ops.erase(Ops.begin()+i, Ops.begin()+i+1); --i; --e; } if (Ops.size() == 1) return Ops[0]; assert(!Ops.empty() && "Reduced smax down to nothing!"); // Okay, it looks like we really DO need an smax expr. Check to see if we // already have one, otherwise create a new one. 
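  // Note (illustrative): the lookup below keys on the expression kind plus the
  // operand pointers, so structurally identical smax expressions share a
  // single node, and SCEV pointer equality doubles as structural equality.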
FoldingSetNodeID ID; ID.AddInteger(scSMaxExpr); for (unsigned i = 0, e = Ops.size(); i != e; ++i) ID.AddPointer(Ops[i]); void *IP = nullptr; if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); std::uninitialized_copy(Ops.begin(), Ops.end(), O); SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator), O, Ops.size()); UniqueSCEVs.InsertNode(S, IP); return S; } const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) { SmallVector<const SCEV *, 2> Ops; Ops.push_back(LHS); Ops.push_back(RHS); return getUMaxExpr(Ops); } const SCEV * ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { assert(!Ops.empty() && "Cannot get empty umax!"); if (Ops.size() == 1) return Ops[0]; #ifndef NDEBUG Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); for (unsigned i = 1, e = Ops.size(); i != e; ++i) assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && "SCEVUMaxExpr operand types don't match!"); #endif // Sort by complexity, this groups all similar expression types together. GroupByComplexity(Ops, LI); // If there are any constants, fold them together. unsigned Idx = 0; if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { ++Idx; assert(Idx < Ops.size()); while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { // We found two constants, fold them together! ConstantInt *Fold = ConstantInt::get(getContext(), APIntOps::umax(LHSC->getValue()->getValue(), RHSC->getValue()->getValue())); Ops[0] = getConstant(Fold); Ops.erase(Ops.begin()+1); // Erase the folded element if (Ops.size() == 1) return Ops[0]; LHSC = cast<SCEVConstant>(Ops[0]); } // If we are left with a constant minimum-int, strip it off. if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) { Ops.erase(Ops.begin()); --Idx; } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) { // If we have an umax with a constant maximum-int, it will always be // maximum-int. return Ops[0]; } if (Ops.size() == 1) return Ops[0]; } // Find the first UMax while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr) ++Idx; // Check to see if one of the operands is a UMax. If so, expand its operands // onto our operand list, and recurse to simplify. if (Idx < Ops.size()) { bool DeletedUMax = false; while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) { Ops.erase(Ops.begin()+Idx); Ops.append(UMax->op_begin(), UMax->op_end()); DeletedUMax = true; } if (DeletedUMax) return getUMaxExpr(Ops); } // Okay, check to see if the same value occurs in the operand list twice. If // so, delete one. Since we sorted the list, these values are required to // be adjacent. for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) // X umax Y umax Y --> X umax Y // X umax Y --> X, if X is always greater than Y if (Ops[i] == Ops[i+1] || isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) { Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2); --i; --e; } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) { Ops.erase(Ops.begin()+i, Ops.begin()+i+1); --i; --e; } if (Ops.size() == 1) return Ops[0]; assert(!Ops.empty() && "Reduced umax down to nothing!"); // Okay, it looks like we really DO need a umax expr. Check to see if we // already have one, otherwise create a new one. 
FoldingSetNodeID ID; ID.AddInteger(scUMaxExpr); for (unsigned i = 0, e = Ops.size(); i != e; ++i) ID.AddPointer(Ops[i]); void *IP = nullptr; if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); std::uninitialized_copy(Ops.begin(), Ops.end(), O); SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator), O, Ops.size()); UniqueSCEVs.InsertNode(S, IP); return S; } const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS, const SCEV *RHS) { // ~smax(~x, ~y) == smin(x, y). return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); } const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, const SCEV *RHS) { // ~umax(~x, ~y) == umin(x, y) return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); } const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) { // We can bypass creating a target-independent // constant expression and then folding it back into a ConstantInt. // This is just a compile-time optimization. return getConstant(IntTy, F->getParent()->getDataLayout().getTypeAllocSize(AllocTy)); } const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo) { // We can bypass creating a target-independent // constant expression and then folding it back into a ConstantInt. // This is just a compile-time optimization. return getConstant( IntTy, F->getParent()->getDataLayout().getStructLayout(STy)->getElementOffset( FieldNo)); } const SCEV *ScalarEvolution::getUnknown(Value *V) { // Don't attempt to do anything other than create a SCEVUnknown object // here. createSCEV only calls getUnknown after checking for all other // interesting possibilities, and any other code that calls getUnknown // is doing so in order to hide a value from SCEV canonicalization. FoldingSetNodeID ID; ID.AddInteger(scUnknown); ID.AddPointer(V); void *IP = nullptr; if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) { assert(cast<SCEVUnknown>(S)->getValue() == V && "Stale SCEVUnknown in uniquing map!"); return S; } SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this, FirstUnknown); FirstUnknown = cast<SCEVUnknown>(S); UniqueSCEVs.InsertNode(S, IP); return S; } //===----------------------------------------------------------------------===// // Basic SCEV Analysis and PHI Idiom Recognition Code // /// isSCEVable - Test if values of the given type are analyzable within /// the SCEV framework. This primarily includes integer types, and it /// can optionally include pointer types if the ScalarEvolution class /// has access to target-specific information. bool ScalarEvolution::isSCEVable(Type *Ty) const { // Integers and pointers are always SCEVable. return Ty->isIntegerTy() || Ty->isPointerTy(); } /// getTypeSizeInBits - Return the size in bits of the specified type, /// for which isSCEVable must return true. uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const { assert(isSCEVable(Ty) && "Type is not SCEVable!"); return F->getParent()->getDataLayout().getTypeSizeInBits(Ty); } /// getEffectiveSCEVType - Return a type with the same bitwidth as /// the given type and which represents how SCEV will treat the given /// type, for which isSCEVable must return true. For pointer types, /// this is the pointer-sized integer type. Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const { assert(isSCEVable(Ty) && "Type is not SCEVable!"); if (Ty->isIntegerTy()) { return Ty; } // The only other support type is pointer. 
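  // Illustrative example: on a target with 64-bit pointers, an i8* value is
  // given the effective type i64 by the lookup below, which is what allows
  // pointer and same-width integer expressions to be combined.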
  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
  return F->getParent()->getDataLayout().getIntPtrType(Ty);
}

const SCEV *ScalarEvolution::getCouldNotCompute() {
  return &CouldNotCompute;
}

namespace {
  // Helper class working with SCEVTraversal to figure out if a SCEV contains
  // a SCEVUnknown with null value-pointer. FindInvalidSCEVUnknown::FindOne
  // is set iff we find such a SCEVUnknown.
  //
  struct FindInvalidSCEVUnknown {
    bool FindOne;
    FindInvalidSCEVUnknown() { FindOne = false; }
    bool follow(const SCEV *S) {
      switch (static_cast<SCEVTypes>(S->getSCEVType())) {
      case scConstant:
        return false;
      case scUnknown:
        if (!cast<SCEVUnknown>(S)->getValue())
          FindOne = true;
        return false;
      default:
        return true;
      }
    }
    bool isDone() const { return FindOne; }
  };
}

bool ScalarEvolution::checkValidity(const SCEV *S) const {
  FindInvalidSCEVUnknown F;
  SCEVTraversal<FindInvalidSCEVUnknown> ST(F);
  ST.visitAll(S);

  return !F.FindOne;
}

/// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
/// expression and create a new one.
const SCEV *ScalarEvolution::getSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    if (checkValidity(S))
      return S;
    else
      ValueExprMap.erase(I);
  }
  const SCEV *S = createSCEV(V);

  // The process of creating a SCEV for V may have caused other SCEVs
  // to have been created, so it's necessary to insert the new entry
  // from scratch, rather than trying to remember the insert position
  // above.
  ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S));
  return S;
}

/// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
///
const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
               cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  return getMulExpr(V,
              getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))));
}

/// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
                cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  const SCEV *AllOnes =
              getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
  return getMinusSCEV(AllOnes, V);
}

/// getMinusSCEV - Return LHS-RHS.  Minus is represented in SCEV as A+B*-1.
const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
                                          SCEV::NoWrapFlags Flags) {
  assert(!maskFlags(Flags, SCEV::FlagNUW) && "subtraction does not have NUW");

  // Fast path: X - X --> 0.
  if (LHS == RHS)
    return getConstant(LHS->getType(), 0);

  // X - Y --> X + -Y.
  // X -(nsw || nuw) Y --> X + -Y.
  return getAddExpr(LHS, getNegativeSCEV(RHS));
}

/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
/// input value to the specified type.  If the type must be extended, it is zero
/// extended.
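// Illustrative client-side sketch (hypothetical helper, kept as a comment so it
// is not part of the pass): normalizing a SCEV to i64 before a comparison.
//
//   const SCEV *widenOrTruncTo64(ScalarEvolution &SE, const SCEV *S) {
//     Type *I64 = Type::getInt64Ty(S->getType()->getContext());
//     return SE.getTruncateOrZeroExtend(S, I64); // no-op, trunc, or zext
//   }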
const SCEV * ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) { Type *SrcTy = V->getType(); assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && (Ty->isIntegerTy() || Ty->isPointerTy()) && "Cannot truncate or zero extend with non-integer arguments!"); if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) return V; // No conversion if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) return getTruncateExpr(V, Ty); return getZeroExtendExpr(V, Ty); } /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the /// input value to the specified type. If the type must be extended, it is sign /// extended. const SCEV * ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty) { Type *SrcTy = V->getType(); assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && (Ty->isIntegerTy() || Ty->isPointerTy()) && "Cannot truncate or zero extend with non-integer arguments!"); if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) return V; // No conversion if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) return getTruncateExpr(V, Ty); return getSignExtendExpr(V, Ty); } /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the /// input value to the specified type. If the type must be extended, it is zero /// extended. The conversion must not be narrowing. const SCEV * ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) { Type *SrcTy = V->getType(); assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && (Ty->isIntegerTy() || Ty->isPointerTy()) && "Cannot noop or zero extend with non-integer arguments!"); assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && "getNoopOrZeroExtend cannot truncate!"); if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) return V; // No conversion return getZeroExtendExpr(V, Ty); } /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the /// input value to the specified type. If the type must be extended, it is sign /// extended. The conversion must not be narrowing. const SCEV * ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) { Type *SrcTy = V->getType(); assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && (Ty->isIntegerTy() || Ty->isPointerTy()) && "Cannot noop or sign extend with non-integer arguments!"); assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && "getNoopOrSignExtend cannot truncate!"); if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) return V; // No conversion return getSignExtendExpr(V, Ty); } /// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of /// the input value to the specified type. If the type must be extended, /// it is extended with unspecified bits. The conversion must not be /// narrowing. const SCEV * ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) { Type *SrcTy = V->getType(); assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && (Ty->isIntegerTy() || Ty->isPointerTy()) && "Cannot noop or any extend with non-integer arguments!"); assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && "getNoopOrAnyExtend cannot truncate!"); if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) return V; // No conversion return getAnyExtendExpr(V, Ty); } /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the /// input value to the specified type. The conversion must not be widening. 
const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or noop with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
         "getTruncateOrNoop cannot extend!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getTruncateExpr(V, Ty);
}

/// getUMaxFromMismatchedTypes - Promote the operands to the wider of
/// the types using zero-extension, and then perform a umax operation
/// with them.
const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMaxExpr(PromotedLHS, PromotedRHS);
}

/// getUMinFromMismatchedTypes - Promote the operands to the wider of
/// the types using zero-extension, and then perform a umin operation
/// with them.
const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMinExpr(PromotedLHS, PromotedRHS);
}

/// getPointerBase - Transitively follow the chain of pointer-type operands
/// until reaching a SCEV that does not have a single pointer operand. This
/// returns a SCEVUnknown pointer for well-formed pointer-type expressions,
/// but corner cases do exist.
const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
  // A pointer operand may evaluate to a nonpointer expression, such as null.
  if (!V->getType()->isPointerTy())
    return V;

  if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
    return getPointerBase(Cast->getOperand());
  } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
    const SCEV *PtrOp = nullptr;
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      if ((*I)->getType()->isPointerTy()) {
        // Cannot find the base of an expression with multiple pointer operands.
        if (PtrOp)
          return V;
        PtrOp = *I;
      }
    }
    if (!PtrOp)
      return V;
    return getPointerBase(PtrOp);
  }
  return V;
}

/// PushDefUseChildren - Push users of the given Instruction
/// onto the given Worklist.
static void
PushDefUseChildren(Instruction *I, SmallVectorImpl<Instruction *> &Worklist) {
  // Push the def-use children onto the Worklist stack.
  for (User *U : I->users())
    Worklist.push_back(cast<Instruction>(U));
}

/// ForgetSymbolicName - This looks up computed SCEV values for all
/// instructions that depend on the given instruction and removes them from
/// the ValueExprMapType map if they reference SymName. This is used during PHI
/// resolution.
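// Illustrative example (hypothetical IR): if %phi was temporarily mapped to a
// symbolic SCEVUnknown and %add = add i32 %phi, 1 was analyzed against it, the
// walk below erases %add's cached SCEV so it is recomputed once %phi has its
// final recurrence.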
void ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) { SmallVector<Instruction *, 16> Worklist; PushDefUseChildren(PN, Worklist); SmallPtrSet<Instruction *, 8> Visited; Visited.insert(PN); while (!Worklist.empty()) { Instruction *I = Worklist.pop_back_val(); if (!Visited.insert(I).second) continue; ValueExprMapType::iterator It = ValueExprMap.find_as(static_cast<Value *>(I)); if (It != ValueExprMap.end()) { const SCEV *Old = It->second; // Short-circuit the def-use traversal if the symbolic name // ceases to appear in expressions. if (Old != SymName && !hasOperand(Old, SymName)) continue; // SCEVUnknown for a PHI either means that it has an unrecognized // structure, it's a PHI that's in the progress of being computed // by createNodeForPHI, or it's a single-value PHI. In the first case, // additional loop trip count information isn't going to change anything. // In the second case, createNodeForPHI will perform the necessary // updates on its own when it gets to that point. In the third, we do // want to forget the SCEVUnknown. if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old) || (I != PN && Old == SymName)) { forgetMemoizedResults(Old); ValueExprMap.erase(It); } } PushDefUseChildren(I, Worklist); } } /// createNodeForPHI - PHI nodes have two cases. Either the PHI node exists in /// a loop header, making it a potential recurrence, or it doesn't. /// const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { if (const Loop *L = LI->getLoopFor(PN->getParent())) if (L->getHeader() == PN->getParent()) { // The loop may have multiple entrances or multiple exits; we can analyze // this phi as an addrec if it has a unique entry value and a unique // backedge value. Value *BEValueV = nullptr, *StartValueV = nullptr; for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { Value *V = PN->getIncomingValue(i); if (L->contains(PN->getIncomingBlock(i))) { if (!BEValueV) { BEValueV = V; } else if (BEValueV != V) { BEValueV = nullptr; break; } } else if (!StartValueV) { StartValueV = V; } else if (StartValueV != V) { StartValueV = nullptr; break; } } if (BEValueV && StartValueV) { // While we are analyzing this PHI node, handle its value symbolically. const SCEV *SymbolicName = getUnknown(PN); assert(ValueExprMap.find_as(PN) == ValueExprMap.end() && "PHI node already processed?"); ValueExprMap.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName)); // Using this symbolic name for the PHI, analyze the value coming around // the back-edge. const SCEV *BEValue = getSCEV(BEValueV); // NOTE: If BEValue is loop invariant, we know that the PHI node just // has a special value for the first iteration of the loop. // If the value coming around the backedge is an add with the symbolic // value we just inserted, then we found a simple induction variable! if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) { // If there is a single occurrence of the symbolic value, replace it // with a recurrence. unsigned FoundIndex = Add->getNumOperands(); for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) if (Add->getOperand(i) == SymbolicName) if (FoundIndex == e) { FoundIndex = i; break; } if (FoundIndex != Add->getNumOperands()) { // Create an add with everything but the specified operand. 
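            // Illustrative example (hypothetical IR):
            //   loop:
            //     %i      = phi i32 [ 0, %preheader ], [ %i.next, %loop ]
            //     %i.next = add nsw i32 %i, 1
            // Here BEValue analyzes to (symbolic %i) + 1, Accum below becomes
            // 1, and the PHI is rewritten as {0,+,1}<nsw><%loop>.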
SmallVector<const SCEV *, 8> Ops; for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) if (i != FoundIndex) Ops.push_back(Add->getOperand(i)); const SCEV *Accum = getAddExpr(Ops); // This is not a valid addrec if the step amount is varying each // loop iteration, but is not itself an addrec in this loop. if (isLoopInvariant(Accum, L) || (isa<SCEVAddRecExpr>(Accum) && cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) { SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; // If the increment doesn't overflow, then neither the addrec nor // the post-increment will overflow. if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) { if (OBO->getOperand(0) == PN) { if (OBO->hasNoUnsignedWrap()) Flags = setFlags(Flags, SCEV::FlagNUW); if (OBO->hasNoSignedWrap()) Flags = setFlags(Flags, SCEV::FlagNSW); } } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) { // If the increment is an inbounds GEP, then we know the address // space cannot be wrapped around. We cannot make any guarantee // about signed or unsigned overflow because pointers are // unsigned but we may have a negative index from the base // pointer. We can guarantee that no unsigned wrap occurs if the // indices form a positive value. if (GEP->isInBounds() && GEP->getOperand(0) == PN) { Flags = setFlags(Flags, SCEV::FlagNW); const SCEV *Ptr = getSCEV(GEP->getPointerOperand()); if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr))) Flags = setFlags(Flags, SCEV::FlagNUW); } // We cannot transfer nuw and nsw flags from subtraction // operations -- sub nuw X, Y is not the same as add nuw X, -Y // for instance. } const SCEV *StartVal = getSCEV(StartValueV); const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); // Since the no-wrap flags are on the increment, they apply to the // post-incremented value as well. if (isLoopInvariant(Accum, L)) (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags); // Okay, for the entire analysis of this edge we assumed the PHI // to be symbolic. We now need to go back and purge all of the // entries for the scalars that use the symbolic expression. ForgetSymbolicName(PN, SymbolicName); ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; return PHISCEV; } } } else if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(BEValue)) { // Otherwise, this could be a loop like this: // i = 0; for (j = 1; ..; ++j) { .... i = j; } // In this case, j = {1,+,1} and BEValue is j. // Because the other in-value of i (0) fits the evolution of BEValue // i really is an addrec evolution. if (AddRec->getLoop() == L && AddRec->isAffine()) { const SCEV *StartVal = getSCEV(StartValueV); // If StartVal = j.start - j.stride, we can use StartVal as the // initial step of the addrec evolution. if (StartVal == getMinusSCEV(AddRec->getOperand(0), AddRec->getOperand(1))) { // FIXME: For constant StartVal, we should be able to infer // no-wrap flags. const SCEV *PHISCEV = getAddRecExpr(StartVal, AddRec->getOperand(1), L, SCEV::FlagAnyWrap); // Okay, for the entire analysis of this edge we assumed the PHI // to be symbolic. We now need to go back and purge all of the // entries for the scalars that use the symbolic expression. ForgetSymbolicName(PN, SymbolicName); ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; return PHISCEV; } } } } } // If the PHI has a single incoming value, follow that value, unless the // PHI's incoming blocks are in a different loop, in which case doing so // risks breaking LCSSA form. 
Instcombine would normally zap these, but // it doesn't have DominatorTree information, so it may miss cases. if (Value *V = SimplifyInstruction(PN, F->getParent()->getDataLayout(), TLI, DT, AC)) if (LI->replacementPreservesLCSSAForm(PN, V)) return getSCEV(V); // If it's not a loop phi, we can't handle it yet. return getUnknown(PN); } /// createNodeForGEP - Expand GEP instructions into add and multiply /// operations. This allows them to be analyzed by regular SCEV code. /// const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { Value *Base = GEP->getOperand(0); // Don't attempt to analyze GEPs over unsized objects. if (!Base->getType()->getPointerElementType()->isSized()) return getUnknown(GEP); SmallVector<const SCEV *, 4> IndexExprs; for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index) IndexExprs.push_back(getSCEV(*Index)); return getGEPExpr(GEP->getSourceElementType(), getSCEV(Base), IndexExprs, GEP->isInBounds()); } /// GetMinTrailingZeros - Determine the minimum number of zero bits that S is /// guaranteed to end in (at every loop iteration). It is, at the same time, /// the minimum number of times S is divisible by 2. For example, given {4,+,8} /// it returns 2. If S is guaranteed to be 0, it returns the bitwidth of S. uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) return C->getValue()->getValue().countTrailingZeros(); if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) return std::min(GetMinTrailingZeros(T->getOperand()), (uint32_t)getTypeSizeInBits(T->getType())); if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ? getTypeSizeInBits(E->getType()) : OpRes; } if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) { uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ? getTypeSizeInBits(E->getType()) : OpRes; } if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { // The result is the min of all operands results. uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); return MinOpRes; } if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { // The result is the sum of all operands results. uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); uint32_t BitWidth = getTypeSizeInBits(M->getType()); for (unsigned i = 1, e = M->getNumOperands(); SumOpRes != BitWidth && i != e; ++i) SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth); return SumOpRes; } if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { // The result is the min of all operands results. uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); return MinOpRes; } if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { // The result is the min of all operands results. 
uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); return MinOpRes; } if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { // The result is the min of all operands results. uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); return MinOpRes; } if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { // For a SCEVUnknown, ask ValueTracking. unsigned BitWidth = getTypeSizeInBits(U->getType()); APInt Zeros(BitWidth, 0), Ones(BitWidth, 0); computeKnownBits(U->getValue(), Zeros, Ones, F->getParent()->getDataLayout(), 0, AC, nullptr, DT); return Zeros.countTrailingOnes(); } // SCEVUDivExpr return 0; } /// GetRangeFromMetadata - Helper method to assign a range to V from /// metadata present in the IR. static Optional<ConstantRange> GetRangeFromMetadata(Value *V) { if (Instruction *I = dyn_cast<Instruction>(V)) { if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) { ConstantRange TotalRange( cast<IntegerType>(I->getType())->getBitWidth(), false); unsigned NumRanges = MD->getNumOperands() / 2; assert(NumRanges >= 1); for (unsigned i = 0; i < NumRanges; ++i) { ConstantInt *Lower = mdconst::extract<ConstantInt>(MD->getOperand(2 * i + 0)); ConstantInt *Upper = mdconst::extract<ConstantInt>(MD->getOperand(2 * i + 1)); ConstantRange Range(Lower->getValue(), Upper->getValue()); TotalRange = TotalRange.unionWith(Range); } return TotalRange; } } return None; } /// getRange - Determine the range for a particular SCEV. If SignHint is /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges /// with a "cleaner" unsigned (resp. signed) representation. /// ConstantRange ScalarEvolution::getRange(const SCEV *S, ScalarEvolution::RangeSignHint SignHint) { DenseMap<const SCEV *, ConstantRange> &Cache = SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges : SignedRanges; // See if we've computed this range already. DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S); if (I != Cache.end()) return I->second; if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) return setRange(C, SignHint, ConstantRange(C->getValue()->getValue())); unsigned BitWidth = getTypeSizeInBits(S->getType()); ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); // If the value has known zeros, the maximum value will have those known zeros // as well. 
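  // Illustrative example for GetRangeFromMetadata above (a sketch; the
  // metadata values are hypothetical):
  //
  //   %x = load i8, i8* %p, !range !0
  //   !0 = !{i8 0, i8 10, i8 64, i8 66}
  //
  // The metadata operands are consumed in (Lower, Upper) pairs, so the two
  // half-open intervals [0, 10) and [64, 66) are unioned into one
  // ConstantRange that conservatively covers both.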
uint32_t TZ = GetMinTrailingZeros(S); if (TZ != 0) { if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) ConservativeResult = ConstantRange(APInt::getMinValue(BitWidth), APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); else ConservativeResult = ConstantRange( APInt::getSignedMinValue(BitWidth), APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); } if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { ConstantRange X = getRange(Add->getOperand(0), SignHint); for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) X = X.add(getRange(Add->getOperand(i), SignHint)); return setRange(Add, SignHint, ConservativeResult.intersectWith(X)); } if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { ConstantRange X = getRange(Mul->getOperand(0), SignHint); for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) X = X.multiply(getRange(Mul->getOperand(i), SignHint)); return setRange(Mul, SignHint, ConservativeResult.intersectWith(X)); } if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { ConstantRange X = getRange(SMax->getOperand(0), SignHint); for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) X = X.smax(getRange(SMax->getOperand(i), SignHint)); return setRange(SMax, SignHint, ConservativeResult.intersectWith(X)); } if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { ConstantRange X = getRange(UMax->getOperand(0), SignHint); for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) X = X.umax(getRange(UMax->getOperand(i), SignHint)); return setRange(UMax, SignHint, ConservativeResult.intersectWith(X)); } if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { ConstantRange X = getRange(UDiv->getLHS(), SignHint); ConstantRange Y = getRange(UDiv->getRHS(), SignHint); return setRange(UDiv, SignHint, ConservativeResult.intersectWith(X.udiv(Y))); } if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { ConstantRange X = getRange(ZExt->getOperand(), SignHint); return setRange(ZExt, SignHint, ConservativeResult.intersectWith(X.zeroExtend(BitWidth))); } if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { ConstantRange X = getRange(SExt->getOperand(), SignHint); return setRange(SExt, SignHint, ConservativeResult.intersectWith(X.signExtend(BitWidth))); } if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { ConstantRange X = getRange(Trunc->getOperand(), SignHint); return setRange(Trunc, SignHint, ConservativeResult.intersectWith(X.truncate(BitWidth))); } if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { // If there's no unsigned wrap, the value will never be less than its // initial value. if (AddRec->getNoWrapFlags(SCEV::FlagNUW)) if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart())) if (!C->getValue()->isZero()) ConservativeResult = ConservativeResult.intersectWith( ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0))); // If there's no signed wrap, and all the operands have the same sign or // zero, the value won't ever change sign. 
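  // Illustrative examples for the operand-wise range propagation above
  // (sketches):
  //
  //   getRange(zext i8 %x to i32) = getRange(%x).zeroExtend(32), which is
  //   always contained in [0, 256).
  //
  //   getRange(%a + %b) = getRange(%a).add(getRange(%b)), intersected with
  //   the conservative result derived from the known trailing zeros.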
if (AddRec->getNoWrapFlags(SCEV::FlagNSW)) { bool AllNonNeg = true; bool AllNonPos = true; for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false; if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false; } if (AllNonNeg) ConservativeResult = ConservativeResult.intersectWith( ConstantRange(APInt(BitWidth, 0), APInt::getSignedMinValue(BitWidth))); else if (AllNonPos) ConservativeResult = ConservativeResult.intersectWith( ConstantRange(APInt::getSignedMinValue(BitWidth), APInt(BitWidth, 1))); } // TODO: non-affine addrec if (AddRec->isAffine()) { Type *Ty = AddRec->getType(); const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop()); if (!isa<SCEVCouldNotCompute>(MaxBECount) && getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { // Check for overflow. This must be done with ConstantRange arithmetic // because we could be called from within the ScalarEvolution overflow // checking code. MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty); ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount); ConstantRange ZExtMaxBECountRange = MaxBECountRange.zextOrTrunc(BitWidth * 2 + 1); const SCEV *Start = AddRec->getStart(); const SCEV *Step = AddRec->getStepRecurrence(*this); ConstantRange StepSRange = getSignedRange(Step); ConstantRange SExtStepSRange = StepSRange.sextOrTrunc(BitWidth * 2 + 1); ConstantRange StartURange = getUnsignedRange(Start); ConstantRange EndURange = StartURange.add(MaxBECountRange.multiply(StepSRange)); // Check for unsigned overflow. ConstantRange ZExtStartURange = StartURange.zextOrTrunc(BitWidth * 2 + 1); ConstantRange ZExtEndURange = EndURange.zextOrTrunc(BitWidth * 2 + 1); if (ZExtStartURange.add(ZExtMaxBECountRange.multiply(SExtStepSRange)) == ZExtEndURange) { APInt Min = APIntOps::umin(StartURange.getUnsignedMin(), EndURange.getUnsignedMin()); APInt Max = APIntOps::umax(StartURange.getUnsignedMax(), EndURange.getUnsignedMax()); bool IsFullRange = Min.isMinValue() && Max.isMaxValue(); if (!IsFullRange) ConservativeResult = ConservativeResult.intersectWith(ConstantRange(Min, Max + 1)); } ConstantRange StartSRange = getSignedRange(Start); ConstantRange EndSRange = StartSRange.add(MaxBECountRange.multiply(StepSRange)); // Check for signed overflow. This must be done with ConstantRange // arithmetic because we could be called from within the ScalarEvolution // overflow checking code. ConstantRange SExtStartSRange = StartSRange.sextOrTrunc(BitWidth * 2 + 1); ConstantRange SExtEndSRange = EndSRange.sextOrTrunc(BitWidth * 2 + 1); if (SExtStartSRange.add(ZExtMaxBECountRange.multiply(SExtStepSRange)) == SExtEndSRange) { APInt Min = APIntOps::smin(StartSRange.getSignedMin(), EndSRange.getSignedMin()); APInt Max = APIntOps::smax(StartSRange.getSignedMax(), EndSRange.getSignedMax()); bool IsFullRange = Min.isMinSignedValue() && Max.isMaxSignedValue(); if (!IsFullRange) ConservativeResult = ConservativeResult.intersectWith(ConstantRange(Min, Max + 1)); } } } return setRange(AddRec, SignHint, ConservativeResult); } if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { // Check if the IR explicitly contains !range metadata. Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue()); if (MDRange.hasValue()) ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue()); // Split here to avoid paying the compile-time cost of calling both // computeKnownBits and ComputeNumSignBits. This restriction can be lifted // if needed. 
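  // Illustrative example for the affine addrec case above (a sketch; the
  // numbers are hypothetical):
  //
  //   AddRec = {0,+,2}<%loop> over i8, MaxBECount = 9
  //
  // The end value is Start + MaxBECount * Step = 18.  The wrap checks are
  // done in 2 * BitWidth + 1 bits; since neither the unsigned nor the signed
  // computation wraps here, the unsigned range can be tightened to [0, 19).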
const DataLayout &DL = F->getParent()->getDataLayout(); if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) { // For a SCEVUnknown, ask ValueTracking. APInt Zeros(BitWidth, 0), Ones(BitWidth, 0); computeKnownBits(U->getValue(), Zeros, Ones, DL, 0, AC, nullptr, DT); if (Ones != ~Zeros + 1) ConservativeResult = ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1)); } else { assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED && "generalize as needed!"); unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, AC, nullptr, DT); if (NS > 1) ConservativeResult = ConservativeResult.intersectWith( ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1), APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1)); } return setRange(U, SignHint, ConservativeResult); } return setRange(S, SignHint, ConservativeResult); } /// createSCEV - We know that there is no SCEV for the specified value. /// Analyze the expression. /// const SCEV *ScalarEvolution::createSCEV(Value *V) { if (!isSCEVable(V->getType())) return getUnknown(V); unsigned Opcode = Instruction::UserOp1; if (Instruction *I = dyn_cast<Instruction>(V)) { Opcode = I->getOpcode(); // Don't attempt to analyze instructions in blocks that aren't // reachable. Such instructions don't matter, and they aren't required // to obey basic rules for definitions dominating uses which this // analysis depends on. if (!DT->isReachableFromEntry(I->getParent())) return getUnknown(V); } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) Opcode = CE->getOpcode(); else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) return getConstant(CI); else if (isa<ConstantPointerNull>(V)) return getConstant(V->getType(), 0); else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee()); else return getUnknown(V); Operator *U = cast<Operator>(V); switch (Opcode) { case Instruction::Add: { // The simple thing to do would be to just call getSCEV on both operands // and call getAddExpr with the result. However if we're looking at a // bunch of things all added together, this can be quite inefficient, // because it leads to N-1 getAddExpr calls for N ultimate operands. // Instead, gather up all the operands and make a single getAddExpr call. // LLVM IR canonical form means we need only traverse the left operands. // // Don't apply this instruction's NSW or NUW flags to the new // expression. The instruction may be guarded by control flow that the // no-wrap behavior depends on. Non-control-equivalent instructions can be // mapped to the same SCEV expression, and it would be incorrect to transfer // NSW/NUW semantics to those operations. SmallVector<const SCEV *, 4> AddOps; AddOps.push_back(getSCEV(U->getOperand(1))); for (Value *Op = U->getOperand(0); ; Op = U->getOperand(0)) { unsigned Opcode = Op->getValueID() - Value::InstructionVal; if (Opcode != Instruction::Add && Opcode != Instruction::Sub) break; U = cast<Operator>(Op); const SCEV *Op1 = getSCEV(U->getOperand(1)); if (Opcode == Instruction::Sub) AddOps.push_back(getNegativeSCEV(Op1)); else AddOps.push_back(Op1); } AddOps.push_back(getSCEV(U->getOperand(0))); return getAddExpr(AddOps); } case Instruction::Mul: { // Don't transfer NSW/NUW for the same reason as AddExpr. 
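  // Illustrative example for the operand gathering above (a sketch):
  //
  //   %t0 = sub i32 %a, %d
  //   %t1 = add i32 %t0, %b
  //   %t2 = add i32 %t1, %c
  //
  // Analyzing %t2 walks the chain of left-hand operands and issues a single
  // call getAddExpr({%c, %b, -%d, %a}) instead of a nest of binary
  // getAddExpr/getMinusSCEV calls, and deliberately drops the instructions'
  // NSW/NUW flags.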
SmallVector<const SCEV *, 4> MulOps; MulOps.push_back(getSCEV(U->getOperand(1))); for (Value *Op = U->getOperand(0); Op->getValueID() == Instruction::Mul + Value::InstructionVal; Op = U->getOperand(0)) { U = cast<Operator>(Op); MulOps.push_back(getSCEV(U->getOperand(1))); } MulOps.push_back(getSCEV(U->getOperand(0))); return getMulExpr(MulOps); } case Instruction::UDiv: return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1))); case Instruction::Sub: return getMinusSCEV(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1))); case Instruction::And: // For an expression like x&255 that merely masks off the high bits, // use zext(trunc(x)) as the SCEV expression. if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) { if (CI->isNullValue()) return getSCEV(U->getOperand(1)); if (CI->isAllOnesValue()) return getSCEV(U->getOperand(0)); const APInt &A = CI->getValue(); // Instcombine's ShrinkDemandedConstant may strip bits out of // constants, obscuring what would otherwise be a low-bits mask. // Use computeKnownBits to compute what ShrinkDemandedConstant // knew about to reconstruct a low-bits mask value. unsigned LZ = A.countLeadingZeros(); unsigned TZ = A.countTrailingZeros(); unsigned BitWidth = A.getBitWidth(); APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0); computeKnownBits(U->getOperand(0), KnownZero, KnownOne, F->getParent()->getDataLayout(), 0, AC, nullptr, DT); APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); if ((LZ != 0 || TZ != 0) && !((~A & ~KnownZero) & EffectiveMask)) { const SCEV *MulCount = getConstant( ConstantInt::get(getContext(), APInt::getOneBitSet(BitWidth, TZ))); return getMulExpr( getZeroExtendExpr( getTruncateExpr( getUDivExactExpr(getSCEV(U->getOperand(0)), MulCount), IntegerType::get(getContext(), BitWidth - LZ - TZ)), U->getType()), MulCount); } } break; case Instruction::Or: // If the RHS of the Or is a constant, we may have something like: // X*4+1 which got turned into X*4|1. Handle this as an Add so loop // optimizations will transparently handle this case. // // In order for this transformation to be safe, the LHS must be of the // form X*(2^n) and the Or constant must be less than 2^n. if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) { const SCEV *LHS = getSCEV(U->getOperand(0)); const APInt &CIVal = CI->getValue(); if (GetMinTrailingZeros(LHS) >= (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { // Build a plain add SCEV. const SCEV *S = getAddExpr(LHS, getSCEV(CI)); // If the LHS of the add was an addrec and it has no-wrap flags, // transfer the no-wrap flags, since an or won't introduce a wrap. if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) { const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS); const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags( OldAR->getNoWrapFlags()); } return S; } } break; case Instruction::Xor: if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) { // If the RHS of the xor is a signbit, then this is just an add. // Instcombine turns add of signbit into xor as a strength reduction step. if (CI->getValue().isSignBit()) return getAddExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1))); // If the RHS of xor is -1, then this is a not operation. if (CI->isAllOnesValue()) return getNotSCEV(getSCEV(U->getOperand(0))); // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 
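  // Illustrative examples for the bitwise patterns above (sketches):
  //
  //   and i32 %x, 255          -->  (zext i8 (trunc i32 %x to i8) to i32)
  //   or  i32 %y, 1            -->  (%y + 1), provided the low bit of %y is
  //                                 known to be zero (e.g. %y = %z * 4)
  //   xor i32 %w, -2147483648  -->  (%w + -2147483648), since flipping the
  //                                 sign bit is the same as adding it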
// This is a variant of the check for xor with -1, and it handles // the case where instcombine has trimmed non-demanded bits out // of an xor with -1. if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0))) if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1))) if (BO->getOpcode() == Instruction::And && LCI->getValue() == CI->getValue()) if (const SCEVZeroExtendExpr *Z = dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) { Type *UTy = U->getType(); const SCEV *Z0 = Z->getOperand(); Type *Z0Ty = Z0->getType(); unsigned Z0TySize = getTypeSizeInBits(Z0Ty); // If C is a low-bits mask, the zero extend is serving to // mask off the high bits. Complement the operand and // re-apply the zext. if (APIntOps::isMask(Z0TySize, CI->getValue())) return getZeroExtendExpr(getNotSCEV(Z0), UTy); // If C is a single bit, it may be in the sign-bit position // before the zero-extend. In this case, represent the xor // using an add, which is equivalent, and re-apply the zext. APInt Trunc = CI->getValue().trunc(Z0TySize); if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() && Trunc.isSignBit()) return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)), UTy); } } break; case Instruction::Shl: // Turn shift left of a constant amount into a multiply. if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) { uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth(); // If the shift count is not less than the bitwidth, the result of // the shift is undefined. Don't try to analyze it, because the // resolution chosen here may differ from the resolution chosen in // other parts of the compiler. if (SA->getValue().uge(BitWidth)) break; Constant *X = ConstantInt::get(getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue())); return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X)); } break; case Instruction::LShr: // Turn logical shift right of a constant into a unsigned divide. if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) { uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth(); // If the shift count is not less than the bitwidth, the result of // the shift is undefined. Don't try to analyze it, because the // resolution chosen here may differ from the resolution chosen in // other parts of the compiler. if (SA->getValue().uge(BitWidth)) break; Constant *X = ConstantInt::get(getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue())); return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X)); } break; case Instruction::AShr: // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression. if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) if (Operator *L = dyn_cast<Operator>(U->getOperand(0))) if (L->getOpcode() == Instruction::Shl && L->getOperand(1) == U->getOperand(1)) { uint64_t BitWidth = getTypeSizeInBits(U->getType()); // If the shift count is not less than the bitwidth, the result of // the shift is undefined. Don't try to analyze it, because the // resolution chosen here may differ from the resolution chosen in // other parts of the compiler. 
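  // Illustrative examples for the shift handling above (sketches):
  //
  //   shl  i32 %x, 3  -->  (%x * 8)
  //   lshr i32 %x, 2  -->  (%x /u 4)
  //
  // and a two-shift sign-extend-in-register, (ashr (shl %x, 24), 24), becomes
  // (sext i8 (trunc i32 %x to i8) to i32).  Shift amounts that are not
  // smaller than the bit width are left unanalyzed because the result of such
  // a shift is undefined.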
if (CI->getValue().uge(BitWidth)) break; uint64_t Amt = BitWidth - CI->getZExtValue(); if (Amt == BitWidth) return getSCEV(L->getOperand(0)); // shift by zero --> noop return getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)), IntegerType::get(getContext(), Amt)), U->getType()); } break; case Instruction::Trunc: return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType()); case Instruction::ZExt: return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType()); case Instruction::SExt: return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType()); case Instruction::BitCast: // BitCasts are no-op casts so we just eliminate the cast. if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType())) return getSCEV(U->getOperand(0)); break; // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can // lead to pointer expressions which cannot safely be expanded to GEPs, // because ScalarEvolution doesn't respect the GEP aliasing rules when // simplifying integer expressions. case Instruction::GetElementPtr: return createNodeForGEP(cast<GEPOperator>(U)); case Instruction::PHI: return createNodeForPHI(cast<PHINode>(U)); case Instruction::Select: // This could be a smax or umax that was lowered earlier. // Try to recover it. if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) { Value *LHS = ICI->getOperand(0); Value *RHS = ICI->getOperand(1); switch (ICI->getPredicate()) { case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: std::swap(LHS, RHS); LLVM_FALLTHROUGH; // HLSL Change case ICmpInst::ICMP_SGT: case ICmpInst::ICMP_SGE: // a >s b ? a+x : b+x -> smax(a, b)+x // a >s b ? b+x : a+x -> smin(a, b)+x if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(U->getType())) { const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), U->getType()); const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), U->getType()); const SCEV *LA = getSCEV(U->getOperand(1)); const SCEV *RA = getSCEV(U->getOperand(2)); const SCEV *LDiff = getMinusSCEV(LA, LS); const SCEV *RDiff = getMinusSCEV(RA, RS); if (LDiff == RDiff) return getAddExpr(getSMaxExpr(LS, RS), LDiff); LDiff = getMinusSCEV(LA, RS); RDiff = getMinusSCEV(RA, LS); if (LDiff == RDiff) return getAddExpr(getSMinExpr(LS, RS), LDiff); } break; case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: std::swap(LHS, RHS); LLVM_FALLTHROUGH; // HLSL Change case ICmpInst::ICMP_UGT: case ICmpInst::ICMP_UGE: // a >u b ? a+x : b+x -> umax(a, b)+x // a >u b ? b+x : a+x -> umin(a, b)+x if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(U->getType())) { const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), U->getType()); const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), U->getType()); const SCEV *LA = getSCEV(U->getOperand(1)); const SCEV *RA = getSCEV(U->getOperand(2)); const SCEV *LDiff = getMinusSCEV(LA, LS); const SCEV *RDiff = getMinusSCEV(RA, RS); if (LDiff == RDiff) return getAddExpr(getUMaxExpr(LS, RS), LDiff); LDiff = getMinusSCEV(LA, RS); RDiff = getMinusSCEV(RA, LS); if (LDiff == RDiff) return getAddExpr(getUMinExpr(LS, RS), LDiff); } break; case ICmpInst::ICMP_NE: // n != 0 ? 
n+x : 1+x -> umax(n, 1)+x if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(U->getType()) && isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { const SCEV *One = getConstant(U->getType(), 1); const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), U->getType()); const SCEV *LA = getSCEV(U->getOperand(1)); const SCEV *RA = getSCEV(U->getOperand(2)); const SCEV *LDiff = getMinusSCEV(LA, LS); const SCEV *RDiff = getMinusSCEV(RA, One); if (LDiff == RDiff) return getAddExpr(getUMaxExpr(One, LS), LDiff); } break; case ICmpInst::ICMP_EQ: // n == 0 ? 1+x : n+x -> umax(n, 1)+x if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(U->getType()) && isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { const SCEV *One = getConstant(U->getType(), 1); const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), U->getType()); const SCEV *LA = getSCEV(U->getOperand(1)); const SCEV *RA = getSCEV(U->getOperand(2)); const SCEV *LDiff = getMinusSCEV(LA, One); const SCEV *RDiff = getMinusSCEV(RA, LS); if (LDiff == RDiff) return getAddExpr(getUMaxExpr(One, LS), LDiff); } break; default: break; } } break; default: // We cannot analyze this expression. break; } return getUnknown(V); } //===----------------------------------------------------------------------===// // Iteration Count Computation Code // unsigned ScalarEvolution::getSmallConstantTripCount(Loop *L) { if (BasicBlock *ExitingBB = L->getExitingBlock()) return getSmallConstantTripCount(L, ExitingBB); // No trip count information for multiple exits. return 0; } /// getSmallConstantTripCount - Returns the maximum trip count of this loop as a /// normal unsigned value. Returns 0 if the trip count is unknown or not /// constant. Will also return 0 if the maximum trip count is very large (>= /// 2^32). /// /// This "trip count" assumes that control exits via ExitingBlock. More /// precisely, it is the number of times that control may reach ExitingBlock /// before taking the branch. For loops with multiple exits, it may not be the /// number times that the loop header executes because the loop may exit /// prematurely via another branch. unsigned ScalarEvolution::getSmallConstantTripCount(Loop *L, BasicBlock *ExitingBlock) { assert(ExitingBlock && "Must pass a non-null exiting block!"); assert(L->isLoopExiting(ExitingBlock) && "Exiting block must actually branch out of the loop!"); const SCEVConstant *ExitCount = dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock)); if (!ExitCount) return 0; ConstantInt *ExitConst = ExitCount->getValue(); // Guard against huge trip counts. if (ExitConst->getValue().getActiveBits() > 32) return 0; // In case of integer overflow, this returns 0, which is correct. return ((unsigned)ExitConst->getZExtValue()) + 1; } unsigned ScalarEvolution::getSmallConstantTripMultiple(Loop *L) { if (BasicBlock *ExitingBB = L->getExitingBlock()) return getSmallConstantTripMultiple(L, ExitingBB); // No trip multiple information for multiple exits. return 0; } /// getSmallConstantTripMultiple - Returns the largest constant divisor of the /// trip count of this loop as a normal unsigned value, if possible. This /// means that the actual trip count is always a multiple of the returned /// value (don't forget the trip count could very well be zero as well!). /// /// Returns 1 if the trip count is unknown or not guaranteed to be the /// multiple of a constant (which is also the case if the trip count is simply /// constant, use getSmallConstantTripCount for that case), Will also return 1 /// if the trip count is very large (>= 2^32). 
/// /// As explained in the comments for getSmallConstantTripCount, this assumes /// that control exits the loop via ExitingBlock. unsigned ScalarEvolution::getSmallConstantTripMultiple(Loop *L, BasicBlock *ExitingBlock) { assert(ExitingBlock && "Must pass a non-null exiting block!"); assert(L->isLoopExiting(ExitingBlock) && "Exiting block must actually branch out of the loop!"); const SCEV *ExitCount = getExitCount(L, ExitingBlock); if (ExitCount == getCouldNotCompute()) return 1; // Get the trip count from the BE count by adding 1. const SCEV *TCMul = getAddExpr(ExitCount, getConstant(ExitCount->getType(), 1)); // FIXME: SCEV distributes multiplication as V1*C1 + V2*C1. We could attempt // to factor simple cases. if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(TCMul)) TCMul = Mul->getOperand(0); const SCEVConstant *MulC = dyn_cast<SCEVConstant>(TCMul); if (!MulC) return 1; ConstantInt *Result = MulC->getValue(); // Guard against huge trip counts (this requires checking // for zero to handle the case where the trip count == -1 and the // addition wraps). if (!Result || Result->getValue().getActiveBits() > 32 || Result->getValue().getActiveBits() == 0) return 1; return (unsigned)Result->getZExtValue(); } // getExitCount - Get the expression for the number of loop iterations for which // this loop is guaranteed not to exit via ExitingBlock. Otherwise return // SCEVCouldNotCompute. const SCEV *ScalarEvolution::getExitCount(Loop *L, BasicBlock *ExitingBlock) { return getBackedgeTakenInfo(L).getExact(ExitingBlock, this); } /// getBackedgeTakenCount - If the specified loop has a predictable /// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute /// object. The backedge-taken count is the number of times the loop header /// will be branched to from within the loop. This is one less than the /// trip count of the loop, since it doesn't count the first iteration, /// when the header is branched to from outside the loop. /// /// Note that it is not valid to call this method on a loop without a /// loop-invariant backedge-taken count (see /// hasLoopInvariantBackedgeTakenCount). /// const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) { return getBackedgeTakenInfo(L).getExact(this); } /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except /// return the least SCEV value that is known never to be less than the /// actual backedge taken count. const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) { return getBackedgeTakenInfo(L).getMax(this); } /// PushLoopPHIs - Push PHI nodes in the header of the given loop /// onto the given Worklist. static void PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) { BasicBlock *Header = L->getHeader(); // Push all Loop-header PHIs onto the Worklist stack. for (BasicBlock::iterator I = Header->begin(); PHINode *PN = dyn_cast<PHINode>(I); ++I) Worklist.push_back(PN); } const ScalarEvolution::BackedgeTakenInfo & ScalarEvolution::getBackedgeTakenInfo(const Loop *L) { // Initially insert an invalid entry for this loop. If the insertion // succeeds, proceed to actually compute a backedge-taken count and // update the value. The temporary CouldNotCompute value tells SCEV // code elsewhere that it shouldn't attempt to request a new // backedge-taken count, which could result in infinite recursion. 
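  // A minimal usage sketch for the trip-count queries defined above, kept
  // under "#if 0" because it is illustrative only; SE and L are assumed to be
  // an already-computed ScalarEvolution analysis and a loop from the same
  // function:
#if 0
  if (unsigned TC = SE.getSmallConstantTripCount(L)) {
    // The exit test is reached exactly TC times (TC fits in 32 bits).
    dbgs() << "constant trip count: " << TC << "\n";
  } else {
    const SCEV *BEC = SE.getBackedgeTakenCount(L);
    if (!isa<SCEVCouldNotCompute>(BEC))
      dbgs() << "symbolic backedge-taken count: " << *BEC << "\n";
  }
#endif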
  std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
    BackedgeTakenCounts.insert(std::make_pair(L, BackedgeTakenInfo()));
  if (!Pair.second)
    return Pair.first->second;

  // ComputeBackedgeTakenCount may allocate memory for its result. Inserting it
  // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
  // must be cleared in this scope.
  BackedgeTakenInfo Result = ComputeBackedgeTakenCount(L);

  if (Result.getExact(this) != getCouldNotCompute()) {
    assert(isLoopInvariant(Result.getExact(this), L) &&
           isLoopInvariant(Result.getMax(this), L) &&
           "Computed backedge-taken count isn't loop invariant for loop!");
    ++NumTripCountsComputed;
  }
  else if (Result.getMax(this) == getCouldNotCompute() &&
           isa<PHINode>(L->getHeader()->begin())) {
    // Only count loops that have phi nodes as not being computable.
    ++NumTripCountsNotComputed;
  }

  // Now that we know more about the trip count for this loop, forget any
  // existing SCEV values for PHI nodes in this loop since they are only
  // conservative estimates made without the benefit of trip count
  // information. This is similar to the code in forgetLoop, except that
  // it handles SCEVUnknown PHI nodes specially.
  if (Result.hasAnyInfo()) {
    SmallVector<Instruction *, 16> Worklist;
    PushLoopPHIs(L, Worklist);

    SmallPtrSet<Instruction *, 8> Visited;
    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();
      if (!Visited.insert(I).second) continue;

      ValueExprMapType::iterator It =
        ValueExprMap.find_as(static_cast<Value *>(I));
      if (It != ValueExprMap.end()) {
        const SCEV *Old = It->second;

        // SCEVUnknown for a PHI either means that it has an unrecognized
        // structure, or it's a PHI that's in the process of being computed
        // by createNodeForPHI. In the former case, additional loop trip
        // count information isn't going to change anything. In the latter
        // case, createNodeForPHI will perform the necessary updates on its
        // own when it gets to that point.
        if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
          forgetMemoizedResults(Old);
          ValueExprMap.erase(It);
        }
        if (PHINode *PN = dyn_cast<PHINode>(I))
          ConstantEvolutionLoopExitValue.erase(PN);
      }

      PushDefUseChildren(I, Worklist);
    }
  }

  // Re-lookup the insert position, since the call to
  // ComputeBackedgeTakenCount above could result in a
  // recursive call to getBackedgeTakenInfo (on a different
  // loop), which would invalidate the iterator computed
  // earlier.
  return BackedgeTakenCounts.find(L)->second = Result;
}

/// forgetLoop - This method should be called by the client when it has
/// changed a loop in a way that may affect ScalarEvolution's ability to
/// compute a trip count, or if the loop is deleted.
void ScalarEvolution::forgetLoop(const Loop *L) {
  // Drop any stored trip count value.
  DenseMap<const Loop*, BackedgeTakenInfo>::iterator BTCPos =
    BackedgeTakenCounts.find(L);
  if (BTCPos != BackedgeTakenCounts.end()) {
    BTCPos->second.clear();
    BackedgeTakenCounts.erase(BTCPos);
  }

  // Drop information about expressions based on loop-header PHIs.
  SmallVector<Instruction *, 16> Worklist;
  PushLoopPHIs(L, Worklist);

  SmallPtrSet<Instruction *, 8> Visited;
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Visited.insert(I).second) continue;

    ValueExprMapType::iterator It =
      ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      forgetMemoizedResults(It->second);
      ValueExprMap.erase(It);
      if (PHINode *PN = dyn_cast<PHINode>(I))
        ConstantEvolutionLoopExitValue.erase(PN);
    }

    PushDefUseChildren(I, Worklist);
  }

  // Forget all contained loops too, to avoid dangling entries in the
  // ValuesAtScopes map.
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    forgetLoop(*I);
}

/// forgetValue - This method should be called by the client when it has
/// changed a value in a way that may affect its value, or which may
/// disconnect it from a def-use chain linking it to a loop.
void ScalarEvolution::forgetValue(Value *V) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return;

  // Drop information about expressions based on loop-header PHIs.
  SmallVector<Instruction *, 16> Worklist;
  Worklist.push_back(I);

  SmallPtrSet<Instruction *, 8> Visited;
  while (!Worklist.empty()) {
    I = Worklist.pop_back_val();
    if (!Visited.insert(I).second) continue;

    ValueExprMapType::iterator It =
      ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      forgetMemoizedResults(It->second);
      ValueExprMap.erase(It);
      if (PHINode *PN = dyn_cast<PHINode>(I))
        ConstantEvolutionLoopExitValue.erase(PN);
    }

    PushDefUseChildren(I, Worklist);
  }
}

/// getExact - Get the exact loop backedge taken count considering all loop
/// exits. A computable result can only be returned for loops with a single
/// exit. Returning the minimum taken count among all exits is incorrect
/// because one of the loop's exit limits may have been skipped. HowFarToZero
/// assumes that the limit of each loop test is never skipped. This is a valid
/// assumption as long as the loop exits via that test. For precise results,
/// it is the caller's responsibility to specify the relevant loop exit using
/// getExact(ExitingBlock, SE).
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getExact(ScalarEvolution *SE) const {
  // If any exits were not computable, the loop is not computable.
  if (!ExitNotTaken.isCompleteList()) return SE->getCouldNotCompute();

  // We need exactly one computable exit.
  if (!ExitNotTaken.ExitingBlock) return SE->getCouldNotCompute();
  assert(ExitNotTaken.ExactNotTaken && "uninitialized not-taken info");

  const SCEV *BECount = nullptr;
  for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
       ENT != nullptr; ENT = ENT->getNextExit()) {

    assert(ENT->ExactNotTaken != SE->getCouldNotCompute() && "bad exit SCEV");

    if (!BECount)
      BECount = ENT->ExactNotTaken;
    else if (BECount != ENT->ExactNotTaken)
      return SE->getCouldNotCompute();
  }
  assert(BECount && "Invalid not taken count for loop exit");
  return BECount;
}

/// getExact - Get the exact not taken count for this loop exit.
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock,
                                             ScalarEvolution *SE) const {
  for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
       ENT != nullptr; ENT = ENT->getNextExit()) {

    if (ENT->ExitingBlock == ExitingBlock)
      return ENT->ExactNotTaken;
  }
  return SE->getCouldNotCompute();
}

/// getMax - Get the max backedge taken count for the loop.
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const {
  return Max ?
Max : SE->getCouldNotCompute(); } bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S, ScalarEvolution *SE) const { if (Max && Max != SE->getCouldNotCompute() && SE->hasOperand(Max, S)) return true; if (!ExitNotTaken.ExitingBlock) return false; for (const ExitNotTakenInfo *ENT = &ExitNotTaken; ENT != nullptr; ENT = ENT->getNextExit()) { if (ENT->ExactNotTaken != SE->getCouldNotCompute() && SE->hasOperand(ENT->ExactNotTaken, S)) { return true; } } return false; } /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each /// computable exit into a persistent ExitNotTakenInfo array. ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo( SmallVectorImpl< std::pair<BasicBlock *, const SCEV *> > &ExitCounts, bool Complete, const SCEV *MaxCount) : Max(MaxCount) { if (!Complete) ExitNotTaken.setIncomplete(); unsigned NumExits = ExitCounts.size(); if (NumExits == 0) return; ExitNotTaken.ExitingBlock = ExitCounts[0].first; ExitNotTaken.ExactNotTaken = ExitCounts[0].second; if (NumExits == 1) return; // Handle the rare case of multiple computable exits. ExitNotTakenInfo *ENT = new ExitNotTakenInfo[NumExits-1]; ExitNotTakenInfo *PrevENT = &ExitNotTaken; for (unsigned i = 1; i < NumExits; ++i, PrevENT = ENT, ++ENT) { PrevENT->setNextExit(ENT); ENT->ExitingBlock = ExitCounts[i].first; ENT->ExactNotTaken = ExitCounts[i].second; } } /// clear - Invalidate this result and free the ExitNotTakenInfo array. void ScalarEvolution::BackedgeTakenInfo::clear() { ExitNotTaken.ExitingBlock = nullptr; ExitNotTaken.ExactNotTaken = nullptr; delete[] ExitNotTaken.getNextExit(); } /// ComputeBackedgeTakenCount - Compute the number of times the backedge /// of the specified loop will execute. ScalarEvolution::BackedgeTakenInfo ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) { SmallVector<BasicBlock *, 8> ExitingBlocks; L->getExitingBlocks(ExitingBlocks); SmallVector<std::pair<BasicBlock *, const SCEV *>, 4> ExitCounts; bool CouldComputeBECount = true; BasicBlock *Latch = L->getLoopLatch(); // may be NULL. const SCEV *MustExitMaxBECount = nullptr; const SCEV *MayExitMaxBECount = nullptr; // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts // and compute maxBECount. for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { BasicBlock *ExitBB = ExitingBlocks[i]; ExitLimit EL = ComputeExitLimit(L, ExitBB); // 1. For each exit that can be computed, add an entry to ExitCounts. // CouldComputeBECount is true only if all exits can be computed. if (EL.Exact == getCouldNotCompute()) // We couldn't compute an exact value for this exit, so // we won't be able to compute an exact value for the loop. CouldComputeBECount = false; else ExitCounts.push_back(std::make_pair(ExitBB, EL.Exact)); // 2. Derive the loop's MaxBECount from each exit's max number of // non-exiting iterations. Partition the loop exits into two kinds: // LoopMustExits and LoopMayExits. // // If the exit dominates the loop latch, it is a LoopMustExit otherwise it // is a LoopMayExit. If any computable LoopMustExit is found, then // MaxBECount is the minimum EL.Max of computable LoopMustExits. Otherwise, // MaxBECount is conservatively the maximum EL.Max, where CouldNotCompute is // considered greater than any computable EL.Max. 
if (EL.Max != getCouldNotCompute() && Latch && DT->dominates(ExitBB, Latch)) { if (!MustExitMaxBECount) MustExitMaxBECount = EL.Max; else { MustExitMaxBECount = getUMinFromMismatchedTypes(MustExitMaxBECount, EL.Max); } } else if (MayExitMaxBECount != getCouldNotCompute()) { if (!MayExitMaxBECount || EL.Max == getCouldNotCompute()) MayExitMaxBECount = EL.Max; else { MayExitMaxBECount = getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.Max); } } } const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount : (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute()); return BackedgeTakenInfo(ExitCounts, CouldComputeBECount, MaxBECount); } /// ComputeExitLimit - Compute the number of times the backedge of the specified /// loop will execute if it exits via the specified block. ScalarEvolution::ExitLimit ScalarEvolution::ComputeExitLimit(const Loop *L, BasicBlock *ExitingBlock) { // Okay, we've chosen an exiting block. See what condition causes us to // exit at this block and remember the exit block and whether all other targets // lead to the loop header. bool MustExecuteLoopHeader = true; BasicBlock *Exit = nullptr; for (succ_iterator SI = succ_begin(ExitingBlock), SE = succ_end(ExitingBlock); SI != SE; ++SI) if (!L->contains(*SI)) { if (Exit) // Multiple exit successors. return getCouldNotCompute(); Exit = *SI; } else if (*SI != L->getHeader()) { MustExecuteLoopHeader = false; } // At this point, we know we have a conditional branch that determines whether // the loop is exited. However, we don't know if the branch is executed each // time through the loop. If not, then the execution count of the branch will // not be equal to the trip count of the loop. // // Currently we check for this by checking to see if the Exit branch goes to // the loop header. If so, we know it will always execute the same number of // times as the loop. We also handle the case where the exit block *is* the // loop header. This is common for un-rotated loops. // // If both of those tests fail, walk up the unique predecessor chain to the // header, stopping if there is an edge that doesn't exit the loop. If the // header is reached, the execution count of the branch will be equal to the // trip count of the loop. // // More extensive analysis could be done to handle more cases here. // if (!MustExecuteLoopHeader && ExitingBlock != L->getHeader()) { // The simple checks failed, try climbing the unique predecessor chain // up to the header. bool Ok = false; for (BasicBlock *BB = ExitingBlock; BB; ) { BasicBlock *Pred = BB->getUniquePredecessor(); if (!Pred) return getCouldNotCompute(); TerminatorInst *PredTerm = Pred->getTerminator(); for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) { BasicBlock *PredSucc = PredTerm->getSuccessor(i); if (PredSucc == BB) continue; // If the predecessor has a successor that isn't BB and isn't // outside the loop, assume the worst. if (L->contains(PredSucc)) return getCouldNotCompute(); } if (Pred == L->getHeader()) { Ok = true; break; } BB = Pred; } if (!Ok) return getCouldNotCompute(); } bool IsOnlyExit = (L->getExitingBlock() != nullptr); TerminatorInst *Term = ExitingBlock->getTerminator(); if (BranchInst *BI = dyn_cast<BranchInst>(Term)) { assert(BI->isConditional() && "If unconditional, it can't be in loop!"); // Proceed to the next level to examine the exit condition expression. 
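  // Illustrative example for the MaxBECount partitioning above (a sketch):
  //
  // Suppose a loop has two exiting blocks: one that dominates the latch with
  // EL.Max = 100, and a data-dependent early exit whose EL.Max is
  // CouldNotCompute.  The dominating exit is a LoopMustExit, so MaxBECount
  // becomes 100; the unpredictable early exit can only leave the loop sooner,
  // never later, so it does not weaken the bound.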
return ComputeExitLimitFromCond(L, BI->getCondition(), BI->getSuccessor(0), BI->getSuccessor(1), /*ControlsExit=*/IsOnlyExit); } if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) return ComputeExitLimitFromSingleExitSwitch(L, SI, Exit, /*ControlsExit=*/IsOnlyExit); return getCouldNotCompute(); } /// ComputeExitLimitFromCond - Compute the number of times the /// backedge of the specified loop will execute if its exit condition /// were a conditional branch of ExitCond, TBB, and FBB. /// /// @param ControlsExit is true if ExitCond directly controls the exit /// branch. In this case, we can assume that the loop exits only if the /// condition is true and can infer that failing to meet the condition prior to /// integer wraparound results in undefined behavior. ScalarEvolution::ExitLimit ScalarEvolution::ComputeExitLimitFromCond(const Loop *L, Value *ExitCond, BasicBlock *TBB, BasicBlock *FBB, bool ControlsExit) { // Check if the controlling expression for this loop is an And or Or. if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { if (BO->getOpcode() == Instruction::And) { // Recurse on the operands of the and. bool EitherMayExit = L->contains(TBB); ExitLimit EL0 = ComputeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB, ControlsExit && !EitherMayExit); ExitLimit EL1 = ComputeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB, ControlsExit && !EitherMayExit); const SCEV *BECount = getCouldNotCompute(); const SCEV *MaxBECount = getCouldNotCompute(); if (EitherMayExit) { // Both conditions must be true for the loop to continue executing. // Choose the less conservative count. if (EL0.Exact == getCouldNotCompute() || EL1.Exact == getCouldNotCompute()) BECount = getCouldNotCompute(); else BECount = getUMinFromMismatchedTypes(EL0.Exact, EL1.Exact); if (EL0.Max == getCouldNotCompute()) MaxBECount = EL1.Max; else if (EL1.Max == getCouldNotCompute()) MaxBECount = EL0.Max; else MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max); } else { // Both conditions must be true at the same time for the loop to exit. // For now, be conservative. assert(L->contains(FBB) && "Loop block has no successor in loop!"); if (EL0.Max == EL1.Max) MaxBECount = EL0.Max; if (EL0.Exact == EL1.Exact) BECount = EL0.Exact; } return ExitLimit(BECount, MaxBECount); } if (BO->getOpcode() == Instruction::Or) { // Recurse on the operands of the or. bool EitherMayExit = L->contains(FBB); ExitLimit EL0 = ComputeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB, ControlsExit && !EitherMayExit); ExitLimit EL1 = ComputeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB, ControlsExit && !EitherMayExit); const SCEV *BECount = getCouldNotCompute(); const SCEV *MaxBECount = getCouldNotCompute(); if (EitherMayExit) { // Both conditions must be false for the loop to continue executing. // Choose the less conservative count. if (EL0.Exact == getCouldNotCompute() || EL1.Exact == getCouldNotCompute()) BECount = getCouldNotCompute(); else BECount = getUMinFromMismatchedTypes(EL0.Exact, EL1.Exact); if (EL0.Max == getCouldNotCompute()) MaxBECount = EL1.Max; else if (EL1.Max == getCouldNotCompute()) MaxBECount = EL0.Max; else MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max); } else { // Both conditions must be false at the same time for the loop to exit. // For now, be conservative. 
assert(L->contains(TBB) && "Loop block has no successor in loop!"); if (EL0.Max == EL1.Max) MaxBECount = EL0.Max; if (EL0.Exact == EL1.Exact) BECount = EL0.Exact; } return ExitLimit(BECount, MaxBECount); } } // With an icmp, it may be feasible to compute an exact backedge-taken count. // Proceed to the next level to examine the icmp. if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) return ComputeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, ControlsExit); // Check for a constant condition. These are normally stripped out by // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to // preserve the CFG and is temporarily leaving constant conditions // in place. if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { if (L->contains(FBB) == !CI->getZExtValue()) // The backedge is always taken. return getCouldNotCompute(); else // The backedge is never taken. return getConstant(CI->getType(), 0); } // If it's not an integer or pointer comparison then compute it the hard way. return ComputeExitCountExhaustively(L, ExitCond, !L->contains(TBB)); } /// ComputeExitLimitFromICmp - Compute the number of times the /// backedge of the specified loop will execute if its exit condition /// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB. ScalarEvolution::ExitLimit ScalarEvolution::ComputeExitLimitFromICmp(const Loop *L, ICmpInst *ExitCond, BasicBlock *TBB, BasicBlock *FBB, bool ControlsExit) { // If the condition was exit on true, convert the condition to exit on false ICmpInst::Predicate Cond; if (!L->contains(FBB)) Cond = ExitCond->getPredicate(); else Cond = ExitCond->getInversePredicate(); // Handle common loops like: for (X = "string"; *X; ++X) if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { ExitLimit ItCnt = ComputeLoadConstantCompareExitLimit(LI, RHS, L, Cond); if (ItCnt.hasAnyInfo()) return ItCnt; } const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); // Try to evaluate any dependencies out of the loop. LHS = getSCEVAtScope(LHS, L); RHS = getSCEVAtScope(RHS, L); // At this point, we would like to compute how many iterations of the // loop the predicate will return true for these inputs. if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { // If there is a loop-invariant, force it into the RHS. std::swap(LHS, RHS); Cond = ICmpInst::getSwappedPredicate(Cond); } // Simplify the operands before analyzing them. (void)SimplifyICmpOperands(Cond, LHS, RHS); // If we have a comparison of a chrec against a constant, try to use value // ranges to answer this query. if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) if (AddRec->getLoop() == L) { // Form the constant range. ConstantRange CompRange( ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue())); const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; } // HLSL Change - begin // Try to compute the value exhaustively *right now*. Before trying the more pessimistic // partial evaluation. 
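  // Illustrative example for the and/or handling in ComputeExitLimitFromCond
  // above (a sketch):
  //
  //   br i1 (and i1 %c1, %c2), label %loop.body, label %exit
  //
  // Here the "true" successor stays in the loop, so either condition becoming
  // false exits; the exact backedge-taken count is umin(EL0.Exact, EL1.Exact)
  // and the max count is the smaller of the two known maxima.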
const SCEV *AggresiveResult = ComputeExitCountExhaustively(L, ExitCond, !L->contains(TBB)); if (AggresiveResult != getCouldNotCompute()) return AggresiveResult; // HLSL Change - end switch (Cond) { case ICmpInst::ICMP_NE: { // while (X != Y) // Convert to: while (X-Y != 0) ExitLimit EL = HowFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); if (EL.hasAnyInfo()) return EL; break; } case ICmpInst::ICMP_EQ: { // while (X == Y) // Convert to: while (X-Y == 0) ExitLimit EL = HowFarToNonZero(getMinusSCEV(LHS, RHS), L); if (EL.hasAnyInfo()) return EL; break; } case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_ULT: { // while (X < Y) bool IsSigned = Cond == ICmpInst::ICMP_SLT; ExitLimit EL = HowManyLessThans(LHS, RHS, L, IsSigned, ControlsExit); if (EL.hasAnyInfo()) return EL; break; } case ICmpInst::ICMP_SGT: case ICmpInst::ICMP_UGT: { // while (X > Y) bool IsSigned = Cond == ICmpInst::ICMP_SGT; ExitLimit EL = HowManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit); if (EL.hasAnyInfo()) return EL; break; } default: #if 0 dbgs() << "ComputeBackedgeTakenCount "; if (ExitCond->getOperand(0)->getType()->isUnsigned()) dbgs() << "[unsigned] "; dbgs() << *LHS << " " << Instruction::getOpcodeName(Instruction::ICmp) << " " << *RHS << "\n"; #endif break; } // return ComputeExitCountExhaustively(L, ExitCond, !L->contains(TBB)); // HLSL Change return getCouldNotCompute(); // HLSL Change - We already tried the exhaustive approach earlier, so don't try again and just give up. } ScalarEvolution::ExitLimit ScalarEvolution::ComputeExitLimitFromSingleExitSwitch(const Loop *L, SwitchInst *Switch, BasicBlock *ExitingBlock, bool ControlsExit) { assert(!L->contains(ExitingBlock) && "Not an exiting block!"); // Give up if the exit is the default dest of a switch. if (Switch->getDefaultDest() == ExitingBlock) return getCouldNotCompute(); assert(L->contains(Switch->getDefaultDest()) && "Default case must not exit the loop!"); const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L); const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock)); // while (X != Y) --> while (X-Y != 0) ExitLimit EL = HowFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); if (EL.hasAnyInfo()) return EL; return getCouldNotCompute(); } static ConstantInt * EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, ScalarEvolution &SE) { const SCEV *InVal = SE.getConstant(C); const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); assert(isa<SCEVConstant>(Val) && "Evaluation of SCEV at constant didn't fold correctly?"); return cast<SCEVConstant>(Val)->getValue(); } /// ComputeLoadConstantCompareExitLimit - Given an exit condition of /// 'icmp op load X, cst', try to see if we can compute the backedge /// execution count. ScalarEvolution::ExitLimit ScalarEvolution::ComputeLoadConstantCompareExitLimit( LoadInst *LI, Constant *RHS, const Loop *L, ICmpInst::Predicate predicate) { if (LI->isVolatile()) return getCouldNotCompute(); // Check to see if the loaded pointer is a getelementptr of a global. // TODO: Use SCEV instead of manually grubbing with GEPs. GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)); if (!GEP) return getCouldNotCompute(); // Make sure that it is really a constant global we are gepping, with an // initializer, and make sure the first IDX is really 0. 
GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)); if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() || GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) || !cast<Constant>(GEP->getOperand(1))->isNullValue()) return getCouldNotCompute(); // Okay, we allow one non-constant index into the GEP instruction. Value *VarIdx = nullptr; std::vector<Constant*> Indexes; unsigned VarIdxNum = 0; for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) { Indexes.push_back(CI); } else if (!isa<ConstantInt>(GEP->getOperand(i))) { if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's. VarIdx = GEP->getOperand(i); VarIdxNum = i-2; Indexes.push_back(nullptr); } // Loop-invariant loads may be a byproduct of loop optimization. Skip them. if (!VarIdx) return getCouldNotCompute(); // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant. // Check to see if X is a loop variant variable value now. const SCEV *Idx = getSCEV(VarIdx); Idx = getSCEVAtScope(Idx, L); // We can only recognize very limited forms of loop index expressions, in // particular, only affine AddRec's like {C1,+,C2}. const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx); if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) || !isa<SCEVConstant>(IdxExpr->getOperand(0)) || !isa<SCEVConstant>(IdxExpr->getOperand(1))) return getCouldNotCompute(); unsigned MaxSteps = MaxBruteForceIterations; for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) { ConstantInt *ItCst = ConstantInt::get( cast<IntegerType>(IdxExpr->getType()), IterationNum); ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this); // Form the GEP offset. Indexes[VarIdxNum] = Val; Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(), Indexes); if (!Result) break; // Cannot compute! // Evaluate the condition for this iteration. Result = ConstantExpr::getICmp(predicate, Result, RHS); if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure if (cast<ConstantInt>(Result)->getValue().isMinValue()) { #if 0 dbgs() << "\n***\n*** Computed loop count " << *ItCst << "\n*** From global " << *GV << "*** BB: " << *L->getHeader() << "***\n"; #endif ++NumArrayLenItCounts; return getConstant(ItCst); // Found terminating iteration! } } return getCouldNotCompute(); } /// CanConstantFold - Return true if we can constant fold an instruction of the /// specified type, assuming that all operands were constants. static bool CanConstantFold(const Instruction *I) { if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || isa<LoadInst>(I)) return true; if (const CallInst *CI = dyn_cast<CallInst>(I)) if (const Function *F = CI->getCalledFunction()) return canConstantFoldCallTo(F); return false; } /// Determine whether this instruction can constant evolve within this loop /// assuming its operands can all constant evolve. static bool canConstantEvolve(Instruction *I, const Loop *L) { // An instruction outside of the loop can't be derived from a loop PHI. if (!L->contains(I)) return false; if (isa<PHINode>(I)) { // We don't currently keep track of the control flow needed to evaluate // PHIs, so we cannot handle PHIs inside of loops. return L->getHeader() == I->getParent(); } // If we won't be able to constant fold this expression even if the operands // are constants, bail early. 
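  // Illustrative example for ComputeLoadConstantCompareExitLimit above (a
  // sketch; the C fragment is hypothetical):
  //
  //   static const char Str[] = "hello";
  //   for (int i = 0; Str[i] != 0; ++i) ...
  //
  // The exit test loads from a constant global through a GEP whose only
  // non-constant index is the affine recurrence {0,+,1}.  The loop above
  // folds the load for successive iteration numbers until the comparison
  // becomes false, yielding a backedge-taken count of 5.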
return CanConstantFold(I); } /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by /// recursing through each instruction operand until reaching a loop header phi. static PHINode * getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, DxilValueCache *DVC, // HLSL Change DenseMap<Instruction *, PHINode *> &PHIMap) { // Otherwise, we can evaluate this instruction if all of its operands are // constant or derived from a PHI node themselves. PHINode *PHI = nullptr; for (Instruction::op_iterator OpI = UseInst->op_begin(), OpE = UseInst->op_end(); OpI != OpE; ++OpI) { if (isa<Constant>(*OpI)) continue; // HLSL Change begin if (DVC->GetConstValue(*OpI)) continue; // HLSL Change end Instruction *OpInst = dyn_cast<Instruction>(*OpI); if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr; PHINode *P = dyn_cast<PHINode>(OpInst); if (!P) // If this operand is already visited, reuse the prior result. // We may have P != PHI if this is the deepest point at which the // inconsistent paths meet. P = PHIMap.lookup(OpInst); if (!P) { // Recurse and memoize the results, whether a phi is found or not. // This recursive call invalidates pointers into PHIMap. //P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap); // HLSL Change P = getConstantEvolvingPHIOperands(OpInst, L, DVC, PHIMap); // HLSL Change - Pass DVC PHIMap[OpInst] = P; } if (!P) return nullptr; // Not evolving from PHI if (PHI && PHI != P) return nullptr; // Evolving from multiple different PHIs. PHI = P; } // This is a expression evolving from a constant PHI! return PHI; } /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node /// in the loop that V is derived from. We allow arbitrary operations along the /// way, but the operands of an operation must either be constants or a value /// derived from a constant PHI. If this expression does not fit with these /// constraints, return null. // static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) { // HLSL Change static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L, DxilValueCache *DVC) { // HLSL Change Instruction *I = dyn_cast<Instruction>(V); if (!I || !canConstantEvolve(I, L)) return nullptr; if (PHINode *PN = dyn_cast<PHINode>(I)) { return PN; } // Record non-constant instructions contained by the loop. DenseMap<Instruction *, PHINode *> PHIMap; // return getConstantEvolvingPHIOperands(I, L, PHIMap); // HLSL Change return getConstantEvolvingPHIOperands(I, L, DVC, PHIMap); // HLSL Change } /// EvaluateExpression - Given an expression that passes the /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node /// in the loop has the value PHIVal. If we can't fold this expression for some /// reason, return null. static Constant *EvaluateExpression(Value *V, const Loop *L, DenseMap<Instruction *, Constant *> &Vals, const DataLayout &DL, const TargetLibraryInfo *TLI) { // Convenient constant check, but redundant for recursive calls. if (Constant *C = dyn_cast<Constant>(V)) return C; Instruction *I = dyn_cast<Instruction>(V); if (!I) return nullptr; if (Constant *C = Vals.lookup(I)) return C; // An instruction inside the loop depends on a value outside the loop that we // weren't given a mapping for, or a value such as a call inside the loop. 
if (!canConstantEvolve(I, L)) return nullptr; // An unmapped PHI can be due to a branch or another loop inside this loop, // or due to this not being the initial iteration through a loop where we // couldn't compute the evolution of this particular PHI last time. if (isa<PHINode>(I)) return nullptr; std::vector<Constant*> Operands(I->getNumOperands()); for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i)); if (!Operand) { Operands[i] = dyn_cast<Constant>(I->getOperand(i)); if (!Operands[i]) return nullptr; continue; } Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI); Vals[Operand] = C; if (!C) return nullptr; Operands[i] = C; } if (CmpInst *CI = dyn_cast<CmpInst>(I)) return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], Operands[1], DL, TLI); if (LoadInst *LI = dyn_cast<LoadInst>(I)) { if (!LI->isVolatile()) return ConstantFoldLoadFromConstPtr(Operands[0], DL); } return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, DL, TLI); } /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is /// in the header of its containing loop, we know the loop executes a /// constant number of times, and the PHI node is just a recurrence /// involving constants, fold it. Constant * ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, const APInt &BEs, const Loop *L) { DenseMap<PHINode*, Constant*>::const_iterator I = ConstantEvolutionLoopExitValue.find(PN); if (I != ConstantEvolutionLoopExitValue.end()) return I->second; if (BEs.ugt(MaxBruteForceIterations)) return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it. Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; DenseMap<Instruction *, Constant *> CurrentIterVals; BasicBlock *Header = L->getHeader(); assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); // Since the loop is canonicalized, the PHI node must have two entries. One // entry must be a constant (coming in from outside of the loop), and the // second must be derived from the same PHI. bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1)); PHINode *PHI = nullptr; for (BasicBlock::iterator I = Header->begin(); (PHI = dyn_cast<PHINode>(I)); ++I) { Constant *StartCST = dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge)); if (!StartCST) continue; CurrentIterVals[PHI] = StartCST; } if (!CurrentIterVals.count(PN)) return RetVal = nullptr; Value *BEValue = PN->getIncomingValue(SecondIsBackedge); // Execute the loop symbolically to determine the exit value. if (BEs.getActiveBits() >= 32) return RetVal = nullptr; // More than 2^32-1 iterations?? Not doing it! unsigned NumIterations = BEs.getZExtValue(); // must be in range unsigned IterationNum = 0; const DataLayout &DL = F->getParent()->getDataLayout(); for (; ; ++IterationNum) { if (IterationNum == NumIterations) return RetVal = CurrentIterVals[PN]; // Got exit value! // Compute the value of the PHIs for the next iteration. // EvaluateExpression adds non-phi values to the CurrentIterVals map. DenseMap<Instruction *, Constant *> NextIterVals; Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI); if (!NextPHI) return nullptr; // Couldn't evaluate! NextIterVals[PN] = NextPHI; bool StoppedEvolving = NextPHI == CurrentIterVals[PN]; // Also evaluate the other PHI nodes. 
However, we don't get to stop if we // cease to be able to evaluate one of them or if they stop evolving, // because that doesn't necessarily prevent us from computing PN. SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute; for (DenseMap<Instruction *, Constant *>::const_iterator I = CurrentIterVals.begin(), E = CurrentIterVals.end(); I != E; ++I){ PHINode *PHI = dyn_cast<PHINode>(I->first); if (!PHI || PHI == PN || PHI->getParent() != Header) continue; PHIsToCompute.push_back(std::make_pair(PHI, I->second)); } // We use two distinct loops because EvaluateExpression may invalidate any // iterators into CurrentIterVals. for (SmallVectorImpl<std::pair<PHINode *, Constant*> >::const_iterator I = PHIsToCompute.begin(), E = PHIsToCompute.end(); I != E; ++I) { PHINode *PHI = I->first; Constant *&NextPHI = NextIterVals[PHI]; if (!NextPHI) { // Not already computed. Value *BEValue = PHI->getIncomingValue(SecondIsBackedge); NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI); } if (NextPHI != I->second) StoppedEvolving = false; } // If all entries in CurrentIterVals == NextIterVals then we can stop // iterating, the loop can't continue to change. if (StoppedEvolving) return RetVal = CurrentIterVals[PN]; CurrentIterVals.swap(NextIterVals); } } /// ComputeExitCountExhaustively - If the loop is known to execute a /// constant number of times (the condition evolves only from constants), /// try to evaluate a few iterations of the loop until we get the exit /// condition gets a value of ExitWhen (true or false). If we cannot /// evaluate the trip count of the loop, return getCouldNotCompute(). const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L, Value *Cond, bool ExitWhen) { // PHINode *PN = getConstantEvolvingPHI(Cond, L); // HLSL Change PHINode *PN = getConstantEvolvingPHI(Cond, L, &getAnalysis<DxilValueCache>()); // HLSL Change if (!PN) return getCouldNotCompute(); // If the loop is canonicalized, the PHI will have exactly two entries. // That's the only form we support here. if (PN->getNumIncomingValues() != 2) return getCouldNotCompute(); DenseMap<Instruction *, Constant *> CurrentIterVals; BasicBlock *Header = L->getHeader(); assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); // One entry must be a constant (coming in from outside of the loop), and the // second must be derived from the same PHI. bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1)); PHINode *PHI = nullptr; for (BasicBlock::iterator I = Header->begin(); (PHI = dyn_cast<PHINode>(I)); ++I) { Constant *StartCST = dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge)); // HLSL Change begin // If we don't have a constant, try getting a constant from the value cache. 
if (!StartCST) if (Constant *C = getAnalysis<DxilValueCache>().GetConstValue(PHI->getIncomingValue(!SecondIsBackedge))) StartCST = C; // HLSL Change end if (!StartCST) continue; CurrentIterVals[PHI] = StartCST; } if (!CurrentIterVals.count(PN)) return getCouldNotCompute(); // HLSL Change begin SmallVector<std::pair<Instruction *, Constant *>, 4> KnownInvariantOps; if (Instruction *CondI = dyn_cast<Instruction>(Cond)) { SmallVector<Instruction *, 4> Worklist; DxilValueCache *DVC = &getAnalysis<DxilValueCache>(); Worklist.push_back(CondI); while (Worklist.size()) { Instruction *I = Worklist.pop_back_val(); if (Constant *C = DVC->GetConstValue(I)) { KnownInvariantOps.push_back({ I, C }); } else if (CurrentIterVals.count(I)) { continue; } else if (L->contains(I)) { for (Use &U : I->operands()) { if (Instruction *OpI = dyn_cast<Instruction>(U.get())) { Worklist.push_back(OpI); } } } } } // HLSL Change end // Okay, we find a PHI node that defines the trip count of this loop. Execute // the loop symbolically to determine when the condition gets a value of // "ExitWhen". unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis. const DataLayout &DL = F->getParent()->getDataLayout(); for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){ // HLSL Change begin for (std::pair<Instruction *, Constant *> &Pair : KnownInvariantOps) CurrentIterVals[Pair.first] = Pair.second; // HLSL Change end ConstantInt *CondVal = dyn_cast_or_null<ConstantInt>( EvaluateExpression(Cond, L, CurrentIterVals, DL, TLI)); // Couldn't symbolically evaluate. if (!CondVal) return getCouldNotCompute(); if (CondVal->getValue() == uint64_t(ExitWhen)) { ++NumBruteForceTripCountsComputed; return getConstant(Type::getInt32Ty(getContext()), IterationNum); } // Update all the PHI nodes for the next iteration. DenseMap<Instruction *, Constant *> NextIterVals; // Create a list of which PHIs we need to compute. We want to do this before // calling EvaluateExpression on them because that may invalidate iterators // into CurrentIterVals. SmallVector<PHINode *, 8> PHIsToCompute; for (DenseMap<Instruction *, Constant *>::const_iterator I = CurrentIterVals.begin(), E = CurrentIterVals.end(); I != E; ++I){ PHINode *PHI = dyn_cast<PHINode>(I->first); if (!PHI || PHI->getParent() != Header) continue; PHIsToCompute.push_back(PHI); } for (SmallVectorImpl<PHINode *>::const_iterator I = PHIsToCompute.begin(), E = PHIsToCompute.end(); I != E; ++I) { PHINode *PHI = *I; Constant *&NextPHI = NextIterVals[PHI]; if (NextPHI) continue; // Already computed! Value *BEValue = PHI->getIncomingValue(SecondIsBackedge); NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI); } CurrentIterVals.swap(NextIterVals); } // Too many iterations were needed to evaluate. return getCouldNotCompute(); } /// getSCEVAtScope - Return a SCEV expression for the specified value /// at the specified scope in the program. The L value specifies a loop /// nest to evaluate the expression at, where null is the top-level or a /// specified loop is immediately inside of the loop. /// /// This method can be used to compute the exit value for a variable defined /// in a loop by querying what the value will hold in the parent loop. /// /// In the case that a relevant loop exit value cannot be computed, the /// original value V is returned. const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { // Check to see if we've folded this expression at this loop before. 
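  // A null second element in the cache acts as an "in progress" sentinel: it
  // is pushed before calling computeSCEVAtScope so that a re-entrant query for
  // the same (V, L) pair returns V itself instead of recursing forever, and it
  // is overwritten with the real result afterwards. The reference is also
  // re-fetched after the recursive computation because inserting into
  // ValuesAtScopes may have invalidated it.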
SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = ValuesAtScopes[V]; for (unsigned u = 0; u < Values.size(); u++) { if (Values[u].first == L) return Values[u].second ? Values[u].second : V; } Values.push_back(std::make_pair(L, static_cast<const SCEV *>(nullptr))); // Otherwise compute it. const SCEV *C = computeSCEVAtScope(V, L); SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values2 = ValuesAtScopes[V]; for (unsigned u = Values2.size(); u > 0; u--) { if (Values2[u - 1].first == L) { Values2[u - 1].second = C; break; } } return C; } /// This builds up a Constant using the ConstantExpr interface. That way, we /// will return Constants for objects which aren't represented by a /// SCEVConstant, because SCEVConstant is restricted to ConstantInt. /// Returns NULL if the SCEV isn't representable as a Constant. static Constant *BuildConstantFromSCEV(const SCEV *V) { switch (static_cast<SCEVTypes>(V->getSCEVType())) { case scCouldNotCompute: case scAddRecExpr: break; case scConstant: return cast<SCEVConstant>(V)->getValue(); case scUnknown: return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue()); case scSignExtend: { const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V); if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand())) return ConstantExpr::getSExt(CastOp, SS->getType()); break; } case scZeroExtend: { const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V); if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand())) return ConstantExpr::getZExt(CastOp, SZ->getType()); break; } case scTruncate: { const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V); if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand())) return ConstantExpr::getTrunc(CastOp, ST->getType()); break; } case scAddExpr: { const SCEVAddExpr *SA = cast<SCEVAddExpr>(V); if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) { if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { unsigned AS = PTy->getAddressSpace(); Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); C = ConstantExpr::getBitCast(C, DestPtrTy); } for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) { Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i)); if (!C2) return nullptr; // First pointer! if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) { unsigned AS = C2->getType()->getPointerAddressSpace(); std::swap(C, C2); Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); // The offsets have been converted to bytes. We can add bytes to an // i8* by GEP with the byte count in the first index. C = ConstantExpr::getBitCast(C, DestPtrTy); } // Don't bother trying to sum two pointers. We probably can't // statically compute a load that results from it anyway. if (C2->getType()->isPointerTy()) return nullptr; if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { if (PTy->getElementType()->isStructTy()) C2 = ConstantExpr::getIntegerCast( C2, Type::getInt32Ty(C->getContext()), true); C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2); } else C = ConstantExpr::getAdd(C, C2); } return C; } break; } case scMulExpr: { const SCEVMulExpr *SM = cast<SCEVMulExpr>(V); if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) { // Don't bother with pointers at all. 
if (C->getType()->isPointerTy()) return nullptr; for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) { Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i)); if (!C2 || C2->getType()->isPointerTy()) return nullptr; C = ConstantExpr::getMul(C, C2); } return C; } break; } case scUDivExpr: { const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V); if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS())) if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS())) if (LHS->getType() == RHS->getType()) return ConstantExpr::getUDiv(LHS, RHS); break; } case scSMaxExpr: case scUMaxExpr: break; // TODO: smax, umax. } return nullptr; } const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { if (isa<SCEVConstant>(V)) return V; // If this instruction is evolved from a constant-evolving PHI, compute the // exit value from the loop without using SCEVs. if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) { if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) { const Loop *LI = (*this->LI)[I->getParent()]; if (LI && LI->getParentLoop() == L) // Looking for loop exit value. if (PHINode *PN = dyn_cast<PHINode>(I)) if (PN->getParent() == LI->getHeader()) { // Okay, there is no closed form solution for the PHI node. Check // to see if the loop that contains it has a known backedge-taken // count. If so, we may be able to force computation of the exit // value. const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI); if (const SCEVConstant *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) { // Okay, we know how many times the containing loop executes. If // this is a constant evolving PHI node, get the final value at // the specified iteration number. Constant *RV = getConstantEvolutionLoopExitValue(PN, BTCC->getValue()->getValue(), LI); if (RV) return getSCEV(RV); } } // Okay, this is an expression that we cannot symbolically evaluate // into a SCEV. Check to see if it's possible to symbolically evaluate // the arguments into constants, and if so, try to constant propagate the // result. This is particularly useful for computing loop exit values. if (CanConstantFold(I)) { SmallVector<Constant *, 4> Operands; bool MadeImprovement = false; for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { Value *Op = I->getOperand(i); if (Constant *C = dyn_cast<Constant>(Op)) { Operands.push_back(C); continue; } // If any of the operands is non-constant and if they are // non-integer and non-pointer, don't even try to analyze them // with scev techniques. if (!isSCEVable(Op->getType())) return V; const SCEV *OrigV = getSCEV(Op); const SCEV *OpV = getSCEVAtScope(OrigV, L); MadeImprovement |= OrigV != OpV; Constant *C = BuildConstantFromSCEV(OpV); if (!C) return V; if (C->getType() != Op->getType()) C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, Op->getType(), false), C, Op->getType()); Operands.push_back(C); } // Check to see if getSCEVAtScope actually made an improvement. if (MadeImprovement) { Constant *C = nullptr; const DataLayout &DL = F->getParent()->getDataLayout(); if (const CmpInst *CI = dyn_cast<CmpInst>(I)) C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], Operands[1], DL, TLI); else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) { if (!LI->isVolatile()) C = ConstantFoldLoadFromConstPtr(Operands[0], DL); } else C = ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, DL, TLI); if (!C) return V; return getSCEV(C); } } } // This is some other type of SCEVUnknown, just return it. 
return V; } if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) { // Avoid performing the look-up in the common case where the specified // expression has no loop-variant portions. for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); if (OpAtScope != Comm->getOperand(i)) { // Okay, at least one of these operands is loop variant but might be // foldable. Build a new instance of the folded commutative expression. SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(), Comm->op_begin()+i); NewOps.push_back(OpAtScope); for (++i; i != e; ++i) { OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); NewOps.push_back(OpAtScope); } if (isa<SCEVAddExpr>(Comm)) return getAddExpr(NewOps); if (isa<SCEVMulExpr>(Comm)) return getMulExpr(NewOps); if (isa<SCEVSMaxExpr>(Comm)) return getSMaxExpr(NewOps); if (isa<SCEVUMaxExpr>(Comm)) return getUMaxExpr(NewOps); llvm_unreachable("Unknown commutative SCEV type!"); } } // If we got here, all operands are loop invariant. return Comm; } if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); if (LHS == Div->getLHS() && RHS == Div->getRHS()) return Div; // must be loop invariant return getUDivExpr(LHS, RHS); } // If this is a loop recurrence for a loop that does not contain L, then we // are dealing with the final value computed by the loop. if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { // First, attempt to evaluate each operand. // Avoid performing the look-up in the common case where the specified // expression has no loop-variant portions. for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); if (OpAtScope == AddRec->getOperand(i)) continue; // Okay, at least one of these operands is loop variant but might be // foldable. Build a new instance of the folded commutative expression. SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(), AddRec->op_begin()+i); NewOps.push_back(OpAtScope); for (++i; i != e; ++i) NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); const SCEV *FoldedRec = getAddRecExpr(NewOps, AddRec->getLoop(), AddRec->getNoWrapFlags(SCEV::FlagNW)); AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec); // The addrec may be folded to a nonrecurrence, for example, if the // induction variable is multiplied by zero after constant folding. Go // ahead and return the folded value. if (!AddRec) return FoldedRec; break; } // If the scope is outside the addrec's loop, evaluate it by using the // loop exit value of the addrec. if (!AddRec->getLoop()->contains(L)) { // To evaluate this recurrence, we need to know how many times the AddRec // loop iterates. Compute this now. const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; // Then, evaluate the AddRec. 
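      // evaluateAtIteration folds the recurrence at a known iteration number;
      // for example (illustrative numbers), the affine addrec {5,+,3} evaluated
      // at iteration 7 is 5 + 3*7 = 26.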
return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); } return AddRec; } if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); if (Op == Cast->getOperand()) return Cast; // must be loop invariant return getZeroExtendExpr(Op, Cast->getType()); } if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); if (Op == Cast->getOperand()) return Cast; // must be loop invariant return getSignExtendExpr(Op, Cast->getType()); } if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); if (Op == Cast->getOperand()) return Cast; // must be loop invariant return getTruncateExpr(Op, Cast->getType()); } llvm_unreachable("Unknown SCEV type!"); } /// getSCEVAtScope - This is a convenience function which does /// getSCEVAtScope(getSCEV(V), L). const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { return getSCEVAtScope(getSCEV(V), L); } /// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the /// following equation: /// /// A * X = B (mod N) /// /// where N = 2^BW and BW is the common bit width of A and B. The signedness of /// A and B isn't important. /// /// If the equation does not have a solution, SCEVCouldNotCompute is returned. static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B, ScalarEvolution &SE) { uint32_t BW = A.getBitWidth(); assert(BW == B.getBitWidth() && "Bit widths must be the same."); assert(A != 0 && "A must be non-zero."); // 1. D = gcd(A, N) // // The gcd of A and N may have only one prime factor: 2. The number of // trailing zeros in A is its multiplicity uint32_t Mult2 = A.countTrailingZeros(); // D = 2^Mult2 // 2. Check if B is divisible by D. // // B is divisible by D if and only if the multiplicity of prime factor 2 for B // is not less than multiplicity of this prime factor for D. if (B.countTrailingZeros() < Mult2) return SE.getCouldNotCompute(); // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic // modulo (N / D). // // (N / D) may need BW+1 bits in its representation. Hence, we'll use this // bit width during computations. APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D APInt Mod(BW + 1, 0); Mod.setBit(BW - Mult2); // Mod = N / D APInt I = AD.multiplicativeInverse(Mod); // 4. Compute the minimum unsigned root of the equation: // I * (B / D) mod (N / D) APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod); // The result is guaranteed to be less than 2^BW so we may truncate it to BW // bits. return SE.getConstant(Result.trunc(BW)); } /// SolveQuadraticEquation - Find the roots of the quadratic equation for the /// given quadratic chrec {L,+,M,+,N}. This returns either the two roots (which /// might be the same) or two SCEVCouldNotCompute objects. /// static std::pair<const SCEV *,const SCEV *> SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); // We currently can only solve this if the coefficients are constants. 
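  // Background for the algebra below: the value of the chrec {L,+,M,+,N} at
  // iteration i is L + M*i + N*i*(i-1)/2, i.e. the quadratic
  // (N/2)*i^2 + (M - N/2)*i + L, which is where the A = N/2 and B = M - N/2
  // coefficients used further down come from.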
if (!LC || !MC || !NC) { const SCEV *CNC = SE.getCouldNotCompute(); return std::make_pair(CNC, CNC); } uint32_t BitWidth = LC->getValue()->getValue().getBitWidth(); const APInt &L = LC->getValue()->getValue(); const APInt &M = MC->getValue()->getValue(); const APInt &N = NC->getValue()->getValue(); APInt Two(BitWidth, 2); APInt Four(BitWidth, 4); { using namespace APIntOps; const APInt& C = L; // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C // The B coefficient is M-N/2 APInt B(M); B -= sdiv(N,Two); // The A coefficient is N/2 APInt A(N.sdiv(Two)); // Compute the B^2-4ac term. APInt SqrtTerm(B); SqrtTerm *= B; SqrtTerm -= Four * (A * C); if (SqrtTerm.isNegative()) { // The loop is provably infinite. const SCEV *CNC = SE.getCouldNotCompute(); return std::make_pair(CNC, CNC); } // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest // integer value or else APInt::sqrt() will assert. APInt SqrtVal(SqrtTerm.sqrt()); // Compute the two solutions for the quadratic formula. // The divisions must be performed as signed divisions. APInt NegB(-B); APInt TwoA(A << 1); if (TwoA.isMinValue()) { const SCEV *CNC = SE.getCouldNotCompute(); return std::make_pair(CNC, CNC); } LLVMContext &Context = SE.getContext(); ConstantInt *Solution1 = ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA)); ConstantInt *Solution2 = ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA)); return std::make_pair(SE.getConstant(Solution1), SE.getConstant(Solution2)); } // end APIntOps namespace } /// HowFarToZero - Return the number of times a backedge comparing the specified /// value to zero will execute. If not computable, return CouldNotCompute. /// /// This is only used for loops with a "x != y" exit test. The exit condition is /// now expressed as a single expression, V = x-y. So the exit test is /// effectively V != 0. We know and take advantage of the fact that this /// expression only being used in a comparison by zero context. ScalarEvolution::ExitLimit ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L, bool ControlsExit) { // If the value is a constant if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { // If the value is already zero, the branch will execute zero times. if (C->getValue()->isZero()) return C; return getCouldNotCompute(); // Otherwise it will loop infinitely. } const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V); if (!AddRec || AddRec->getLoop() != L) return getCouldNotCompute(); // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of // the quadratic equation to solve it. if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) { std::pair<const SCEV *,const SCEV *> Roots = SolveQuadraticEquation(AddRec, *this); const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first); const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second); if (R1 && R2) { #if 0 dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1 << " sol#2: " << *R2 << "\n"; #endif // Pick the smallest positive root value. if (ConstantInt *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp(CmpInst::ICMP_ULT, R1->getValue(), R2->getValue()))) { if (!CB->getZExtValue()) std::swap(R1, R2); // R1 is the minimum root now. // We can only use this value if the chrec ends up with an exact zero // value at this index. When solving for "X*X != 5", for example, we // should not accept a root of 2. const SCEV *Val = AddRec->evaluateAtIteration(R1, *this); if (Val->isZero()) return R1; // We found a quadratic root! 
} } return getCouldNotCompute(); } // Otherwise we can only handle this if it is affine. if (!AddRec->isAffine()) return getCouldNotCompute(); // If this is an affine expression, the execution count of this branch is // the minimum unsigned root of the following equation: // // Start + Step*N = 0 (mod 2^BW) // // equivalent to: // // Step*N = -Start (mod 2^BW) // // where BW is the common bit width of Start and Step. // Get the initial value for the loop. const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop()); const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop()); // For now we handle only constant steps. // // TODO: Handle a nonconstant Step given AddRec<NUW>. If the // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step. // We have not yet seen any such cases. const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step); if (!StepC || StepC->getValue()->equalsInt(0)) return getCouldNotCompute(); // For positive steps (counting up until unsigned overflow): // N = -Start/Step (as unsigned) // For negative steps (counting down to zero): // N = Start/-Step // First compute the unsigned distance from zero in the direction of Step. bool CountDown = StepC->getValue()->getValue().isNegative(); const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start); // Handle unitary steps, which cannot wraparound. // 1*N = -Start; -1*N = Start (mod 2^BW), so: // N = Distance (as unsigned) if (StepC->getValue()->equalsInt(1) || StepC->getValue()->isAllOnesValue()) { ConstantRange CR = getUnsignedRange(Start); const SCEV *MaxBECount; if (!CountDown && CR.getUnsignedMin().isMinValue()) // When counting up, the worst starting value is 1, not 0. MaxBECount = CR.getUnsignedMax().isMinValue() ? getConstant(APInt::getMinValue(CR.getBitWidth())) : getConstant(APInt::getMaxValue(CR.getBitWidth())); else MaxBECount = getConstant(CountDown ? CR.getUnsignedMax() : -CR.getUnsignedMin()); return ExitLimit(Distance, MaxBECount); } // As a special case, handle the instance where Step is a positive power of // two. In this case, determining whether Step divides Distance evenly can be // done by counting and comparing the number of trailing zeros of Step and // Distance. if (!CountDown) { const APInt &StepV = StepC->getValue()->getValue(); // StepV.isPowerOf2() returns true if StepV is an positive power of two. It // also returns true if StepV is maximally negative (eg, INT_MIN), but that // case is not handled as this code is guarded by !CountDown. if (StepV.isPowerOf2() && GetMinTrailingZeros(Distance) >= StepV.countTrailingZeros()) return getUDivExactExpr(Distance, Step); } // If the condition controls loop exit (the loop exits only if the expression // is true) and the addition is no-wrap we can use unsigned divide to // compute the backedge count. In this case, the step may not divide the // distance, but we don't care because if the condition is "missed" the loop // will have undefined behavior due to wrapping. if (ControlsExit && AddRec->getNoWrapFlags(SCEV::FlagNW)) { const SCEV *Exact = getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step); return ExitLimit(Exact, Exact); } // Then, try to solve the above equation provided that Start is constant. 
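  // Worked example (illustrative numbers): with BW = 8, Start = 6 and Step = 2,
  // the call below solves 2*N == -6 (mod 256) and returns the minimum unsigned
  // root N = 125, since 2*125 = 250 == -6 (mod 256).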
if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start)) return SolveLinEquationWithOverflow(StepC->getValue()->getValue(), -StartC->getValue()->getValue(), *this); return getCouldNotCompute(); } /// HowFarToNonZero - Return the number of times a backedge checking the /// specified value for nonzero will execute. If not computable, return /// CouldNotCompute ScalarEvolution::ExitLimit ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) { // Loops that look like: while (X == 0) are very strange indeed. We don't // handle them yet except for the trivial case. This could be expanded in the // future as needed. // If the value is a constant, check to see if it is known to be non-zero // already. If so, the backedge will execute zero times. if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { if (!C->getValue()->isNullValue()) return getConstant(C->getType(), 0); return getCouldNotCompute(); // Otherwise it will loop infinitely. } // We could implement others, but I really doubt anyone writes loops like // this, and if they did, they would already be constant folded. return getCouldNotCompute(); } /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB /// (which may not be an immediate predecessor) which has exactly one /// successor from which BB is reachable, or null if no such block is /// found. /// std::pair<BasicBlock *, BasicBlock *> ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) { // If the block has a unique predecessor, then there is no path from the // predecessor to the block that does not go through the direct edge // from the predecessor to the block. if (BasicBlock *Pred = BB->getSinglePredecessor()) return std::make_pair(Pred, BB); // A loop's header is defined to be a block that dominates the loop. // If the header has a unique predecessor outside the loop, it must be // a block that has exactly one successor that can reach the loop. if (Loop *L = LI->getLoopFor(BB)) return std::make_pair(L->getLoopPredecessor(), L->getHeader()); return std::pair<BasicBlock *, BasicBlock *>(); } /// HasSameValue - SCEV structural equivalence is usually sufficient for /// testing whether two expressions are equal, however for the purposes of /// looking for a condition guarding a loop, it can be useful to be a little /// more general, since a front-end may have replicated the controlling /// expression. /// static bool HasSameValue(const SCEV *A, const SCEV *B) { // Quick check to see if they are the same SCEV. if (A == B) return true; // Otherwise, if they're both SCEVUnknown, it's possible that they hold // two different instructions with the same value. Check for this case. if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A)) if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B)) if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue())) if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue())) if (AI->isIdenticalTo(BI) && !AI->mayReadFromMemory()) return true; // Otherwise assume they may have a different value. return false; } /// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with /// predicate Pred. Return true iff any changes were made. /// bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred, const SCEV *&LHS, const SCEV *&RHS, unsigned Depth) { bool Changed = false; // If we hit the max recursion limit bail out. if (Depth >= 3) return false; // Canonicalize a constant to the right side. 
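  // For example, a constant-on-the-left comparison such as "5 u> %x" is
  // rewritten below as "%x u< 5", and a comparison of two constants folds
  // straight to trivially_true or trivially_false.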
if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { // Check for both operands constant. if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { if (ConstantExpr::getICmp(Pred, LHSC->getValue(), RHSC->getValue())->isNullValue()) goto trivially_false; else goto trivially_true; } // Otherwise swap the operands to put the constant on the right. std::swap(LHS, RHS); Pred = ICmpInst::getSwappedPredicate(Pred); Changed = true; } // If we're comparing an addrec with a value which is loop-invariant in the // addrec's loop, put the addrec on the left. Also make a dominance check, // as both operands could be addrecs loop-invariant in each other's loop. if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) { const Loop *L = AR->getLoop(); if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) { std::swap(LHS, RHS); Pred = ICmpInst::getSwappedPredicate(Pred); Changed = true; } } // If there's a constant operand, canonicalize comparisons with boundary // cases, and canonicalize *-or-equal comparisons to regular comparisons. if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) { const APInt &RA = RC->getValue()->getValue(); switch (Pred) { default: llvm_unreachable("Unexpected ICmpInst::Predicate value!"); case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_NE: // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b. if (!RA) if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS)) if (const SCEVMulExpr *ME = dyn_cast<SCEVMulExpr>(AE->getOperand(0))) if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 && ME->getOperand(0)->isAllOnesValue()) { RHS = AE->getOperand(1); LHS = ME->getOperand(1); Changed = true; } break; case ICmpInst::ICMP_UGE: if ((RA - 1).isMinValue()) { Pred = ICmpInst::ICMP_NE; RHS = getConstant(RA - 1); Changed = true; break; } if (RA.isMaxValue()) { Pred = ICmpInst::ICMP_EQ; Changed = true; break; } if (RA.isMinValue()) goto trivially_true; Pred = ICmpInst::ICMP_UGT; RHS = getConstant(RA - 1); Changed = true; break; case ICmpInst::ICMP_ULE: if ((RA + 1).isMaxValue()) { Pred = ICmpInst::ICMP_NE; RHS = getConstant(RA + 1); Changed = true; break; } if (RA.isMinValue()) { Pred = ICmpInst::ICMP_EQ; Changed = true; break; } if (RA.isMaxValue()) goto trivially_true; Pred = ICmpInst::ICMP_ULT; RHS = getConstant(RA + 1); Changed = true; break; case ICmpInst::ICMP_SGE: if ((RA - 1).isMinSignedValue()) { Pred = ICmpInst::ICMP_NE; RHS = getConstant(RA - 1); Changed = true; break; } if (RA.isMaxSignedValue()) { Pred = ICmpInst::ICMP_EQ; Changed = true; break; } if (RA.isMinSignedValue()) goto trivially_true; Pred = ICmpInst::ICMP_SGT; RHS = getConstant(RA - 1); Changed = true; break; case ICmpInst::ICMP_SLE: if ((RA + 1).isMaxSignedValue()) { Pred = ICmpInst::ICMP_NE; RHS = getConstant(RA + 1); Changed = true; break; } if (RA.isMinSignedValue()) { Pred = ICmpInst::ICMP_EQ; Changed = true; break; } if (RA.isMaxSignedValue()) goto trivially_true; Pred = ICmpInst::ICMP_SLT; RHS = getConstant(RA + 1); Changed = true; break; case ICmpInst::ICMP_UGT: if (RA.isMinValue()) { Pred = ICmpInst::ICMP_NE; Changed = true; break; } if ((RA + 1).isMaxValue()) { Pred = ICmpInst::ICMP_EQ; RHS = getConstant(RA + 1); Changed = true; break; } if (RA.isMaxValue()) goto trivially_false; break; case ICmpInst::ICMP_ULT: if (RA.isMaxValue()) { Pred = ICmpInst::ICMP_NE; Changed = true; break; } if ((RA - 1).isMinValue()) { Pred = ICmpInst::ICMP_EQ; RHS = getConstant(RA - 1); Changed = true; break; } if (RA.isMinValue()) goto trivially_false; break; case 
ICmpInst::ICMP_SGT: if (RA.isMinSignedValue()) { Pred = ICmpInst::ICMP_NE; Changed = true; break; } if ((RA + 1).isMaxSignedValue()) { Pred = ICmpInst::ICMP_EQ; RHS = getConstant(RA + 1); Changed = true; break; } if (RA.isMaxSignedValue()) goto trivially_false; break; case ICmpInst::ICMP_SLT: if (RA.isMaxSignedValue()) { Pred = ICmpInst::ICMP_NE; Changed = true; break; } if ((RA - 1).isMinSignedValue()) { Pred = ICmpInst::ICMP_EQ; RHS = getConstant(RA - 1); Changed = true; break; } if (RA.isMinSignedValue()) goto trivially_false; break; } } // Check for obvious equality. if (HasSameValue(LHS, RHS)) { if (ICmpInst::isTrueWhenEqual(Pred)) goto trivially_true; if (ICmpInst::isFalseWhenEqual(Pred)) goto trivially_false; } // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by // adding or subtracting 1 from one of the operands. switch (Pred) { case ICmpInst::ICMP_SLE: if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) { RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, SCEV::FlagNSW); Pred = ICmpInst::ICMP_SLT; Changed = true; } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) { LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, SCEV::FlagNSW); Pred = ICmpInst::ICMP_SLT; Changed = true; } break; case ICmpInst::ICMP_SGE: if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) { RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, SCEV::FlagNSW); Pred = ICmpInst::ICMP_SGT; Changed = true; } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) { LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, SCEV::FlagNSW); Pred = ICmpInst::ICMP_SGT; Changed = true; } break; case ICmpInst::ICMP_ULE: if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) { RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, SCEV::FlagNUW); Pred = ICmpInst::ICMP_ULT; Changed = true; } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) { LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, SCEV::FlagNUW); Pred = ICmpInst::ICMP_ULT; Changed = true; } break; case ICmpInst::ICMP_UGE: if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) { RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, SCEV::FlagNUW); Pred = ICmpInst::ICMP_UGT; Changed = true; } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) { LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, SCEV::FlagNUW); Pred = ICmpInst::ICMP_UGT; Changed = true; } break; default: break; } // TODO: More simplifications are possible here. // Recursively simplify until we either hit a recursion limit or nothing // changes. if (Changed) return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1); return Changed; trivially_true: // Return 0 == 0. LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); Pred = ICmpInst::ICMP_EQ; return true; trivially_false: // Return 0 != 0. 
LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); Pred = ICmpInst::ICMP_NE; return true; } bool ScalarEvolution::isKnownNegative(const SCEV *S) { return getSignedRange(S).getSignedMax().isNegative(); } bool ScalarEvolution::isKnownPositive(const SCEV *S) { return getSignedRange(S).getSignedMin().isStrictlyPositive(); } bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { return !getSignedRange(S).getSignedMin().isNegative(); } bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { return !getSignedRange(S).getSignedMax().isStrictlyPositive(); } bool ScalarEvolution::isKnownNonZero(const SCEV *S) { return isKnownNegative(S) || isKnownPositive(S); } bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { // Canonicalize the inputs first. (void)SimplifyICmpOperands(Pred, LHS, RHS); // If LHS or RHS is an addrec, check to see if the condition is true in // every iteration of the loop. // If LHS and RHS are both addrec, both conditions must be true in // every iteration of the loop. const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS); const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); bool LeftGuarded = false; bool RightGuarded = false; if (LAR) { const Loop *L = LAR->getLoop(); if (isLoopEntryGuardedByCond(L, Pred, LAR->getStart(), RHS) && isLoopBackedgeGuardedByCond(L, Pred, LAR->getPostIncExpr(*this), RHS)) { if (!RAR) return true; LeftGuarded = true; } } if (RAR) { const Loop *L = RAR->getLoop(); if (isLoopEntryGuardedByCond(L, Pred, LHS, RAR->getStart()) && isLoopBackedgeGuardedByCond(L, Pred, LHS, RAR->getPostIncExpr(*this))) { if (!LAR) return true; RightGuarded = true; } } if (LeftGuarded && RightGuarded) return true; // Otherwise see what can be done with known constant ranges. return isKnownPredicateWithRanges(Pred, LHS, RHS); } bool ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { if (HasSameValue(LHS, RHS)) return ICmpInst::isTrueWhenEqual(Pred); // This code is split out from isKnownPredicate because it is called from // within isLoopEntryGuardedByCond. 
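  // The cases below answer the query purely from constant ranges. For example,
  // if the unsigned range of LHS is [0, 8) and the unsigned range of RHS is
  // [16, 32), then "LHS u< RHS" is known to hold because umax(LHS) = 7 is less
  // than umin(RHS) = 16.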
switch (Pred) { default: llvm_unreachable("Unexpected ICmpInst::Predicate value!"); case ICmpInst::ICMP_SGT: std::swap(LHS, RHS); LLVM_FALLTHROUGH; // HLSL Change case ICmpInst::ICMP_SLT: { ConstantRange LHSRange = getSignedRange(LHS); ConstantRange RHSRange = getSignedRange(RHS); if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin())) return true; if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax())) return false; break; } case ICmpInst::ICMP_SGE: std::swap(LHS, RHS); LLVM_FALLTHROUGH; // HLSL Change case ICmpInst::ICMP_SLE: { ConstantRange LHSRange = getSignedRange(LHS); ConstantRange RHSRange = getSignedRange(RHS); if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin())) return true; if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax())) return false; break; } case ICmpInst::ICMP_UGT: std::swap(LHS, RHS); LLVM_FALLTHROUGH; // HLSL Change case ICmpInst::ICMP_ULT: { ConstantRange LHSRange = getUnsignedRange(LHS); ConstantRange RHSRange = getUnsignedRange(RHS); if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin())) return true; if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax())) return false; break; } case ICmpInst::ICMP_UGE: std::swap(LHS, RHS); LLVM_FALLTHROUGH; // HLSL Change case ICmpInst::ICMP_ULE: { ConstantRange LHSRange = getUnsignedRange(LHS); ConstantRange RHSRange = getUnsignedRange(RHS); if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin())) return true; if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax())) return false; break; } case ICmpInst::ICMP_NE: { if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet()) return true; if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet()) return true; const SCEV *Diff = getMinusSCEV(LHS, RHS); if (isKnownNonZero(Diff)) return true; break; } case ICmpInst::ICMP_EQ: // The check at the top of the function catches the case where // the values are known to be equal. break; } return false; } /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is /// protected by a conditional between LHS and RHS. This is used to /// to eliminate casts. bool ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L, ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { // Interpret a null as meaning no loop, where there is obviously no guard // (interprocedural conditions notwithstanding). if (!L) return true; if (isKnownPredicateWithRanges(Pred, LHS, RHS)) return true; BasicBlock *Latch = L->getLoopLatch(); if (!Latch) return false; BranchInst *LoopContinuePredicate = dyn_cast<BranchInst>(Latch->getTerminator()); if (LoopContinuePredicate && LoopContinuePredicate->isConditional() && isImpliedCond(Pred, LHS, RHS, LoopContinuePredicate->getCondition(), LoopContinuePredicate->getSuccessor(0) != L->getHeader())) return true; // Check conditions due to any @llvm.assume intrinsics. for (auto &AssumeVH : AC->assumptions()) { if (!AssumeVH) continue; auto *CI = cast<CallInst>(AssumeVH); if (!DT->dominates(CI, Latch->getTerminator())) continue; if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false)) return true; } struct ClearWalkingBEDominatingCondsOnExit { ScalarEvolution &SE; explicit ClearWalkingBEDominatingCondsOnExit(ScalarEvolution &SE) : SE(SE){}; ~ClearWalkingBEDominatingCondsOnExit() { SE.WalkingBEDominatingConds = false; } }; // We don't want more than one activation of the following loop on the stack // -- that can lead to O(n!) time complexity. 
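  // The walk below climbs the dominator tree from the latch toward the header;
  // for each visited block with a unique predecessor ending in a conditional
  // branch, and where that edge is the only edge between the two blocks, the
  // branch condition is tested via isImpliedCond as a guard of the backedge.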
if (WalkingBEDominatingConds) return false; WalkingBEDominatingConds = true; ClearWalkingBEDominatingCondsOnExit ClearOnExit(*this); // If the loop is not reachable from the entry block, we risk running into an // infinite loop as we walk up into the dom tree. These loops do not matter // anyway, so we just return a conservative answer when we see them. if (!DT->isReachableFromEntry(L->getHeader())) return false; for (DomTreeNode *DTN = (*DT)[Latch], *HeaderDTN = (*DT)[L->getHeader()]; DTN != HeaderDTN; DTN = DTN->getIDom()) { assert(DTN && "should reach the loop header before reaching the root!"); BasicBlock *BB = DTN->getBlock(); BasicBlock *PBB = BB->getSinglePredecessor(); if (!PBB) continue; BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator()); if (!ContinuePredicate || !ContinuePredicate->isConditional()) continue; Value *Condition = ContinuePredicate->getCondition(); // If we have an edge `E` within the loop body that dominates the only // latch, the condition guarding `E` also guards the backedge. This // reasoning works only for loops with a single latch. BasicBlockEdge DominatingEdge(PBB, BB); if (DominatingEdge.isSingleEdge()) { // We're constructively (and conservatively) enumerating edges within the // loop body that dominate the latch. The dominator tree better agree // with us on this: assert(DT->dominates(DominatingEdge, Latch) && "should be!"); if (isImpliedCond(Pred, LHS, RHS, Condition, BB != ContinuePredicate->getSuccessor(0))) return true; } } return false; } /// isLoopEntryGuardedByCond - Test whether entry to the loop is protected /// by a conditional between LHS and RHS. This is used to help avoid max /// expressions in loop trip counts, and to eliminate casts. bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L, ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { // Interpret a null as meaning no loop, where there is obviously no guard // (interprocedural conditions notwithstanding). if (!L) return false; if (isKnownPredicateWithRanges(Pred, LHS, RHS)) return true; // Starting at the loop predecessor, climb up the predecessor chain, as long // as there are predecessors that can be found that have unique successors // leading to the original header. for (std::pair<BasicBlock *, BasicBlock *> Pair(L->getLoopPredecessor(), L->getHeader()); Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) { BranchInst *LoopEntryPredicate = dyn_cast<BranchInst>(Pair.first->getTerminator()); if (!LoopEntryPredicate || LoopEntryPredicate->isUnconditional()) continue; if (isImpliedCond(Pred, LHS, RHS, LoopEntryPredicate->getCondition(), LoopEntryPredicate->getSuccessor(0) != Pair.second)) return true; } // Check conditions due to any @llvm.assume intrinsics. for (auto &AssumeVH : AC->assumptions()) { if (!AssumeVH) continue; auto *CI = cast<CallInst>(AssumeVH); if (!DT->dominates(CI, L->getHeader())) continue; if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false)) return true; } return false; } /// RAII wrapper to prevent recursive application of isImpliedCond. /// ScalarEvolution's PendingLoopPredicates set must be empty unless we are /// currently evaluating isImpliedCond. 
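/// Typical use (see isImpliedCond below): construct one of these for the
/// condition value about to be examined and give up if Pending is set, since
/// that means the same condition is already being processed further up the
/// call stack.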
struct MarkPendingLoopPredicate { Value *Cond; DenseSet<Value*> &LoopPreds; bool Pending; MarkPendingLoopPredicate(Value *C, DenseSet<Value*> &LP) : Cond(C), LoopPreds(LP) { Pending = !LoopPreds.insert(Cond).second; } ~MarkPendingLoopPredicate() { if (!Pending) LoopPreds.erase(Cond); } }; /// isImpliedCond - Test whether the condition described by Pred, LHS, /// and RHS is true whenever the given Cond value evaluates to true. bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, Value *FoundCondValue, bool Inverse) { MarkPendingLoopPredicate Mark(FoundCondValue, PendingLoopPredicates); if (Mark.Pending) return false; // Recursively handle And and Or conditions. if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) { if (BO->getOpcode() == Instruction::And) { if (!Inverse) return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) || isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse); } else if (BO->getOpcode() == Instruction::Or) { if (Inverse) return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) || isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse); } } ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue); if (!ICI) return false; // Now that we found a conditional branch that dominates the loop or controls // the loop latch. Check to see if it is the comparison we are looking for. ICmpInst::Predicate FoundPred; if (Inverse) FoundPred = ICI->getInversePredicate(); else FoundPred = ICI->getPredicate(); const SCEV *FoundLHS = getSCEV(ICI->getOperand(0)); const SCEV *FoundRHS = getSCEV(ICI->getOperand(1)); // Balance the types. if (getTypeSizeInBits(LHS->getType()) < getTypeSizeInBits(FoundLHS->getType())) { if (CmpInst::isSigned(Pred)) { LHS = getSignExtendExpr(LHS, FoundLHS->getType()); RHS = getSignExtendExpr(RHS, FoundLHS->getType()); } else { LHS = getZeroExtendExpr(LHS, FoundLHS->getType()); RHS = getZeroExtendExpr(RHS, FoundLHS->getType()); } } else if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(FoundLHS->getType())) { if (CmpInst::isSigned(FoundPred)) { FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType()); FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType()); } else { FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType()); FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType()); } } // Canonicalize the query to match the way instcombine will have // canonicalized the comparison. if (SimplifyICmpOperands(Pred, LHS, RHS)) if (LHS == RHS) return CmpInst::isTrueWhenEqual(Pred); if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS)) if (FoundLHS == FoundRHS) return CmpInst::isFalseWhenEqual(FoundPred); // Check to see if we can make the LHS or RHS match. if (LHS == FoundRHS || RHS == FoundLHS) { if (isa<SCEVConstant>(RHS)) { std::swap(FoundLHS, FoundRHS); FoundPred = ICmpInst::getSwappedPredicate(FoundPred); } else { std::swap(LHS, RHS); Pred = ICmpInst::getSwappedPredicate(Pred); } } // Check whether the found predicate is the same as the desired predicate. if (FoundPred == Pred) return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS); // Check whether swapping the found predicate makes it the same as the // desired predicate. if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) { if (isa<SCEVConstant>(RHS)) return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS); else return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred), RHS, LHS, FoundLHS, FoundRHS); } // Check if we can make progress by sharpening ranges. 
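  // For example, if the dominating condition proves V != 0, V's known unsigned
  // minimum is exactly 0, and the queried predicate is unsigned, the code below
  // retries the implication with the sharpened fact V u>= 1.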
if (FoundPred == ICmpInst::ICMP_NE && (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) { const SCEVConstant *C = nullptr; const SCEV *V = nullptr; if (isa<SCEVConstant>(FoundLHS)) { C = cast<SCEVConstant>(FoundLHS); V = FoundRHS; } else { C = cast<SCEVConstant>(FoundRHS); V = FoundLHS; } // The guarding predicate tells us that C != V. If the known range // of V is [C, t), we can sharpen the range to [C + 1, t). The // range we consider has to correspond to same signedness as the // predicate we're interested in folding. APInt Min = ICmpInst::isSigned(Pred) ? getSignedRange(V).getSignedMin() : getUnsignedRange(V).getUnsignedMin(); if (Min == C->getValue()->getValue()) { // Given (V >= Min && V != Min) we conclude V >= (Min + 1). // This is true even if (Min + 1) wraps around -- in case of // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)). APInt SharperMin = Min + 1; switch (Pred) { case ICmpInst::ICMP_SGE: case ICmpInst::ICMP_UGE: // We know V `Pred` SharperMin. If this implies LHS `Pred` // RHS, we're done. if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin))) return true; LLVM_FALLTHROUGH; // HLSL Change case ICmpInst::ICMP_SGT: case ICmpInst::ICMP_UGT: // We know from the range information that (V `Pred` Min || // V == Min). We know from the guarding condition that !(V // == Min). This gives us // // V `Pred` Min || V == Min && !(V == Min) // => V `Pred` Min // // If V `Pred` Min implies LHS `Pred` RHS, we're done. if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min))) return true; break; default: // No change break; } } } // Check whether the actual condition is beyond sufficient. if (FoundPred == ICmpInst::ICMP_EQ) if (ICmpInst::isTrueWhenEqual(Pred)) if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS)) return true; if (Pred == ICmpInst::ICMP_NE) if (!ICmpInst::isTrueWhenEqual(FoundPred)) if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS)) return true; // Otherwise assume the worst. return false; } /// isImpliedCondOperands - Test whether the condition described by Pred, /// LHS, and RHS is true whenever the condition described by Pred, FoundLHS, /// and FoundRHS is true. bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const SCEV *FoundLHS, const SCEV *FoundRHS) { if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS)) return true; return isImpliedCondOperandsHelper(Pred, LHS, RHS, FoundLHS, FoundRHS) || // ~x < ~y --> x > y isImpliedCondOperandsHelper(Pred, LHS, RHS, getNotSCEV(FoundRHS), getNotSCEV(FoundLHS)); } /// If Expr computes ~A, return A else return nullptr static const SCEV *MatchNotExpr(const SCEV *Expr) { const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr); if (!Add || Add->getNumOperands() != 2) return nullptr; const SCEVConstant *AddLHS = dyn_cast<SCEVConstant>(Add->getOperand(0)); if (!(AddLHS && AddLHS->getValue()->getValue().isAllOnesValue())) return nullptr; const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); if (!AddRHS || AddRHS->getNumOperands() != 2) return nullptr; const SCEVConstant *MulLHS = dyn_cast<SCEVConstant>(AddRHS->getOperand(0)); if (!(MulLHS && MulLHS->getValue()->getValue().isAllOnesValue())) return nullptr; return AddRHS->getOperand(1); } /// Is MaybeMaxExpr an SMax or UMax of Candidate and some other values? 
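/// For example, smax(%a, %b, %c) "consists of" %b; IsKnownPredicateViaMinOrMax
/// below relies on this to conclude facts such as A <= max(A, ...).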
template<typename MaxExprType> static bool IsMaxConsistingOf(const SCEV *MaybeMaxExpr, const SCEV *Candidate) { const MaxExprType *MaxExpr = dyn_cast<MaxExprType>(MaybeMaxExpr); if (!MaxExpr) return false; auto It = std::find(MaxExpr->op_begin(), MaxExpr->op_end(), Candidate); return It != MaxExpr->op_end(); } /// Is MaybeMinExpr an SMin or UMin of Candidate and some other values? template<typename MaxExprType> static bool IsMinConsistingOf(ScalarEvolution &SE, const SCEV *MaybeMinExpr, const SCEV *Candidate) { const SCEV *MaybeMaxExpr = MatchNotExpr(MaybeMinExpr); if (!MaybeMaxExpr) return false; return IsMaxConsistingOf<MaxExprType>(MaybeMaxExpr, SE.getNotSCEV(Candidate)); } /// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max /// expression? static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { switch (Pred) { default: return false; case ICmpInst::ICMP_SGE: std::swap(LHS, RHS); LLVM_FALLTHROUGH; // HLSL Change case ICmpInst::ICMP_SLE: return // min(A, ...) <= A IsMinConsistingOf<SCEVSMaxExpr>(SE, LHS, RHS) || // A <= max(A, ...) IsMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS); case ICmpInst::ICMP_UGE: std::swap(LHS, RHS); LLVM_FALLTHROUGH; // HLSL Change case ICmpInst::ICMP_ULE: return // min(A, ...) <= A IsMinConsistingOf<SCEVUMaxExpr>(SE, LHS, RHS) || // A <= max(A, ...) IsMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS); } llvm_unreachable("covered switch fell through?!"); } /// isImpliedCondOperandsHelper - Test whether the condition described by /// Pred, LHS, and RHS is true whenever the condition described by Pred, /// FoundLHS, and FoundRHS is true. bool ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const SCEV *FoundLHS, const SCEV *FoundRHS) { auto IsKnownPredicateFull = [this](ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { return isKnownPredicateWithRanges(Pred, LHS, RHS) || IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS); }; switch (Pred) { default: llvm_unreachable("Unexpected ICmpInst::Predicate value!"); case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_NE: if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS)) return true; break; case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: if (IsKnownPredicateFull(ICmpInst::ICMP_SLE, LHS, FoundLHS) && IsKnownPredicateFull(ICmpInst::ICMP_SGE, RHS, FoundRHS)) return true; break; case ICmpInst::ICMP_SGT: case ICmpInst::ICMP_SGE: if (IsKnownPredicateFull(ICmpInst::ICMP_SGE, LHS, FoundLHS) && IsKnownPredicateFull(ICmpInst::ICMP_SLE, RHS, FoundRHS)) return true; break; case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: if (IsKnownPredicateFull(ICmpInst::ICMP_ULE, LHS, FoundLHS) && IsKnownPredicateFull(ICmpInst::ICMP_UGE, RHS, FoundRHS)) return true; break; case ICmpInst::ICMP_UGT: case ICmpInst::ICMP_UGE: if (IsKnownPredicateFull(ICmpInst::ICMP_UGE, LHS, FoundLHS) && IsKnownPredicateFull(ICmpInst::ICMP_ULE, RHS, FoundRHS)) return true; break; } return false; } /// isImpliedCondOperandsViaRanges - helper function for isImpliedCondOperands. /// Tries to get cases like "X `sgt` 0 => X - 1 `sgt` -1". bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const SCEV *FoundLHS, const SCEV *FoundRHS) { if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS)) // The restriction on `FoundRHS` can be lifted easily -- it exists only to // reduce the compile time impact of this optimization.
return false; const SCEVAddExpr *AddLHS = dyn_cast<SCEVAddExpr>(LHS); if (!AddLHS || AddLHS->getOperand(1) != FoundLHS || !isa<SCEVConstant>(AddLHS->getOperand(0))) return false; APInt ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getValue()->getValue(); // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the // antecedent "`FoundLHS` `Pred` `FoundRHS`". ConstantRange FoundLHSRange = ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS); // Since `LHS` is `FoundLHS` + `AddLHS->getOperand(0)`, we can compute a range // for `LHS`: APInt Addend = cast<SCEVConstant>(AddLHS->getOperand(0))->getValue()->getValue(); ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(Addend)); // We can also compute the range of values for `LHS` that satisfy the // consequent, "`LHS` `Pred` `RHS`": APInt ConstRHS = cast<SCEVConstant>(RHS)->getValue()->getValue(); ConstantRange SatisfyingLHSRange = ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS); // The antecedent implies the consequent if every value of `LHS` that // satisfies the antecedent also satisfies the consequent. return SatisfyingLHSRange.contains(LHSRange); } // Verify if a linear IV with positive stride can overflow when in a // less-than comparison, knowing the invariant term of the comparison, the // stride and the knowledge of NSW/NUW flags on the recurrence. bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, bool IsSigned, bool NoWrap) { if (NoWrap) return false; unsigned BitWidth = getTypeSizeInBits(RHS->getType()); const SCEV *One = getConstant(Stride->getType(), 1); if (IsSigned) { APInt MaxRHS = getSignedRange(RHS).getSignedMax(); APInt MaxValue = APInt::getSignedMaxValue(BitWidth); APInt MaxStrideMinusOne = getSignedRange(getMinusSCEV(Stride, One)) .getSignedMax(); // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow! return (MaxValue - MaxStrideMinusOne).slt(MaxRHS); } APInt MaxRHS = getUnsignedRange(RHS).getUnsignedMax(); APInt MaxValue = APInt::getMaxValue(BitWidth); APInt MaxStrideMinusOne = getUnsignedRange(getMinusSCEV(Stride, One)) .getUnsignedMax(); // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow! return (MaxValue - MaxStrideMinusOne).ult(MaxRHS); } // Verify if a linear IV with negative stride can overflow when in a // greater-than comparison, knowing the invariant term of the comparison, // the stride and the knowledge of NSW/NUW flags on the recurrence. bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, bool IsSigned, bool NoWrap) { if (NoWrap) return false; unsigned BitWidth = getTypeSizeInBits(RHS->getType()); const SCEV *One = getConstant(Stride->getType(), 1); if (IsSigned) { APInt MinRHS = getSignedRange(RHS).getSignedMin(); APInt MinValue = APInt::getSignedMinValue(BitWidth); APInt MaxStrideMinusOne = getSignedRange(getMinusSCEV(Stride, One)) .getSignedMax(); // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow! return (MinValue + MaxStrideMinusOne).sgt(MinRHS); } APInt MinRHS = getUnsignedRange(RHS).getUnsignedMin(); APInt MinValue = APInt::getMinValue(BitWidth); APInt MaxStrideMinusOne = getUnsignedRange(getMinusSCEV(Stride, One)) .getUnsignedMax(); // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! return (MinValue + MaxStrideMinusOne).ugt(MinRHS); } // Compute the backedge taken count knowing the interval difference, the // stride and the presence of equality in the comparison.
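// In other words (a sketch of the helper that follows): for a strict
// comparison (Equality == false) the result is the ceiling division
//   BECount = (Delta + Step - 1) / Step
// and with equality included it is (Delta + Step) / Step. For instance, with
// Delta = 10 and Step = 3 -- e.g. a loop `for (i = 0; i < 10; i += 3)` -- the
// backedge is taken ceil(10 / 3) = 4 times.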
const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step, bool Equality) { const SCEV *One = getConstant(Step->getType(), 1); Delta = Equality ? getAddExpr(Delta, Step) : getAddExpr(Delta, getMinusSCEV(Step, One)); return getUDivExpr(Delta, Step); } /// HowManyLessThans - Return the number of times a backedge containing the /// specified less-than comparison will execute. If not computable, return /// CouldNotCompute. /// /// @param ControlsExit is true when the LHS < RHS condition directly controls /// the branch (the loop exits only if the condition is true). In this case, we can use /// NoWrapFlags to skip overflow checks. ScalarEvolution::ExitLimit ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS, const Loop *L, bool IsSigned, bool ControlsExit) { // We handle only IV < Invariant if (!isLoopInvariant(RHS, L)) return getCouldNotCompute(); const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); // Avoid weird loops if (!IV || IV->getLoop() != L || !IV->isAffine()) return getCouldNotCompute(); bool NoWrap = ControlsExit && IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW); const SCEV *Stride = IV->getStepRecurrence(*this); // Avoid negative or zero stride values if (!isKnownPositive(Stride)) return getCouldNotCompute(); // Avoid proven overflow cases: this will ensure that the backedge taken count // will not generate any unsigned overflow. Relaxed no-overflow conditions // exploit NoWrapFlags, allowing us to optimize in the presence of undefined // behavior, as in C. if (!Stride->isOne() && doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap)) return getCouldNotCompute(); ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; const SCEV *Start = IV->getStart(); const SCEV *End = RHS; if (!isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS)) { const SCEV *Diff = getMinusSCEV(RHS, Start); // If we have NoWrap set, then we can assume that the increment won't // overflow, in which case if RHS - Start is a constant, we don't need to // do a max operation since we can just figure it out statically if (NoWrap && isa<SCEVConstant>(Diff)) { APInt D = dyn_cast<const SCEVConstant>(Diff)->getValue()->getValue(); if (D.isNegative()) End = Start; } else End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start); } const SCEV *BECount = computeBECount(getMinusSCEV(End, Start), Stride, false); APInt MinStart = IsSigned ? getSignedRange(Start).getSignedMin() : getUnsignedRange(Start).getUnsignedMin(); APInt MinStride = IsSigned ? getSignedRange(Stride).getSignedMin() : getUnsignedRange(Stride).getUnsignedMin(); unsigned BitWidth = getTypeSizeInBits(LHS->getType()); APInt Limit = IsSigned ? APInt::getSignedMaxValue(BitWidth) - (MinStride - 1) : APInt::getMaxValue(BitWidth) - (MinStride - 1); // Although End can be a MAX expression we estimate MaxEnd considering only // the case End = RHS. This is safe because in the other case (End - Start) // is zero, leading to a zero maximum backedge taken count. APInt MaxEnd = IsSigned ?
APIntOps::smin(getSignedRange(RHS).getSignedMax(), Limit) : APIntOps::umin(getUnsignedRange(RHS).getUnsignedMax(), Limit); const SCEV *MaxBECount; if (isa<SCEVConstant>(BECount)) MaxBECount = BECount; else MaxBECount = computeBECount(getConstant(MaxEnd - MinStart), getConstant(MinStride), false); if (isa<SCEVCouldNotCompute>(MaxBECount)) MaxBECount = BECount; return ExitLimit(BECount, MaxBECount); } ScalarEvolution::ExitLimit ScalarEvolution::HowManyGreaterThans(const SCEV *LHS, const SCEV *RHS, const Loop *L, bool IsSigned, bool ControlsExit) { // We handle only IV > Invariant if (!isLoopInvariant(RHS, L)) return getCouldNotCompute(); const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); // Avoid weird loops if (!IV || IV->getLoop() != L || !IV->isAffine()) return getCouldNotCompute(); bool NoWrap = ControlsExit && IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW); const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this)); // Avoid negative or zero stride values if (!isKnownPositive(Stride)) return getCouldNotCompute(); // Avoid proven overflow cases: this will ensure that the backedge taken count // will not generate any unsigned overflow. Relaxed no-overflow conditions // exploit NoWrapFlags, allowing us to optimize in the presence of undefined // behavior, as in C. if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap)) return getCouldNotCompute(); ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; const SCEV *Start = IV->getStart(); const SCEV *End = RHS; if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) { const SCEV *Diff = getMinusSCEV(RHS, Start); // If we have NoWrap set, then we can assume that the increment won't // overflow, in which case if RHS - Start is a constant, we don't need to // do a max operation since we can just figure it out statically if (NoWrap && isa<SCEVConstant>(Diff)) { APInt D = dyn_cast<const SCEVConstant>(Diff)->getValue()->getValue(); if (!D.isNegative()) End = Start; } else End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start); } const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false); APInt MaxStart = IsSigned ? getSignedRange(Start).getSignedMax() : getUnsignedRange(Start).getUnsignedMax(); APInt MinStride = IsSigned ? getSignedRange(Stride).getSignedMin() : getUnsignedRange(Stride).getUnsignedMin(); unsigned BitWidth = getTypeSizeInBits(LHS->getType()); APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1) : APInt::getMinValue(BitWidth) + (MinStride - 1); // Although End can be a MIN expression we estimate MinEnd considering only // the case End = RHS. This is safe because in the other case (Start - End) // is zero, leading to a zero maximum backedge taken count. APInt MinEnd = IsSigned ? APIntOps::smax(getSignedRange(RHS).getSignedMin(), Limit) : APIntOps::umax(getUnsignedRange(RHS).getUnsignedMin(), Limit); const SCEV *MaxBECount = getCouldNotCompute(); if (isa<SCEVConstant>(BECount)) MaxBECount = BECount; else MaxBECount = computeBECount(getConstant(MaxStart - MinEnd), getConstant(MinStride), false); if (isa<SCEVCouldNotCompute>(MaxBECount)) MaxBECount = BECount; return ExitLimit(BECount, MaxBECount); } /// getNumIterationsInRange - Return the number of iterations of this loop that /// produce values in the specified constant range. Another way of looking at /// this is that it returns the first iteration number where the value is not in /// the range, thus computing the exit count.
If the iteration count can't /// be computed, an instance of SCEVCouldNotCompute is returned. const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range, ScalarEvolution &SE) const { if (Range.isFullSet()) // Infinite loop. return SE.getCouldNotCompute(); // If the start is a non-zero constant, shift the range to simplify things. if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) if (!SC->getValue()->isZero()) { SmallVector<const SCEV *, 4> Operands(op_begin(), op_end()); Operands[0] = SE.getConstant(SC->getType(), 0); const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(), getNoWrapFlags(FlagNW)); if (const SCEVAddRecExpr *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted)) return ShiftedAddRec->getNumIterationsInRange( Range.subtract(SC->getValue()->getValue()), SE); // This is strange and shouldn't happen. return SE.getCouldNotCompute(); } // The only time we can solve this is when we have all constant indices. // Otherwise, we cannot determine the overflow conditions. for (unsigned i = 0, e = getNumOperands(); i != e; ++i) if (!isa<SCEVConstant>(getOperand(i))) return SE.getCouldNotCompute(); // Okay at this point we know that all elements of the chrec are constants and // that the start element is zero. // First check to see if the range contains zero. If not, the first // iteration exits. unsigned BitWidth = SE.getTypeSizeInBits(getType()); if (!Range.contains(APInt(BitWidth, 0))) return SE.getConstant(getType(), 0); if (isAffine()) { // If this is an affine expression then we have this situation: // Solve {0,+,A} in Range === Ax in Range // We know that zero is in the range. If A is positive then we know that // the upper value of the range must be the first possible exit value. // If A is negative then the lower of the range is the last possible loop // value. Also note that we already checked for a full range. APInt One(BitWidth,1); APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue(); APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower(); // The exit value should be (End+A)/A. APInt ExitVal = (End + A).udiv(A); ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); // Evaluate at the exit value. If we really did fall out of the valid // range, then we computed our trip count, otherwise wrap around or other // things must have happened. ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); if (Range.contains(Val->getValue())) return SE.getCouldNotCompute(); // Something strange happened // Ensure that the previous value is in the range. This is a sanity check. assert(Range.contains( EvaluateConstantChrecAtConstant(this, ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) && "Linear scev computation is off in a bad way!"); return SE.getConstant(ExitValue); } else if (isQuadratic()) { // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the // quadratic equation to solve it. To do this, we must frame our problem in // terms of figuring out when zero is crossed, instead of when // Range.getUpper() is crossed. 
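    // Illustrative example (values chosen only for exposition): for the chrec
    // {0,+,1,+,1}, whose iteration n produces n*(n+1)/2 = 0, 1, 3, 6, 10, ...,
    // and the range [0, 5), the first iteration whose value falls outside the
    // range is n = 3 (value 6), so the expected result is 3. The quadratic
    // built below is solved for the point where the chrec crosses
    // Range.getUpper(), and the off-by-one checks afterwards pin down the
    // exact iteration.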
SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end()); NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper())); const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop(), // getNoWrapFlags(FlagNW) FlagAnyWrap); // Next, solve the constructed addrec std::pair<const SCEV *,const SCEV *> Roots = SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE); const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first); const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second); if (R1) { // Pick the smallest positive root value. if (ConstantInt *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT, R1->getValue(), R2->getValue()))) { if (!CB->getZExtValue()) std::swap(R1, R2); // R1 is the minimum root now. // Make sure the root is not off by one. The returned iteration should // not be in the range, but the previous one should be. When solving // for "X*X < 5", for example, we should not return a root of 2. ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this, R1->getValue(), SE); if (Range.contains(R1Val->getValue())) { // The next iteration must be out of the range... ConstantInt *NextVal = ConstantInt::get(SE.getContext(), R1->getValue()->getValue()+1); R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE); if (!Range.contains(R1Val->getValue())) return SE.getConstant(NextVal); return SE.getCouldNotCompute(); // Something strange happened } // If R1 was not in the range, then it is a good return value. Make // sure that R1-1 WAS in the range though, just in case. ConstantInt *NextVal = ConstantInt::get(SE.getContext(), R1->getValue()->getValue()-1); R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE); if (Range.contains(R1Val->getValue())) return R1; return SE.getCouldNotCompute(); // Something strange happened } } } return SE.getCouldNotCompute(); } namespace { struct FindUndefs { bool Found; FindUndefs() : Found(false) {} bool follow(const SCEV *S) { if (const SCEVUnknown *C = dyn_cast<SCEVUnknown>(S)) { if (isa<UndefValue>(C->getValue())) Found = true; } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) { if (isa<UndefValue>(C->getValue())) Found = true; } // Keep looking if we haven't found it yet. return !Found; } bool isDone() const { // Stop recursion if we have found an undef. return Found; } }; } // Return true when S contains at least an undef value. static inline bool containsUndefs(const SCEV *S) { FindUndefs F; SCEVTraversal<FindUndefs> ST(F); ST.visitAll(S); return F.Found; } namespace { // Collect all steps of SCEV expressions. struct SCEVCollectStrides { ScalarEvolution &SE; SmallVectorImpl<const SCEV *> &Strides; SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S) : SE(SE), Strides(S) {} bool follow(const SCEV *S) { if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) Strides.push_back(AR->getStepRecurrence(SE)); return true; } bool isDone() const { return false; } }; // Collect all SCEVUnknown and SCEVMulExpr expressions. struct SCEVCollectTerms { SmallVectorImpl<const SCEV *> &Terms; SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {} bool follow(const SCEV *S) { if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S)) { if (!containsUndefs(S)) Terms.push_back(S); // Stop recursion: once we collected a term, do not walk its operands. return false; } // Keep looking. return true; } bool isDone() const { return false; } }; } /// Find parametric terms in this SCEVAddRecExpr. 
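/// As an example, using the A[i][j][k] access that is worked through in the
/// comment on delinearize() further down: the strides collected from
/// {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k> are
/// (8 * %m * %o), (8 * %o) and 8, and the parametric terms kept from them are
/// the two multiplications (8 * %m * %o) and (8 * %o); the constant stride 8
/// contributes no term on its own.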
void ScalarEvolution::collectParametricTerms(const SCEV *Expr, SmallVectorImpl<const SCEV *> &Terms) { SmallVector<const SCEV *, 4> Strides; SCEVCollectStrides StrideCollector(*this, Strides); visitAll(Expr, StrideCollector); DEBUG({ dbgs() << "Strides:\n"; for (const SCEV *S : Strides) dbgs() << *S << "\n"; }); for (const SCEV *S : Strides) { SCEVCollectTerms TermCollector(Terms); visitAll(S, TermCollector); } DEBUG({ dbgs() << "Terms:\n"; for (const SCEV *T : Terms) dbgs() << *T << "\n"; }); } static bool findArrayDimensionsRec(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &Terms, SmallVectorImpl<const SCEV *> &Sizes) { int Last = Terms.size() - 1; const SCEV *Step = Terms[Last]; // End of recursion. if (Last == 0) { if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) { SmallVector<const SCEV *, 2> Qs; for (const SCEV *Op : M->operands()) if (!isa<SCEVConstant>(Op)) Qs.push_back(Op); Step = SE.getMulExpr(Qs); } Sizes.push_back(Step); return true; } for (const SCEV *&Term : Terms) { // Normalize the terms before the next call to findArrayDimensionsRec. const SCEV *Q, *R; SCEVDivision::divide(SE, Term, Step, &Q, &R); // Bail out when GCD does not evenly divide one of the terms. if (!R->isZero()) return false; Term = Q; } // Remove all SCEVConstants. Terms.erase(std::remove_if(Terms.begin(), Terms.end(), [](const SCEV *E) { return isa<SCEVConstant>(E); }), Terms.end()); if (Terms.size() > 0) if (!findArrayDimensionsRec(SE, Terms, Sizes)) return false; Sizes.push_back(Step); return true; } namespace { struct FindParameter { bool FoundParameter; FindParameter() : FoundParameter(false) {} bool follow(const SCEV *S) { if (isa<SCEVUnknown>(S)) { FoundParameter = true; // Stop recursion: we found a parameter. return false; } // Keep looking. return true; } bool isDone() const { // Stop recursion if we have found a parameter. return FoundParameter; } }; } // Returns true when S contains at least a SCEVUnknown parameter. static inline bool containsParameters(const SCEV *S) { FindParameter F; SCEVTraversal<FindParameter> ST(F); ST.visitAll(S); return F.FoundParameter; } // Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter. static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) { for (const SCEV *T : Terms) if (containsParameters(T)) return true; return false; } // Return the number of product terms in S. static inline int numberOfTerms(const SCEV *S) { if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S)) return Expr->getNumOperands(); return 1; } static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) { if (isa<SCEVConstant>(T)) return nullptr; if (isa<SCEVUnknown>(T)) return T; if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) { SmallVector<const SCEV *, 2> Factors; for (const SCEV *Op : M->operands()) if (!isa<SCEVConstant>(Op)) Factors.push_back(Op); return SE.getMulExpr(Factors); } return T; } /// Return the size of an element read or written by Inst. const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) { Type *Ty; if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) Ty = Store->getValueOperand()->getType(); else if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) Ty = Load->getType(); else return nullptr; Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty)); return getSizeOfExpr(ETy, Ty); } /// Second step of delinearization: compute the array dimensions Sizes from the /// set of Terms extracted from the memory access function of this SCEVAddRec. 
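/// Continuing the A[i][j][k] example: with Terms = {(8 * %m * %o), (8 * %o)}
/// and ElementSize = 8, each term is first divided by the element size,
/// leaving {(%m * %o), %o}; the recursive GCD step then recovers %o and %m as
/// array dimensions, and the element size is appended last, so Sizes ends up
/// as {%m, %o, 8} -- matching the ArrayDecl[UnknownSize][%m][%o] output quoted
/// in the delinearize() comment below.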
void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms, SmallVectorImpl<const SCEV *> &Sizes, const SCEV *ElementSize) const { if (Terms.size() < 1 || !ElementSize) return; // Early return when Terms do not contain parameters: we do not delinearize // non parametric SCEVs. if (!containsParameters(Terms)) return; DEBUG({ dbgs() << "Terms:\n"; for (const SCEV *T : Terms) dbgs() << *T << "\n"; }); // Remove duplicates. std::sort(Terms.begin(), Terms.end()); Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end()); // Put larger terms first. std::sort(Terms.begin(), Terms.end(), [](const SCEV *LHS, const SCEV *RHS) { return numberOfTerms(LHS) > numberOfTerms(RHS); }); ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); // Divide all terms by the element size. for (const SCEV *&Term : Terms) { const SCEV *Q, *R; SCEVDivision::divide(SE, Term, ElementSize, &Q, &R); Term = Q; } SmallVector<const SCEV *, 4> NewTerms; // Remove constant factors. for (const SCEV *T : Terms) if (const SCEV *NewT = removeConstantFactors(SE, T)) NewTerms.push_back(NewT); DEBUG({ dbgs() << "Terms after sorting:\n"; for (const SCEV *T : NewTerms) dbgs() << *T << "\n"; }); if (NewTerms.empty() || !findArrayDimensionsRec(SE, NewTerms, Sizes)) { Sizes.clear(); return; } // The last element to be pushed into Sizes is the size of an element. Sizes.push_back(ElementSize); DEBUG({ dbgs() << "Sizes:\n"; for (const SCEV *S : Sizes) dbgs() << *S << "\n"; }); } /// Third step of delinearization: compute the access functions for the /// Subscripts based on the dimensions in Sizes. void ScalarEvolution::computeAccessFunctions( const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts, SmallVectorImpl<const SCEV *> &Sizes) { // Early exit in case this SCEV is not an affine multivariate function. if (Sizes.empty()) return; if (auto AR = dyn_cast<SCEVAddRecExpr>(Expr)) if (!AR->isAffine()) return; const SCEV *Res = Expr; int Last = Sizes.size() - 1; for (int i = Last; i >= 0; i--) { const SCEV *Q, *R; SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R); DEBUG({ dbgs() << "Res: " << *Res << "\n"; dbgs() << "Sizes[i]: " << *Sizes[i] << "\n"; dbgs() << "Res divided by Sizes[i]:\n"; dbgs() << "Quotient: " << *Q << "\n"; dbgs() << "Remainder: " << *R << "\n"; }); Res = Q; // Do not record the last subscript corresponding to the size of elements in // the array. if (i == Last) { // Bail out if the remainder is too complex. if (isa<SCEVAddRecExpr>(R)) { Subscripts.clear(); Sizes.clear(); return; } continue; } // Record the access function for the current subscript. Subscripts.push_back(R); } // Also push in last position the remainder of the last division: it will be // the access function of the innermost dimension. Subscripts.push_back(Res); std::reverse(Subscripts.begin(), Subscripts.end()); DEBUG({ dbgs() << "Subscripts:\n"; for (const SCEV *S : Subscripts) dbgs() << *S << "\n"; }); } /// Splits the SCEV into two vectors of SCEVs representing the subscripts and /// sizes of an array access. Returns the remainder of the delinearization that /// is the offset start of the array. The SCEV->delinearize algorithm computes /// the multiples of SCEV coefficients: that is a pattern matching of sub /// expressions in the stride and base of a SCEV corresponding to the /// computation of a GCD (greatest common divisor) of base and stride. When /// SCEV->delinearize fails, it returns the SCEV unchanged. 
/// /// For example: when analyzing the memory access A[i][j][k] in this loop nest /// /// void foo(long n, long m, long o, double A[n][m][o]) { /// /// for (long i = 0; i < n; i++) /// for (long j = 0; j < m; j++) /// for (long k = 0; k < o; k++) /// A[i][j][k] = 1.0; /// } /// /// the delinearization input is the following AddRec SCEV: /// /// AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k> /// /// From this SCEV, we are able to say that the base offset of the access is %A /// because it appears as an offset that does not divide any of the strides in /// the loops: /// /// CHECK: Base offset: %A /// /// and then SCEV->delinearize determines the size of some of the dimensions of /// the array as these are the multiples by which the strides are happening: /// /// CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes. /// /// Note that the outermost dimension remains of UnknownSize because there are /// no strides that would help identifying the size of the last dimension: when /// the array has been statically allocated, one could compute the size of that /// dimension by dividing the overall size of the array by the size of the known /// dimensions: %m * %o * 8. /// /// Finally delinearize provides the access functions for the array reference /// that does correspond to A[i][j][k] of the above C testcase: /// /// CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>] /// /// The testcases are checking the output of a function pass: /// DelinearizationPass that walks through all loads and stores of a function /// asking for the SCEV of the memory access with respect to all enclosing /// loops, calling SCEV->delinearize on that and printing the results. void ScalarEvolution::delinearize(const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts, SmallVectorImpl<const SCEV *> &Sizes, const SCEV *ElementSize) { // First step: collect parametric terms. SmallVector<const SCEV *, 4> Terms; collectParametricTerms(Expr, Terms); if (Terms.empty()) return; // Second step: find subscript sizes. findArrayDimensions(Terms, Sizes, ElementSize); if (Sizes.empty()) return; // Third step: compute the access functions for each subscript. computeAccessFunctions(Expr, Subscripts, Sizes); if (Subscripts.empty()) return; DEBUG({ dbgs() << "succeeded to delinearize " << *Expr << "\n"; dbgs() << "ArrayDecl[UnknownSize]"; for (const SCEV *S : Sizes) dbgs() << "[" << *S << "]"; dbgs() << "\nArrayRef"; for (const SCEV *S : Subscripts) dbgs() << "[" << *S << "]"; dbgs() << "\n"; }); } //===----------------------------------------------------------------------===// // SCEVCallbackVH Class Implementation //===----------------------------------------------------------------------===// void ScalarEvolution::SCEVCallbackVH::deleted() { assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) SE->ConstantEvolutionLoopExitValue.erase(PN); SE->ValueExprMap.erase(getValPtr()); // this now dangles! } void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); // Forget all the expressions associated with users of the old value, // so that future queries will recompute the expressions using the new // value. 
Value *Old = getValPtr(); SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end()); SmallPtrSet<User *, 8> Visited; while (!Worklist.empty()) { User *U = Worklist.pop_back_val(); // Deleting the Old value will cause this to dangle. Postpone // that until everything else is done. if (U == Old) continue; if (!Visited.insert(U).second) continue; if (PHINode *PN = dyn_cast<PHINode>(U)) SE->ConstantEvolutionLoopExitValue.erase(PN); SE->ValueExprMap.erase(U); Worklist.insert(Worklist.end(), U->user_begin(), U->user_end()); } // Delete the Old value. if (PHINode *PN = dyn_cast<PHINode>(Old)) SE->ConstantEvolutionLoopExitValue.erase(PN); SE->ValueExprMap.erase(Old); // this now dangles! } ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) : CallbackVH(V), SE(se) {} //===----------------------------------------------------------------------===// // ScalarEvolution Class Implementation //===----------------------------------------------------------------------===// ScalarEvolution::ScalarEvolution() : FunctionPass(ID), WalkingBEDominatingConds(false), ValuesAtScopes(64), LoopDispositions(64), BlockDispositions(64), FirstUnknown(nullptr) { initializeScalarEvolutionPass(*PassRegistry::getPassRegistry()); } bool ScalarEvolution::runOnFunction(Function &F) { this->F = &F; AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); return false; } void ScalarEvolution::releaseMemory() { // Iterate through all the SCEVUnknown instances and call their // destructors, so that they release their references to their values. for (SCEVUnknown *U = FirstUnknown; U; U = U->Next) U->~SCEVUnknown(); FirstUnknown = nullptr; ValueExprMap.clear(); // Free any extra memory created for ExitNotTakenInfo in the unlikely event // that a loop had multiple computable exits. 
for (DenseMap<const Loop*, BackedgeTakenInfo>::iterator I = BackedgeTakenCounts.begin(), E = BackedgeTakenCounts.end(); I != E; ++I) { I->second.clear(); } assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); BackedgeTakenCounts.clear(); ConstantEvolutionLoopExitValue.clear(); ValuesAtScopes.clear(); LoopDispositions.clear(); BlockDispositions.clear(); UnsignedRanges.clear(); SignedRanges.clear(); UniqueSCEVs.clear(); SCEVAllocator.Reset(); } void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesAll(); AU.addRequired<AssumptionCacheTracker>(); AU.addRequiredTransitive<LoopInfoWrapperPass>(); AU.addRequiredTransitive<DominatorTreeWrapperPass>(); AU.addRequired<TargetLibraryInfoWrapperPass>(); AU.addRequired<DxilValueCache>(); // HLSL Change } bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); } static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, const Loop *L) { // Print all inner loops first for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I) PrintLoopInfo(OS, SE, *I); OS << "Loop "; L->getHeader()->printAsOperand(OS, /*PrintType=*/false); OS << ": "; SmallVector<BasicBlock *, 8> ExitBlocks; L->getExitBlocks(ExitBlocks); if (ExitBlocks.size() != 1) OS << "<multiple exits> "; if (SE->hasLoopInvariantBackedgeTakenCount(L)) { OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L); } else { OS << "Unpredictable backedge-taken count. "; } OS << "\n" "Loop "; L->getHeader()->printAsOperand(OS, /*PrintType=*/false); OS << ": "; if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) { OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L); } else { OS << "Unpredictable max backedge-taken count. "; } OS << "\n"; } void ScalarEvolution::print(raw_ostream &OS, const Module *) const { // ScalarEvolution's implementation of the print method is to print // out SCEV values of all instructions that are interesting. Doing // this potentially causes it to create new SCEV objects though, // which technically conflicts with the const qualifier. This isn't // observable from outside the class though, so casting away the // const isn't dangerous. 
ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); OS << "Classifying expressions for: "; F->printAsOperand(OS, /*PrintType=*/false); OS << "\n"; for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) if (isSCEVable(I->getType()) && !isa<CmpInst>(*I)) { OS << *I << '\n'; OS << " --> "; const SCEV *SV = SE.getSCEV(&*I); SV->print(OS); if (!isa<SCEVCouldNotCompute>(SV)) { OS << " U: "; SE.getUnsignedRange(SV).print(OS); OS << " S: "; SE.getSignedRange(SV).print(OS); } const Loop *L = LI->getLoopFor((*I).getParent()); const SCEV *AtUse = SE.getSCEVAtScope(SV, L); if (AtUse != SV) { OS << " --> "; AtUse->print(OS); if (!isa<SCEVCouldNotCompute>(AtUse)) { OS << " U: "; SE.getUnsignedRange(AtUse).print(OS); OS << " S: "; SE.getSignedRange(AtUse).print(OS); } } if (L) { OS << "\t\t" "Exits: "; const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); if (!SE.isLoopInvariant(ExitValue, L)) { OS << "<<Unknown>>"; } else { OS << *ExitValue; } } OS << "\n"; } OS << "Determining loop execution counts for: "; F->printAsOperand(OS, /*PrintType=*/false); OS << "\n"; for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I) PrintLoopInfo(OS, &SE, *I); } ScalarEvolution::LoopDisposition ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { auto &Values = LoopDispositions[S]; for (auto &V : Values) { if (V.getPointer() == L) return V.getInt(); } Values.emplace_back(L, LoopVariant); LoopDisposition D = computeLoopDisposition(S, L); auto &Values2 = LoopDispositions[S]; for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { if (V.getPointer() == L) { V.setInt(D); break; } } return D; } ScalarEvolution::LoopDisposition ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { switch (static_cast<SCEVTypes>(S->getSCEVType())) { case scConstant: return LoopInvariant; case scTruncate: case scZeroExtend: case scSignExtend: return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L); case scAddRecExpr: { const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); // If L is the addrec's loop, it's computable. if (AR->getLoop() == L) return LoopComputable; // Add recurrences are never invariant in the function-body (null loop). if (!L) return LoopVariant; // This recurrence is variant w.r.t. L if L contains AR's loop. if (L->contains(AR->getLoop())) return LoopVariant; // This recurrence is invariant w.r.t. L if AR's loop contains L. if (AR->getLoop()->contains(L)) return LoopInvariant; // This recurrence is variant w.r.t. L if any of its operands // are variant. for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end(); I != E; ++I) if (!isLoopInvariant(*I, L)) return LoopVariant; // Otherwise it's loop-invariant. return LoopInvariant; } case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr: { const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S); bool HasVarying = false; for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end(); I != E; ++I) { LoopDisposition D = getLoopDisposition(*I, L); if (D == LoopVariant) return LoopVariant; if (D == LoopComputable) HasVarying = true; } return HasVarying ? LoopComputable : LoopInvariant; } case scUDivExpr: { const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); if (LD == LoopVariant) return LoopVariant; LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); if (RD == LoopVariant) return LoopVariant; return (LD == LoopInvariant && RD == LoopInvariant) ? 
LoopInvariant : LoopComputable; } case scUnknown: // All non-instruction values are loop invariant. All instructions are loop // invariant if they are not contained in the specified loop. // Instructions are never considered invariant in the function body // (null loop) because they are defined within the "loop". if (Instruction *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) return (L && !L->contains(I)) ? LoopInvariant : LoopVariant; return LoopInvariant; case scCouldNotCompute: llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); } llvm_unreachable("Unknown SCEV kind!"); } bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { return getLoopDisposition(S, L) == LoopInvariant; } bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { return getLoopDisposition(S, L) == LoopComputable; } ScalarEvolution::BlockDisposition ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { auto &Values = BlockDispositions[S]; for (auto &V : Values) { if (V.getPointer() == BB) return V.getInt(); } Values.emplace_back(BB, DoesNotDominateBlock); BlockDisposition D = computeBlockDisposition(S, BB); auto &Values2 = BlockDispositions[S]; for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { if (V.getPointer() == BB) { V.setInt(D); break; } } return D; } ScalarEvolution::BlockDisposition ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { switch (static_cast<SCEVTypes>(S->getSCEVType())) { case scConstant: return ProperlyDominatesBlock; case scTruncate: case scZeroExtend: case scSignExtend: return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB); case scAddRecExpr: { // This uses a "dominates" query instead of "properly dominates" query // to test for proper dominance too, because the instruction which // produces the addrec's value is a PHI, and a PHI effectively properly // dominates its entire containing block. const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); if (!DT->dominates(AR->getLoop()->getHeader(), BB)) return DoesNotDominateBlock; } // FALL THROUGH into SCEVNAryExpr handling. LLVM_FALLTHROUGH; // HLSL Change case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr: { const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S); bool Proper = true; for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end(); I != E; ++I) { BlockDisposition D = getBlockDisposition(*I, BB); if (D == DoesNotDominateBlock) return DoesNotDominateBlock; if (D == DominatesBlock) Proper = false; } return Proper ? ProperlyDominatesBlock : DominatesBlock; } case scUDivExpr: { const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS(); BlockDisposition LD = getBlockDisposition(LHS, BB); if (LD == DoesNotDominateBlock) return DoesNotDominateBlock; BlockDisposition RD = getBlockDisposition(RHS, BB); if (RD == DoesNotDominateBlock) return DoesNotDominateBlock; return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ? 
ProperlyDominatesBlock : DominatesBlock; } case scUnknown: if (Instruction *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) { if (I->getParent() == BB) return DominatesBlock; if (DT->properlyDominates(I->getParent(), BB)) return ProperlyDominatesBlock; return DoesNotDominateBlock; } return ProperlyDominatesBlock; case scCouldNotCompute: llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); } llvm_unreachable("Unknown SCEV kind!"); } bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) { return getBlockDisposition(S, BB) >= DominatesBlock; } bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) { return getBlockDisposition(S, BB) == ProperlyDominatesBlock; } namespace { // Search for a SCEV expression node within an expression tree. // Implements SCEVTraversal::Visitor. struct SCEVSearch { const SCEV *Node; bool IsFound; SCEVSearch(const SCEV *N): Node(N), IsFound(false) {} bool follow(const SCEV *S) { IsFound |= (S == Node); return !IsFound; } bool isDone() const { return IsFound; } }; } bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const { SCEVSearch Search(Op); visitAll(S, Search); return Search.IsFound; } void ScalarEvolution::forgetMemoizedResults(const SCEV *S) { ValuesAtScopes.erase(S); LoopDispositions.erase(S); BlockDispositions.erase(S); UnsignedRanges.erase(S); SignedRanges.erase(S); for (DenseMap<const Loop*, BackedgeTakenInfo>::iterator I = BackedgeTakenCounts.begin(), E = BackedgeTakenCounts.end(); I != E; ) { BackedgeTakenInfo &BEInfo = I->second; if (BEInfo.hasOperand(S, this)) { BEInfo.clear(); BackedgeTakenCounts.erase(I++); } else ++I; } } typedef DenseMap<const Loop *, std::string> VerifyMap; /// replaceSubString - Replaces all occurrences of From in Str with To. static void replaceSubString(std::string &Str, StringRef From, StringRef To) { size_t Pos = 0; while ((Pos = Str.find(From, Pos)) != std::string::npos) { Str.replace(Pos, From.size(), To.data(), To.size()); Pos += To.size(); } } /// getLoopBackedgeTakenCounts - Helper method for verifyAnalysis. static void getLoopBackedgeTakenCounts(Loop *L, VerifyMap &Map, ScalarEvolution &SE) { for (Loop::reverse_iterator I = L->rbegin(), E = L->rend(); I != E; ++I) { getLoopBackedgeTakenCounts(*I, Map, SE); // recurse. std::string &S = Map[L]; if (S.empty()) { raw_string_ostream OS(S); SE.getBackedgeTakenCount(L)->print(OS); // false and 0 are semantically equivalent. This can happen in dead loops. replaceSubString(OS.str(), "false", "0"); // Remove wrap flags, their use in SCEV is highly fragile. // FIXME: Remove this when SCEV gets smarter about them. replaceSubString(OS.str(), "<nw>", ""); replaceSubString(OS.str(), "<nsw>", ""); replaceSubString(OS.str(), "<nuw>", ""); } } } void ScalarEvolution::verifyAnalysis() const { if (!VerifySCEV) return; ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); // Gather stringified backedge taken counts for all loops using SCEV's caches. // FIXME: It would be much better to store actual values instead of strings, // but SCEV pointers will change if we drop the caches. VerifyMap BackedgeDumpsOld, BackedgeDumpsNew; for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I) getLoopBackedgeTakenCounts(*I, BackedgeDumpsOld, SE); // Gather stringified backedge taken counts for all loops without using // SCEV's caches. 
SE.releaseMemory(); for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I) getLoopBackedgeTakenCounts(*I, BackedgeDumpsNew, SE); // Now compare whether they're the same with and without caches. This allows // verifying that no pass changed the cache. assert(BackedgeDumpsOld.size() == BackedgeDumpsNew.size() && "New loops suddenly appeared!"); for (VerifyMap::iterator OldI = BackedgeDumpsOld.begin(), OldE = BackedgeDumpsOld.end(), NewI = BackedgeDumpsNew.begin(); OldI != OldE; ++OldI, ++NewI) { assert(OldI->first == NewI->first && "Loop order changed!"); // Compare the stringified SCEVs. We don't care if undef backedgetaken count // changes. // FIXME: We currently ignore SCEV changes from/to CouldNotCompute. This // means that a pass is buggy or SCEV has to learn a new pattern but is // usually not harmful. if (OldI->second != NewI->second && OldI->second.find("undef") == std::string::npos && NewI->second.find("undef") == std::string::npos && OldI->second != "***COULDNOTCOMPUTE***" && NewI->second != "***COULDNOTCOMPUTE***") { dbgs() << "SCEVValidator: SCEV for loop '" << OldI->first->getHeader()->getName() << "' changed from '" << OldI->second << "' to '" << NewI->second << "'!\n"; std::abort(); } } // TODO: Verify more things. }
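// A minimal usage sketch (illustrative only; "CountLoopTrips" is a
// hypothetical pass name, not part of this file) of how a client of the
// legacy pass manager requests this analysis and queries the backedge-taken
// counts computed above:
//
//   struct CountLoopTrips : public FunctionPass {
//     static char ID;
//     CountLoopTrips() : FunctionPass(ID) {}
//     void getAnalysisUsage(AnalysisUsage &AU) const override {
//       AU.setPreservesAll();
//       AU.addRequired<ScalarEvolution>();
//       AU.addRequired<LoopInfoWrapperPass>();
//     }
//     bool runOnFunction(Function &F) override {
//       ScalarEvolution &SE = getAnalysis<ScalarEvolution>();
//       LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
//       for (Loop *L : LI)
//         if (SE.hasLoopInvariantBackedgeTakenCount(L))
//           dbgs() << "backedge-taken count: "
//                  << *SE.getBackedgeTakenCount(L) << "\n";
//       return false;
//     }
//   };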
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/TypeBasedAliasAnalysis.cpp
//===- TypeBasedAliasAnalysis.cpp - Type-Based Alias Analysis -------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the TypeBasedAliasAnalysis pass, which implements // metadata-based TBAA. // // In LLVM IR, memory does not have types, so LLVM's own type system is not // suitable for doing TBAA. Instead, metadata is added to the IR to describe // a type system of a higher level language. This can be used to implement // typical C/C++ TBAA, but it can also be used to implement custom alias // analysis behavior for other languages. // // We now support two types of metadata format: scalar TBAA and struct-path // aware TBAA. After all testing cases are upgraded to use struct-path aware // TBAA and we can auto-upgrade existing bc files, the support for scalar TBAA // can be dropped. // // The scalar TBAA metadata format is very simple. TBAA MDNodes have up to // three fields, e.g.: // !0 = metadata !{ metadata !"an example type tree" } // !1 = metadata !{ metadata !"int", metadata !0 } // !2 = metadata !{ metadata !"float", metadata !0 } // !3 = metadata !{ metadata !"const float", metadata !2, i64 1 } // // The first field is an identity field. It can be any value, usually // an MDString, which uniquely identifies the type. The most important // name in the tree is the name of the root node. Two trees with // different root node names are entirely disjoint, even if they // have leaves with common names. // // The second field identifies the type's parent node in the tree, or // is null or omitted for a root node. A type is considered to alias // all of its descendants and all of its ancestors in the tree. Also, // a type is considered to alias all types in other trees, so that // bitcode produced from multiple front-ends is handled conservatively. // // If the third field is present, it's an integer which if equal to 1 // indicates that the type is "constant" (meaning pointsToConstantMemory // should return true; see // http://llvm.org/docs/AliasAnalysis.html#OtherItfs). // // With struct-path aware TBAA, the MDNodes attached to an instruction using // "!tbaa" are called path tag nodes. // // The path tag node has 4 fields with the last field being optional. // // The first field is the base type node; it can be a struct type node // or a scalar type node. The second field is the access type node; it // must be a scalar type node. The third field is the offset into the base type. // The last field has the same meaning as the last field of our scalar TBAA: // it's an integer which if equal to 1 indicates that the access is "constant". // // The struct type node has a name and a list of pairs, one pair for each member // of the struct. The first element of each pair is a type node (a struct type // node or a scalar type node), specifying the type of the member; the second // element of each pair is the offset of the member. // // Given an example // typedef struct { // short s; // } A; // typedef struct { // uint16_t s; // A a; // } B; // // For an access to B.a.s, we attach !5 (a path tag node) to the load/store // instruction. The base type is !4 (struct B), the access type is !2 (scalar // type short) and the offset is 4.
// // !0 = metadata !{metadata !"Simple C/C++ TBAA"} // !1 = metadata !{metadata !"omnipotent char", metadata !0} // Scalar type node // !2 = metadata !{metadata !"short", metadata !1} // Scalar type node // !3 = metadata !{metadata !"A", metadata !2, i64 0} // Struct type node // !4 = metadata !{metadata !"B", metadata !2, i64 0, metadata !3, i64 4} // // Struct type node // !5 = metadata !{metadata !4, metadata !2, i64 4} // Path tag node // // The struct type nodes and the scalar type nodes form a type DAG. // Root (!0) // char (!1) -- edge to Root // short (!2) -- edge to char // A (!3) -- edge with offset 0 to short // B (!4) -- edge with offset 0 to short and edge with offset 4 to A // // To check if two tags (tagX and tagY) can alias, we start from the base type // of tagX, follow the edge with the correct offset in the type DAG and adjust // the offset until we reach the base type of tagY or until we reach the Root // node. // If we reach the base type of tagY, compare the adjusted offset with // offset of tagY, return Alias if the offsets are the same, return NoAlias // otherwise. // If we reach the Root node, perform the above starting from base type of tagY // to see if we reach base type of tagX. // // If they have different roots, they're part of different potentially // unrelated type systems, so we return Alias to be conservative. // If neither node is an ancestor of the other and they have the same root, // then we say NoAlias. // // TODO: The current metadata format doesn't support struct // fields. For example: // struct X { // double d; // int i; // }; // void foo(struct X *x, struct X *y, double *p) { // *x = *y; // *p = 0.0; // } // Struct X has a double member, so the store to *x can alias the store to *p. // Currently it's not possible to precisely describe all the things struct X // aliases, so struct assignments must use conservative TBAA nodes. There's // no scheme for attaching metadata to @llvm.memcpy yet either. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/Passes.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/IR/Constants.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Metadata.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" #include "llvm/ADT/SetVector.h" using namespace llvm; // A handy option for disabling TBAA functionality. The same effect can also be // achieved by stripping the !tbaa tags from IR, but this option is sometimes // more convenient. #if 0 // HLSL Change Starts - option pending static cl::opt<bool> EnableTBAA("enable-tbaa", cl::init(true)); #else static const bool EnableTBAA = true; #endif // HLSL Change Ends namespace { /// TBAANode - This is a simple wrapper around an MDNode which provides a /// higher-level interface by hiding the details of how alias analysis /// information is encoded in its operands. class TBAANode { const MDNode *Node; public: TBAANode() : Node(nullptr) {} explicit TBAANode(const MDNode *N) : Node(N) {} /// getNode - Get the MDNode for this TBAANode. const MDNode *getNode() const { return Node; } /// getParent - Get this TBAANode's Alias tree parent. TBAANode getParent() const { if (Node->getNumOperands() < 2) return TBAANode(); MDNode *P = dyn_cast_or_null<MDNode>(Node->getOperand(1)); if (!P) return TBAANode(); // Ok, this node has a valid parent. Return it. 
return TBAANode(P); } /// TypeIsImmutable - Test if this TBAANode represents a type for objects /// which are not modified (by any means) in the context where this /// AliasAnalysis is relevant. bool TypeIsImmutable() const { if (Node->getNumOperands() < 3) return false; ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(Node->getOperand(2)); if (!CI) return false; return CI->getValue()[0]; } }; /// This is a simple wrapper around an MDNode which provides a /// higher-level interface by hiding the details of how alias analysis /// information is encoded in its operands. class TBAAStructTagNode { /// This node should be created with createTBAAStructTagNode. const MDNode *Node; public: explicit TBAAStructTagNode(const MDNode *N) : Node(N) {} /// Get the MDNode for this TBAAStructTagNode. const MDNode *getNode() const { return Node; } const MDNode *getBaseType() const { return dyn_cast_or_null<MDNode>(Node->getOperand(0)); } const MDNode *getAccessType() const { return dyn_cast_or_null<MDNode>(Node->getOperand(1)); } uint64_t getOffset() const { return mdconst::extract<ConstantInt>(Node->getOperand(2))->getZExtValue(); } /// TypeIsImmutable - Test if this TBAAStructTagNode represents a type for /// objects which are not modified (by any means) in the context where this /// AliasAnalysis is relevant. bool TypeIsImmutable() const { if (Node->getNumOperands() < 4) return false; ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(Node->getOperand(3)); if (!CI) return false; return CI->getValue()[0]; } }; /// This is a simple wrapper around an MDNode which provides a /// higher-level interface by hiding the details of how alias analysis /// information is encoded in its operands. class TBAAStructTypeNode { /// This node should be created with createTBAAStructTypeNode. const MDNode *Node; public: TBAAStructTypeNode() : Node(nullptr) {} explicit TBAAStructTypeNode(const MDNode *N) : Node(N) {} /// Get the MDNode for this TBAAStructTypeNode. const MDNode *getNode() const { return Node; } /// Get this TBAAStructTypeNode's field in the type DAG with /// given offset. Update the offset to be relative to the field type. TBAAStructTypeNode getParent(uint64_t &Offset) const { // Parent can be omitted for the root node. if (Node->getNumOperands() < 2) return TBAAStructTypeNode(); // Fast path for a scalar type node and a struct type node with a single // field. if (Node->getNumOperands() <= 3) { uint64_t Cur = Node->getNumOperands() == 2 ? 0 : mdconst::extract<ConstantInt>(Node->getOperand(2)) ->getZExtValue(); Offset -= Cur; MDNode *P = dyn_cast_or_null<MDNode>(Node->getOperand(1)); if (!P) return TBAAStructTypeNode(); return TBAAStructTypeNode(P); } // Assume the offsets are in order. We return the previous field if // the current offset is bigger than the given offset. unsigned TheIdx = 0; for (unsigned Idx = 1; Idx < Node->getNumOperands(); Idx += 2) { uint64_t Cur = mdconst::extract<ConstantInt>(Node->getOperand(Idx + 1)) ->getZExtValue(); if (Cur > Offset) { assert(Idx >= 3 && "TBAAStructTypeNode::getParent should have an offset match!"); TheIdx = Idx - 2; break; } } // Move along the last field. 
if (TheIdx == 0) TheIdx = Node->getNumOperands() - 2; uint64_t Cur = mdconst::extract<ConstantInt>(Node->getOperand(TheIdx + 1)) ->getZExtValue(); Offset -= Cur; MDNode *P = dyn_cast_or_null<MDNode>(Node->getOperand(TheIdx)); if (!P) return TBAAStructTypeNode(); return TBAAStructTypeNode(P); } }; } namespace { /// TypeBasedAliasAnalysis - This is a simple alias analysis /// implementation that uses TypeBased to answer queries. class TypeBasedAliasAnalysis : public ImmutablePass, public AliasAnalysis { public: static char ID; // Class identification, replacement for typeinfo TypeBasedAliasAnalysis() : ImmutablePass(ID) { initializeTypeBasedAliasAnalysisPass(*PassRegistry::getPassRegistry()); } bool doInitialization(Module &M) override; /// getAdjustedAnalysisPointer - This method is used when a pass implements /// an analysis interface through multiple inheritance. If needed, it /// should override this to adjust the this pointer as needed for the /// specified pass info. void *getAdjustedAnalysisPointer(const void *PI) override { if (PI == &AliasAnalysis::ID) return (AliasAnalysis*)this; return this; } bool Aliases(const MDNode *A, const MDNode *B) const; bool PathAliases(const MDNode *A, const MDNode *B) const; private: void getAnalysisUsage(AnalysisUsage &AU) const override; AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) override; bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal) override; ModRefBehavior getModRefBehavior(ImmutableCallSite CS) override; ModRefBehavior getModRefBehavior(const Function *F) override; ModRefResult getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) override; ModRefResult getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) override; }; } // End of anonymous namespace // Register this pass... char TypeBasedAliasAnalysis::ID = 0; INITIALIZE_AG_PASS(TypeBasedAliasAnalysis, AliasAnalysis, "tbaa", "Type-Based Alias Analysis", false, true, false) ImmutablePass *llvm::createTypeBasedAliasAnalysisPass() { return new TypeBasedAliasAnalysis(); } bool TypeBasedAliasAnalysis::doInitialization(Module &M) { InitializeAliasAnalysis(this, &M.getDataLayout()); return true; } void TypeBasedAliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesAll(); AliasAnalysis::getAnalysisUsage(AU); } /// Check the first operand of the tbaa tag node, if it is a MDNode, we treat /// it as struct-path aware TBAA format, otherwise, we treat it as scalar TBAA /// format. static bool isStructPathTBAA(const MDNode *MD) { // Anonymous TBAA root starts with a MDNode and dragonegg uses it as // a TBAA tag. return isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3; } /// Aliases - Test whether the type represented by A may alias the /// type represented by B. bool TypeBasedAliasAnalysis::Aliases(const MDNode *A, const MDNode *B) const { // Make sure that both MDNodes are struct-path aware. if (isStructPathTBAA(A) && isStructPathTBAA(B)) return PathAliases(A, B); // Keep track of the root node for A and B. TBAANode RootA, RootB; // Climb the tree from A to see if we reach B. for (TBAANode T(A); ; ) { if (T.getNode() == B) // B is an ancestor of A. return true; RootA = T; T = T.getParent(); if (!T.getNode()) break; } // Climb the tree from B to see if we reach A. for (TBAANode T(B); ; ) { if (T.getNode() == A) // A is an ancestor of B. return true; RootB = T; T = T.getParent(); if (!T.getNode()) break; } // Neither node is an ancestor of the other. 
// If they have different roots, they're part of different potentially // unrelated type systems, so we must be conservative. if (RootA.getNode() != RootB.getNode()) return true; // If they have the same root, then we've proved there's no alias. return false; } /// Test whether the struct-path tag represented by A may alias the /// struct-path tag represented by B. bool TypeBasedAliasAnalysis::PathAliases(const MDNode *A, const MDNode *B) const { // Verify that both input nodes are struct-path aware. assert(isStructPathTBAA(A) && "MDNode A is not struct-path aware."); assert(isStructPathTBAA(B) && "MDNode B is not struct-path aware."); // Keep track of the root node for A and B. TBAAStructTypeNode RootA, RootB; TBAAStructTagNode TagA(A), TagB(B); // TODO: We need to check if AccessType of TagA encloses AccessType of // TagB to support aggregate AccessType. If yes, return true. // Start from the base type of A, follow the edge with the correct offset in // the type DAG and adjust the offset until we reach the base type of B or // until we reach the Root node. // Compare the adjusted offset once we have the same base. // Climb the type DAG from base type of A to see if we reach base type of B. const MDNode *BaseA = TagA.getBaseType(); const MDNode *BaseB = TagB.getBaseType(); uint64_t OffsetA = TagA.getOffset(), OffsetB = TagB.getOffset(); for (TBAAStructTypeNode T(BaseA); ; ) { if (T.getNode() == BaseB) // Base type of A encloses base type of B, check if the offsets match. return OffsetA == OffsetB; RootA = T; // Follow the edge with the correct offset, OffsetA will be adjusted to // be relative to the field type. T = T.getParent(OffsetA); if (!T.getNode()) break; } // Reset OffsetA and climb the type DAG from base type of B to see if we reach // base type of A. OffsetA = TagA.getOffset(); for (TBAAStructTypeNode T(BaseB); ; ) { if (T.getNode() == BaseA) // Base type of B encloses base type of A, check if the offsets match. return OffsetA == OffsetB; RootB = T; // Follow the edge with the correct offset, OffsetB will be adjusted to // be relative to the field type. T = T.getParent(OffsetB); if (!T.getNode()) break; } // Neither node is an ancestor of the other. // If they have different roots, they're part of different potentially // unrelated type systems, so we must be conservative. if (RootA.getNode() != RootB.getNode()) return true; // If they have the same root, then we've proved there's no alias. return false; } AliasResult TypeBasedAliasAnalysis::alias(const MemoryLocation &LocA, const MemoryLocation &LocB) { if (!EnableTBAA) return AliasAnalysis::alias(LocA, LocB); // Get the attached MDNodes. If either value lacks a tbaa MDNode, we must // be conservative. const MDNode *AM = LocA.AATags.TBAA; if (!AM) return AliasAnalysis::alias(LocA, LocB); const MDNode *BM = LocB.AATags.TBAA; if (!BM) return AliasAnalysis::alias(LocA, LocB); // If they may alias, chain to the next AliasAnalysis. if (Aliases(AM, BM)) return AliasAnalysis::alias(LocA, LocB); // Otherwise return a definitive result. return NoAlias; } bool TypeBasedAliasAnalysis::pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal) { if (!EnableTBAA) return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal); const MDNode *M = Loc.AATags.TBAA; if (!M) return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal); // If this is an "immutable" type, we can assume the pointer is pointing // to constant memory. 
if ((!isStructPathTBAA(M) && TBAANode(M).TypeIsImmutable()) || (isStructPathTBAA(M) && TBAAStructTagNode(M).TypeIsImmutable())) return true; return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal); } AliasAnalysis::ModRefBehavior TypeBasedAliasAnalysis::getModRefBehavior(ImmutableCallSite CS) { if (!EnableTBAA) return AliasAnalysis::getModRefBehavior(CS); ModRefBehavior Min = UnknownModRefBehavior; // If this is an "immutable" type, we can assume the call doesn't write // to memory. if (const MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_tbaa)) if ((!isStructPathTBAA(M) && TBAANode(M).TypeIsImmutable()) || (isStructPathTBAA(M) && TBAAStructTagNode(M).TypeIsImmutable())) Min = OnlyReadsMemory; return ModRefBehavior(AliasAnalysis::getModRefBehavior(CS) & Min); } AliasAnalysis::ModRefBehavior TypeBasedAliasAnalysis::getModRefBehavior(const Function *F) { // Functions don't have metadata. Just chain to the next implementation. return AliasAnalysis::getModRefBehavior(F); } AliasAnalysis::ModRefResult TypeBasedAliasAnalysis::getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) { if (!EnableTBAA) return AliasAnalysis::getModRefInfo(CS, Loc); if (const MDNode *L = Loc.AATags.TBAA) if (const MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_tbaa)) if (!Aliases(L, M)) return NoModRef; return AliasAnalysis::getModRefInfo(CS, Loc); } AliasAnalysis::ModRefResult TypeBasedAliasAnalysis::getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) { if (!EnableTBAA) return AliasAnalysis::getModRefInfo(CS1, CS2); if (const MDNode *M1 = CS1.getInstruction()->getMetadata(LLVMContext::MD_tbaa)) if (const MDNode *M2 = CS2.getInstruction()->getMetadata(LLVMContext::MD_tbaa)) if (!Aliases(M1, M2)) return NoModRef; return AliasAnalysis::getModRefInfo(CS1, CS2); } bool MDNode::isTBAAVtableAccess() const { if (!isStructPathTBAA(this)) { if (getNumOperands() < 1) return false; if (MDString *Tag1 = dyn_cast<MDString>(getOperand(0))) { if (Tag1->getString() == "vtable pointer") return true; } return false; } // For struct-path aware TBAA, we use the access type of the tag. if (getNumOperands() < 2) return false; MDNode *Tag = cast_or_null<MDNode>(getOperand(1)); if (!Tag) return false; if (MDString *Tag1 = dyn_cast<MDString>(Tag->getOperand(0))) { if (Tag1->getString() == "vtable pointer") return true; } return false; } MDNode *MDNode::getMostGenericTBAA(MDNode *A, MDNode *B) { if (!A || !B) return nullptr; if (A == B) return A; // For struct-path aware TBAA, we use the access type of the tag. bool StructPath = isStructPathTBAA(A) && isStructPathTBAA(B); if (StructPath) { A = cast_or_null<MDNode>(A->getOperand(1)); if (!A) return nullptr; B = cast_or_null<MDNode>(B->getOperand(1)); if (!B) return nullptr; } SmallSetVector<MDNode *, 4> PathA; MDNode *T = A; while (T) { if (PathA.count(T)) report_fatal_error("Cycle found in TBAA metadata."); PathA.insert(T); T = T->getNumOperands() >= 2 ? cast_or_null<MDNode>(T->getOperand(1)) : nullptr; } SmallSetVector<MDNode *, 4> PathB; T = B; while (T) { if (PathB.count(T)) report_fatal_error("Cycle found in TBAA metadata."); PathB.insert(T); T = T->getNumOperands() >= 2 ? cast_or_null<MDNode>(T->getOperand(1)) : nullptr; } int IA = PathA.size() - 1; int IB = PathB.size() - 1; MDNode *Ret = nullptr; while (IA >= 0 && IB >=0) { if (PathA[IA] == PathB[IB]) Ret = PathA[IA]; else break; --IA; --IB; } if (!StructPath) return Ret; if (!Ret) return nullptr; // We need to convert from a type node to a tag node. 
Type *Int64 = IntegerType::get(A->getContext(), 64); Metadata *Ops[3] = {Ret, Ret, ConstantAsMetadata::get(ConstantInt::get(Int64, 0))}; return MDNode::get(A->getContext(), Ops); } void Instruction::getAAMetadata(AAMDNodes &N, bool Merge) const { if (Merge) N.TBAA = MDNode::getMostGenericTBAA(N.TBAA, getMetadata(LLVMContext::MD_tbaa)); else N.TBAA = getMetadata(LLVMContext::MD_tbaa); if (Merge) N.Scope = MDNode::getMostGenericAliasScope( N.Scope, getMetadata(LLVMContext::MD_alias_scope)); else N.Scope = getMetadata(LLVMContext::MD_alias_scope); if (Merge) N.NoAlias = MDNode::intersect(N.NoAlias, getMetadata(LLVMContext::MD_noalias)); else N.NoAlias = getMetadata(LLVMContext::MD_noalias); }
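// ---------------------------------------------------------------------------
// Editorial example: the pass above consumes struct-path TBAA access tags of
// the form !{BaseType, AccessType, Offset}. Below is a minimal, hypothetical
// sketch of building such a tag with MDBuilder and attaching it to a load so
// that TypeBasedAliasAnalysis::alias() can disambiguate the access. The helper
// name tagLoadWithScalarTBAA is invented for illustration; the sketch assumes
// the MDBuilder API available in this LLVM version and the
// "using namespace llvm" already in effect in this file.
#include "llvm/IR/Instructions.h"
#include "llvm/IR/MDBuilder.h"

static void tagLoadWithScalarTBAA(LoadInst *LI) {
  MDBuilder MDB(LI->getContext());
  MDNode *Root = MDB.createTBAARoot("example tbaa root");
  MDNode *IntTy = MDB.createTBAAScalarTypeNode("int", Root);
  // Struct-path access tag: base type, access type, byte offset into the base.
  MDNode *Tag = MDB.createTBAAStructTagNode(IntTy, IntTy, /*Offset=*/0);
  LI->setMetadata(LLVMContext::MD_tbaa, Tag);
}
// ---------------------------------------------------------------------------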
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/Loads.cpp
//===- Loads.cpp - Local load analysis ------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines simple local analyses for load instructions. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/Loads.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/GlobalAlias.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/IR/Operator.h" using namespace llvm; /// \brief Test if A and B will obviously have the same value. /// /// This includes recognizing that %t0 and %t1 will have the same /// value in code like this: /// \code /// %t0 = getelementptr \@a, 0, 3 /// store i32 0, i32* %t0 /// %t1 = getelementptr \@a, 0, 3 /// %t2 = load i32* %t1 /// \endcode /// static bool AreEquivalentAddressValues(const Value *A, const Value *B) { // Test if the values are trivially equivalent. if (A == B) return true; // Test if the values come from identical arithmetic instructions. // Use isIdenticalToWhenDefined instead of isIdenticalTo because // this function is only used when one address use dominates the // other, which means that they'll always either have the same // value or one of them will have an undefined value. if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) || isa<GetElementPtrInst>(A)) if (const Instruction *BI = dyn_cast<Instruction>(B)) if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI)) return true; // Otherwise they may not be equivalent. return false; } /// \brief Check if executing a load of this pointer value cannot trap. /// /// If it is not obviously safe to load from the specified pointer, we do /// a quick local scan of the basic block containing \c ScanFrom, to determine /// if the address is already accessed. /// /// This uses the pointee type to determine how many bytes need to be safe to /// load from the pointer. bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom, unsigned Align) { const DataLayout &DL = ScanFrom->getModule()->getDataLayout(); // Zero alignment means that the load has the ABI alignment for the target if (Align == 0) Align = DL.getABITypeAlignment(V->getType()->getPointerElementType()); assert(isPowerOf2_32(Align)); int64_t ByteOffset = 0; Value *Base = V; Base = GetPointerBaseWithConstantOffset(V, ByteOffset, DL); if (ByteOffset < 0) // out of bounds return false; Type *BaseType = nullptr; unsigned BaseAlign = 0; if (const AllocaInst *AI = dyn_cast<AllocaInst>(Base)) { // An alloca is safe to load from as load as it is suitably aligned. BaseType = AI->getAllocatedType(); BaseAlign = AI->getAlignment(); } else if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) { // Global variables are not necessarily safe to load from if they are // overridden. Their size may change or they may be weak and require a test // to determine if they were in fact provided. 
if (!GV->mayBeOverridden()) { BaseType = GV->getType()->getElementType(); BaseAlign = GV->getAlignment(); } } PointerType *AddrTy = cast<PointerType>(V->getType()); uint64_t LoadSize = DL.getTypeStoreSize(AddrTy->getElementType()); // If we found a base allocated type from either an alloca or global variable, // try to see if we are definitively within the allocated region. We need to // know the size of the base type and the loaded type to do anything in this // case. if (BaseType && BaseType->isSized()) { if (BaseAlign == 0) BaseAlign = DL.getPrefTypeAlignment(BaseType); if (Align <= BaseAlign) { // Check if the load is within the bounds of the underlying object. if (ByteOffset + LoadSize <= DL.getTypeAllocSize(BaseType) && ((ByteOffset % Align) == 0)) return true; } } // Otherwise, be a little bit aggressive by scanning the local block where we // want to check to see if the pointer is already being loaded or stored // from/to. If so, the previous load or store would have already trapped, // so there is no harm doing an extra load (also, CSE will later eliminate // the load entirely). BasicBlock::iterator BBI = ScanFrom, E = ScanFrom->getParent()->begin(); // We can at least always strip pointer casts even though we can't use the // base here. V = V->stripPointerCasts(); while (BBI != E) { --BBI; // If we see a free or a call which may write to memory (i.e. which might do // a free) the pointer could be marked invalid. if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() && !isa<DbgInfoIntrinsic>(BBI)) return false; Value *AccessedPtr; unsigned AccessedAlign; if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) { AccessedPtr = LI->getPointerOperand(); AccessedAlign = LI->getAlignment(); } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) { AccessedPtr = SI->getPointerOperand(); AccessedAlign = SI->getAlignment(); } else continue; Type *AccessedTy = AccessedPtr->getType()->getPointerElementType(); if (AccessedAlign == 0) AccessedAlign = DL.getABITypeAlignment(AccessedTy); if (AccessedAlign < Align) continue; // Handle trivial cases. if (AccessedPtr == V) return true; if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) && LoadSize <= DL.getTypeStoreSize(AccessedTy)) return true; } return false; } /// \brief Scan the ScanBB block backwards to see if we have the value at the /// memory address *Ptr locally available within a small number of instructions. /// /// The scan starts from \c ScanFrom. \c MaxInstsToScan specifies the maximum /// instructions to scan in the block. If it is set to \c 0, it will scan the whole /// block. /// /// If the value is available, this function returns it. If not, it returns the /// iterator for the last validated instruction that the value would be live /// through. If we scanned the entire block and didn't find something that /// invalidates \c *Ptr or provides it, \c ScanFrom is left at the last /// instruction processed and this returns null. /// /// You can also optionally specify an alias analysis implementation, which /// makes this more precise. /// /// If \c AATags is non-null and a load or store is found, the AA tags from the /// load or store are recorded there. If there are no AA tags or if no access is /// found, it is left unmodified. 
Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom, unsigned MaxInstsToScan, AliasAnalysis *AA, AAMDNodes *AATags) { if (MaxInstsToScan == 0) MaxInstsToScan = ~0U; Type *AccessTy = cast<PointerType>(Ptr->getType())->getElementType(); const DataLayout &DL = ScanBB->getModule()->getDataLayout(); // Try to get the store size for the type. uint64_t AccessSize = DL.getTypeStoreSize(AccessTy); Value *StrippedPtr = Ptr->stripPointerCasts(); while (ScanFrom != ScanBB->begin()) { // We must ignore debug info directives when counting (otherwise they // would affect codegen). Instruction *Inst = --ScanFrom; if (isa<DbgInfoIntrinsic>(Inst)) continue; // Restore ScanFrom to expected value in case next test succeeds ScanFrom++; // Don't scan huge blocks. if (MaxInstsToScan-- == 0) return nullptr; --ScanFrom; // If this is a load of Ptr, the loaded value is available. // (This is true even if the load is volatile or atomic, although // those cases are unlikely.) if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) if (AreEquivalentAddressValues( LI->getPointerOperand()->stripPointerCasts(), StrippedPtr) && CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) { if (AATags) LI->getAAMetadata(*AATags); return LI; } if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) { Value *StorePtr = SI->getPointerOperand()->stripPointerCasts(); // If this is a store through Ptr, the value is available! // (This is true even if the store is volatile or atomic, although // those cases are unlikely.) if (AreEquivalentAddressValues(StorePtr, StrippedPtr) && CastInst::isBitOrNoopPointerCastable(SI->getValueOperand()->getType(), AccessTy, DL)) { if (AATags) SI->getAAMetadata(*AATags); return SI->getOperand(0); } // If both StrippedPtr and StorePtr reach all the way to an alloca or // global and they are different, ignore the store. This is a trivial form // of alias analysis that is important for reg2mem'd code. if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) && (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) && StrippedPtr != StorePtr) continue; // If we have alias analysis and it says the store won't modify the loaded // value, ignore the store. if (AA && (AA->getModRefInfo(SI, StrippedPtr, AccessSize) & AliasAnalysis::Mod) == 0) continue; // Otherwise the store that may or may not alias the pointer, bail out. ++ScanFrom; return nullptr; } // If this is some other instruction that may clobber Ptr, bail out. if (Inst->mayWriteToMemory()) { // If alias analysis claims that it really won't modify the load, // ignore it. if (AA && (AA->getModRefInfo(Inst, StrippedPtr, AccessSize) & AliasAnalysis::Mod) == 0) continue; // May modify the pointer, bail out. ++ScanFrom; return nullptr; } } // Got to the start of the block, we didn't find it, but are done for this // block. return nullptr; }
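// ---------------------------------------------------------------------------
// Editorial example: a minimal, hypothetical sketch of how a transform could
// use FindAvailableLoadedValue above to forward an earlier load or store to a
// redundant load. The helper name tryForwardLoad is invented; the call matches
// the signature defined in this file (declared in llvm/Analysis/Loads.h), and
// the sketch relies on the headers already included above.
static bool tryForwardLoad(LoadInst *LI, AliasAnalysis *AA) {
  BasicBlock::iterator ScanFrom = LI; // scan backwards starting at the load
  AAMDNodes AATags;
  if (Value *Avail = FindAvailableLoadedValue(LI->getPointerOperand(),
                                              LI->getParent(), ScanFrom,
                                              /*MaxInstsToScan=*/6, AA,
                                              &AATags)) {
    // The available value is only guaranteed to be bitcast-compatible with
    // the load's type; keep the sketch simple and forward exact matches only.
    if (Avail->getType() == LI->getType()) {
      LI->replaceAllUsesWith(Avail);
      LI->eraseFromParent();
      return true;
    }
  }
  return false;
}
// ---------------------------------------------------------------------------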
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Analysis/RegionPass.cpp
//===- RegionPass.cpp - Region Pass and Region Pass Manager ---------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements RegionPass and RGPassManager. All region optimization // and transformation passes are derived from RegionPass. RGPassManager is // responsible for managing RegionPasses. // most of these codes are COPY from LoopPass.cpp // //===----------------------------------------------------------------------===// #include "llvm/Analysis/RegionPass.h" #include "llvm/Analysis/RegionIterator.h" #include "llvm/Support/Debug.h" #include "llvm/Support/Timer.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; #define DEBUG_TYPE "regionpassmgr" //===----------------------------------------------------------------------===// // RGPassManager // char RGPassManager::ID = 0; RGPassManager::RGPassManager() : FunctionPass(ID), PMDataManager() { skipThisRegion = false; redoThisRegion = false; RI = nullptr; CurrentRegion = nullptr; } // Recurse through all subregions and all regions into RQ. static void addRegionIntoQueue(Region &R, std::deque<Region *> &RQ) { RQ.push_back(&R); for (const auto &E : R) addRegionIntoQueue(*E, RQ); } /// Pass Manager itself does not invalidate any analysis info. void RGPassManager::getAnalysisUsage(AnalysisUsage &Info) const { Info.addRequired<RegionInfoPass>(); Info.setPreservesAll(); } /// run - Execute all of the passes scheduled for execution. Keep track of /// whether any of the passes modifies the function, and if so, return true. bool RGPassManager::runOnFunction(Function &F) { RI = &getAnalysis<RegionInfoPass>().getRegionInfo(); bool Changed = false; // Collect inherited analysis from Module level pass manager. populateInheritedAnalysis(TPM->activeStack); addRegionIntoQueue(*RI->getTopLevelRegion(), RQ); if (RQ.empty()) // No regions, skip calling finalizers return false; // Initialization for (std::deque<Region *>::const_iterator I = RQ.begin(), E = RQ.end(); I != E; ++I) { Region *R = *I; for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) { RegionPass *RP = (RegionPass *)getContainedPass(Index); Changed |= RP->doInitialization(R, *this); } } // Walk Regions while (!RQ.empty()) { CurrentRegion = RQ.back(); skipThisRegion = false; redoThisRegion = false; // Run all passes on the current Region. for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) { RegionPass *P = (RegionPass*)getContainedPass(Index); if (isPassDebuggingExecutionsOrMore()) { dumpPassInfo(P, EXECUTION_MSG, ON_REGION_MSG, CurrentRegion->getNameStr()); dumpRequiredSet(P); } initializeAnalysisImpl(P); { PassManagerPrettyStackEntry X(P, *CurrentRegion->getEntry()); TimeRegion PassTimer(getPassTimer(P)); Changed |= P->runOnRegion(CurrentRegion, *this); } if (isPassDebuggingExecutionsOrMore()) { if (Changed) dumpPassInfo(P, MODIFICATION_MSG, ON_REGION_MSG, skipThisRegion ? "<deleted>" : CurrentRegion->getNameStr()); dumpPreservedSet(P); } if (!skipThisRegion) { // Manually check that this region is still healthy. This is done // instead of relying on RegionInfo::verifyRegion since RegionInfo // is a function pass and it's really expensive to verify every // Region in the function every time. That level of checking can be // enabled with the -verify-region-info option. 
{ TimeRegion PassTimer(getPassTimer(P)); CurrentRegion->verifyRegion(); } // Then call the regular verifyAnalysis functions. verifyPreservedAnalysis(P); } removeNotPreservedAnalysis(P); recordAvailableAnalysis(P); removeDeadPasses(P, (!isPassDebuggingExecutionsOrMore() || skipThisRegion) ? "<deleted>" : CurrentRegion->getNameStr(), ON_REGION_MSG); if (skipThisRegion) // Do not run other passes on this region. break; } // If the region was deleted, release all the region passes. This frees up // some memory, and avoids trouble with the pass manager trying to call // verifyAnalysis on them. if (skipThisRegion) for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) { Pass *P = getContainedPass(Index); freePass(P, "<deleted>", ON_REGION_MSG); } // Pop the region from queue after running all passes. RQ.pop_back(); if (redoThisRegion) RQ.push_back(CurrentRegion); // Free all region nodes created in region passes. RI->clearNodeCache(); } // Finalization for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) { RegionPass *P = (RegionPass*)getContainedPass(Index); Changed |= P->doFinalization(); } // Print the region tree after all pass. DEBUG( dbgs() << "\nRegion tree of function " << F.getName() << " after all region Pass:\n"; RI->dump(); dbgs() << "\n"; ); return Changed; } /// Print passes managed by this manager void RGPassManager::dumpPassStructure(unsigned Offset) { errs().indent(Offset*2) << "Region Pass Manager\n"; for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) { Pass *P = getContainedPass(Index); P->dumpPassStructure(Offset + 1); dumpLastUses(P, Offset+1); } } namespace { //===----------------------------------------------------------------------===// // PrintRegionPass class PrintRegionPass : public RegionPass { private: std::string Banner; raw_ostream &Out; // raw_ostream to print on. public: static char ID; PrintRegionPass(const std::string &B, raw_ostream &o) : RegionPass(ID), Banner(B), Out(o) {} void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesAll(); } bool runOnRegion(Region *R, RGPassManager &RGM) override { Out << Banner; for (const auto *BB : R->blocks()) { if (BB) BB->print(Out); else Out << "Printing <null> Block"; } return false; } }; char PrintRegionPass::ID = 0; } //end anonymous namespace //===----------------------------------------------------------------------===// // RegionPass // Check if this pass is suitable for the current RGPassManager, if // available. This pass P is not suitable for a RGPassManager if P // is not preserving higher level analysis info used by other // RGPassManager passes. In such case, pop RGPassManager from the // stack. This will force assignPassManager() to create new // LPPassManger as expected. void RegionPass::preparePassManager(PMStack &PMS) { // Find RGPassManager while (!PMS.empty() && PMS.top()->getPassManagerType() > PMT_RegionPassManager) PMS.pop(); // If this pass is destroying high level information that is used // by other passes that are managed by LPM then do not insert // this pass in current LPM. Use new RGPassManager. if (PMS.top()->getPassManagerType() == PMT_RegionPassManager && !PMS.top()->preserveHigherLevelAnalysis(this)) PMS.pop(); } /// Assign pass manager to manage this pass. 
void RegionPass::assignPassManager(PMStack &PMS, PassManagerType PreferredType) { std::unique_ptr<RegionPass> thisPtr(this); // HLSL Change // Find RGPassManager while (!PMS.empty() && PMS.top()->getPassManagerType() > PMT_RegionPassManager) PMS.pop(); RGPassManager *RGPM; // Create new Region Pass Manager if it does not exist. if (PMS.top()->getPassManagerType() == PMT_RegionPassManager) RGPM = (RGPassManager*)PMS.top(); else { assert (!PMS.empty() && "Unable to create Region Pass Manager"); PMDataManager *PMD = PMS.top(); // [1] Create new Region Pass Manager RGPM = new RGPassManager(); RGPM->populateInheritedAnalysis(PMS); // [2] Set up new manager's top level manager PMTopLevelManager *TPM = PMD->getTopLevelManager(); TPM->addIndirectPassManager(RGPM); // [3] Assign manager to manage this new manager. This may create // and push new managers into PMS TPM->schedulePass(RGPM); // [4] Push new manager into PMS PMS.push(RGPM); } thisPtr.release(); // HLSL Change RGPM->add(this); } /// Get the printer pass Pass *RegionPass::createPrinterPass(raw_ostream &O, const std::string &Banner) const { return new PrintRegionPass(Banner, O); }
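// ---------------------------------------------------------------------------
// Editorial example: a minimal, hypothetical RegionPass illustrating what
// RGPassManager above expects from a client pass: a pass ID, runOnRegion, and
// getAnalysisUsage. The pass name is invented for illustration and the sketch
// relies only on APIs already used in this file.
namespace {
class CountRegionBlocksPass : public RegionPass {
public:
  static char ID;
  CountRegionBlocksPass() : RegionPass(ID) {}

  // Count the basic blocks spanned by the region. Analysis-only, so return
  // false to signal that the IR was not modified.
  bool runOnRegion(Region *R, RGPassManager &RGM) override {
    unsigned NumBlocks = 0;
    for (const auto *BB : R->blocks())
      if (BB)
        ++NumBlocks;
    errs() << "Region " << R->getNameStr() << " spans " << NumBlocks
           << " blocks\n";
    return false;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
  }
};
char CountRegionBlocksPass::ID = 0;
} // end anonymous namespace
// ---------------------------------------------------------------------------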
0
repos/DirectXShaderCompiler/lib/Analysis
repos/DirectXShaderCompiler/lib/Analysis/IPA/CallGraphSCCPass.cpp
//===- CallGraphSCCPass.cpp - Pass that operates BU on call graph ---------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the CallGraphSCCPass class, which is used for passes // which are implemented as bottom-up traversals on the call graph. Because // there may be cycles in the call graph, passes of this type operate on the // call-graph in SCC order: that is, they process function bottom-up, except for // recursive functions, which they process all at once. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/CallGraphSCCPass.h" #include "llvm/ADT/SCCIterator.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/CallGraph.h" #include "llvm/IR/Function.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/LegacyPassManagers.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/Timer.h" #include "llvm/Support/TimeProfiler.h" // HLSL Change #include "llvm/Support/raw_ostream.h" using namespace llvm; #define DEBUG_TYPE "cgscc-passmgr" #if 0 // HLSL Change Starts - option pending static cl::opt<unsigned> MaxIterations("max-cg-scc-iterations", cl::ReallyHidden, cl::init(4)); #else static const unsigned MaxIterations = 4; #endif STATISTIC(MaxSCCIterations, "Maximum CGSCCPassMgr iterations on one SCC"); //===----------------------------------------------------------------------===// // CGPassManager // /// CGPassManager manages FPPassManagers and CallGraphSCCPasses. namespace { class CGPassManager : public ModulePass, public PMDataManager { public: static char ID; explicit CGPassManager() : ModulePass(ID), PMDataManager() { } /// Execute all of the passes scheduled for execution. Keep track of /// whether any of the passes modifies the module, and if so, return true. bool runOnModule(Module &M) override; using ModulePass::doInitialization; using ModulePass::doFinalization; bool doInitialization(CallGraph &CG); bool doFinalization(CallGraph &CG); /// Pass Manager itself does not invalidate any analysis info. void getAnalysisUsage(AnalysisUsage &Info) const override { // CGPassManager walks SCC and it needs CallGraph. Info.addRequired<CallGraphWrapperPass>(); Info.setPreservesAll(); } StringRef getPassName() const override { return "CallGraph Pass Manager"; } PMDataManager *getAsPMDataManager() override { return this; } Pass *getAsPass() override { return this; } // Print passes managed by this manager void dumpPassStructure(unsigned Offset) override { errs().indent(Offset*2) << "Call Graph SCC Pass Manager\n"; for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) { Pass *P = getContainedPass(Index); P->dumpPassStructure(Offset + 1); dumpLastUses(P, Offset+1); } } Pass *getContainedPass(unsigned N) { assert(N < PassVector.size() && "Pass number out of range!"); return static_cast<Pass *>(PassVector[N]); } PassManagerType getPassManagerType() const override { return PMT_CallGraphPassManager; } private: bool RunAllPassesOnSCC(CallGraphSCC &CurSCC, CallGraph &CG, bool &DevirtualizedCall); bool RunPassOnSCC(Pass *P, CallGraphSCC &CurSCC, CallGraph &CG, bool &CallGraphUpToDate, bool &DevirtualizedCall); bool RefreshCallGraph(CallGraphSCC &CurSCC, CallGraph &CG, bool IsCheckingMode); }; } // end anonymous namespace. 
char CGPassManager::ID = 0; bool CGPassManager::RunPassOnSCC(Pass *P, CallGraphSCC &CurSCC, CallGraph &CG, bool &CallGraphUpToDate, bool &DevirtualizedCall) { bool Changed = false; PMDataManager *PM = P->getAsPMDataManager(); if (!PM) { CallGraphSCCPass *CGSP = (CallGraphSCCPass*)P; if (!CallGraphUpToDate) { DevirtualizedCall |= RefreshCallGraph(CurSCC, CG, false); CallGraphUpToDate = true; } { // HLSL Change Begin - Support hierarchial time tracing. StringRef FnName = (*CurSCC.begin())->getFunction() ? (*CurSCC.begin())->getFunction()->getName() : "Unnamed"; TimeTraceScope FunctionScope("CGSCCPass-Function", FnName); // HLSL Change End - Support hierarchial time tracing. TimeRegion PassTimer(getPassTimer(CGSP)); Changed = CGSP->runOnSCC(CurSCC); } // After the CGSCCPass is done, when assertions are enabled, use // RefreshCallGraph to verify that the callgraph was correctly updated. #ifndef NDEBUG if (Changed) RefreshCallGraph(CurSCC, CG, true); #endif return Changed; } assert(PM->getPassManagerType() == PMT_FunctionPassManager && "Invalid CGPassManager member"); FPPassManager *FPP = (FPPassManager*)P; // Run pass P on all functions in the current SCC. for (CallGraphNode *CGN : CurSCC) { if (Function *F = CGN->getFunction()) { dumpPassInfo(P, EXECUTION_MSG, ON_FUNCTION_MSG, F->getName()); { // HLSL Change Begin - Support hierarchial time tracing. TimeTraceScope FunctionScope("CGSCCPass-Function", F->getName()); // HLSL Change End - Support hierarchial time tracing. TimeRegion PassTimer(getPassTimer(FPP)); Changed |= FPP->runOnFunction(*F); } F->getContext().yield(); } } // The function pass(es) modified the IR, they may have clobbered the // callgraph. if (Changed && CallGraphUpToDate) { DEBUG(dbgs() << "CGSCCPASSMGR: Pass Dirtied SCC: " << P->getPassName() << '\n'); CallGraphUpToDate = false; } return Changed; } /// Scan the functions in the specified CFG and resync the /// callgraph with the call sites found in it. This is used after /// FunctionPasses have potentially munged the callgraph, and can be used after /// CallGraphSCC passes to verify that they correctly updated the callgraph. /// /// This function returns true if it devirtualized an existing function call, /// meaning it turned an indirect call into a direct call. This happens when /// a function pass like GVN optimizes away stuff feeding the indirect call. /// This never happens in checking mode. /// bool CGPassManager::RefreshCallGraph(CallGraphSCC &CurSCC, CallGraph &CG, bool CheckingMode) { DenseMap<Value*, CallGraphNode*> CallSites; DEBUG(dbgs() << "CGSCCPASSMGR: Refreshing SCC with " << CurSCC.size() << " nodes:\n"; for (CallGraphNode *CGN : CurSCC) CGN->dump(); ); bool MadeChange = false; bool DevirtualizedCall = false; // Scan all functions in the SCC. unsigned FunctionNo = 0; for (CallGraphSCC::iterator SCCIdx = CurSCC.begin(), E = CurSCC.end(); SCCIdx != E; ++SCCIdx, ++FunctionNo) { CallGraphNode *CGN = *SCCIdx; Function *F = CGN->getFunction(); if (!F || F->isDeclaration()) continue; // Walk the function body looking for call sites. Sync up the call sites in // CGN with those actually in the function. // Keep track of the number of direct and indirect calls that were // invalidated and removed. unsigned NumDirectRemoved = 0, NumIndirectRemoved = 0; // Get the set of call sites currently in the function. for (CallGraphNode::iterator I = CGN->begin(), E = CGN->end(); I != E; ) { // If this call site is null, then the function pass deleted the call // entirely and the WeakTrackingVH nulled it out. 
if (!I->first || // If we've already seen this call site, then the FunctionPass RAUW'd // one call with another, which resulted in two "uses" in the edge // list of the same call. CallSites.count(I->first) || // If the call edge is not from a call or invoke, or it is a // instrinsic call, then the function pass RAUW'd a call with // another value. This can happen when constant folding happens // of well known functions etc. !CallSite(I->first) || (CallSite(I->first).getCalledFunction() && CallSite(I->first).getCalledFunction()->isIntrinsic() && Intrinsic::isLeaf( CallSite(I->first).getCalledFunction()->getIntrinsicID()))) { assert(!CheckingMode && "CallGraphSCCPass did not update the CallGraph correctly!"); // If this was an indirect call site, count it. if (!I->second->getFunction()) ++NumIndirectRemoved; else ++NumDirectRemoved; // Just remove the edge from the set of callees, keep track of whether // I points to the last element of the vector. bool WasLast = I + 1 == E; CGN->removeCallEdge(I); // If I pointed to the last element of the vector, we have to bail out: // iterator checking rejects comparisons of the resultant pointer with // end. if (WasLast) break; E = CGN->end(); continue; } assert(!CallSites.count(I->first) && "Call site occurs in node multiple times"); CallSite CS(I->first); if (CS) { Function *Callee = CS.getCalledFunction(); // Ignore intrinsics because they're not really function calls. if (!Callee || !(Callee->isIntrinsic())) CallSites.insert(std::make_pair(I->first, I->second)); } ++I; } // Loop over all of the instructions in the function, getting the callsites. // Keep track of the number of direct/indirect calls added. unsigned NumDirectAdded = 0, NumIndirectAdded = 0; for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) { CallSite CS(cast<Value>(I)); if (!CS) continue; Function *Callee = CS.getCalledFunction(); if (Callee && Callee->isIntrinsic()) continue; // If this call site already existed in the callgraph, just verify it // matches up to expectations and remove it from CallSites. DenseMap<Value*, CallGraphNode*>::iterator ExistingIt = CallSites.find(CS.getInstruction()); if (ExistingIt != CallSites.end()) { CallGraphNode *ExistingNode = ExistingIt->second; // Remove from CallSites since we have now seen it. CallSites.erase(ExistingIt); // Verify that the callee is right. if (ExistingNode->getFunction() == CS.getCalledFunction()) continue; // If we are in checking mode, we are not allowed to actually mutate // the callgraph. If this is a case where we can infer that the // callgraph is less precise than it could be (e.g. an indirect call // site could be turned direct), don't reject it in checking mode, and // don't tweak it to be more precise. if (CheckingMode && CS.getCalledFunction() && ExistingNode->getFunction() == nullptr) continue; assert(!CheckingMode && "CallGraphSCCPass did not update the CallGraph correctly!"); // If not, we either went from a direct call to indirect, indirect to // direct, or direct to different direct. CallGraphNode *CalleeNode; if (Function *Callee = CS.getCalledFunction()) { CalleeNode = CG.getOrInsertFunction(Callee); // Keep track of whether we turned an indirect call into a direct // one. if (!ExistingNode->getFunction()) { DevirtualizedCall = true; DEBUG(dbgs() << " CGSCCPASSMGR: Devirtualized call to '" << Callee->getName() << "'\n"); } } else { CalleeNode = CG.getCallsExternalNode(); } // Update the edge target in CGN. 
CGN->replaceCallEdge(CS, CS, CalleeNode); MadeChange = true; continue; } assert(!CheckingMode && "CallGraphSCCPass did not update the CallGraph correctly!"); // If the call site didn't exist in the CGN yet, add it. CallGraphNode *CalleeNode; if (Function *Callee = CS.getCalledFunction()) { CalleeNode = CG.getOrInsertFunction(Callee); ++NumDirectAdded; } else { CalleeNode = CG.getCallsExternalNode(); ++NumIndirectAdded; } CGN->addCalledFunction(CS, CalleeNode); MadeChange = true; } // We scanned the old callgraph node, removing invalidated call sites and // then added back newly found call sites. One thing that can happen is // that an old indirect call site was deleted and replaced with a new direct // call. In this case, we have devirtualized a call, and CGSCCPM would like // to iteratively optimize the new code. Unfortunately, we don't really // have a great way to detect when this happens. As an approximation, we // just look at whether the number of indirect calls is reduced and the // number of direct calls is increased. There are tons of ways to fool this // (e.g. DCE'ing an indirect call and duplicating an unrelated block with a // direct call) but this is close enough. if (NumIndirectRemoved > NumIndirectAdded && NumDirectRemoved < NumDirectAdded) DevirtualizedCall = true; // After scanning this function, if we still have entries in callsites, then // they are dangling pointers. WeakTrackingVH should save us for this, so // abort if // this happens. assert(CallSites.empty() && "Dangling pointers found in call sites map"); // Periodically do an explicit clear to remove tombstones when processing // large scc's. if ((FunctionNo & 15) == 15) CallSites.clear(); } DEBUG(if (MadeChange) { dbgs() << "CGSCCPASSMGR: Refreshed SCC is now:\n"; for (CallGraphNode *CGN : CurSCC) CGN->dump(); if (DevirtualizedCall) dbgs() << "CGSCCPASSMGR: Refresh devirtualized a call!\n"; } else { dbgs() << "CGSCCPASSMGR: SCC Refresh didn't change call graph.\n"; } ); (void)MadeChange; return DevirtualizedCall; } /// Execute the body of the entire pass manager on the specified SCC. /// This keeps track of whether a function pass devirtualizes /// any calls and returns it in DevirtualizedCall. bool CGPassManager::RunAllPassesOnSCC(CallGraphSCC &CurSCC, CallGraph &CG, bool &DevirtualizedCall) { bool Changed = false; // Keep track of whether the callgraph is known to be up-to-date or not. // The CGSSC pass manager runs two types of passes: // CallGraphSCC Passes and other random function passes. Because other // random function passes are not CallGraph aware, they may clobber the // call graph by introducing new calls or deleting other ones. This flag // is set to false when we run a function pass so that we know to clean up // the callgraph when we need to run a CGSCCPass again. bool CallGraphUpToDate = true; // Run all passes on current SCC. for (unsigned PassNo = 0, e = getNumContainedPasses(); PassNo != e; ++PassNo) { Pass *P = getContainedPass(PassNo); // HLSL Change Begin - Support hierarchial time tracing. TimeTraceScope PassScope("RunCallGraphSCCPass", P->getPassName()); // HLSL Change End - Support hierarchial time tracing. // If we're in -debug-pass=Executions mode, construct the SCC node list, // otherwise avoid constructing this string as it is expensive. 
if (isPassDebuggingExecutionsOrMore()) { std::string Functions; #ifndef NDEBUG raw_string_ostream OS(Functions); for (CallGraphSCC::iterator I = CurSCC.begin(), E = CurSCC.end(); I != E; ++I) { if (I != CurSCC.begin()) OS << ", "; (*I)->print(OS); } OS.flush(); #endif dumpPassInfo(P, EXECUTION_MSG, ON_CG_MSG, Functions); } dumpRequiredSet(P); initializeAnalysisImpl(P); // Actually run this pass on the current SCC. Changed |= RunPassOnSCC(P, CurSCC, CG, CallGraphUpToDate, DevirtualizedCall); if (Changed) dumpPassInfo(P, MODIFICATION_MSG, ON_CG_MSG, ""); dumpPreservedSet(P); verifyPreservedAnalysis(P); removeNotPreservedAnalysis(P); recordAvailableAnalysis(P); removeDeadPasses(P, "", ON_CG_MSG); } // If the callgraph was left out of date (because the last pass run was a // functionpass), refresh it before we move on to the next SCC. if (!CallGraphUpToDate) DevirtualizedCall |= RefreshCallGraph(CurSCC, CG, false); return Changed; } /// Execute all of the passes scheduled for execution. Keep track of /// whether any of the passes modifies the module, and if so, return true. bool CGPassManager::runOnModule(Module &M) { CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph(); bool Changed = doInitialization(CG); // Walk the callgraph in bottom-up SCC order. scc_iterator<CallGraph*> CGI = scc_begin(&CG); CallGraphSCC CurSCC(&CGI); while (!CGI.isAtEnd()) { // Copy the current SCC and increment past it so that the pass can hack // on the SCC if it wants to without invalidating our iterator. const std::vector<CallGraphNode *> &NodeVec = *CGI; CurSCC.initialize(NodeVec.data(), NodeVec.data() + NodeVec.size()); ++CGI; // At the top level, we run all the passes in this pass manager on the // functions in this SCC. However, we support iterative compilation in the // case where a function pass devirtualizes a call to a function. For // example, it is very common for a function pass (often GVN or instcombine) // to eliminate the addressing that feeds into a call. With that improved // information, we would like the call to be an inline candidate, infer // mod-ref information etc. // // Because of this, we allow iteration up to a specified iteration count. // This only happens in the case of a devirtualized call, so we only burn // compile time in the case that we're making progress. We also have a hard // iteration count limit in case there is crazy code. 
unsigned Iteration = 0; bool DevirtualizedCall = false; do { DEBUG(if (Iteration) dbgs() << " SCCPASSMGR: Re-visiting SCC, iteration #" << Iteration << '\n'); DevirtualizedCall = false; Changed |= RunAllPassesOnSCC(CurSCC, CG, DevirtualizedCall); } while (Iteration++ < MaxIterations && DevirtualizedCall); if (DevirtualizedCall) DEBUG(dbgs() << " CGSCCPASSMGR: Stopped iteration after " << Iteration << " times, due to -max-cg-scc-iterations\n"); if (Iteration > MaxSCCIterations) MaxSCCIterations = Iteration; } Changed |= doFinalization(CG); return Changed; } /// Initialize CG bool CGPassManager::doInitialization(CallGraph &CG) { bool Changed = false; for (unsigned i = 0, e = getNumContainedPasses(); i != e; ++i) { if (PMDataManager *PM = getContainedPass(i)->getAsPMDataManager()) { assert(PM->getPassManagerType() == PMT_FunctionPassManager && "Invalid CGPassManager member"); Changed |= ((FPPassManager*)PM)->doInitialization(CG.getModule()); } else { Changed |= ((CallGraphSCCPass*)getContainedPass(i))->doInitialization(CG); } } return Changed; } /// Finalize CG bool CGPassManager::doFinalization(CallGraph &CG) { bool Changed = false; for (unsigned i = 0, e = getNumContainedPasses(); i != e; ++i) { if (PMDataManager *PM = getContainedPass(i)->getAsPMDataManager()) { assert(PM->getPassManagerType() == PMT_FunctionPassManager && "Invalid CGPassManager member"); Changed |= ((FPPassManager*)PM)->doFinalization(CG.getModule()); } else { Changed |= ((CallGraphSCCPass*)getContainedPass(i))->doFinalization(CG); } } return Changed; } //===----------------------------------------------------------------------===// // CallGraphSCC Implementation //===----------------------------------------------------------------------===// /// This informs the SCC and the pass manager that the specified /// Old node has been deleted, and New is to be used in its place. void CallGraphSCC::ReplaceNode(CallGraphNode *Old, CallGraphNode *New) { assert(Old != New && "Should not replace node with self"); for (unsigned i = 0; ; ++i) { assert(i != Nodes.size() && "Node not in SCC"); if (Nodes[i] != Old) continue; Nodes[i] = New; break; } // Update the active scc_iterator so that it doesn't contain dangling // pointers to the old CallGraphNode. scc_iterator<CallGraph*> *CGI = (scc_iterator<CallGraph*>*)Context; CGI->ReplaceNode(Old, New); } //===----------------------------------------------------------------------===// // CallGraphSCCPass Implementation //===----------------------------------------------------------------------===// /// Assign pass manager to manage this pass. void CallGraphSCCPass::assignPassManager(PMStack &PMS, PassManagerType PreferredType) { std::unique_ptr<CallGraphSCCPass> thisPtr(this); // HLSL Change // Find CGPassManager while (!PMS.empty() && PMS.top()->getPassManagerType() > PMT_CallGraphPassManager) PMS.pop(); assert(!PMS.empty() && "Unable to handle Call Graph Pass"); CGPassManager *CGP; if (PMS.top()->getPassManagerType() == PMT_CallGraphPassManager) CGP = (CGPassManager*)PMS.top(); else { // Create new Call Graph SCC Pass Manager if it does not exist. assert(!PMS.empty() && "Unable to create Call Graph Pass Manager"); PMDataManager *PMD = PMS.top(); // [1] Create new Call Graph Pass Manager CGP = new CGPassManager(); // [2] Set up new manager's top level manager PMTopLevelManager *TPM = PMD->getTopLevelManager(); TPM->addIndirectPassManager(CGP); // [3] Assign manager to manage this new manager. 
This may create // and push new managers into PMS Pass *P = CGP; TPM->schedulePass(P); // [4] Push new manager into PMS PMS.push(CGP); } thisPtr.release(); CGP->add(this); } /// For this class, we declare that we require and preserve the call graph. /// If the derived class implements this method, it should /// always explicitly call the implementation here. void CallGraphSCCPass::getAnalysisUsage(AnalysisUsage &AU) const { AU.addRequired<CallGraphWrapperPass>(); AU.addPreserved<CallGraphWrapperPass>(); } //===----------------------------------------------------------------------===// // PrintCallGraphPass Implementation //===----------------------------------------------------------------------===// namespace { /// PrintCallGraphPass - Print a Module corresponding to a call graph. /// class PrintCallGraphPass : public CallGraphSCCPass { std::string Banner; raw_ostream &Out; // raw_ostream to print on. public: static char ID; PrintCallGraphPass(const std::string &B, raw_ostream &o) : CallGraphSCCPass(ID), Banner(B), Out(o) {} void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesAll(); } bool runOnSCC(CallGraphSCC &SCC) override { Out << Banner; for (CallGraphNode *CGN : SCC) { if (CGN->getFunction()) CGN->getFunction()->print(Out); else Out << "\nPrinting <null> Function\n"; } return false; } }; } // end anonymous namespace. char PrintCallGraphPass::ID = 0; Pass *CallGraphSCCPass::createPrinterPass(raw_ostream &O, const std::string &Banner) const { return new PrintCallGraphPass(Banner, O); }
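// ---------------------------------------------------------------------------
// Editorial example: a minimal, hypothetical CallGraphSCCPass illustrating
// what CGPassManager above expects from a client pass. The pass name is
// invented for illustration; it relies only on APIs already used in this file
// and would still need the usual pass registration before being scheduled.
namespace {
class CountSCCFunctionsPass : public CallGraphSCCPass {
public:
  static char ID;
  CountSCCFunctionsPass() : CallGraphSCCPass(ID) {}

  // Visit each node of the current SCC. Analysis-only, so return false to
  // signal that no IR was modified.
  bool runOnSCC(CallGraphSCC &SCC) override {
    unsigned NumDefined = 0;
    for (CallGraphNode *CGN : SCC)
      if (CGN->getFunction() && !CGN->getFunction()->isDeclaration())
        ++NumDefined;
    DEBUG(dbgs() << "SCC contains " << NumDefined << " defined functions\n");
    return false;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // Chain to the base class so CallGraphWrapperPass stays required and
    // preserved.
    CallGraphSCCPass::getAnalysisUsage(AU);
  }
};
char CountSCCFunctionsPass::ID = 0;
} // end anonymous namespace
// ---------------------------------------------------------------------------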
0
repos/DirectXShaderCompiler/lib/Analysis
repos/DirectXShaderCompiler/lib/Analysis/IPA/GlobalsModRef.cpp
//===- GlobalsModRef.cpp - Simple Mod/Ref Analysis for Globals ------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This simple pass provides alias and mod/ref information for global values // that do not have their address taken, and keeps track of whether functions // read or write memory (are "pure"). For this simple (but very common) case, // we can provide pretty accurate and useful information. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/Passes.h" #include "llvm/ADT/SCCIterator.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/CallGraph.h" #include "llvm/Analysis/MemoryBuiltins.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/InstIterator.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" #include <set> using namespace llvm; #define DEBUG_TYPE "globalsmodref-aa" STATISTIC(NumNonAddrTakenGlobalVars, "Number of global vars without address taken"); STATISTIC(NumNonAddrTakenFunctions,"Number of functions without address taken"); STATISTIC(NumNoMemFunctions, "Number of functions that do not access memory"); STATISTIC(NumReadMemFunctions, "Number of functions that only read memory"); STATISTIC(NumIndirectGlobalVars, "Number of indirect global objects"); namespace { /// FunctionRecord - One instance of this structure is stored for every /// function in the program. Later, the entries for these functions are /// removed if the function is found to call an external function (in which /// case we know nothing about it. struct FunctionRecord { /// GlobalInfo - Maintain mod/ref info for all of the globals without /// addresses taken that are read or written (transitively) by this /// function. std::map<const GlobalValue *, unsigned> GlobalInfo; /// MayReadAnyGlobal - May read global variables, but it is not known which. bool MayReadAnyGlobal; unsigned getInfoForGlobal(const GlobalValue *GV) const { unsigned Effect = MayReadAnyGlobal ? AliasAnalysis::Ref : 0; std::map<const GlobalValue *, unsigned>::const_iterator I = GlobalInfo.find(GV); if (I != GlobalInfo.end()) Effect |= I->second; return Effect; } /// FunctionEffect - Capture whether or not this function reads or writes to /// ANY memory. If not, we can do a lot of aggressive analysis on it. unsigned FunctionEffect; FunctionRecord() : MayReadAnyGlobal(false), FunctionEffect(0) {} }; /// GlobalsModRef - The actual analysis pass. class GlobalsModRef : public ModulePass, public AliasAnalysis { /// NonAddressTakenGlobals - The globals that do not have their addresses /// taken. std::set<const GlobalValue *> NonAddressTakenGlobals; /// IndirectGlobals - The memory pointed to by this global is known to be /// 'owned' by the global. std::set<const GlobalValue *> IndirectGlobals; /// AllocsForIndirectGlobals - If an instruction allocates memory for an /// indirect global, this map indicates which one. std::map<const Value *, const GlobalValue *> AllocsForIndirectGlobals; /// FunctionInfo - For each function, keep track of what globals are /// modified or read. 
std::map<const Function *, FunctionRecord> FunctionInfo; public: static char ID; GlobalsModRef() : ModulePass(ID) { initializeGlobalsModRefPass(*PassRegistry::getPassRegistry()); } bool runOnModule(Module &M) override { InitializeAliasAnalysis(this, &M.getDataLayout()); // Find non-addr taken globals. AnalyzeGlobals(M); // Propagate on CG. AnalyzeCallGraph(getAnalysis<CallGraphWrapperPass>().getCallGraph(), M); return false; } void getAnalysisUsage(AnalysisUsage &AU) const override { AliasAnalysis::getAnalysisUsage(AU); AU.addRequired<CallGraphWrapperPass>(); AU.setPreservesAll(); // Does not transform code } //------------------------------------------------ // Implement the AliasAnalysis API // AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) override; ModRefResult getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) override; ModRefResult getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) override { return AliasAnalysis::getModRefInfo(CS1, CS2); } /// getModRefBehavior - Return the behavior of the specified function if /// called from the specified call site. The call site may be null in which /// case the most generic behavior of this function should be returned. ModRefBehavior getModRefBehavior(const Function *F) override { ModRefBehavior Min = UnknownModRefBehavior; if (FunctionRecord *FR = getFunctionInfo(F)) { if (FR->FunctionEffect == 0) Min = DoesNotAccessMemory; else if ((FR->FunctionEffect & Mod) == 0) Min = OnlyReadsMemory; } return ModRefBehavior(AliasAnalysis::getModRefBehavior(F) & Min); } /// getModRefBehavior - Return the behavior of the specified function if /// called from the specified call site. The call site may be null in which /// case the most generic behavior of this function should be returned. ModRefBehavior getModRefBehavior(ImmutableCallSite CS) override { ModRefBehavior Min = UnknownModRefBehavior; if (const Function *F = CS.getCalledFunction()) if (FunctionRecord *FR = getFunctionInfo(F)) { if (FR->FunctionEffect == 0) Min = DoesNotAccessMemory; else if ((FR->FunctionEffect & Mod) == 0) Min = OnlyReadsMemory; } return ModRefBehavior(AliasAnalysis::getModRefBehavior(CS) & Min); } void deleteValue(Value *V) override; void addEscapingUse(Use &U) override; /// getAdjustedAnalysisPointer - This method is used when a pass implements /// an analysis interface through multiple inheritance. If needed, it /// should override this to adjust the this pointer as needed for the /// specified pass info. void *getAdjustedAnalysisPointer(AnalysisID PI) override { if (PI == &AliasAnalysis::ID) return (AliasAnalysis *)this; return this; } private: /// getFunctionInfo - Return the function info for the function, or null if /// we don't have anything useful to say about it. 
FunctionRecord *getFunctionInfo(const Function *F) { std::map<const Function *, FunctionRecord>::iterator I = FunctionInfo.find(F); if (I != FunctionInfo.end()) return &I->second; return nullptr; } void AnalyzeGlobals(Module &M); void AnalyzeCallGraph(CallGraph &CG, Module &M); bool AnalyzeUsesOfPointer(Value *V, std::vector<Function *> &Readers, std::vector<Function *> &Writers, GlobalValue *OkayStoreDest = nullptr); bool AnalyzeIndirectGlobalMemory(GlobalValue *GV); }; } char GlobalsModRef::ID = 0; INITIALIZE_AG_PASS_BEGIN(GlobalsModRef, AliasAnalysis, "globalsmodref-aa", "Simple mod/ref analysis for globals", false, true, false) INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass) INITIALIZE_AG_PASS_END(GlobalsModRef, AliasAnalysis, "globalsmodref-aa", "Simple mod/ref analysis for globals", false, true, false) Pass *llvm::createGlobalsModRefPass() { return new GlobalsModRef(); } /// AnalyzeGlobals - Scan through the users of all of the internal /// GlobalValue's in the program. If none of them have their "address taken" /// (really, their address passed to something nontrivial), record this fact, /// and record the functions that they are used directly in. void GlobalsModRef::AnalyzeGlobals(Module &M) { std::vector<Function *> Readers, Writers; for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) if (I->hasLocalLinkage()) { if (!AnalyzeUsesOfPointer(I, Readers, Writers)) { // Remember that we are tracking this global. NonAddressTakenGlobals.insert(I); ++NumNonAddrTakenFunctions; } Readers.clear(); Writers.clear(); } for (Module::global_iterator I = M.global_begin(), E = M.global_end(); I != E; ++I) if (I->hasLocalLinkage()) { if (!AnalyzeUsesOfPointer(I, Readers, Writers)) { // Remember that we are tracking this global, and the mod/ref fns NonAddressTakenGlobals.insert(I); for (unsigned i = 0, e = Readers.size(); i != e; ++i) FunctionInfo[Readers[i]].GlobalInfo[I] |= Ref; if (!I->isConstant()) // No need to keep track of writers to constants for (unsigned i = 0, e = Writers.size(); i != e; ++i) FunctionInfo[Writers[i]].GlobalInfo[I] |= Mod; ++NumNonAddrTakenGlobalVars; // If this global holds a pointer type, see if it is an indirect global. if (I->getType()->getElementType()->isPointerTy() && AnalyzeIndirectGlobalMemory(I)) ++NumIndirectGlobalVars; } Readers.clear(); Writers.clear(); } } /// AnalyzeUsesOfPointer - Look at all of the users of the specified pointer. /// If this is used by anything complex (i.e., the address escapes), return /// true. Also, while we are at it, keep track of those functions that read and /// write to the value. /// /// If OkayStoreDest is non-null, stores into this global are allowed. 
bool GlobalsModRef::AnalyzeUsesOfPointer(Value *V, std::vector<Function *> &Readers, std::vector<Function *> &Writers, GlobalValue *OkayStoreDest) { if (!V->getType()->isPointerTy()) return true; for (Use &U : V->uses()) { User *I = U.getUser(); if (LoadInst *LI = dyn_cast<LoadInst>(I)) { Readers.push_back(LI->getParent()->getParent()); } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) { if (V == SI->getOperand(1)) { Writers.push_back(SI->getParent()->getParent()); } else if (SI->getOperand(1) != OkayStoreDest) { return true; // Storing the pointer } } else if (Operator::getOpcode(I) == Instruction::GetElementPtr) { if (AnalyzeUsesOfPointer(I, Readers, Writers)) return true; } else if (Operator::getOpcode(I) == Instruction::BitCast) { if (AnalyzeUsesOfPointer(I, Readers, Writers, OkayStoreDest)) return true; } else if (auto CS = CallSite(I)) { // Make sure that this is just the function being called, not that it is // passing into the function. if (!CS.isCallee(&U)) { // Detect calls to free. if (isFreeCall(I, TLI)) Writers.push_back(CS->getParent()->getParent()); else return true; // Argument of an unknown call. } } else if (ICmpInst *ICI = dyn_cast<ICmpInst>(I)) { if (!isa<ConstantPointerNull>(ICI->getOperand(1))) return true; // Allow comparison against null. } else { return true; } } return false; } /// AnalyzeIndirectGlobalMemory - We found an non-address-taken global variable /// which holds a pointer type. See if the global always points to non-aliased /// heap memory: that is, all initializers of the globals are allocations, and /// those allocations have no use other than initialization of the global. /// Further, all loads out of GV must directly use the memory, not store the /// pointer somewhere. If this is true, we consider the memory pointed to by /// GV to be owned by GV and can disambiguate other pointers from it. bool GlobalsModRef::AnalyzeIndirectGlobalMemory(GlobalValue *GV) { // Keep track of values related to the allocation of the memory, f.e. the // value produced by the malloc call and any casts. std::vector<Value *> AllocRelatedValues; // Walk the user list of the global. If we find anything other than a direct // load or store, bail out. for (User *U : GV->users()) { if (LoadInst *LI = dyn_cast<LoadInst>(U)) { // The pointer loaded from the global can only be used in simple ways: // we allow addressing of it and loading storing to it. We do *not* allow // storing the loaded pointer somewhere else or passing to a function. std::vector<Function *> ReadersWriters; if (AnalyzeUsesOfPointer(LI, ReadersWriters, ReadersWriters)) return false; // Loaded pointer escapes. // TODO: Could try some IP mod/ref of the loaded pointer. } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) { // Storing the global itself. if (SI->getOperand(0) == GV) return false; // If storing the null pointer, ignore it. if (isa<ConstantPointerNull>(SI->getOperand(0))) continue; // Check the value being stored. Value *Ptr = GetUnderlyingObject(SI->getOperand(0), GV->getParent()->getDataLayout()); if (!isAllocLikeFn(Ptr, TLI)) return false; // Too hard to analyze. // Analyze all uses of the allocation. If any of them are used in a // non-simple way (e.g. stored to another global) bail out. std::vector<Function *> ReadersWriters; if (AnalyzeUsesOfPointer(Ptr, ReadersWriters, ReadersWriters, GV)) return false; // Loaded pointer escapes. // Remember that this allocation is related to the indirect global. AllocRelatedValues.push_back(Ptr); } else { // Something complex, bail out. 
return false; } } // Okay, this is an indirect global. Remember all of the allocations for // this global in AllocsForIndirectGlobals. while (!AllocRelatedValues.empty()) { AllocsForIndirectGlobals[AllocRelatedValues.back()] = GV; AllocRelatedValues.pop_back(); } IndirectGlobals.insert(GV); return true; } /// AnalyzeCallGraph - At this point, we know the functions where globals are /// immediately stored to and read from. Propagate this information up the call /// graph to all callers and compute the mod/ref info for all memory for each /// function. void GlobalsModRef::AnalyzeCallGraph(CallGraph &CG, Module &M) { // We do a bottom-up SCC traversal of the call graph. In other words, we // visit all callees before callers (leaf-first). for (scc_iterator<CallGraph *> I = scc_begin(&CG); !I.isAtEnd(); ++I) { const std::vector<CallGraphNode *> &SCC = *I; assert(!SCC.empty() && "SCC with no functions?"); if (!SCC[0]->getFunction()) { // Calls externally - can't say anything useful. Remove any existing // function records (may have been created when scanning globals). for (unsigned i = 0, e = SCC.size(); i != e; ++i) FunctionInfo.erase(SCC[i]->getFunction()); continue; } FunctionRecord &FR = FunctionInfo[SCC[0]->getFunction()]; bool KnowNothing = false; unsigned FunctionEffect = 0; // Collect the mod/ref properties due to called functions. We only compute // one mod-ref set. for (unsigned i = 0, e = SCC.size(); i != e && !KnowNothing; ++i) { Function *F = SCC[i]->getFunction(); if (!F) { KnowNothing = true; break; } if (F->isDeclaration()) { // Try to get mod/ref behaviour from function attributes. if (F->doesNotAccessMemory()) { // Can't do better than that! } else if (F->onlyReadsMemory()) { FunctionEffect |= Ref; if (!F->isIntrinsic()) // This function might call back into the module and read a global - // consider every global as possibly being read by this function. FR.MayReadAnyGlobal = true; } else { FunctionEffect |= ModRef; // Can't say anything useful unless it's an intrinsic - they don't // read or write global variables of the kind considered here. KnowNothing = !F->isIntrinsic(); } continue; } for (CallGraphNode::iterator CI = SCC[i]->begin(), E = SCC[i]->end(); CI != E && !KnowNothing; ++CI) if (Function *Callee = CI->second->getFunction()) { if (FunctionRecord *CalleeFR = getFunctionInfo(Callee)) { // Propagate function effect up. FunctionEffect |= CalleeFR->FunctionEffect; // Incorporate callee's effects on globals into our info. for (const auto &G : CalleeFR->GlobalInfo) FR.GlobalInfo[G.first] |= G.second; FR.MayReadAnyGlobal |= CalleeFR->MayReadAnyGlobal; } else { // Can't say anything about it. However, if it is inside our SCC, // then nothing needs to be done. CallGraphNode *CalleeNode = CG[Callee]; if (std::find(SCC.begin(), SCC.end(), CalleeNode) == SCC.end()) KnowNothing = true; } } else { KnowNothing = true; } } // If we can't say anything useful about this SCC, remove all SCC functions // from the FunctionInfo map. if (KnowNothing) { for (unsigned i = 0, e = SCC.size(); i != e; ++i) FunctionInfo.erase(SCC[i]->getFunction()); continue; } // Scan the function bodies for explicit loads or stores. for (auto *Node : SCC) { if (FunctionEffect == ModRef) break; // The mod/ref lattice saturates here. for (Instruction &I : inst_range(Node->getFunction())) { if (FunctionEffect == ModRef) break; // The mod/ref lattice saturates here. // We handle calls specially because the graph-relevant aspects are // handled above. 
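// (Added note, not in the original source.) Intrinsic calls such as
// @llvm.memcpy never get call graph edges, so their mod/ref effect is folded
// in below via getModRefBehavior instead of via a callee FunctionRecord.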
if (auto CS = CallSite(&I)) { if (isAllocationFn(&I, TLI) || isFreeCall(&I, TLI)) { // FIXME: It is completely unclear why this is necessary and not // handled by the above graph code. FunctionEffect |= ModRef; } else if (Function *Callee = CS.getCalledFunction()) { // The callgraph doesn't include intrinsic calls. if (Callee->isIntrinsic()) { ModRefBehavior Behaviour = AliasAnalysis::getModRefBehavior(Callee); FunctionEffect |= (Behaviour & ModRef); } } continue; } // All non-call instructions we use the primary predicates for whether // thay read or write memory. if (I.mayReadFromMemory()) FunctionEffect |= Ref; if (I.mayWriteToMemory()) FunctionEffect |= Mod; } } if ((FunctionEffect & Mod) == 0) ++NumReadMemFunctions; if (FunctionEffect == 0) ++NumNoMemFunctions; FR.FunctionEffect = FunctionEffect; // Finally, now that we know the full effect on this SCC, clone the // information to each function in the SCC. for (unsigned i = 1, e = SCC.size(); i != e; ++i) FunctionInfo[SCC[i]->getFunction()] = FR; } } /// alias - If one of the pointers is to a global that we are tracking, and the /// other is some random pointer, we know there cannot be an alias, because the /// address of the global isn't taken. AliasResult GlobalsModRef::alias(const MemoryLocation &LocA, const MemoryLocation &LocB) { // Get the base object these pointers point to. const Value *UV1 = GetUnderlyingObject(LocA.Ptr, *DL); const Value *UV2 = GetUnderlyingObject(LocB.Ptr, *DL); // If either of the underlying values is a global, they may be non-addr-taken // globals, which we can answer queries about. const GlobalValue *GV1 = dyn_cast<GlobalValue>(UV1); const GlobalValue *GV2 = dyn_cast<GlobalValue>(UV2); if (GV1 || GV2) { // If the global's address is taken, pretend we don't know it's a pointer to // the global. if (GV1 && !NonAddressTakenGlobals.count(GV1)) GV1 = nullptr; if (GV2 && !NonAddressTakenGlobals.count(GV2)) GV2 = nullptr; // If the two pointers are derived from two different non-addr-taken // globals, or if one is and the other isn't, we know these can't alias. if ((GV1 || GV2) && GV1 != GV2) return NoAlias; // Otherwise if they are both derived from the same addr-taken global, we // can't know the two accesses don't overlap. } // These pointers may be based on the memory owned by an indirect global. If // so, we may be able to handle this. First check to see if the base pointer // is a direct load from an indirect global. GV1 = GV2 = nullptr; if (const LoadInst *LI = dyn_cast<LoadInst>(UV1)) if (GlobalVariable *GV = dyn_cast<GlobalVariable>(LI->getOperand(0))) if (IndirectGlobals.count(GV)) GV1 = GV; if (const LoadInst *LI = dyn_cast<LoadInst>(UV2)) if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(LI->getOperand(0))) if (IndirectGlobals.count(GV)) GV2 = GV; // These pointers may also be from an allocation for the indirect global. If // so, also handle them. if (AllocsForIndirectGlobals.count(UV1)) GV1 = AllocsForIndirectGlobals[UV1]; if (AllocsForIndirectGlobals.count(UV2)) GV2 = AllocsForIndirectGlobals[UV2]; // Now that we know whether the two pointers are related to indirect globals, // use this to disambiguate the pointers. If either pointer is based on an // indirect global and if they are not both based on the same indirect global, // they cannot alias. 
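// (Added example; the value names %p1, %p2, @h1 and @h2 are hypothetical.)
// If %p1 was loaded from indirect global @h1 and %p2 is an allocation
// recorded in AllocsForIndirectGlobals for a different indirect global @h2,
// then GV1 != GV2 here and the check below answers NoAlias without falling
// through to the chained analysis.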
if ((GV1 || GV2) && GV1 != GV2) return NoAlias; return AliasAnalysis::alias(LocA, LocB); } AliasAnalysis::ModRefResult GlobalsModRef::getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) { unsigned Known = ModRef; // If we are asking for mod/ref info of a direct call with a pointer to a // global we are tracking, return information if we have it. const DataLayout &DL = CS.getCaller()->getParent()->getDataLayout(); if (const GlobalValue *GV = dyn_cast<GlobalValue>(GetUnderlyingObject(Loc.Ptr, DL))) if (GV->hasLocalLinkage()) if (const Function *F = CS.getCalledFunction()) if (NonAddressTakenGlobals.count(GV)) if (const FunctionRecord *FR = getFunctionInfo(F)) Known = FR->getInfoForGlobal(GV); if (Known == NoModRef) return NoModRef; // No need to query other mod/ref analyses return ModRefResult(Known & AliasAnalysis::getModRefInfo(CS, Loc)); } //===----------------------------------------------------------------------===// // Methods to update the analysis as a result of the client transformation. // void GlobalsModRef::deleteValue(Value *V) { if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) { if (NonAddressTakenGlobals.erase(GV)) { // This global might be an indirect global. If so, remove it and remove // any AllocRelatedValues for it. if (IndirectGlobals.erase(GV)) { // Remove any entries in AllocsForIndirectGlobals for this global. for (std::map<const Value *, const GlobalValue *>::iterator I = AllocsForIndirectGlobals.begin(), E = AllocsForIndirectGlobals.end(); I != E;) { if (I->second == GV) { AllocsForIndirectGlobals.erase(I++); } else { ++I; } } } } } // Otherwise, if this is an allocation related to an indirect global, remove // it. AllocsForIndirectGlobals.erase(V); AliasAnalysis::deleteValue(V); } void GlobalsModRef::addEscapingUse(Use &U) { // For the purposes of this analysis, it is conservatively correct to treat // a newly escaping value equivalently to a deleted one. We could perhaps // be more precise by processing the new use and attempting to update our // saved analysis results to accommodate it. deleteValue(U); AliasAnalysis::addEscapingUse(U); }
0
repos/DirectXShaderCompiler/lib/Analysis
repos/DirectXShaderCompiler/lib/Analysis/IPA/CMakeLists.txt
add_llvm_library(LLVMipa
  CallGraph.cpp
  CallGraphSCCPass.cpp
  CallPrinter.cpp
  GlobalsModRef.cpp
  IPA.cpp
  InlineCost.cpp
  )

add_dependencies(LLVMipa intrinsics_gen)
0
repos/DirectXShaderCompiler/lib/Analysis
repos/DirectXShaderCompiler/lib/Analysis/IPA/CallGraph.cpp
//===- CallGraph.cpp - Build a Module's call graph ------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/CallGraph.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "dxc/HLSL/HLModule.h" // HLSL Change using namespace llvm; //===----------------------------------------------------------------------===// // Implementations of the CallGraph class methods. // CallGraph::CallGraph(Module &M) : M(M), ExternalCallingNode(getOrInsertFunction(nullptr)), CallsExternalNode(llvm::make_unique<CallGraphNode>(nullptr)) { // Add every function to the call graph. for (Function &F : M) addToCallGraph(&F); } CallGraph::CallGraph(CallGraph &&Arg) : M(Arg.M), FunctionMap(std::move(Arg.FunctionMap)), ExternalCallingNode(Arg.ExternalCallingNode), CallsExternalNode(std::move(Arg.CallsExternalNode)) { Arg.FunctionMap.clear(); Arg.ExternalCallingNode = nullptr; } CallGraph::~CallGraph() { // CallsExternalNode is not in the function map, delete it explicitly. if (CallsExternalNode) CallsExternalNode->allReferencesDropped(); // Reset all node's use counts to zero before deleting them to prevent an // assertion from firing. #ifndef NDEBUG for (auto &I : FunctionMap) I.second->allReferencesDropped(); #endif } void CallGraph::addToCallGraph(Function *F) { CallGraphNode *Node = getOrInsertFunction(F); // If this function has external linkage or has its address taken, anything // could call it. if (!F->hasLocalLinkage() || F->hasAddressTaken()) ExternalCallingNode->addCalledFunction(CallSite(), Node); // If this function is not defined in this translation unit, it could call // anything. if (F->isDeclaration() && !F->isIntrinsic()) Node->addCalledFunction(CallSite(), CallsExternalNode.get()); // Look for calls by this function. for (BasicBlock &BB : *F) for (Instruction &I : BB) { if (auto CS = CallSite(&I)) { const Function *Callee = CS.getCalledFunction(); if (!Callee || !Intrinsic::isLeaf(Callee->getIntrinsicID())) // Indirect calls of intrinsics are not allowed so no need to check. // We can be more precise here by using TargetArg returned by // Intrinsic::isLeaf. Node->addCalledFunction(CS, CallsExternalNode.get()); else if (!Callee->isIntrinsic()) Node->addCalledFunction(CS, getOrInsertFunction(Callee)); } } } void CallGraph::print(raw_ostream &OS) const { // Print in a deterministic order by sorting CallGraphNodes by name. We do // this here to avoid slowing down the non-printing fast path. SmallVector<CallGraphNode *, 16> Nodes; Nodes.reserve(FunctionMap.size()); for (const auto &I : *this) Nodes.push_back(I.second.get()); std::sort(Nodes.begin(), Nodes.end(), [](CallGraphNode *LHS, CallGraphNode *RHS) { if (Function *LF = LHS->getFunction()) if (Function *RF = RHS->getFunction()) return LF->getName() < RF->getName(); return RHS->getFunction() != nullptr; }); for (CallGraphNode *CN : Nodes) CN->print(OS); } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void CallGraph::dump() const { print(dbgs()); } #endif // removeFunctionFromModule - Unlink the function from this module, returning // it. Because this removes the function from the module, the call graph node // is destroyed. 
This is only valid if the function does not call any other // functions (ie, there are no edges in it's CGN). The easiest way to do this // is to dropAllReferences before calling this. // Function *CallGraph::removeFunctionFromModule(CallGraphNode *CGN) { assert(CGN->empty() && "Cannot remove function from call " "graph if it references other functions!"); Function *F = CGN->getFunction(); // Get the function for the call graph node FunctionMap.erase(F); // Remove the call graph node from the map M.CallRemoveGlobalHook(F); // HLSL Change M.getFunctionList().remove(F); return F; } /// spliceFunction - Replace the function represented by this node by another. /// This does not rescan the body of the function, so it is suitable when /// splicing the body of the old function to the new while also updating all /// callers from old to new. /// void CallGraph::spliceFunction(const Function *From, const Function *To) { assert(FunctionMap.count(From) && "No CallGraphNode for function!"); assert(!FunctionMap.count(To) && "Pointing CallGraphNode at a function that already exists"); FunctionMapTy::iterator I = FunctionMap.find(From); I->second->F = const_cast<Function*>(To); FunctionMap[To] = std::move(I->second); FunctionMap.erase(I); } // getOrInsertFunction - This method is identical to calling operator[], but // it will insert a new CallGraphNode for the specified function if one does // not already exist. CallGraphNode *CallGraph::getOrInsertFunction(const Function *F) { auto &CGN = FunctionMap[F]; if (CGN) return CGN.get(); assert((!F || F->getParent() == &M) && "Function not in current module!"); CGN = llvm::make_unique<CallGraphNode>(const_cast<Function *>(F)); return CGN.get(); } //===----------------------------------------------------------------------===// // Implementations of the CallGraphNode class methods. // void CallGraphNode::print(raw_ostream &OS) const { if (Function *F = getFunction()) OS << "Call graph node for function: '" << F->getName() << "'"; else OS << "Call graph node <<null function>>"; OS << "<<" << this << ">> #uses=" << getNumReferences() << '\n'; for (const_iterator I = begin(), E = end(); I != E; ++I) { OS << " CS<" << I->first << "> calls "; if (Function *FI = I->second->getFunction()) OS << "function '" << FI->getName() <<"'\n"; else OS << "external node\n"; } OS << '\n'; } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void CallGraphNode::dump() const { print(dbgs()); } #endif /// removeCallEdgeFor - This method removes the edge in the node for the /// specified call site. Note that this method takes linear time, so it /// should be used sparingly. void CallGraphNode::removeCallEdgeFor(CallSite CS) { for (CalledFunctionsVector::iterator I = CalledFunctions.begin(); ; ++I) { assert(I != CalledFunctions.end() && "Cannot find callsite to remove!"); if (I->first == CS.getInstruction()) { I->second->DropRef(); *I = CalledFunctions.back(); CalledFunctions.pop_back(); return; } } } // removeAnyCallEdgeTo - This method removes any call edges from this node to // the specified callee function. This takes more time to execute than // removeCallEdgeTo, so it should not be used unless necessary. 
void CallGraphNode::removeAnyCallEdgeTo(CallGraphNode *Callee) { for (unsigned i = 0, e = CalledFunctions.size(); i != e; ++i) if (CalledFunctions[i].second == Callee) { Callee->DropRef(); CalledFunctions[i] = CalledFunctions.back(); CalledFunctions.pop_back(); --i; --e; } } /// removeOneAbstractEdgeTo - Remove one edge associated with a null callsite /// from this node to the specified callee function. void CallGraphNode::removeOneAbstractEdgeTo(CallGraphNode *Callee) { for (CalledFunctionsVector::iterator I = CalledFunctions.begin(); ; ++I) { assert(I != CalledFunctions.end() && "Cannot find callee to remove!"); CallRecord &CR = *I; if (CR.second == Callee && CR.first == nullptr) { Callee->DropRef(); *I = CalledFunctions.back(); CalledFunctions.pop_back(); return; } } } /// replaceCallEdge - This method replaces the edge in the node for the /// specified call site with a new one. Note that this method takes linear /// time, so it should be used sparingly. void CallGraphNode::replaceCallEdge(CallSite CS, CallSite NewCS, CallGraphNode *NewNode){ for (CalledFunctionsVector::iterator I = CalledFunctions.begin(); ; ++I) { assert(I != CalledFunctions.end() && "Cannot find callsite to remove!"); if (I->first == CS.getInstruction()) { I->second->DropRef(); I->first = NewCS.getInstruction(); I->second = NewNode; NewNode->AddRef(); return; } } } //===----------------------------------------------------------------------===// // Out-of-line definitions of CallGraphAnalysis class members. // char CallGraphAnalysis::PassID; //===----------------------------------------------------------------------===// // Implementations of the CallGraphWrapperPass class methods. // CallGraphWrapperPass::CallGraphWrapperPass() : ModulePass(ID) { initializeCallGraphWrapperPassPass(*PassRegistry::getPassRegistry()); } CallGraphWrapperPass::~CallGraphWrapperPass() {} void CallGraphWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesAll(); } bool CallGraphWrapperPass::runOnModule(Module &M) { // All the real work is done in the constructor for the CallGraph. G.reset(new CallGraph(M)); return false; } INITIALIZE_PASS(CallGraphWrapperPass, "basiccg", "CallGraph Construction", false, true) char CallGraphWrapperPass::ID = 0; void CallGraphWrapperPass::releaseMemory() { G.reset(); } void CallGraphWrapperPass::print(raw_ostream &OS, const Module *) const { if (!G) { OS << "No call graph has been built!\n"; return; } // Just delegate. G->print(OS); } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void CallGraphWrapperPass::dump() const { print(dbgs(), nullptr); } #endif
0
repos/DirectXShaderCompiler/lib/Analysis
repos/DirectXShaderCompiler/lib/Analysis/IPA/LLVMBuild.txt
;===- ./lib/Analysis/IPA/LLVMBuild.txt -------------------------*- Conf -*--===;
;
; The LLVM Compiler Infrastructure
;
; This file is distributed under the University of Illinois Open Source
; License. See LICENSE.TXT for details.
;
;===------------------------------------------------------------------------===;
;
; This is an LLVMBuild description file for the components in this subdirectory.
;
; For more information on the LLVMBuild system, please see:
;
;   http://llvm.org/docs/LLVMBuild.html
;
;===------------------------------------------------------------------------===;

[component_0]
type = Library
name = IPA
parent = Libraries
library_name = ipa
required_libraries = Analysis Core Support
0
repos/DirectXShaderCompiler/lib/Analysis
repos/DirectXShaderCompiler/lib/Analysis/IPA/CallPrinter.cpp
//===- CallPrinter.cpp - DOT printer for call graph -----------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines '-dot-callgraph', which emit a callgraph.<fnname>.dot // containing the call graph of a module. // // There is also a pass available to directly call dotty ('-view-callgraph'). // //===----------------------------------------------------------------------===// #include "llvm/Analysis/CallGraph.h" #include "llvm/Analysis/CallPrinter.h" #include "llvm/Analysis/DOTGraphTraitsPass.h" using namespace llvm; namespace llvm { template <> struct DOTGraphTraits<CallGraph *> : public DefaultDOTGraphTraits { DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {} static std::string getGraphName(CallGraph *Graph) { return "Call graph"; } std::string getNodeLabel(CallGraphNode *Node, CallGraph *Graph) { if (Function *Func = Node->getFunction()) return Func->getName(); return "external node"; } }; struct AnalysisCallGraphWrapperPassTraits { static CallGraph *getGraph(CallGraphWrapperPass *P) { return &P->getCallGraph(); } }; } // end llvm namespace namespace { struct CallGraphViewer : public DOTGraphTraitsModuleViewer<CallGraphWrapperPass, true, CallGraph *, AnalysisCallGraphWrapperPassTraits> { static char ID; CallGraphViewer() : DOTGraphTraitsModuleViewer<CallGraphWrapperPass, true, CallGraph *, AnalysisCallGraphWrapperPassTraits>( "callgraph", ID) { initializeCallGraphViewerPass(*PassRegistry::getPassRegistry()); } }; struct CallGraphPrinter : public DOTGraphTraitsModulePrinter< CallGraphWrapperPass, true, CallGraph *, AnalysisCallGraphWrapperPassTraits> { static char ID; CallGraphPrinter() : DOTGraphTraitsModulePrinter<CallGraphWrapperPass, true, CallGraph *, AnalysisCallGraphWrapperPassTraits>( "callgraph", ID) { initializeCallGraphPrinterPass(*PassRegistry::getPassRegistry()); } }; } // end anonymous namespace char CallGraphViewer::ID = 0; INITIALIZE_PASS(CallGraphViewer, "view-callgraph", "View call graph", false, false) char CallGraphPrinter::ID = 0; INITIALIZE_PASS(CallGraphPrinter, "dot-callgraph", "Print call graph to 'dot' file", false, false) // Create methods available outside of this file, to use them // "include/llvm/LinkAllPasses.h". Otherwise the pass would be deleted by // the link time optimization. ModulePass *llvm::createCallGraphViewerPass() { return new CallGraphViewer(); } ModulePass *llvm::createCallGraphPrinterPass() { return new CallGraphPrinter(); }
0
repos/DirectXShaderCompiler/lib/Analysis
repos/DirectXShaderCompiler/lib/Analysis/IPA/IPA.cpp
//===-- IPA.cpp -----------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the common initialization routines for the IPA library. // //===----------------------------------------------------------------------===// #include "llvm/InitializePasses.h" #include "llvm-c/Initialization.h" #include "llvm/PassRegistry.h" using namespace llvm; /// initializeIPA - Initialize all passes linked into the IPA library. void llvm::initializeIPA(PassRegistry &Registry) { initializeCallGraphWrapperPassPass(Registry); initializeCallGraphPrinterPass(Registry); initializeCallGraphViewerPass(Registry); initializeGlobalsModRefPass(Registry); } void LLVMInitializeIPA(LLVMPassRegistryRef R) { initializeIPA(*unwrap(R)); }
0
repos/DirectXShaderCompiler/lib/Analysis
repos/DirectXShaderCompiler/lib/Analysis/IPA/InlineCost.cpp
//===- InlineCost.cpp - Cost analysis for inliner -------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements inline cost analysis. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/InlineCost.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/CodeMetrics.h" #include "llvm/Analysis/ConstantFolding.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/CallingConv.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/GetElementPtrTypeIterator.h" #include "llvm/IR/GlobalAlias.h" #include "llvm/IR/InstVisitor.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Operator.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; #define DEBUG_TYPE "inline-cost" STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed"); namespace { class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> { typedef InstVisitor<CallAnalyzer, bool> Base; friend class InstVisitor<CallAnalyzer, bool>; /// The TargetTransformInfo available for this compilation. const TargetTransformInfo &TTI; /// The cache of @llvm.assume intrinsics. AssumptionCacheTracker *ACT; // The called function. Function &F; // The candidate callsite being analyzed. Please do not use this to do // analysis in the caller function; we want the inline cost query to be // easily cacheable. Instead, use the cover function paramHasAttr. CallSite CandidateCS; int Threshold; int Cost; bool IsCallerRecursive; bool IsRecursiveCall; bool ExposesReturnsTwice; bool HasDynamicAlloca; bool ContainsNoDuplicateCall; bool HasReturn; bool HasIndirectBr; bool HasFrameEscape; /// Number of bytes allocated statically by the callee. uint64_t AllocatedSize; unsigned NumInstructions, NumVectorInstructions; int FiftyPercentVectorBonus, TenPercentVectorBonus; int VectorBonus; // While we walk the potentially-inlined instructions, we build up and // maintain a mapping of simplified values specific to this callsite. The // idea is to propagate any special information we have about arguments to // this call through the inlinable section of the function, and account for // likely simplifications post-inlining. The most important aspect we track // is CFG altering simplifications -- when we prove a basic block dead, that // can cause dramatic shifts in the cost of inlining a function. DenseMap<Value *, Constant *> SimplifiedValues; // Keep track of the values which map back (through function arguments) to // allocas on the caller stack which could be simplified through SROA. DenseMap<Value *, Value *> SROAArgValues; // The mapping of caller Alloca values to their accumulated cost savings. If // we have to disable SROA for one of the allocas, this tells us how much // cost must be added. DenseMap<Value *, int> SROAArgCosts; // Keep track of values which map to a pointer base and constant offset. DenseMap<Value *, std::pair<Value *, APInt> > ConstantOffsetPtrs; // Custom simplification helper routines. 
bool isAllocaDerivedArg(Value *V); bool lookupSROAArgAndCost(Value *V, Value *&Arg, DenseMap<Value *, int>::iterator &CostIt); void disableSROA(DenseMap<Value *, int>::iterator CostIt); void disableSROA(Value *V); void accumulateSROACost(DenseMap<Value *, int>::iterator CostIt, int InstructionCost); bool isGEPOffsetConstant(GetElementPtrInst &GEP); bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset); bool simplifyCallSite(Function *F, CallSite CS); ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V); /// Return true if the given argument to the function being considered for /// inlining has the given attribute set either at the call site or the /// function declaration. Primarily used to inspect call site specific /// attributes since these can be more precise than the ones on the callee /// itself. bool paramHasAttr(Argument *A, Attribute::AttrKind Attr); /// Return true if the given value is known non null within the callee if /// inlined through this particular callsite. bool isKnownNonNullInCallee(Value *V); // Custom analysis routines. bool analyzeBlock(BasicBlock *BB, SmallPtrSetImpl<const Value *> &EphValues); // Disable several entry points to the visitor so we don't accidentally use // them by declaring but not defining them here. void visit(Module *); void visit(Module &); void visit(Function *); void visit(Function &); void visit(BasicBlock *); void visit(BasicBlock &); // Provide base case for our instruction visit. bool visitInstruction(Instruction &I); // Our visit overrides. bool visitAlloca(AllocaInst &I); bool visitPHI(PHINode &I); bool visitGetElementPtr(GetElementPtrInst &I); bool visitBitCast(BitCastInst &I); bool visitPtrToInt(PtrToIntInst &I); bool visitIntToPtr(IntToPtrInst &I); bool visitCastInst(CastInst &I); bool visitUnaryInstruction(UnaryInstruction &I); bool visitCmpInst(CmpInst &I); bool visitSub(BinaryOperator &I); bool visitBinaryOperator(BinaryOperator &I); bool visitLoad(LoadInst &I); bool visitStore(StoreInst &I); bool visitExtractValue(ExtractValueInst &I); bool visitInsertValue(InsertValueInst &I); bool visitCallSite(CallSite CS); bool visitReturnInst(ReturnInst &RI); bool visitBranchInst(BranchInst &BI); bool visitSwitchInst(SwitchInst &SI); bool visitIndirectBrInst(IndirectBrInst &IBI); bool visitResumeInst(ResumeInst &RI); bool visitUnreachableInst(UnreachableInst &I); public: CallAnalyzer(const TargetTransformInfo &TTI, AssumptionCacheTracker *ACT, Function &Callee, int Threshold, CallSite CSArg) : TTI(TTI), ACT(ACT), F(Callee), CandidateCS(CSArg), Threshold(Threshold), Cost(0), IsCallerRecursive(false), IsRecursiveCall(false), ExposesReturnsTwice(false), HasDynamicAlloca(false), ContainsNoDuplicateCall(false), HasReturn(false), HasIndirectBr(false), HasFrameEscape(false), AllocatedSize(0), NumInstructions(0), NumVectorInstructions(0), FiftyPercentVectorBonus(0), TenPercentVectorBonus(0), VectorBonus(0), NumConstantArgs(0), NumConstantOffsetPtrArgs(0), NumAllocaArgs(0), NumConstantPtrCmps(0), NumConstantPtrDiffs(0), NumInstructionsSimplified(0), SROACostSavings(0), SROACostSavingsLost(0) {} bool analyzeCall(CallSite CS); int getThreshold() { return Threshold; } int getCost() { return Cost; } // Keep a bunch of stats about the cost savings found so we can print them // out when debugging. 
unsigned NumConstantArgs; unsigned NumConstantOffsetPtrArgs; unsigned NumAllocaArgs; unsigned NumConstantPtrCmps; unsigned NumConstantPtrDiffs; unsigned NumInstructionsSimplified; unsigned SROACostSavings; unsigned SROACostSavingsLost; void dump(); }; } // namespace /// \brief Test whether the given value is an Alloca-derived function argument. bool CallAnalyzer::isAllocaDerivedArg(Value *V) { return SROAArgValues.count(V); } /// \brief Lookup the SROA-candidate argument and cost iterator which V maps to. /// Returns false if V does not map to a SROA-candidate. bool CallAnalyzer::lookupSROAArgAndCost( Value *V, Value *&Arg, DenseMap<Value *, int>::iterator &CostIt) { if (SROAArgValues.empty() || SROAArgCosts.empty()) return false; DenseMap<Value *, Value *>::iterator ArgIt = SROAArgValues.find(V); if (ArgIt == SROAArgValues.end()) return false; Arg = ArgIt->second; CostIt = SROAArgCosts.find(Arg); return CostIt != SROAArgCosts.end(); } /// \brief Disable SROA for the candidate marked by this cost iterator. /// /// This marks the candidate as no longer viable for SROA, and adds the cost /// savings associated with it back into the inline cost measurement. void CallAnalyzer::disableSROA(DenseMap<Value *, int>::iterator CostIt) { // If we're no longer able to perform SROA we need to undo its cost savings // and prevent subsequent analysis. Cost += CostIt->second; SROACostSavings -= CostIt->second; SROACostSavingsLost += CostIt->second; SROAArgCosts.erase(CostIt); } /// \brief If 'V' maps to a SROA candidate, disable SROA for it. void CallAnalyzer::disableSROA(Value *V) { Value *SROAArg; DenseMap<Value *, int>::iterator CostIt; if (lookupSROAArgAndCost(V, SROAArg, CostIt)) disableSROA(CostIt); } /// \brief Accumulate the given cost for a particular SROA candidate. void CallAnalyzer::accumulateSROACost(DenseMap<Value *, int>::iterator CostIt, int InstructionCost) { CostIt->second += InstructionCost; SROACostSavings += InstructionCost; } /// \brief Check whether a GEP's indices are all constant. /// /// Respects any simplified values known during the analysis of this callsite. bool CallAnalyzer::isGEPOffsetConstant(GetElementPtrInst &GEP) { for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I) if (!isa<Constant>(*I) && !SimplifiedValues.lookup(*I)) return false; return true; } /// \brief Accumulate a constant GEP offset into an APInt if possible. /// /// Returns false if unable to compute the offset for any reason. Respects any /// simplified values known during the analysis of this callsite. bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) { const DataLayout &DL = F.getParent()->getDataLayout(); unsigned IntPtrWidth = DL.getPointerSizeInBits(); assert(IntPtrWidth == Offset.getBitWidth()); for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP); GTI != GTE; ++GTI) { ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand()); if (!OpC) if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand())) OpC = dyn_cast<ConstantInt>(SimpleOp); if (!OpC) return false; if (OpC->isZero()) continue; // Handle a struct index, which adds its field offset to the pointer. 
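// (Added worked example, assuming a typical 64-bit DataLayout; %pair is a
// hypothetical type.) For %pair = type { i32, i64 }, a constant index of 1
// contributes the field offset 8 reported by getStructLayout (4 bytes of i32
// plus 4 bytes of padding), whereas array and vector indices below are
// scaled by the allocation size of the indexed element type.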
if (StructType *STy = dyn_cast<StructType>(*GTI)) { unsigned ElementIdx = OpC->getZExtValue(); const StructLayout *SL = DL.getStructLayout(STy); Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx)); continue; } APInt TypeSize(IntPtrWidth, DL.getTypeAllocSize(GTI.getIndexedType())); Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize; } return true; } bool CallAnalyzer::visitAlloca(AllocaInst &I) { // Check whether inlining will turn a dynamic alloca into a static // alloca, and handle that case. if (I.isArrayAllocation()) { if (Constant *Size = SimplifiedValues.lookup(I.getArraySize())) { ConstantInt *AllocSize = dyn_cast<ConstantInt>(Size); assert(AllocSize && "Allocation size not a constant int?"); Type *Ty = I.getAllocatedType(); AllocatedSize += Ty->getPrimitiveSizeInBits() * AllocSize->getZExtValue(); return Base::visitAlloca(I); } } // Accumulate the allocated size. if (I.isStaticAlloca()) { const DataLayout &DL = F.getParent()->getDataLayout(); Type *Ty = I.getAllocatedType(); AllocatedSize += DL.getTypeAllocSize(Ty); } // We will happily inline static alloca instructions. if (I.isStaticAlloca()) return Base::visitAlloca(I); // FIXME: This is overly conservative. Dynamic allocas are inefficient for // a variety of reasons, and so we would like to not inline them into // functions which don't currently have a dynamic alloca. This simply // disables inlining altogether in the presence of a dynamic alloca. HasDynamicAlloca = true; return false; } bool CallAnalyzer::visitPHI(PHINode &I) { // FIXME: We should potentially be tracking values through phi nodes, // especially when they collapse to a single value due to deleted CFG edges // during inlining. // FIXME: We need to propagate SROA *disabling* through phi nodes, even // though we don't want to propagate it's bonuses. The idea is to disable // SROA if it *might* be used in an inappropriate manner. // Phi nodes are always zero-cost. return true; } bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) { Value *SROAArg; DenseMap<Value *, int>::iterator CostIt; bool SROACandidate = lookupSROAArgAndCost(I.getPointerOperand(), SROAArg, CostIt); // Try to fold GEPs of constant-offset call site argument pointers. This // requires target data and inbounds GEPs. if (I.isInBounds()) { // Check if we have a base + offset for the pointer. Value *Ptr = I.getPointerOperand(); std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Ptr); if (BaseAndOffset.first) { // Check if the offset of this GEP is constant, and if so accumulate it // into Offset. if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second)) { // Non-constant GEPs aren't folded, and disable SROA. if (SROACandidate) disableSROA(CostIt); return false; } // Add the result as a new mapping to Base + Offset. ConstantOffsetPtrs[&I] = BaseAndOffset; // Also handle SROA candidates here, we already know that the GEP is // all-constant indexed. if (SROACandidate) SROAArgValues[&I] = SROAArg; return true; } } if (isGEPOffsetConstant(I)) { if (SROACandidate) SROAArgValues[&I] = SROAArg; // Constant GEPs are modeled as free. return true; } // Variable GEPs will require math and will disable SROA. if (SROACandidate) disableSROA(CostIt); return false; } bool CallAnalyzer::visitBitCast(BitCastInst &I) { // Propagate constants through bitcasts. 
Constant *COp = dyn_cast<Constant>(I.getOperand(0)); if (!COp) COp = SimplifiedValues.lookup(I.getOperand(0)); if (COp) if (Constant *C = ConstantExpr::getBitCast(COp, I.getType())) { SimplifiedValues[&I] = C; return true; } // Track base/offsets through casts std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(I.getOperand(0)); // Casts don't change the offset, just wrap it up. if (BaseAndOffset.first) ConstantOffsetPtrs[&I] = BaseAndOffset; // Also look for SROA candidates here. Value *SROAArg; DenseMap<Value *, int>::iterator CostIt; if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) SROAArgValues[&I] = SROAArg; // Bitcasts are always zero cost. return true; } bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) { // Propagate constants through ptrtoint. Constant *COp = dyn_cast<Constant>(I.getOperand(0)); if (!COp) COp = SimplifiedValues.lookup(I.getOperand(0)); if (COp) if (Constant *C = ConstantExpr::getPtrToInt(COp, I.getType())) { SimplifiedValues[&I] = C; return true; } // Track base/offset pairs when converted to a plain integer provided the // integer is large enough to represent the pointer. unsigned IntegerSize = I.getType()->getScalarSizeInBits(); const DataLayout &DL = F.getParent()->getDataLayout(); if (IntegerSize >= DL.getPointerSizeInBits()) { std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(I.getOperand(0)); if (BaseAndOffset.first) ConstantOffsetPtrs[&I] = BaseAndOffset; } // This is really weird. Technically, ptrtoint will disable SROA. However, // unless that ptrtoint is *used* somewhere in the live basic blocks after // inlining, it will be nuked, and SROA should proceed. All of the uses which // would block SROA would also block SROA if applied directly to a pointer, // and so we can just add the integer in here. The only places where SROA is // preserved either cannot fire on an integer, or won't in-and-of themselves // disable SROA (ext) w/o some later use that we would see and disable. Value *SROAArg; DenseMap<Value *, int>::iterator CostIt; if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) SROAArgValues[&I] = SROAArg; return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I); } bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) { // Propagate constants through ptrtoint. Constant *COp = dyn_cast<Constant>(I.getOperand(0)); if (!COp) COp = SimplifiedValues.lookup(I.getOperand(0)); if (COp) if (Constant *C = ConstantExpr::getIntToPtr(COp, I.getType())) { SimplifiedValues[&I] = C; return true; } // Track base/offset pairs when round-tripped through a pointer without // modifications provided the integer is not too large. Value *Op = I.getOperand(0); unsigned IntegerSize = Op->getType()->getScalarSizeInBits(); const DataLayout &DL = F.getParent()->getDataLayout(); if (IntegerSize <= DL.getPointerSizeInBits()) { std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op); if (BaseAndOffset.first) ConstantOffsetPtrs[&I] = BaseAndOffset; } // "Propagate" SROA here in the same manner as we do for ptrtoint above. Value *SROAArg; DenseMap<Value *, int>::iterator CostIt; if (lookupSROAArgAndCost(Op, SROAArg, CostIt)) SROAArgValues[&I] = SROAArg; return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I); } bool CallAnalyzer::visitCastInst(CastInst &I) { // Propagate constants through ptrtoint. 
Constant *COp = dyn_cast<Constant>(I.getOperand(0)); if (!COp) COp = SimplifiedValues.lookup(I.getOperand(0)); if (COp) if (Constant *C = ConstantExpr::getCast(I.getOpcode(), COp, I.getType())) { SimplifiedValues[&I] = C; return true; } // Disable SROA in the face of arbitrary casts we don't whitelist elsewhere. disableSROA(I.getOperand(0)); return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I); } bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) { Value *Operand = I.getOperand(0); Constant *COp = dyn_cast<Constant>(Operand); if (!COp) COp = SimplifiedValues.lookup(Operand); if (COp) { const DataLayout &DL = F.getParent()->getDataLayout(); if (Constant *C = ConstantFoldInstOperands(I.getOpcode(), I.getType(), COp, DL)) { SimplifiedValues[&I] = C; return true; } } // Disable any SROA on the argument to arbitrary unary operators. disableSROA(Operand); return false; } bool CallAnalyzer::paramHasAttr(Argument *A, Attribute::AttrKind Attr) { unsigned ArgNo = A->getArgNo(); return CandidateCS.paramHasAttr(ArgNo+1, Attr); } bool CallAnalyzer::isKnownNonNullInCallee(Value *V) { // Does the *call site* have the NonNull attribute set on an argument? We // use the attribute on the call site to memoize any analysis done in the // caller. This will also trip if the callee function has a non-null // parameter attribute, but that's a less interesting case because hopefully // the callee would already have been simplified based on that. if (Argument *A = dyn_cast<Argument>(V)) if (paramHasAttr(A, Attribute::NonNull)) return true; // Is this an alloca in the caller? This is distinct from the attribute case // above because attributes aren't updated within the inliner itself and we // always want to catch the alloca derived case. if (isAllocaDerivedArg(V)) // We can actually predict the result of comparisons between an // alloca-derived value and null. Note that this fires regardless of // SROA firing. return true; return false; } bool CallAnalyzer::visitCmpInst(CmpInst &I) { Value *LHS = I.getOperand(0), *RHS = I.getOperand(1); // First try to handle simplified comparisons. if (!isa<Constant>(LHS)) if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS)) LHS = SimpleLHS; if (!isa<Constant>(RHS)) if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS)) RHS = SimpleRHS; if (Constant *CLHS = dyn_cast<Constant>(LHS)) { if (Constant *CRHS = dyn_cast<Constant>(RHS)) if (Constant *C = ConstantExpr::getCompare(I.getPredicate(), CLHS, CRHS)) { SimplifiedValues[&I] = C; return true; } } if (I.getOpcode() == Instruction::FCmp) return false; // Otherwise look for a comparison between constant offset pointers with // a common base. Value *LHSBase, *RHSBase; APInt LHSOffset, RHSOffset; std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS); if (LHSBase) { std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS); if (RHSBase && LHSBase == RHSBase) { // We have common bases, fold the icmp to a constant based on the // offsets. 
Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset); Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset); if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) { SimplifiedValues[&I] = C; ++NumConstantPtrCmps; return true; } } } // If the comparison is an equality comparison with null, we can simplify it // if we know the value (argument) can't be null if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)) && isKnownNonNullInCallee(I.getOperand(0))) { bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE; SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType()) : ConstantInt::getFalse(I.getType()); return true; } // Finally check for SROA candidates in comparisons. Value *SROAArg; DenseMap<Value *, int>::iterator CostIt; if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) { if (isa<ConstantPointerNull>(I.getOperand(1))) { accumulateSROACost(CostIt, InlineConstants::InstrCost); return true; } disableSROA(CostIt); } return false; } bool CallAnalyzer::visitSub(BinaryOperator &I) { // Try to handle a special case: we can fold computing the difference of two // constant-related pointers. Value *LHS = I.getOperand(0), *RHS = I.getOperand(1); Value *LHSBase, *RHSBase; APInt LHSOffset, RHSOffset; std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS); if (LHSBase) { std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS); if (RHSBase && LHSBase == RHSBase) { // We have common bases, fold the subtract to a constant based on the // offsets. Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset); Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset); if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) { SimplifiedValues[&I] = C; ++NumConstantPtrDiffs; return true; } } } // Otherwise, fall back to the generic logic for simplifying and handling // instructions. return Base::visitSub(I); } bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) { Value *LHS = I.getOperand(0), *RHS = I.getOperand(1); const DataLayout &DL = F.getParent()->getDataLayout(); if (!isa<Constant>(LHS)) if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS)) LHS = SimpleLHS; if (!isa<Constant>(RHS)) if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS)) RHS = SimpleRHS; Value *SimpleV = nullptr; if (auto FI = dyn_cast<FPMathOperator>(&I)) SimpleV = SimplifyFPBinOp(I.getOpcode(), LHS, RHS, FI->getFastMathFlags(), DL); else SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, DL); if (Constant *C = dyn_cast_or_null<Constant>(SimpleV)) { SimplifiedValues[&I] = C; return true; } // Disable any SROA on arguments to arbitrary, unsimplified binary operators. disableSROA(LHS); disableSROA(RHS); return false; } bool CallAnalyzer::visitLoad(LoadInst &I) { Value *SROAArg; DenseMap<Value *, int>::iterator CostIt; if (lookupSROAArgAndCost(I.getPointerOperand(), SROAArg, CostIt)) { if (I.isSimple()) { accumulateSROACost(CostIt, InlineConstants::InstrCost); return true; } disableSROA(CostIt); } return false; } bool CallAnalyzer::visitStore(StoreInst &I) { Value *SROAArg; DenseMap<Value *, int>::iterator CostIt; if (lookupSROAArgAndCost(I.getPointerOperand(), SROAArg, CostIt)) { if (I.isSimple()) { accumulateSROACost(CostIt, InlineConstants::InstrCost); return true; } disableSROA(CostIt); } return false; } bool CallAnalyzer::visitExtractValue(ExtractValueInst &I) { // Constant folding for extract value is trivial. 
Constant *C = dyn_cast<Constant>(I.getAggregateOperand()); if (!C) C = SimplifiedValues.lookup(I.getAggregateOperand()); if (C) { SimplifiedValues[&I] = ConstantExpr::getExtractValue(C, I.getIndices()); return true; } // SROA can look through these but give them a cost. return false; } bool CallAnalyzer::visitInsertValue(InsertValueInst &I) { // Constant folding for insert value is trivial. Constant *AggC = dyn_cast<Constant>(I.getAggregateOperand()); if (!AggC) AggC = SimplifiedValues.lookup(I.getAggregateOperand()); Constant *InsertedC = dyn_cast<Constant>(I.getInsertedValueOperand()); if (!InsertedC) InsertedC = SimplifiedValues.lookup(I.getInsertedValueOperand()); if (AggC && InsertedC) { SimplifiedValues[&I] = ConstantExpr::getInsertValue(AggC, InsertedC, I.getIndices()); return true; } // SROA can look through these but give them a cost. return false; } /// \brief Try to simplify a call site. /// /// Takes a concrete function and callsite and tries to actually simplify it by /// analyzing the arguments and call itself with instsimplify. Returns true if /// it has simplified the callsite to some other entity (a constant), making it /// free. bool CallAnalyzer::simplifyCallSite(Function *F, CallSite CS) { // FIXME: Using the instsimplify logic directly for this is inefficient // because we have to continually rebuild the argument list even when no // simplifications can be performed. Until that is fixed with remapping // inside of instsimplify, directly constant fold calls here. if (!canConstantFoldCallTo(F)) return false; // Try to re-map the arguments to constants. SmallVector<Constant *, 4> ConstantArgs; ConstantArgs.reserve(CS.arg_size()); for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end(); I != E; ++I) { Constant *C = dyn_cast<Constant>(*I); if (!C) C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(*I)); if (!C) return false; // This argument doesn't map to a constant. ConstantArgs.push_back(C); } if (Constant *C = ConstantFoldCall(F, ConstantArgs)) { SimplifiedValues[CS.getInstruction()] = C; return true; } return false; } bool CallAnalyzer::visitCallSite(CallSite CS) { if (CS.hasFnAttr(Attribute::ReturnsTwice) && !F.hasFnAttribute(Attribute::ReturnsTwice)) { // This aborts the entire analysis. ExposesReturnsTwice = true; return false; } if (CS.isCall() && cast<CallInst>(CS.getInstruction())->cannotDuplicate()) ContainsNoDuplicateCall = true; if (Function *F = CS.getCalledFunction()) { // When we have a concrete function, first try to simplify it directly. if (simplifyCallSite(F, CS)) return true; // Next check if it is an intrinsic we know about. // FIXME: Lift this into part of the InstVisitor. if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) { switch (II->getIntrinsicID()) { default: return Base::visitCallSite(CS); case Intrinsic::memset: case Intrinsic::memcpy: case Intrinsic::memmove: // SROA can usually chew through these intrinsics, but they aren't free. return false; case Intrinsic::localescape: HasFrameEscape = true; return false; } } if (F == CS.getInstruction()->getParent()->getParent()) { // This flag will fully abort the analysis, so don't bother with anything // else. IsRecursiveCall = true; return false; } if (TTI.isLoweredToCall(F)) { // We account for the average 1 instruction per call argument setup // here. Cost += CS.arg_size() * InlineConstants::InstrCost; // Everything other than inline ASM will also have a significant cost // merely from making the call. 
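// (Added note; at the time of writing InstrCost is 5 and CallPenalty is 25
// in InlineConstants.) For a lowered call with three arguments this adds
// 3 * 5 = 15 above and another 25 just below, i.e. 40 in total.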
if (!isa<InlineAsm>(CS.getCalledValue())) Cost += InlineConstants::CallPenalty; } return Base::visitCallSite(CS); } // Otherwise we're in a very special case -- an indirect function call. See // if we can be particularly clever about this. Value *Callee = CS.getCalledValue(); // First, pay the price of the argument setup. We account for the average // 1 instruction per call argument setup here. Cost += CS.arg_size() * InlineConstants::InstrCost; // Next, check if this happens to be an indirect function call to a known // function in this inline context. If not, we've done all we can. Function *F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee)); if (!F) return Base::visitCallSite(CS); // If we have a constant that we are calling as a function, we can peer // through it and see the function target. This happens not infrequently // during devirtualization and so we want to give it a hefty bonus for // inlining, but cap that bonus in the event that inlining wouldn't pan // out. Pretend to inline the function, with a custom threshold. CallAnalyzer CA(TTI, ACT, *F, InlineConstants::IndirectCallThreshold, CS); if (CA.analyzeCall(CS)) { // We were able to inline the indirect call! Subtract the cost from the // bonus we want to apply, but don't go below zero. Cost -= std::max(0, InlineConstants::IndirectCallThreshold - CA.getCost()); } return Base::visitCallSite(CS); } bool CallAnalyzer::visitReturnInst(ReturnInst &RI) { // At least one return instruction will be free after inlining. bool Free = !HasReturn; HasReturn = true; return Free; } bool CallAnalyzer::visitBranchInst(BranchInst &BI) { // We model unconditional branches as essentially free -- they really // shouldn't exist at all, but handling them makes the behavior of the // inliner more regular and predictable. Interestingly, conditional branches // which will fold away are also free. return BI.isUnconditional() || isa<ConstantInt>(BI.getCondition()) || dyn_cast_or_null<ConstantInt>( SimplifiedValues.lookup(BI.getCondition())); } bool CallAnalyzer::visitSwitchInst(SwitchInst &SI) { // We model unconditional switches as free, see the comments on handling // branches. if (isa<ConstantInt>(SI.getCondition())) return true; if (Value *V = SimplifiedValues.lookup(SI.getCondition())) if (isa<ConstantInt>(V)) return true; // Otherwise, we need to accumulate a cost proportional to the number of // distinct successor blocks. This fan-out in the CFG cannot be represented // for free even if we can represent the core switch as a jumptable that // takes a single instruction. // // NB: We convert large switches which are just used to initialize large phi // nodes to lookup tables instead in simplify-cfg, so this shouldn't prevent // inlining those. It will prevent inlining in cases where the optimization // does not (yet) fire. SmallPtrSet<BasicBlock *, 8> SuccessorBlocks; SuccessorBlocks.insert(SI.getDefaultDest()); for (auto I = SI.case_begin(), E = SI.case_end(); I != E; ++I) SuccessorBlocks.insert(I.getCaseSuccessor()); // Add cost corresponding to the number of distinct destinations. The first // we model as free because of fallthrough. Cost += (SuccessorBlocks.size() - 1) * InlineConstants::InstrCost; return false; } bool CallAnalyzer::visitIndirectBrInst(IndirectBrInst &IBI) { // We never want to inline functions that contain an indirectbr. 
This is // incorrect because all the blockaddress's (in static global initializers // for example) would be referring to the original function, and this // indirect jump would jump from the inlined copy of the function into the // original function which is extremely undefined behavior. // FIXME: This logic isn't really right; we can safely inline functions with // indirectbr's as long as no other function or global references the // blockaddress of a block within the current function. HasIndirectBr = true; return false; } bool CallAnalyzer::visitResumeInst(ResumeInst &RI) { // FIXME: It's not clear that a single instruction is an accurate model for // the inline cost of a resume instruction. return false; } bool CallAnalyzer::visitUnreachableInst(UnreachableInst &I) { // FIXME: It might be reasonably to discount the cost of instructions leading // to unreachable as they have the lowest possible impact on both runtime and // code size. return true; // No actual code is needed for unreachable. } bool CallAnalyzer::visitInstruction(Instruction &I) { // Some instructions are free. All of the free intrinsics can also be // handled by SROA, etc. if (TargetTransformInfo::TCC_Free == TTI.getUserCost(&I)) return true; // We found something we don't understand or can't handle. Mark any SROA-able // values in the operand list as no longer viable. for (User::op_iterator OI = I.op_begin(), OE = I.op_end(); OI != OE; ++OI) disableSROA(*OI); return false; } /// \brief Analyze a basic block for its contribution to the inline cost. /// /// This method walks the analyzer over every instruction in the given basic /// block and accounts for their cost during inlining at this callsite. It /// aborts early if the threshold has been exceeded or an impossible to inline /// construct has been detected. It returns false if inlining is no longer /// viable, and true if inlining remains viable. bool CallAnalyzer::analyzeBlock(BasicBlock *BB, SmallPtrSetImpl<const Value *> &EphValues) { for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) { // FIXME: Currently, the number of instructions in a function regardless of // our ability to simplify them during inline to constants or dead code, // are actually used by the vector bonus heuristic. As long as that's true, // we have to special case debug intrinsics here to prevent differences in // inlining due to debug symbols. Eventually, the number of unsimplified // instructions shouldn't factor into the cost computation, but until then, // hack around it here. if (isa<DbgInfoIntrinsic>(I)) continue; // Skip ephemeral values. if (EphValues.count(I)) continue; ++NumInstructions; if (isa<ExtractElementInst>(I) || I->getType()->isVectorTy()) ++NumVectorInstructions; // If the instruction is floating point, and the target says this operation is // expensive or the function has the "use-soft-float" attribute, this may // eventually become a library call. Treat the cost as such. if (I->getType()->isFloatingPointTy()) { bool hasSoftFloatAttr = false; // If the function has the "use-soft-float" attribute, mark it as expensive. if (F.hasFnAttribute("use-soft-float")) { Attribute Attr = F.getFnAttribute("use-soft-float"); StringRef Val = Attr.getValueAsString(); if (Val == "true") hasSoftFloatAttr = true; } if (TTI.getFPOpCost(I->getType()) == TargetTransformInfo::TCC_Expensive || hasSoftFloatAttr) Cost += InlineConstants::CallPenalty; } // If the instruction simplified to a constant, there is no cost to this // instruction. 
Visit the instructions using our InstVisitor to account for // all of the per-instruction logic. The visit tree returns true if we // consumed the instruction in any way, and false if the instruction's base // cost should count against inlining. if (Base::visit(I)) ++NumInstructionsSimplified; else Cost += InlineConstants::InstrCost; // If the visit this instruction detected an uninlinable pattern, abort. if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca || HasIndirectBr || HasFrameEscape) return false; // If the caller is a recursive function then we don't want to inline // functions which allocate a lot of stack space because it would increase // the caller stack usage dramatically. if (IsCallerRecursive && AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller) return false; // Check if we've past the maximum possible threshold so we don't spin in // huge basic blocks that will never inline. if (Cost > Threshold) return false; } return true; } /// \brief Compute the base pointer and cumulative constant offsets for V. /// /// This strips all constant offsets off of V, leaving it the base pointer, and /// accumulates the total constant offset applied in the returned constant. It /// returns 0 if V is not a pointer, and returns the constant '0' if there are /// no constant offsets applied. ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) { if (!V->getType()->isPointerTy()) return nullptr; const DataLayout &DL = F.getParent()->getDataLayout(); unsigned IntPtrWidth = DL.getPointerSizeInBits(); APInt Offset = APInt::getNullValue(IntPtrWidth); // Even though we don't look through PHI nodes, we could be called on an // instruction in an unreachable block, which may be on a cycle. SmallPtrSet<Value *, 4> Visited; Visited.insert(V); do { if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset)) return nullptr; V = GEP->getPointerOperand(); } else if (Operator::getOpcode(V) == Instruction::BitCast) { V = cast<Operator>(V)->getOperand(0); } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { if (GA->mayBeOverridden()) break; V = GA->getAliasee(); } else { break; } assert(V->getType()->isPointerTy() && "Unexpected operand type!"); } while (Visited.insert(V).second); Type *IntPtrTy = DL.getIntPtrType(V->getContext()); return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset)); } /// \brief Analyze a call site for potential inlining. /// /// Returns true if inlining this call is viable, and false if it is not /// viable. It computes the cost and adjusts the threshold based on numerous /// factors and heuristics. If this method returns false but the computed cost /// is below the computed threshold, then inlining was forcibly disabled by /// some artifact of the routine. bool CallAnalyzer::analyzeCall(CallSite CS) { ++NumCallsAnalyzed; // Perform some tweaks to the cost and threshold based on the direct // callsite information. // We want to more aggressively inline vector-dense kernels, so up the // threshold, and we'll lower it if the % of vector instructions gets too // low. Note that these bonuses are some what arbitrary and evolved over time // by accident as much as because they are principled bonuses. // // FIXME: It would be nice to remove all such bonuses. At least it would be // nice to base the bonus values on something more scientific. 
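// (Added illustration, assuming the customary default threshold of 225.)
// FiftyPercentVectorBonus becomes 3 * 225 / 2 = 337 and
// TenPercentVectorBonus 3 * 225 / 4 = 168; together with
// SingleBBBonus = 225 / 2 = 112 the speculative threshold used below starts
// out at 225 + 112 + 337 = 674.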
assert(NumInstructions == 0); assert(NumVectorInstructions == 0); FiftyPercentVectorBonus = 3 * Threshold / 2; TenPercentVectorBonus = 3 * Threshold / 4; const DataLayout &DL = F.getParent()->getDataLayout(); // Track whether the post-inlining function would have more than one basic // block. A single basic block is often intended for inlining. Balloon the // threshold by 50% until we pass the single-BB phase. bool SingleBB = true; int SingleBBBonus = Threshold / 2; // Speculatively apply all possible bonuses to Threshold. If cost exceeds // this Threshold any time, and cost cannot decrease, we can stop processing // the rest of the function body. Threshold += (SingleBBBonus + FiftyPercentVectorBonus); // Give out bonuses per argument, as the instructions setting them up will // be gone after inlining. for (unsigned I = 0, E = CS.arg_size(); I != E; ++I) { if (CS.isByValArgument(I)) { // We approximate the number of loads and stores needed by dividing the // size of the byval type by the target's pointer size. PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType()); unsigned TypeSize = DL.getTypeSizeInBits(PTy->getElementType()); unsigned PointerSize = DL.getPointerSizeInBits(); // Ceiling division. unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize; // If it generates more than 8 stores it is likely to be expanded as an // inline memcpy so we take that as an upper bound. Otherwise we assume // one load and one store per word copied. // FIXME: The maxStoresPerMemcpy setting from the target should be used // here instead of a magic number of 8, but it's not available via // DataLayout. NumStores = std::min(NumStores, 8U); Cost -= 2 * NumStores * InlineConstants::InstrCost; } else { // For non-byval arguments subtract off one instruction per call // argument. Cost -= InlineConstants::InstrCost; } } // If there is only one call of the function, and it has internal linkage, // the cost of inlining it drops dramatically. bool OnlyOneCallAndLocalLinkage = F.hasLocalLinkage() && F.hasOneUse() && &F == CS.getCalledFunction(); if (OnlyOneCallAndLocalLinkage) Cost += InlineConstants::LastCallToStaticBonus; // If the instruction after the call, or if the normal destination of the // invoke is an unreachable instruction, the function is noreturn. As such, // there is little point in inlining this unless there is literally zero // cost. Instruction *Instr = CS.getInstruction(); if (InvokeInst *II = dyn_cast<InvokeInst>(Instr)) { if (isa<UnreachableInst>(II->getNormalDest()->begin())) Threshold = 0; } else if (isa<UnreachableInst>(++BasicBlock::iterator(Instr))) Threshold = 0; // If this function uses the coldcc calling convention, prefer not to inline // it. if (F.getCallingConv() == CallingConv::Cold) Cost += InlineConstants::ColdccPenalty; // Check if we're done. This can happen due to bonuses and penalties. if (Cost > Threshold) return false; if (F.empty()) return true; Function *Caller = CS.getInstruction()->getParent()->getParent(); // Check if the caller function is recursive itself. for (User *U : Caller->users()) { CallSite Site(U); if (!Site) continue; Instruction *I = Site.getInstruction(); if (I->getParent()->getParent() == Caller) { IsCallerRecursive = true; break; } } // Populate our simplified values by mapping from function arguments to call // arguments with known important simplifications. 
CallSite::arg_iterator CAI = CS.arg_begin(); for (Function::arg_iterator FAI = F.arg_begin(), FAE = F.arg_end(); FAI != FAE; ++FAI, ++CAI) { assert(CAI != CS.arg_end()); if (Constant *C = dyn_cast<Constant>(CAI)) SimplifiedValues[FAI] = C; Value *PtrArg = *CAI; if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) { ConstantOffsetPtrs[FAI] = std::make_pair(PtrArg, C->getValue()); // We can SROA any pointer arguments derived from alloca instructions. if (isa<AllocaInst>(PtrArg)) { SROAArgValues[FAI] = PtrArg; SROAArgCosts[PtrArg] = 0; } } } NumConstantArgs = SimplifiedValues.size(); NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size(); NumAllocaArgs = SROAArgValues.size(); // FIXME: If a caller has multiple calls to a callee, we end up recomputing // the ephemeral values multiple times (and they're completely determined by // the callee, so this is purely duplicate work). SmallPtrSet<const Value *, 32> EphValues; CodeMetrics::collectEphemeralValues(&F, &ACT->getAssumptionCache(F), EphValues); // The worklist of live basic blocks in the callee *after* inlining. We avoid // adding basic blocks of the callee which can be proven to be dead for this // particular call site in order to get more accurate cost estimates. This // requires a somewhat heavyweight iteration pattern: we need to walk the // basic blocks in a breadth-first order as we insert live successors. To // accomplish this, prioritizing for small iterations because we exit after // crossing our threshold, we use a small-size optimized SetVector. typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>, SmallPtrSet<BasicBlock *, 16> > BBSetVector; BBSetVector BBWorklist; BBWorklist.insert(&F.getEntryBlock()); // Note that we *must not* cache the size, this loop grows the worklist. for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) { // Bail out the moment we cross the threshold. This means we'll under-count // the cost, but only when undercounting doesn't matter. if (Cost > Threshold) break; BasicBlock *BB = BBWorklist[Idx]; if (BB->empty()) continue; // Disallow inlining a blockaddress. A blockaddress only has defined // behavior for an indirect branch in the same function, and we do not // currently support inlining indirect branches. But, the inliner may not // see an indirect branch that ends up being dead code at a particular call // site. If the blockaddress escapes the function, e.g., via a global // variable, inlining may lead to an invalid cross-function reference. if (BB->hasAddressTaken()) return false; // Analyze the cost of this block. If we blow through the threshold, this // returns false, and we can bail on out. if (!analyzeBlock(BB, EphValues)) { if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca || HasIndirectBr || HasFrameEscape) return false; // If the caller is a recursive function then we don't want to inline // functions which allocate a lot of stack space because it would increase // the caller stack usage dramatically. if (IsCallerRecursive && AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller) return false; break; } TerminatorInst *TI = BB->getTerminator(); // Add in the live successors by first checking whether we have terminator // that may be simplified based on the values simplified by this call. if (BranchInst *BI = dyn_cast<BranchInst>(TI)) { if (BI->isConditional()) { Value *Cond = BI->getCondition(); if (ConstantInt *SimpleCond = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) { BBWorklist.insert(BI->getSuccessor(SimpleCond->isZero() ? 
1 : 0)); continue; } } } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) { Value *Cond = SI->getCondition(); if (ConstantInt *SimpleCond = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) { BBWorklist.insert(SI->findCaseValue(SimpleCond).getCaseSuccessor()); continue; } } // If we're unable to select a particular successor, just count all of // them. for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize; ++TIdx) BBWorklist.insert(TI->getSuccessor(TIdx)); // If we had any successors at this point, than post-inlining is likely to // have them as well. Note that we assume any basic blocks which existed // due to branches or switches which folded above will also fold after // inlining. if (SingleBB && TI->getNumSuccessors() > 1) { // Take off the bonus we applied to the threshold. Threshold -= SingleBBBonus; SingleBB = false; } } // If this is a noduplicate call, we can still inline as long as // inlining this would cause the removal of the caller (so the instruction // is not actually duplicated, just moved). if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall) return false; // We applied the maximum possible vector bonus at the beginning. Now, // subtract the excess bonus, if any, from the Threshold before // comparing against Cost. if (NumVectorInstructions <= NumInstructions / 10) Threshold -= FiftyPercentVectorBonus; else if (NumVectorInstructions <= NumInstructions / 2) Threshold -= (FiftyPercentVectorBonus - TenPercentVectorBonus); return Cost < Threshold; } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) /// \brief Dump stats about this call's analysis. void CallAnalyzer::dump() { #define DEBUG_PRINT_STAT(x) dbgs() << " " #x ": " << x << "\n" DEBUG_PRINT_STAT(NumConstantArgs); DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs); DEBUG_PRINT_STAT(NumAllocaArgs); DEBUG_PRINT_STAT(NumConstantPtrCmps); DEBUG_PRINT_STAT(NumConstantPtrDiffs); DEBUG_PRINT_STAT(NumInstructionsSimplified); DEBUG_PRINT_STAT(NumInstructions); DEBUG_PRINT_STAT(SROACostSavings); DEBUG_PRINT_STAT(SROACostSavingsLost); DEBUG_PRINT_STAT(ContainsNoDuplicateCall); DEBUG_PRINT_STAT(Cost); DEBUG_PRINT_STAT(Threshold); #undef DEBUG_PRINT_STAT } #endif INITIALIZE_PASS_BEGIN(InlineCostAnalysis, "inline-cost", "Inline Cost Analysis", true, true) INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_END(InlineCostAnalysis, "inline-cost", "Inline Cost Analysis", true, true) char InlineCostAnalysis::ID = 0; InlineCostAnalysis::InlineCostAnalysis() : CallGraphSCCPass(ID) {} InlineCostAnalysis::~InlineCostAnalysis() {} void InlineCostAnalysis::getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesAll(); AU.addRequired<AssumptionCacheTracker>(); AU.addRequired<TargetTransformInfoWrapperPass>(); CallGraphSCCPass::getAnalysisUsage(AU); } bool InlineCostAnalysis::runOnSCC(CallGraphSCC &SCC) { TTIWP = &getAnalysis<TargetTransformInfoWrapperPass>(); ACT = &getAnalysis<AssumptionCacheTracker>(); return false; } InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, int Threshold) { return getInlineCost(CS, CS.getCalledFunction(), Threshold); } /// \brief Test that two functions either have or have not the given attribute /// at the same time. template<typename AttrKind> static bool attributeMatches(Function *F1, Function *F2, AttrKind Attr) { return F1->getFnAttribute(Attr) == F2->getFnAttribute(Attr); } /// \brief Test that there are no attribute conflicts between Caller and Callee /// that prevent inlining. 
static bool functionsHaveCompatibleAttributes(Function *Caller, Function *Callee, TargetTransformInfo &TTI) { return TTI.hasCompatibleFunctionAttributes(Caller, Callee) && attributeMatches(Caller, Callee, Attribute::SanitizeAddress) && attributeMatches(Caller, Callee, Attribute::SanitizeMemory) && attributeMatches(Caller, Callee, Attribute::SanitizeThread); } InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, Function *Callee, int Threshold) { // Cannot inline indirect calls. if (!Callee) return llvm::InlineCost::getNever(); // Calls to functions with always-inline attributes should be inlined // whenever possible. if (CS.hasFnAttr(Attribute::AlwaysInline)) { if (isInlineViable(*Callee)) return llvm::InlineCost::getAlways(); return llvm::InlineCost::getNever(); } // Never inline functions with conflicting attributes (unless callee has // always-inline attribute). if (!functionsHaveCompatibleAttributes(CS.getCaller(), Callee, TTIWP->getTTI(*Callee))) return llvm::InlineCost::getNever(); // Don't inline this call if the caller has the optnone attribute. if (CS.getCaller()->hasFnAttribute(Attribute::OptimizeNone)) return llvm::InlineCost::getNever(); // Don't inline functions which can be redefined at link-time to mean // something else. Don't inline functions marked noinline or call sites // marked noinline. if (Callee->mayBeOverridden() || Callee->hasFnAttribute(Attribute::NoInline) || CS.isNoInline()) return llvm::InlineCost::getNever(); DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName() << "...\n"); CallAnalyzer CA(TTIWP->getTTI(*Callee), ACT, *Callee, Threshold, CS); bool ShouldInline = CA.analyzeCall(CS); DEBUG(CA.dump()); // Check if there was a reason to force inlining or no inlining. if (!ShouldInline && CA.getCost() < CA.getThreshold()) return InlineCost::getNever(); if (ShouldInline && CA.getCost() >= CA.getThreshold()) return InlineCost::getAlways(); return llvm::InlineCost::get(CA.getCost(), CA.getThreshold()); } bool InlineCostAnalysis::isInlineViable(Function &F) { bool ReturnsTwice = F.hasFnAttribute(Attribute::ReturnsTwice); for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) { // Disallow inlining of functions which contain indirect branches or // blockaddresses. if (isa<IndirectBrInst>(BI->getTerminator()) || BI->hasAddressTaken()) return false; for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE; ++II) { CallSite CS(II); if (!CS) continue; // Disallow recursive calls. if (&F == CS.getCalledFunction()) return false; // Disallow calls which expose returns-twice to a function not previously // attributed as such. if (!ReturnsTwice && CS.isCall() && cast<CallInst>(CS.getInstruction())->canReturnTwice()) return false; // Disallow inlining functions that call @llvm.localescape. Doing this // correctly would require major changes to the inliner. if (CS.getCalledFunction() && CS.getCalledFunction()->getIntrinsicID() == llvm::Intrinsic::localescape) return false; } } return true; }
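The byval-argument bonus that CallAnalyzer::analyzeCall applies above is the one piece of arithmetic in this file that is easy to misread, so the following standalone sketch restates just that computation. It is illustrative only: EstimateByValCostReduction is a hypothetical helper that does not exist in InlineCost.cpp, and the InstrCost parameter stands in for InlineConstants::InstrCost.

#include <algorithm>
#include <cstdint>

// Sketch of the byval bonus from CallAnalyzer::analyzeCall: copying a byval
// argument is modeled as one load and one store per pointer-sized word,
// capped at 8 stores (past that an inline memcpy expansion is assumed), and
// since that copy disappears after inlining, the estimate is subtracted from
// the callsite cost.
int64_t EstimateByValCostReduction(uint64_t ByValSizeInBits,
                                   uint64_t PointerSizeInBits,
                                   int64_t InstrCost) {
  // Ceiling division, exactly as in analyzeCall.
  uint64_t NumStores =
      (ByValSizeInBits + PointerSizeInBits - 1) / PointerSizeInBits;
  NumStores = std::min<uint64_t>(NumStores, 8);
  return 2 * static_cast<int64_t>(NumStores) * InstrCost;
}

For example, a 64-byte struct passed byval on a 64-bit target yields 8 stores, so the estimated callsite cost drops by 16 * InstrCost; larger structs are capped at the same reduction.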
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxilCompression/DxilCompression.cpp
///////////////////////////////////////////////////////////////////////////////
//                                                                           //
// DxilCompression.cpp                                                       //
// Copyright (C) Microsoft Corporation. All rights reserved.                 //
// This file is distributed under the University of Illinois Open Source     //
// License. See LICENSE.TXT for details.                                     //
//                                                                           //
///////////////////////////////////////////////////////////////////////////////
//
// Helper wrapper functions for zlib deflate and inflate. Entirely
// self-contained, only depends on IMalloc interface.
//

#include "dxc/DxilCompression/DxilCompression.h"
#include "dxc/Support/Global.h"
#include "dxc/Support/WinIncludes.h"

#include "miniz.h"

typedef size_t ZlibSize_t;
typedef const Bytef ZlibInputBytesf;

namespace {

//
// A resource management class for a zlib stream that calls the appropriate
// init and end routines.
//
class Zlib {
public:
  enum Operation { INFLATE, DEFLATE };

  Zlib(Operation Op, IMalloc *pAllocator)
      : m_Stream{}, m_Op(Op), m_Initialized(false) {
    m_Stream = {};
    if (pAllocator) {
      m_Stream.zalloc = ZAlloc;
      m_Stream.zfree = ZFree;
      m_Stream.opaque = pAllocator;
    }

    int ret = Z_ERRNO;
    if (Op == INFLATE) {
      ret = inflateInit(&m_Stream);
    } else {
      ret = deflateInit(&m_Stream, Z_DEFAULT_COMPRESSION);
    }

    if (ret != Z_OK) {
      m_InitializationResult = TranslateZlibResult(ret);
      return;
    }

    m_Initialized = true;
  }

  ~Zlib() {
    if (m_Initialized) {
      if (m_Op == INFLATE) {
        inflateEnd(&m_Stream);
      } else {
        deflateEnd(&m_Stream);
      }
    }
  }

  z_stream *GetStream() {
    if (m_Initialized)
      return &m_Stream;
    return nullptr;
  }

  hlsl::ZlibResult GetInitializationResult() const {
    return m_InitializationResult;
  }

  static hlsl::ZlibResult TranslateZlibResult(int zlibResult) {
    switch (zlibResult) {
    default:
      return hlsl::ZlibResult::InvalidData;
    case Z_MEM_ERROR:
    case Z_BUF_ERROR:
      return hlsl::ZlibResult::OutOfMemory;
    }
  }

private:
  z_stream m_Stream;
  Operation m_Op;
  bool m_Initialized;
  hlsl::ZlibResult m_InitializationResult = hlsl::ZlibResult::Success;

  static void *ZAlloc(void *context, ZlibSize_t items, ZlibSize_t size) {
    IMalloc *mallocif = (IMalloc *)context;
    return mallocif->Alloc(items * size);
  }

  static void ZFree(void *context, void *pointer) {
    IMalloc *mallocif = (IMalloc *)context;
    mallocif->Free(pointer);
  }
};

} // namespace

hlsl::ZlibResult hlsl::ZlibDecompress(IMalloc *pMalloc,
                                      const void *pCompressedBuffer,
                                      size_t BufferSizeInBytes,
                                      void *pUncompressedBuffer,
                                      size_t UncompressedBufferSize) {
  Zlib zlib(Zlib::INFLATE, pMalloc);

  z_stream *pStream = zlib.GetStream();
  if (!pStream)
    return zlib.GetInitializationResult();

  pStream->avail_in = BufferSizeInBytes;
  pStream->next_in = (ZlibInputBytesf *)pCompressedBuffer;
  pStream->next_out = (Byte *)pUncompressedBuffer;
  pStream->avail_out = UncompressedBufferSize;

  // Decompression should finish in one call because the caller provides a
  // buffer sized for the entire uncompressed payload.
  int status = inflate(pStream, Z_FINISH);
  if (status != Z_STREAM_END) {
    return Zlib::TranslateZlibResult(status);
  }

  return ZlibResult::Success;
}

hlsl::ZlibResult hlsl::ZlibCompress(IMalloc *pMalloc, const void *pData,
                                    size_t pDataSize, void *pUserData,
                                    ZlibCallbackFn *Callback,
                                    size_t *pOutCompressedSize) {
  Zlib zlib(Zlib::DEFLATE, pMalloc);

  z_stream *pStream = zlib.GetStream();
  if (!pStream)
    return zlib.GetInitializationResult();

  const size_t UpperBound = deflateBound(pStream, pDataSize);
  void *pDestBuffer = Callback(pUserData, UpperBound);
  if (!pDestBuffer)
    return ZlibResult::OutOfMemory;

  pStream->next_in = (ZlibInputBytesf *)pData;
  pStream->avail_in = pDataSize;
  pStream->next_out = (Byte *)pDestBuffer;
  pStream->avail_out = UpperBound;

  int status = deflate(pStream, Z_FINISH);
  if (status != Z_STREAM_END) {
    return Zlib::TranslateZlibResult(status);
  }

  *pOutCompressedSize = pStream->total_out;
  return ZlibResult::Success;
}
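A minimal round-trip sketch of how a caller might drive these two wrappers. It is not part of DxilCompression.cpp: RoundTrip and AllocateOutputBuffer are hypothetical, the callback shape (user pointer plus required size, returning the destination buffer) is inferred from the Callback(pUserData, UpperBound) call site rather than from the ZlibCallbackFn typedef, and passing nullptr for the IMalloc relies on the Zlib constructor leaving zalloc/zfree unset so that miniz falls back to its default allocator (see mz_deflateInit2/mz_inflateInit2 in miniz.c below). Note that the caller must already know the exact uncompressed size, since ZlibDecompress never grows its output buffer.

#include <string>
#include <vector>

#include "dxc/DxilCompression/DxilCompression.h"

// Hypothetical callback: the compressor asks for a destination buffer of the
// worst-case (deflateBound) size and we hand back storage owned by the caller.
static void *AllocateOutputBuffer(void *pUserData, size_t RequiredSize) {
  std::vector<char> *pBuffer = static_cast<std::vector<char> *>(pUserData);
  pBuffer->resize(RequiredSize);
  return pBuffer->data();
}

static bool RoundTrip(const std::string &Payload) {
  std::vector<char> Compressed;
  size_t CompressedSize = 0;
  // nullptr lets miniz supply its own allocator instead of an IMalloc.
  if (hlsl::ZlibCompress(nullptr, Payload.data(), Payload.size(), &Compressed,
                         AllocateOutputBuffer,
                         &CompressedSize) != hlsl::ZlibResult::Success)
    return false;

  // The uncompressed size must be known up front and passed in exactly.
  std::string Decompressed(Payload.size(), '\0');
  if (hlsl::ZlibDecompress(nullptr, Compressed.data(), CompressedSize,
                           &Decompressed[0], Decompressed.size()) !=
      hlsl::ZlibResult::Success)
    return false;

  return Decompressed == Payload;
}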
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxilCompression/miniz.c
/************************************************************************** * * Copyright 2013-2014 RAD Game Tools and Valve Software * Copyright 2010-2014 Rich Geldreich and Tenacious Software LLC * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. * **************************************************************************/ #include "miniz.h" typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1]; typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1]; typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1]; #ifdef __cplusplus extern "C" { #endif /* ------------------- zlib-style API's */ mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) { mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16); size_t block_len = buf_len % 5552; if (!ptr) return MZ_ADLER32_INIT; while (buf_len) { for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1; s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1; } for (; i < block_len; ++i) s1 += *ptr++, s2 += s1; s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552; } return (s2 << 16) + s1; } /* Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C implementation that balances processor cache usage against speed": http://www.geocities.com/malbrain/ */ #if 0 mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) { static const mz_uint32 s_crc32[16] = { 0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c }; mz_uint32 crcu32 = (mz_uint32)crc; if (!ptr) return MZ_CRC32_INIT; crcu32 = ~crcu32; while (buf_len--) { mz_uint8 b = *ptr++; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)]; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)]; } return ~crcu32; } #else /* Faster, but larger CPU cache footprint. 
*/ mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) { static const mz_uint32 s_crc_table[256] = { 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D }; mz_uint32 crc32 = (mz_uint32)crc ^ 0xFFFFFFFF; const mz_uint8 *pByte_buf = (const mz_uint8 *)ptr; while (buf_len >= 4) { crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[0]) & 0xFF]; crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[1]) & 0xFF]; crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[2]) & 0xFF]; crc32 = (crc32 >> 8) ^ 
s_crc_table[(crc32 ^ pByte_buf[3]) & 0xFF]; pByte_buf += 4; buf_len -= 4; } while (buf_len) { crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[0]) & 0xFF]; ++pByte_buf; --buf_len; } return ~crc32; } #endif void mz_free(void *p) { MZ_FREE(p); } void *miniz_def_alloc_func(void *opaque, size_t items, size_t size) { (void)opaque, (void)items, (void)size; return MZ_MALLOC(items * size); } void miniz_def_free_func(void *opaque, void *address) { (void)opaque, (void)address; MZ_FREE(address); } void *miniz_def_realloc_func(void *opaque, void *address, size_t items, size_t size) { (void)opaque, (void)address, (void)items, (void)size; return MZ_REALLOC(address, items * size); } const char *mz_version(void) { return MZ_VERSION; } #ifndef MINIZ_NO_ZLIB_APIS int mz_deflateInit(mz_streamp pStream, int level) { return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9, MZ_DEFAULT_STRATEGY); } int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy) { tdefl_compressor *pComp; mz_uint comp_flags = TDEFL_COMPUTE_ADLER32 | tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy); if (!pStream) return MZ_STREAM_ERROR; if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) || ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS))) return MZ_PARAM_ERROR; pStream->data_type = 0; pStream->adler = MZ_ADLER32_INIT; pStream->msg = NULL; pStream->reserved = 0; pStream->total_in = 0; pStream->total_out = 0; if (!pStream->zalloc) pStream->zalloc = miniz_def_alloc_func; if (!pStream->zfree) pStream->zfree = miniz_def_free_func; pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1, sizeof(tdefl_compressor)); if (!pComp) return MZ_MEM_ERROR; pStream->state = (struct mz_internal_state *)pComp; if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) { mz_deflateEnd(pStream); return MZ_PARAM_ERROR; } return MZ_OK; } int mz_deflateReset(mz_streamp pStream) { if ((!pStream) || (!pStream->state) || (!pStream->zalloc) || (!pStream->zfree)) return MZ_STREAM_ERROR; pStream->total_in = pStream->total_out = 0; tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL, ((tdefl_compressor *)pStream->state)->m_flags); return MZ_OK; } int mz_deflate(mz_streamp pStream, int flush) { size_t in_bytes, out_bytes; mz_ulong orig_total_in, orig_total_out; int mz_status = MZ_OK; if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) || (!pStream->next_out)) return MZ_STREAM_ERROR; if (!pStream->avail_out) return MZ_BUF_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if (((tdefl_compressor *)pStream->state)->m_prev_return_status == TDEFL_STATUS_DONE) return (flush == MZ_FINISH) ? 
MZ_STREAM_END : MZ_BUF_ERROR; orig_total_in = pStream->total_in; orig_total_out = pStream->total_out; for (;;) { tdefl_status defl_status; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; defl_status = tdefl_compress((tdefl_compressor *)pStream->state, pStream->next_in, &in_bytes, pStream->next_out, &out_bytes, (tdefl_flush)flush); pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (defl_status < 0) { mz_status = MZ_STREAM_ERROR; break; } else if (defl_status == TDEFL_STATUS_DONE) { mz_status = MZ_STREAM_END; break; } else if (!pStream->avail_out) break; else if ((!pStream->avail_in) && (flush != MZ_FINISH)) { if ((flush) || (pStream->total_in != orig_total_in) || (pStream->total_out != orig_total_out)) break; return MZ_BUF_ERROR; /* Can't make forward progress without some input. */ } } return mz_status; } int mz_deflateEnd(mz_streamp pStream) { if (!pStream) return MZ_STREAM_ERROR; if (pStream->state) { pStream->zfree(pStream->opaque, pStream->state); pStream->state = NULL; } return MZ_OK; } mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) { (void)pStream; /* This is really over conservative. (And lame, but it's actually pretty tricky to compute a true upper bound given the way tdefl's blocking works.) */ return MZ_MAX(128 + (source_len * 110) / 100, 128 + source_len + ((source_len / (31 * 1024)) + 1) * 5); } int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level) { int status; mz_stream stream; memset(&stream, 0, sizeof(stream)); /* In case mz_ulong is 64-bits (argh I hate longs). */ if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR; stream.next_in = pSource; stream.avail_in = (mz_uint32)source_len; stream.next_out = pDest; stream.avail_out = (mz_uint32)*pDest_len; status = mz_deflateInit(&stream, level); if (status != MZ_OK) return status; status = mz_deflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_deflateEnd(&stream); return (status == MZ_OK) ? 
MZ_BUF_ERROR : status; } *pDest_len = stream.total_out; return mz_deflateEnd(&stream); } int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len) { return mz_compress2(pDest, pDest_len, pSource, source_len, MZ_DEFAULT_COMPRESSION); } mz_ulong mz_compressBound(mz_ulong source_len) { return mz_deflateBound(NULL, source_len); } typedef struct { tinfl_decompressor m_decomp; mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed; int m_window_bits; mz_uint8 m_dict[TINFL_LZ_DICT_SIZE]; tinfl_status m_last_status; } inflate_state; int mz_inflateInit2(mz_streamp pStream, int window_bits) { inflate_state *pDecomp; if (!pStream) return MZ_STREAM_ERROR; if ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS)) return MZ_PARAM_ERROR; pStream->data_type = 0; pStream->adler = 0; pStream->msg = NULL; pStream->total_in = 0; pStream->total_out = 0; pStream->reserved = 0; if (!pStream->zalloc) pStream->zalloc = miniz_def_alloc_func; if (!pStream->zfree) pStream->zfree = miniz_def_free_func; pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1, sizeof(inflate_state)); if (!pDecomp) return MZ_MEM_ERROR; pStream->state = (struct mz_internal_state *)pDecomp; tinfl_init(&pDecomp->m_decomp); pDecomp->m_dict_ofs = 0; pDecomp->m_dict_avail = 0; pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT; pDecomp->m_first_call = 1; pDecomp->m_has_flushed = 0; pDecomp->m_window_bits = window_bits; return MZ_OK; } int mz_inflateInit(mz_streamp pStream) { return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS); } int mz_inflateReset(mz_streamp pStream) { inflate_state *pDecomp; if (!pStream) return MZ_STREAM_ERROR; pStream->data_type = 0; pStream->adler = 0; pStream->msg = NULL; pStream->total_in = 0; pStream->total_out = 0; pStream->reserved = 0; pDecomp = (inflate_state *)pStream->state; tinfl_init(&pDecomp->m_decomp); pDecomp->m_dict_ofs = 0; pDecomp->m_dict_avail = 0; pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT; pDecomp->m_first_call = 1; pDecomp->m_has_flushed = 0; /* pDecomp->m_window_bits = window_bits */; return MZ_OK; } int mz_inflate(mz_streamp pStream, int flush) { inflate_state *pState; mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32; size_t in_bytes, out_bytes, orig_avail_in; tinfl_status status; if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState = (inflate_state *)pStream->state; if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER; orig_avail_in = pStream->avail_in; first_call = pState->m_first_call; pState->m_first_call = 0; if (pState->m_last_status < 0) return MZ_DATA_ERROR; if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState->m_has_flushed |= (flush == MZ_FINISH); if ((flush == MZ_FINISH) && (first_call)) { /* MZ_FINISH on the first call implies that the input and output buffers are large enough to hold the entire compressed/decompressed file. 
*/ decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, pStream->next_out, pStream->next_out, &out_bytes, decomp_flags); pState->m_last_status = status; pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tinfl_get_adler32(&pState->m_decomp); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (status < 0) return MZ_DATA_ERROR; else if (status != TINFL_STATUS_DONE) { pState->m_last_status = TINFL_STATUS_FAILED; return MZ_BUF_ERROR; } return MZ_STREAM_END; } /* flush != MZ_FINISH then we must assume there's more input. */ if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT; if (pState->m_dict_avail) { n = MZ_MIN(pState->m_dict_avail, pStream->avail_out); memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n); pStream->next_out += n; pStream->avail_out -= n; pStream->total_out += n; pState->m_dict_avail -= n; pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1); return ((pState->m_last_status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK; } for (;;) { in_bytes = pStream->avail_in; out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs; status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict, pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags); pState->m_last_status = status; pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tinfl_get_adler32(&pState->m_decomp); pState->m_dict_avail = (mz_uint)out_bytes; n = MZ_MIN(pState->m_dict_avail, pStream->avail_out); memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n); pStream->next_out += n; pStream->avail_out -= n; pStream->total_out += n; pState->m_dict_avail -= n; pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1); if (status < 0) return MZ_DATA_ERROR; /* Stream is corrupted (there could be some uncompressed data left in the output dictionary - oh well). */ else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in)) return MZ_BUF_ERROR; /* Signal caller that we can't make forward progress without supplying more input or by setting flush to MZ_FINISH. */ else if (flush == MZ_FINISH) { /* The output buffer MUST be large to hold the remaining uncompressed data when flush==MZ_FINISH. */ if (status == TINFL_STATUS_DONE) return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END; /* status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's at least 1 more byte on the way. If there's no more room left in the output buffer then something is wrong. */ else if (!pStream->avail_out) return MZ_BUF_ERROR; } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) || (!pStream->avail_out) || (pState->m_dict_avail)) break; } return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? 
MZ_STREAM_END : MZ_OK; } int mz_inflateEnd(mz_streamp pStream) { if (!pStream) return MZ_STREAM_ERROR; if (pStream->state) { pStream->zfree(pStream->opaque, pStream->state); pStream->state = NULL; } return MZ_OK; } int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len) { mz_stream stream; int status; memset(&stream, 0, sizeof(stream)); /* In case mz_ulong is 64-bits (argh I hate longs). */ if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR; stream.next_in = pSource; stream.avail_in = (mz_uint32)source_len; stream.next_out = pDest; stream.avail_out = (mz_uint32)*pDest_len; status = mz_inflateInit(&stream); if (status != MZ_OK) return status; status = mz_inflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_inflateEnd(&stream); return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? MZ_DATA_ERROR : status; } *pDest_len = stream.total_out; return mz_inflateEnd(&stream); } const char *mz_error(int err) { static struct { int m_err; const char *m_pDesc; } s_error_descs[] = { { MZ_OK, "" }, { MZ_STREAM_END, "stream end" }, { MZ_NEED_DICT, "need dictionary" }, { MZ_ERRNO, "file error" }, { MZ_STREAM_ERROR, "stream error" }, { MZ_DATA_ERROR, "data error" }, { MZ_MEM_ERROR, "out of memory" }, { MZ_BUF_ERROR, "buf error" }, { MZ_VERSION_ERROR, "version error" }, { MZ_PARAM_ERROR, "parameter error" } }; mz_uint i; for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i) if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc; return NULL; } #endif /*MINIZ_NO_ZLIB_APIS */ #ifdef __cplusplus } #endif /* This is free and unencumbered software released into the public domain. Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. For more information, please refer to <http://unlicense.org/> */ /************************************************************************** * * Copyright 2013-2014 RAD Game Tools and Valve Software * Copyright 2010-2014 Rich Geldreich and Tenacious Software LLC * All Rights Reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. * **************************************************************************/ #ifdef __cplusplus extern "C" { #endif /* ------------------- Low-level Compression (independent from all decompression API's) */ /* Purposely making these tables static for faster init and thread safety. */ static const mz_uint16 s_tdefl_len_sym[256] = { 257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285 }; static const mz_uint8 s_tdefl_len_extra[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0 }; static const mz_uint8 s_tdefl_small_dist_sym[512] = { 0, 1, 2, 
3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17 }; static const mz_uint8 s_tdefl_small_dist_extra[512] = { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 }; static const mz_uint8 s_tdefl_large_dist_sym[128] = { 0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29 }; static const mz_uint8 s_tdefl_large_dist_extra[128] = { 0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13 }; /* Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted values. */ typedef struct { mz_uint16 m_key, m_sym_index; } tdefl_sym_freq; static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms, tdefl_sym_freq *pSyms0, tdefl_sym_freq *pSyms1) { mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2]; tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1; MZ_CLEAR_OBJ(hist); for (i = 0; i < num_syms; i++) { mz_uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; } while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--; for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) { const mz_uint32 *pHist = &hist[pass << 8]; mz_uint offsets[256], cur_ofs = 0; for (i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; } for (i = 0; i < num_syms; i++) pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i]; { tdefl_sym_freq *t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t; } } return pCur_syms; } /* tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat, [email protected], Jyrki Katajainen, [email protected], November 1996. */ static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) { int root, leaf, next, avbl, used, dpth; if (n == 0) return; else if (n == 1) { A[0].m_key = 1; return; } A[0].m_key += A[1].m_key; root = 0; leaf = 2; for (next = 1; next < n - 1; next++) { if (leaf >= n || A[root].m_key < A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = (mz_uint16)next; } else A[next].m_key = A[leaf++].m_key; if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) { A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key); A[root++].m_key = (mz_uint16)next; } else A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key); } A[n - 2].m_key = 0; for (next = n - 3; next >= 0; next--) A[next].m_key = A[A[next].m_key].m_key + 1; avbl = 1; used = dpth = 0; root = n - 2; next = n - 1; while (avbl > 0) { while (root >= 0 && (int)A[root].m_key == dpth) { used++; root--; } while (avbl > used) { A[next--].m_key = (mz_uint16)(dpth); avbl--; } avbl = 2 * used; dpth++; used = 0; } } /* Limits canonical Huffman code table's max code size. 
*/ enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 }; static void tdefl_huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size) { int i; mz_uint32 total = 0; if (code_list_len <= 1) return; for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++) pNum_codes[max_code_size] += pNum_codes[i]; for (i = max_code_size; i > 0; i--) total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i)); while (total != (1UL << max_code_size)) { pNum_codes[max_code_size]--; for (i = max_code_size - 1; i > 0; i--) if (pNum_codes[i]) { pNum_codes[i]--; pNum_codes[i + 1] += 2; break; } total--; } } static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num, int table_len, int code_size_limit, int static_table) { int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE]; mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1]; MZ_CLEAR_OBJ(num_codes); if (static_table) { for (i = 0; i < table_len; i++) num_codes[d->m_huff_code_sizes[table_num][i]]++; } else { tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS], *pSyms; int num_used_syms = 0; const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0]; for (i = 0; i < table_len; i++) if (pSym_count[i]) { syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i]; syms0[num_used_syms++].m_sym_index = (mz_uint16)i; } pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1); tdefl_calculate_minimum_redundancy(pSyms, num_used_syms); for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++; tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms, code_size_limit); MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]); MZ_CLEAR_OBJ(d->m_huff_codes[table_num]); for (i = 1, j = num_used_syms; i <= code_size_limit; i++) for (l = num_codes[i]; l > 0; l--) d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i); } next_code[1] = 0; for (j = 0, i = 2; i <= code_size_limit; i++) next_code[i] = j = ((j + num_codes[i - 1]) << 1); for (i = 0; i < table_len; i++) { mz_uint rev_code = 0, code, code_size; if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue; code = next_code[code_size]++; for (l = code_size; l > 0; l--, code >>= 1) rev_code = (rev_code << 1) | (code & 1); d->m_huff_codes[table_num][i] = (mz_uint16)rev_code; } } #define TDEFL_PUT_BITS(b, l) \ do \ { \ mz_uint bits = b; \ mz_uint len = l; \ MZ_ASSERT(bits <= ((1U << len) - 1U)); \ d->m_bit_buffer |= (bits << d->m_bits_in); \ d->m_bits_in += len; \ while (d->m_bits_in >= 8) \ { \ if (d->m_pOutput_buf < d->m_pOutput_buf_end) \ *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \ d->m_bit_buffer >>= 8; \ d->m_bits_in -= 8; \ } \ } \ MZ_MACRO_END #define TDEFL_RLE_PREV_CODE_SIZE() \ { \ if (rle_repeat_count) \ { \ if (rle_repeat_count < 3) \ { \ d->m_huff_count[2][prev_code_size] = (mz_uint16)(d->m_huff_count[2][prev_code_size] + rle_repeat_count); \ while (rle_repeat_count--) \ packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \ } \ else \ { \ d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \ packed_code_sizes[num_packed_code_sizes++] = 16; \ packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_repeat_count - 3); \ } \ rle_repeat_count = 0; \ } \ } #define TDEFL_RLE_ZERO_CODE_SIZE() \ { \ if (rle_z_count) \ { \ if (rle_z_count < 3) \ { \ d->m_huff_count[2][0] = (mz_uint16)(d->m_huff_count[2][0] + rle_z_count); \ while (rle_z_count--) \ packed_code_sizes[num_packed_code_sizes++] = 0; \ } \ else if (rle_z_count <= 10) \ { \ d->m_huff_count[2][17] = 
(mz_uint16)(d->m_huff_count[2][17] + 1); \ packed_code_sizes[num_packed_code_sizes++] = 17; \ packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_z_count - 3); \ } \ else \ { \ d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \ packed_code_sizes[num_packed_code_sizes++] = 18; \ packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_z_count - 11); \ } \ rle_z_count = 0; \ } \ } static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 }; static void tdefl_start_dynamic_block(tdefl_compressor *d) { int num_lit_codes, num_dist_codes, num_bit_lengths; mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count, rle_repeat_count, packed_code_sizes_index; mz_uint8 code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], prev_code_size = 0xFF; d->m_huff_count[0][256] = 1; tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE); tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE); for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--) if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break; for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--) if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break; memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes); memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0], num_dist_codes); total_code_sizes_to_pack = num_lit_codes + num_dist_codes; num_packed_code_sizes = 0; rle_z_count = 0; rle_repeat_count = 0; memset(&d->m_huff_count[2][0], 0, sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2); for (i = 0; i < total_code_sizes_to_pack; i++) { mz_uint8 code_size = code_sizes_to_pack[i]; if (!code_size) { TDEFL_RLE_PREV_CODE_SIZE(); if (++rle_z_count == 138) { TDEFL_RLE_ZERO_CODE_SIZE(); } } else { TDEFL_RLE_ZERO_CODE_SIZE(); if (code_size != prev_code_size) { TDEFL_RLE_PREV_CODE_SIZE(); d->m_huff_count[2][code_size] = (mz_uint16)(d->m_huff_count[2][code_size] + 1); packed_code_sizes[num_packed_code_sizes++] = code_size; } else if (++rle_repeat_count == 6) { TDEFL_RLE_PREV_CODE_SIZE(); } } prev_code_size = code_size; } if (rle_repeat_count) { TDEFL_RLE_PREV_CODE_SIZE(); } else { TDEFL_RLE_ZERO_CODE_SIZE(); } tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE); TDEFL_PUT_BITS(2, 2); TDEFL_PUT_BITS(num_lit_codes - 257, 5); TDEFL_PUT_BITS(num_dist_codes - 1, 5); for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--) if (d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]]) break; num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1)); TDEFL_PUT_BITS(num_bit_lengths - 4, 4); for (i = 0; (int)i < num_bit_lengths; i++) TDEFL_PUT_BITS(d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3); for (packed_code_sizes_index = 0; packed_code_sizes_index < num_packed_code_sizes;) { mz_uint code = packed_code_sizes[packed_code_sizes_index++]; MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2); TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]); if (code >= 16) TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++], "\02\03\07"[code - 16]); } } static void tdefl_start_static_block(tdefl_compressor *d) { mz_uint i; mz_uint8 *p = &d->m_huff_code_sizes[0][0]; for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; 
memset(d->m_huff_code_sizes[1], 5, 32); tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE); tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE); TDEFL_PUT_BITS(1, 2); } static const mz_uint mz_bitmasks[17] = { 0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF, 0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF }; #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && MINIZ_HAS_64BIT_REGISTERS static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { mz_uint flags; mz_uint8 *pLZ_codes; mz_uint8 *pOutput_buf = d->m_pOutput_buf; mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf; mz_uint64 bit_buffer = d->m_bit_buffer; mz_uint bits_in = d->m_bits_in; #define TDEFL_PUT_BITS_FAST(b, l) \ { \ bit_buffer |= (((mz_uint64)(b)) << bits_in); \ bits_in += (l); \ } flags = 1; for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end; flags >>= 1) { if (flags == 1) flags = *pLZ_codes++ | 0x100; if (flags & 1) { mz_uint s0, s1, n0, n1, sym, num_extra_bits; mz_uint16 read_val; memcpy((void*)&read_val, pLZ_codes + 1, sizeof(mz_uint16)); mz_uint match_len = pLZ_codes[0], match_dist = read_val; pLZ_codes += 3; MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]); /* This sequence coaxes MSVC into using cmov's vs. jmp's. */ s0 = s_tdefl_small_dist_sym[match_dist & 511]; n0 = s_tdefl_small_dist_extra[match_dist & 511]; s1 = s_tdefl_large_dist_sym[match_dist >> 8]; n1 = s_tdefl_large_dist_extra[match_dist >> 8]; sym = (match_dist < 512) ? s0 : s1; num_extra_bits = (match_dist < 512) ? 
n0 : n1; MZ_ASSERT(d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits); } else { mz_uint lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) { flags >>= 1; lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) { flags >>= 1; lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); } } } if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE; memcpy((void*)pOutput_buf, (void*)&bit_buffer, sizeof(bit_buffer)); pOutput_buf += (bits_in >> 3); bit_buffer >>= (bits_in & ~7); bits_in &= 7; } #undef TDEFL_PUT_BITS_FAST d->m_pOutput_buf = pOutput_buf; d->m_bits_in = 0; d->m_bit_buffer = 0; while (bits_in) { mz_uint32 n = MZ_MIN(bits_in, 16); TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n); bit_buffer >>= n; bits_in -= n; } TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); return (d->m_pOutput_buf < d->m_pOutput_buf_end); } #else static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) { mz_uint flags; mz_uint8 *pLZ_codes; flags = 1; for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf; flags >>= 1) { if (flags == 1) flags = *pLZ_codes++ | 0x100; if (flags & 1) { mz_uint sym, num_extra_bits; mz_uint match_len = pLZ_codes[0], match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8)); pLZ_codes += 3; MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]); if (match_dist < 512) { sym = s_tdefl_small_dist_sym[match_dist]; num_extra_bits = s_tdefl_small_dist_extra[match_dist]; } else { sym = s_tdefl_large_dist_sym[match_dist >> 8]; num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8]; } MZ_ASSERT(d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]); TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits); } else { mz_uint lit = *pLZ_codes++; MZ_ASSERT(d->m_huff_code_sizes[0][lit]); TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]); } } TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]); return (d->m_pOutput_buf < d->m_pOutput_buf_end); } #endif /* MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && MINIZ_HAS_64BIT_REGISTERS */ static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) { if (static_block) tdefl_start_static_block(d); else tdefl_start_dynamic_block(d); return tdefl_compress_lz_codes(d); } static int tdefl_flush_block(tdefl_compressor *d, int flush) { mz_uint saved_bit_buf, saved_bits_in; mz_uint8 *pSaved_output_buf; mz_bool comp_block_succeeded = MZ_FALSE; int n, use_raw_block = ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) && (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size; mz_uint8 *pOutput_buf_start = ((d->m_pPut_buf_func == NULL) && ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE)) ? 
((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs) : d->m_output_buf; d->m_pOutput_buf = pOutput_buf_start; d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16; MZ_ASSERT(!d->m_output_flush_remaining); d->m_output_flush_ofs = 0; d->m_output_flush_remaining = 0; *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left); d->m_pLZ_code_buf -= (d->m_num_flags_left == 8); if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) { TDEFL_PUT_BITS(0x78, 8); TDEFL_PUT_BITS(0x01, 8); } TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1); pSaved_output_buf = d->m_pOutput_buf; saved_bit_buf = d->m_bit_buffer; saved_bits_in = d->m_bits_in; if (!use_raw_block) comp_block_succeeded = tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) || (d->m_total_lz_bytes < 48)); /* If the block gets expanded, forget the current contents of the output buffer and send a raw block instead. */ if (((use_raw_block) || ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >= d->m_total_lz_bytes))) && ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) { mz_uint i; d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in; TDEFL_PUT_BITS(0, 2); if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) { TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16); } for (i = 0; i < d->m_total_lz_bytes; ++i) { TDEFL_PUT_BITS(d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK], 8); } } /* Check for the extremely unlikely (if not impossible) case of the compressed block not fitting into the output buffer when using dynamic codes. */ else if (!comp_block_succeeded) { d->m_pOutput_buf = pSaved_output_buf; d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in; tdefl_compress_block(d, MZ_TRUE); } if (flush) { if (flush == TDEFL_FINISH) { if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) { mz_uint i, a = d->m_adler32; for (i = 0; i < 4; i++) { TDEFL_PUT_BITS((a >> 24) & 0xFF, 8); a <<= 8; } } } else { mz_uint i, z = 0; TDEFL_PUT_BITS(0, 3); if (d->m_bits_in) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } for (i = 2; i; --i, z ^= 0xFFFF) { TDEFL_PUT_BITS(z & 0xFFFF, 16); } } } MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end); memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0); memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1); d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8; d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes; d->m_total_lz_bytes = 0; d->m_block_index++; if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) { if (d->m_pPut_buf_func) { *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf; if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user)) return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED); } else if (pOutput_buf_start == d->m_output_buf) { int bytes_to_copy = (int)MZ_MIN((size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs)); memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf, bytes_to_copy); d->m_out_buf_ofs += bytes_to_copy; if ((n -= bytes_to_copy) != 0) { d->m_output_flush_ofs = bytes_to_copy; d->m_output_flush_remaining = n; } } else { d->m_out_buf_ofs += n; } } return d->m_output_flush_remaining; } #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES #ifdef MINIZ_UNALIGNED_USE_MEMCPY static mz_uint16 
TDEFL_READ_UNALIGNED_WORD(const mz_uint8* p) { mz_uint16 ret; memcpy(&ret, p, sizeof(mz_uint16)); return ret; } static mz_uint16 TDEFL_READ_UNALIGNED_WORD2(const mz_uint16* p) { mz_uint16 ret; memcpy(&ret, p, sizeof(mz_uint16)); return ret; } #else #define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p) #define TDEFL_READ_UNALIGNED_WORD2(p) *(const mz_uint16 *)(p) #endif static MZ_FORCEINLINE void tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) { mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q; mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]), s01 = TDEFL_READ_UNALIGNED_WORD2(s); MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return; for (;;) { for (;;) { if (--num_probes_left == 0) return; #define TDEFL_PROBE \ next_probe_pos = d->m_next[probe_pos]; \ if ((!next_probe_pos) || ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \ return; \ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \ break; TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE; } if (!dist) break; q = (const mz_uint16 *)(d->m_dict + probe_pos); if (TDEFL_READ_UNALIGNED_WORD2(q) != s01) continue; p = s; probe_len = 32; do { } while ((TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (--probe_len > 0)); if (!probe_len) { *pMatch_dist = dist; *pMatch_len = MZ_MIN(max_match_len, (mz_uint)TDEFL_MAX_MATCH_LEN); break; } else if ((probe_len = ((mz_uint)(p - s) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q)) > match_len) { *pMatch_dist = dist; if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) == max_match_len) break; c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]); } } } #else static MZ_FORCEINLINE void tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) { mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; const mz_uint8 *s = d->m_dict + pos, *p, *q; mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1]; MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return; for (;;) { for (;;) { if (--num_probes_left == 0) return; #define TDEFL_PROBE \ next_probe_pos = d->m_next[probe_pos]; \ if ((!next_probe_pos) || ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \ return; \ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ if ((d->m_dict[probe_pos + match_len] == c0) && (d->m_dict[probe_pos + match_len - 1] == c1)) \ break; TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE; } if (!dist) break; p = s; q = d->m_dict + probe_pos; for (probe_len = 0; probe_len < max_match_len; probe_len++) if (*p++ != *q++) break; if (probe_len > match_len) { *pMatch_dist = dist; if ((*pMatch_len = match_len = probe_len) == max_match_len) 
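/* The probe already produced the longest match we were asked for; no further chain walking can improve it. */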
return; c0 = d->m_dict[pos + match_len]; c1 = d->m_dict[pos + match_len - 1]; } } } #endif /* #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES */ #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN #ifdef MINIZ_UNALIGNED_USE_MEMCPY static mz_uint32 TDEFL_READ_UNALIGNED_WORD32(const mz_uint8* p) { mz_uint32 ret; memcpy(&ret, p, sizeof(mz_uint32)); return ret; } #else #define TDEFL_READ_UNALIGNED_WORD32(p) *(const mz_uint32 *)(p) #endif static mz_bool tdefl_compress_fast(tdefl_compressor *d) { /* Faster, minimally featured LZRW1-style match+parse loop with better register utilization. Intended for applications where raw throughput is valued more highly than ratio. */ mz_uint lookahead_pos = d->m_lookahead_pos, lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size, total_lz_bytes = d->m_total_lz_bytes, num_flags_left = d->m_num_flags_left; mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags; mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) { const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096; mz_uint dst_pos = (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size); d->m_src_buf_left -= num_bytes_to_process; lookahead_size += num_bytes_to_process; while (num_bytes_to_process) { mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process); memcpy(d->m_dict + dst_pos, d->m_pSrc, n); if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc, MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos)); d->m_pSrc += n; dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK; num_bytes_to_process -= n; } dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size); if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE)) break; while (lookahead_size >= 4) { mz_uint cur_match_dist, cur_match_len = 1; mz_uint8 *pCur_dict = d->m_dict + cur_pos; mz_uint first_trigram = TDEFL_READ_UNALIGNED_WORD32(pCur_dict) & 0xFFFFFF; mz_uint hash = (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) & TDEFL_LEVEL1_HASH_SIZE_MASK; mz_uint probe_pos = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)lookahead_pos; if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <= dict_size) && ((TDEFL_READ_UNALIGNED_WORD32(d->m_dict + (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) & 0xFFFFFF) == first_trigram)) { const mz_uint16 *p = (const mz_uint16 *)pCur_dict; const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos); mz_uint32 probe_len = 32; do { } while ((TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (--probe_len > 0)); cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q); if (!probe_len) cur_match_len = cur_match_dist ? 
TDEFL_MAX_MATCH_LEN : 0; if ((cur_match_len < TDEFL_MIN_MATCH_LEN) || ((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U))) { cur_match_len = 1; *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } else { mz_uint32 s0, s1; cur_match_len = MZ_MIN(cur_match_len, lookahead_size); MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 1) && (cur_match_dist <= TDEFL_LZ_DICT_SIZE)); cur_match_dist--; pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN); #ifdef MINIZ_UNALIGNED_USE_MEMCPY memcpy(&pLZ_code_buf[1], &cur_match_dist, sizeof(cur_match_dist)); #else *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist; #endif pLZ_code_buf += 3; *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80); s0 = s_tdefl_small_dist_sym[cur_match_dist & 511]; s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8]; d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++; d->m_huff_count[0][s_tdefl_len_sym[cur_match_len - TDEFL_MIN_MATCH_LEN]]++; } } else { *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } total_lz_bytes += cur_match_len; lookahead_pos += cur_match_len; dict_size = MZ_MIN(dict_size + cur_match_len, (mz_uint)TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK; MZ_ASSERT(lookahead_size >= cur_match_len); lookahead_size -= cur_match_len; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left; } } while (lookahead_size) { mz_uint8 lit = d->m_dict[cur_pos]; total_lz_bytes++; *pLZ_code_buf++ = lit; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } d->m_huff_count[0][lit]++; lookahead_pos++; dict_size = MZ_MIN(dict_size + 1, (mz_uint)TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; lookahead_size--; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? 
MZ_FALSE : MZ_TRUE; total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left; } } } d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; return MZ_TRUE; } #endif /* MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN */ static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d, mz_uint8 lit) { d->m_total_lz_bytes++; *d->m_pLZ_code_buf++ = lit; *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; } d->m_huff_count[0][lit]++; } static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d, mz_uint match_len, mz_uint match_dist) { mz_uint32 s0, s1; MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) && (match_dist <= TDEFL_LZ_DICT_SIZE)); d->m_total_lz_bytes += match_len; d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN); match_dist -= 1; d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF); d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8); d->m_pLZ_code_buf += 3; *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80); if (--d->m_num_flags_left == 0) { d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; } s0 = s_tdefl_small_dist_sym[match_dist & 511]; s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127]; d->m_huff_count[1][(match_dist < 512) ? s0 : s1]++; if (match_len >= TDEFL_MIN_MATCH_LEN) d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++; } static mz_bool tdefl_compress_normal(tdefl_compressor *d) { const mz_uint8 *pSrc = d->m_pSrc; size_t src_buf_left = d->m_src_buf_left; tdefl_flush flush = d->m_flush; while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) { mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos; /* Update dictionary and hash chains. Keeps the lookahead size equal to TDEFL_MAX_MATCH_LEN. 
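Each incoming byte is also mirrored into the first TDEFL_MAX_MATCH_LEN - 1 bytes past the end of m_dict so match comparisons never have to wrap, and a rolling three byte hash threads every position onto the m_hash/m_next probe chains consulted by tdefl_find_match().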
*/ if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) { mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK, ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2; mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size); const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process; src_buf_left -= num_bytes_to_process; d->m_lookahead_size += num_bytes_to_process; while (pSrc != pSrc_end) { mz_uint8 c = *pSrc++; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; ins_pos++; } } else { while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) { mz_uint8 c = *pSrc++; mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; src_buf_left--; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) { mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2; mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << (TDEFL_LZ_HASH_SHIFT * 2)) ^ (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); } } } d->m_dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size); if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break; /* Simple lazy/greedy parsing state machine. */ len_to_move = 1; cur_match_dist = 0; cur_match_len = d->m_saved_match_len ? 
d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1); cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) { if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) { mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK]; cur_match_len = 0; while (cur_match_len < d->m_lookahead_size) { if (d->m_dict[cur_pos + cur_match_len] != c) break; cur_match_len++; } if (cur_match_len < TDEFL_MIN_MATCH_LEN) cur_match_len = 0; else cur_match_dist = 1; } } else { tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size, d->m_lookahead_size, &cur_match_dist, &cur_match_len); } if (((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U)) || (cur_pos == cur_match_dist) || ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) { cur_match_dist = cur_match_len = 0; } if (d->m_saved_match_len) { if (cur_match_len > d->m_saved_match_len) { tdefl_record_literal(d, (mz_uint8)d->m_saved_lit); if (cur_match_len >= 128) { tdefl_record_match(d, cur_match_len, cur_match_dist); d->m_saved_match_len = 0; len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[cur_pos]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } } else { tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist); len_to_move = d->m_saved_match_len - 1; d->m_saved_match_len = 0; } } else if (!cur_match_dist) tdefl_record_literal(d, d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]); else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) || (cur_match_len >= 128)) { tdefl_record_match(d, cur_match_len, cur_match_dist); len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } /* Move the lookahead forward by len_to_move bytes. */ d->m_lookahead_pos += len_to_move; MZ_ASSERT(d->m_lookahead_size >= len_to_move); d->m_lookahead_size -= len_to_move; d->m_dict_size = MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE); /* Check if it's time to flush the current LZ codes to the internal output buffer. */ if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) || ((d->m_total_lz_bytes > 31 * 1024) && (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >= d->m_total_lz_bytes) || (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) { int n; d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; } } d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left; return MZ_TRUE; } static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) { if (d->m_pIn_buf_size) { *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf; } if (d->m_pOut_buf_size) { size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs, d->m_output_flush_remaining); memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf + d->m_output_flush_ofs, n); d->m_output_flush_ofs += (mz_uint)n; d->m_output_flush_remaining -= (mz_uint)n; d->m_out_buf_ofs += n; *d->m_pOut_buf_size = d->m_out_buf_ofs; } return (d->m_finished && !d->m_output_flush_remaining) ? 
TDEFL_STATUS_DONE : TDEFL_STATUS_OKAY; } tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush) { if (!d) { if (pIn_buf_size) *pIn_buf_size = 0; if (pOut_buf_size) *pOut_buf_size = 0; return TDEFL_STATUS_BAD_PARAM; } d->m_pIn_buf = pIn_buf; d->m_pIn_buf_size = pIn_buf_size; d->m_pOut_buf = pOut_buf; d->m_pOut_buf_size = pOut_buf_size; d->m_pSrc = (const mz_uint8 *)(pIn_buf); d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0; d->m_out_buf_ofs = 0; d->m_flush = flush; if (((d->m_pPut_buf_func != NULL) == ((pOut_buf != NULL) || (pOut_buf_size != NULL))) || (d->m_prev_return_status != TDEFL_STATUS_OKAY) || (d->m_wants_to_finish && (flush != TDEFL_FINISH)) || (pIn_buf_size && *pIn_buf_size && !pIn_buf) || (pOut_buf_size && *pOut_buf_size && !pOut_buf)) { if (pIn_buf_size) *pIn_buf_size = 0; if (pOut_buf_size) *pOut_buf_size = 0; return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM); } d->m_wants_to_finish |= (flush == TDEFL_FINISH); if ((d->m_output_flush_remaining) || (d->m_finished)) return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) && ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) && ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES)) == 0)) { if (!tdefl_compress_fast(d)) return d->m_prev_return_status; } else #endif /* #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN */ { if (!tdefl_compress_normal(d)) return d->m_prev_return_status; } if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) && (pIn_buf)) d->m_adler32 = (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf, d->m_pSrc - (const mz_uint8 *)pIn_buf); if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) && (!d->m_output_flush_remaining)) { if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status; d->m_finished = (flush == TDEFL_FINISH); if (flush == TDEFL_FULL_FLUSH) { MZ_CLEAR_OBJ(d->m_hash); MZ_CLEAR_OBJ(d->m_next); d->m_dict_size = 0; } } return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); } tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush) { MZ_ASSERT(d->m_pPut_buf_func); return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush); } tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { d->m_pPut_buf_func = pPut_buf_func; d->m_pPut_buf_user = pPut_buf_user; d->m_flags = (mz_uint)(flags); d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3; d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0; d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3; if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash); d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size = d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0; d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished = d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0; d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8; d->m_pOutput_buf = d->m_output_buf; d->m_pOutput_buf_end = d->m_output_buf; d->m_prev_return_status = TDEFL_STATUS_OKAY; d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0; d->m_adler32 = 1; d->m_pIn_buf = NULL; d->m_pOut_buf = NULL; d->m_pIn_buf_size = NULL; 
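/* Per-call streaming I/O state; tdefl_compress() re-populates these fields on every call. */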
d->m_pOut_buf_size = NULL; d->m_flush = TDEFL_NO_FLUSH; d->m_pSrc = NULL; d->m_src_buf_left = 0; d->m_out_buf_ofs = 0; if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_dict); memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0); memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1); return TDEFL_STATUS_OKAY; } tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) { return d->m_prev_return_status; } mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; } mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { tdefl_compressor *pComp; mz_bool succeeded; if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE; pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); if (!pComp) return MZ_FALSE; succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) == TDEFL_STATUS_OKAY); succeeded = succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) == TDEFL_STATUS_DONE); MZ_FREE(pComp); return succeeded; } typedef struct { size_t m_size, m_capacity; mz_uint8 *m_pBuf; mz_bool m_expandable; } tdefl_output_buffer; static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len, void *pUser) { tdefl_output_buffer *p = (tdefl_output_buffer *)pUser; size_t new_size = p->m_size + len; if (new_size > p->m_capacity) { size_t new_capacity = p->m_capacity; mz_uint8 *pNew_buf; if (!p->m_expandable) return MZ_FALSE; do { new_capacity = MZ_MAX(128U, new_capacity << 1U); } while (new_size > new_capacity); pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity); if (!pNew_buf) return MZ_FALSE; p->m_pBuf = pNew_buf; p->m_capacity = new_capacity; } memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len); p->m_size = new_size; return MZ_TRUE; } void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags) { tdefl_output_buffer out_buf; MZ_CLEAR_OBJ(out_buf); if (!pOut_len) return MZ_FALSE; else *pOut_len = 0; out_buf.m_expandable = MZ_TRUE; if (!tdefl_compress_mem_to_output(pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags)) return NULL; *pOut_len = out_buf.m_size; return out_buf.m_pBuf; } size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags) { tdefl_output_buffer out_buf; MZ_CLEAR_OBJ(out_buf); if (!pOut_buf) return 0; out_buf.m_pBuf = (mz_uint8 *)pOut_buf; out_buf.m_capacity = out_buf_len; if (!tdefl_compress_mem_to_output(pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags)) return 0; return out_buf.m_size; } static const mz_uint s_tdefl_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500 }; /* level may actually range from [0,10] (10 is a "hidden" max level, where we want a bit more compression and it's fine if throughput to fall off a cliff on some files). */ mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy) { mz_uint comp_flags = s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] | ((level <= 3) ? 
TDEFL_GREEDY_PARSING_FLAG : 0); if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER; if (!level) comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS; else if (strategy == MZ_FILTERED) comp_flags |= TDEFL_FILTER_MATCHES; else if (strategy == MZ_HUFFMAN_ONLY) comp_flags &= ~TDEFL_MAX_PROBES_MASK; else if (strategy == MZ_FIXED) comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS; else if (strategy == MZ_RLE) comp_flags |= TDEFL_RLE_MATCHES; return comp_flags; } #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4204) /* nonstandard extension used : non-constant aggregate initializer (also supported by GNU C and C99, so no big deal) */ #endif /* Simple PNG writer function by Alex Evans, 2011. Released into the public domain: https://gist.github.com/908299, more context at http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/. This is actually a modification of Alex's original code so PNG files generated by this function pass pngcheck. */ void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip) { /* Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was defined. */ static const mz_uint s_tdefl_png_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500 }; tdefl_compressor *pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); tdefl_output_buffer out_buf; int i, bpl = w * num_chans, y, z; mz_uint32 c; *pLen_out = 0; if (!pComp) return NULL; MZ_CLEAR_OBJ(out_buf); out_buf.m_expandable = MZ_TRUE; out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h); if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) { MZ_FREE(pComp); return NULL; } /* write dummy header */ for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf); /* compress image data */ tdefl_init(pComp, tdefl_output_buffer_putter, &out_buf, s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER); for (y = 0; y < h; ++y) { tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH); tdefl_compress_buffer( pComp, (const mz_uint8 *)pImage + (flip ? 
(h - 1 - y) : y) * bpl, bpl, TDEFL_NO_FLUSH); } if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) != TDEFL_STATUS_DONE) { MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } /* write real header */ *pLen_out = out_buf.m_size - 41; { static const mz_uint8 chans[] = { 0x00, 0x00, 0x04, 0x02, 0x06 }; mz_uint8 pnghdr[41] = { 0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x44, 0x41, 0x54 }; pnghdr[18] = (mz_uint8)(w >> 8); pnghdr[19] = (mz_uint8)w; pnghdr[22] = (mz_uint8)(h >> 8); pnghdr[23] = (mz_uint8)h; pnghdr[25] = chans[num_chans]; pnghdr[33] = (mz_uint8)(*pLen_out >> 24); pnghdr[34] = (mz_uint8)(*pLen_out >> 16); pnghdr[35] = (mz_uint8)(*pLen_out >> 8); pnghdr[36] = (mz_uint8)*pLen_out; c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17); for (i = 0; i < 4; ++i, c <<= 8) ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24); memcpy(out_buf.m_pBuf, pnghdr, 41); } /* write footer (IDAT CRC-32, followed by IEND chunk) */ if (!tdefl_output_buffer_putter("\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) { *pLen_out = 0; MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4, *pLen_out + 4); for (i = 0; i < 4; ++i, c <<= 8) (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24); /* compute final size of file, grab compressed data buffer and return */ *pLen_out += 57; MZ_FREE(pComp); return out_buf.m_pBuf; } void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out) { /* Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we can't depend on MZ_DEFAULT_LEVEL being available in case the zlib API's where #defined out) */ return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans, pLen_out, 6, MZ_FALSE); } #ifndef MINIZ_NO_MALLOC /* Allocate the tdefl_compressor and tinfl_decompressor structures in C so that */ /* non-C language bindings to tdefL_ and tinfl_ API don't need to worry about */ /* structure size and allocation mechanism. */ tdefl_compressor *tdefl_compressor_alloc() { return (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); } void tdefl_compressor_free(tdefl_compressor *pComp) { MZ_FREE(pComp); } #endif #ifdef _MSC_VER #pragma warning(pop) #endif #ifdef __cplusplus } #endif /************************************************************************** * * Copyright 2013-2014 RAD Game Tools and Valve Software * Copyright 2010-2014 Rich Geldreich and Tenacious Software LLC * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. * **************************************************************************/ #ifdef __cplusplus extern "C" { #endif /* ------------------- Low-level Decompression (completely independent from all compression API's) */ #define TINFL_MEMCPY(d, s, l) memcpy(d, s, l) #define TINFL_MEMSET(p, c, l) memset(p, c, l) #define TINFL_CR_BEGIN \ switch (r->m_state) \ { \ case 0: #define TINFL_CR_RETURN(state_index, result) \ do \ { \ status = result; \ r->m_state = state_index; \ goto common_exit; \ case state_index:; \ } \ MZ_MACRO_END #define TINFL_CR_RETURN_FOREVER(state_index, result) \ do \ { \ for (;;) \ { \ TINFL_CR_RETURN(state_index, result); \ } \ } \ MZ_MACRO_END #define TINFL_CR_FINISH } #define TINFL_GET_BYTE(state_index, c) \ do \ { \ while (pIn_buf_cur >= pIn_buf_end) \ { \ TINFL_CR_RETURN(state_index, (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS); \ } \ c = *pIn_buf_cur++; \ } \ MZ_MACRO_END #define TINFL_NEED_BITS(state_index, n) \ do \ { \ mz_uint c; \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < (mz_uint)(n)) #define TINFL_SKIP_BITS(state_index, n) \ do \ { \ if (num_bits < (mz_uint)(n)) \ { \ TINFL_NEED_BITS(state_index, n); \ } \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END #define TINFL_GET_BITS(state_index, b, n) \ do \ { \ if (num_bits < (mz_uint)(n)) \ { \ TINFL_NEED_BITS(state_index, n); \ } \ b = bit_buf & ((1 << (n)) - 1); \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END /* TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes remaining in the input buffer falls below 2. */ /* It reads just enough bytes from the input stream that are needed to decode the next Huffman code (and absolutely no more). It works by trying to fully decode a */ /* Huffman code by using whatever bits are currently present in the bit buffer. If this fails, it reads another byte, and tries again until it succeeds or until the */ /* bit buffer contains >=15 bits (deflate's max. Huffman code size). */ #define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \ do \ { \ temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \ if (temp >= 0) \ { \ code_len = temp >> 9; \ if ((code_len) && (num_bits >= code_len)) \ break; \ } \ else if (num_bits > TINFL_FAST_LOOKUP_BITS) \ { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do \ { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while ((temp < 0) && (num_bits >= (code_len + 1))); \ if (temp >= 0) \ break; \ } \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < 15); /* TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex than you would initially expect because the zlib API expects the decompressor to never read */ /* beyond the final byte of the deflate stream. (In other words, when this macro wants to read another byte from the input, it REALLY needs another byte in order to fully */ /* decode the next Huffman code.) Handling this properly is particularly important on raw deflate (non-zlib) streams, which aren't followed by a byte aligned adler-32. 
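When two or more input bytes are still available, the fast path in the macro below simply refills the bit buffer 16 bits at a time instead of taking this byte-by-byte path.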
*/ /* The slow path is only executed at the very end of the input buffer. */ /* v1.16: The original macro handled the case at the very end of the passed-in input buffer, but we also need to handle the case where the user passes in 1+zillion bytes */ /* following the deflate data and our non-conservative read-ahead path won't kick in here on this code. This is much trickier. */ #define TINFL_HUFF_DECODE(state_index, sym, pHuff) \ do \ { \ int temp; \ mz_uint code_len, c; \ if (num_bits < 15) \ { \ if ((pIn_buf_end - pIn_buf_cur) < 2) \ { \ TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \ } \ else \ { \ bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \ pIn_buf_cur += 2; \ num_bits += 16; \ } \ } \ if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) \ code_len = temp >> 9, temp &= 511; \ else \ { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do \ { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while (temp < 0); \ } \ sym = temp; \ bit_buf >>= code_len; \ num_bits -= code_len; \ } \ MZ_MACRO_END tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags) { static const int s_length_base[31] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0 }; static const int s_length_extra[31] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0 }; static const int s_dist_base[32] = { 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0 }; static const int s_dist_extra[32] = { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13 }; static const mz_uint8 s_length_dezigzag[19] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 }; static const int s_min_table_sizes[3] = { 257, 1, 4 }; tinfl_status status = TINFL_STATUS_FAILED; mz_uint32 num_bits, dist, counter, num_extra; tinfl_bit_buf_t bit_buf; const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end = pIn_buf_next + *pIn_buf_size; mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end = pOut_buf_next + *pOut_buf_size; size_t out_buf_size_mask = (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) ? (size_t)-1 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1, dist_from_out_buf_start; /* Ensure the output buffer's size is a power of 2, unless the output buffer is large enough to hold the entire output file (in which case it doesn't matter). 
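The wrap mask out_buf_size_mask computed above is applied to every LZ back-reference copy, so a wrapping (dictionary style) output buffer must be a power of 2 for the masking to be valid.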
*/ if (((out_buf_size_mask + 1) & out_buf_size_mask) || (pOut_buf_next < pOut_buf_start)) { *pIn_buf_size = *pOut_buf_size = 0; return TINFL_STATUS_BAD_PARAM; } num_bits = r->m_num_bits; bit_buf = r->m_bit_buf; dist = r->m_dist; counter = r->m_counter; num_extra = r->m_num_extra; dist_from_out_buf_start = r->m_dist_from_out_buf_start; TINFL_CR_BEGIN bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0; r->m_z_adler32 = r->m_check_adler32 = 1; if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_GET_BYTE(1, r->m_zhdr0); TINFL_GET_BYTE(2, r->m_zhdr1); counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) || (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8)); if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) || ((out_buf_size_mask + 1) < (size_t)(1U << (8U + (r->m_zhdr0 >> 4))))); if (counter) { TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED); } } do { TINFL_GET_BITS(3, r->m_final, 3); r->m_type = r->m_final >> 1; if (r->m_type == 0) { TINFL_SKIP_BITS(5, num_bits & 7); for (counter = 0; counter < 4; ++counter) { if (num_bits) TINFL_GET_BITS(6, r->m_raw_header[counter], 8); else TINFL_GET_BYTE(7, r->m_raw_header[counter]); } if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) != (mz_uint)(0xFFFF ^ (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) { TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED); } while ((counter) && (num_bits)) { TINFL_GET_BITS(51, dist, 8); while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)dist; counter--; } while (counter) { size_t n; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT); } while (pIn_buf_cur >= pIn_buf_end) { TINFL_CR_RETURN(38, (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? 
TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS); } n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur), (size_t)(pIn_buf_end - pIn_buf_cur)), counter); TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n); pIn_buf_cur += n; pOut_buf_cur += n; counter -= (mz_uint)n; } } else if (r->m_type == 3) { TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED); } else { if (r->m_type == 1) { mz_uint8 *p = r->m_tables[0].m_code_size; mz_uint i; r->m_table_sizes[0] = 288; r->m_table_sizes[1] = 32; TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32); for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; } else { for (counter = 0; counter < 3; counter++) { TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]); r->m_table_sizes[counter] += s_min_table_sizes[counter]; } MZ_CLEAR_OBJ(r->m_tables[2].m_code_size); for (counter = 0; counter < r->m_table_sizes[2]; counter++) { mz_uint s; TINFL_GET_BITS(14, s, 3); r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s; } r->m_table_sizes[2] = 19; } for (; (int)r->m_type >= 0; r->m_type--) { int tree_next, tree_cur; tinfl_huff_table *pTable; mz_uint i, j, used_syms, total, sym_index, next_code[17], total_syms[16]; pTable = &r->m_tables[r->m_type]; MZ_CLEAR_OBJ(total_syms); MZ_CLEAR_OBJ(pTable->m_look_up); MZ_CLEAR_OBJ(pTable->m_tree); for (i = 0; i < r->m_table_sizes[r->m_type]; ++i) total_syms[pTable->m_code_size[i]]++; used_syms = 0, total = 0; next_code[0] = next_code[1] = 0; for (i = 1; i <= 15; ++i) { used_syms += total_syms[i]; next_code[i + 1] = (total = ((total + total_syms[i]) << 1)); } if ((65536 != total) && (used_syms > 1)) { TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED); } for (tree_next = -1, sym_index = 0; sym_index < r->m_table_sizes[r->m_type]; ++sym_index) { mz_uint rev_code = 0, l, cur_code, code_size = pTable->m_code_size[sym_index]; if (!code_size) continue; cur_code = next_code[code_size]++; for (l = code_size; l > 0; l--, cur_code >>= 1) rev_code = (rev_code << 1) | (cur_code & 1); if (code_size <= TINFL_FAST_LOOKUP_BITS) { mz_int16 k = (mz_int16)((code_size << 9) | sym_index); while (rev_code < TINFL_FAST_LOOKUP_SIZE) { pTable->m_look_up[rev_code] = k; rev_code += (1 << code_size); } continue; } if (0 == (tree_cur = pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)])) { pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1); for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) { tree_cur -= ((rev_code >>= 1) & 1); if (!pTable->m_tree[-tree_cur - 1]) { pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } else tree_cur = pTable->m_tree[-tree_cur - 1]; } tree_cur -= ((rev_code >>= 1) & 1); pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index; } if (r->m_type == 2) { for (counter = 0; counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) { mz_uint s; TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]); if (dist < 16) { r->m_len_codes[counter++] = (mz_uint8)dist; continue; } if ((dist == 16) && (!counter)) { TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED); } num_extra = "\02\03\07"[dist - 16]; TINFL_GET_BITS(18, s, num_extra); s += "\03\03\013"[dist - 16]; TINFL_MEMSET(r->m_len_codes + counter, (dist == 16) ? 
r->m_len_codes[counter - 1] : 0, s); counter += s; } if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) { TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED); } TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes, r->m_table_sizes[0]); TINFL_MEMCPY(r->m_tables[1].m_code_size, r->m_len_codes + r->m_table_sizes[0], r->m_table_sizes[1]); } } for (;;) { mz_uint8 *pSrc; for (;;) { if (((pIn_buf_end - pIn_buf_cur) < 4) || ((pOut_buf_end - pOut_buf_cur) < 2)) { TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]); if (counter >= 256) break; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)counter; } else { int sym2; mz_uint code_len; #if TINFL_USE_64BIT_BITBUF if (num_bits < 30) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits); pIn_buf_cur += 4; num_bits += 32; } #else if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0].m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0].m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } counter = sym2; bit_buf >>= code_len; num_bits -= code_len; if (counter & 256) break; #if !TINFL_USE_64BIT_BITBUF if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0].m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0].m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } bit_buf >>= code_len; num_bits -= code_len; pOut_buf_cur[0] = (mz_uint8)counter; if (sym2 & 256) { pOut_buf_cur++; counter = sym2; break; } pOut_buf_cur[1] = (mz_uint8)sym2; pOut_buf_cur += 2; } } if ((counter &= 511) == 256) break; num_extra = s_length_extra[counter - 257]; counter = s_length_base[counter - 257]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(25, extra_bits, num_extra); counter += extra_bits; } TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]); num_extra = s_dist_extra[dist]; dist = s_dist_base[dist]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(27, extra_bits, num_extra); dist += extra_bits; } dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start; if ((dist > dist_from_out_buf_start) && (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) { TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED); } pSrc = pOut_buf_start + ((dist_from_out_buf_start - dist) & out_buf_size_mask); if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) { while (counter--) { while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = pOut_buf_start[(dist_from_out_buf_start++ - dist) & out_buf_size_mask]; } continue; } #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES else if ((counter >= 9) && (counter <= dist)) { const mz_uint8 *pSrc_end = pSrc + (counter & ~7); do { #ifdef MINIZ_UNALIGNED_USE_MEMCPY memcpy(pOut_buf_cur, pSrc, sizeof(mz_uint32)*2); #else ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0]; ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1]; #endif pOut_buf_cur += 8; } while ((pSrc += 8) < pSrc_end); if ((counter &= 7) < 3) { if (counter) { pOut_buf_cur[0] = pSrc[0]; if (counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } continue; } } #endif while(counter>2) { pOut_buf_cur[0] = 
pSrc[0]; pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur[2] = pSrc[2]; pOut_buf_cur += 3; pSrc += 3; counter -= 3; } if (counter > 0) { pOut_buf_cur[0] = pSrc[0]; if (counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } } } } while (!(r->m_final & 1)); /* Ensure byte alignment and put back any bytes from the bitbuf if we've looked ahead too far on gzip, or other Deflate streams followed by arbitrary data. */ /* I'm being super conservative here. A number of simplifications can be made to the byte alignment part, and the Adler32 check shouldn't ever need to worry about reading from the bitbuf now. */ TINFL_SKIP_BITS(32, num_bits & 7); while ((pIn_buf_cur > pIn_buf_next) && (num_bits >= 8)) { --pIn_buf_cur; num_bits -= 8; } bit_buf &= (tinfl_bit_buf_t)((((mz_uint64)1) << num_bits) - (mz_uint64)1); MZ_ASSERT(!num_bits); /* if this assert fires then we've read beyond the end of non-deflate/zlib streams with following data (such as gzip streams). */ if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { for (counter = 0; counter < 4; ++counter) { mz_uint s; if (num_bits) TINFL_GET_BITS(41, s, 8); else TINFL_GET_BYTE(42, s); r->m_z_adler32 = (r->m_z_adler32 << 8) | s; } } TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE); TINFL_CR_FINISH common_exit: /* As long as we aren't telling the caller that we NEED more input to make forward progress: */ /* Put back any bytes from the bitbuf in case we've looked ahead too far on gzip, or other Deflate streams followed by arbitrary data. */ /* We need to be very careful here to NOT push back any bytes we definitely know we need to make forward progress, though, or we'll lock the caller up into an inf loop. */ if ((status != TINFL_STATUS_NEEDS_MORE_INPUT) && (status != TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS)) { while ((pIn_buf_cur > pIn_buf_next) && (num_bits >= 8)) { --pIn_buf_cur; num_bits -= 8; } } r->m_num_bits = num_bits; r->m_bit_buf = bit_buf & (tinfl_bit_buf_t)((((mz_uint64)1) << num_bits) - (mz_uint64)1); r->m_dist = dist; r->m_counter = counter; r->m_num_extra = num_extra; r->m_dist_from_out_buf_start = dist_from_out_buf_start; *pIn_buf_size = pIn_buf_cur - pIn_buf_next; *pOut_buf_size = pOut_buf_cur - pOut_buf_next; if ((decomp_flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) && (status >= 0)) { const mz_uint8 *ptr = pOut_buf_next; size_t buf_len = *pOut_buf_size; mz_uint32 i, s1 = r->m_check_adler32 & 0xffff, s2 = r->m_check_adler32 >> 16; size_t block_len = buf_len % 5552; while (buf_len) { for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1; s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1; } for (; i < block_len; ++i) s1 += *ptr++, s2 += s1; s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552; } r->m_check_adler32 = (s2 << 16) + s1; if ((status == TINFL_STATUS_DONE) && (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) && (r->m_check_adler32 != r->m_z_adler32)) status = TINFL_STATUS_ADLER32_MISMATCH; } return status; } /* Higher level helper functions. 
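tinfl_decompress_mem_to_heap() inflates into a growable heap block, tinfl_decompress_mem_to_mem() into a caller-supplied buffer, and tinfl_decompress_mem_to_callback() streams through an internal TINFL_LZ_DICT_SIZE window. A minimal illustrative call, where pSrc and src_len stand in for the caller's zlib-wrapped input, might look like:

    size_t out_len = 0;
    void *pDecomp = tinfl_decompress_mem_to_heap(pSrc, src_len, &out_len, TINFL_FLAG_PARSE_ZLIB_HEADER);
    if (pDecomp) { ... consume out_len bytes ... MZ_FREE(pDecomp); }
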
*/ void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags) { tinfl_decompressor decomp; void *pBuf = NULL, *pNew_buf; size_t src_buf_ofs = 0, out_buf_capacity = 0; *pOut_len = 0; tinfl_init(&decomp); for (;;) { size_t src_buf_size = src_buf_len - src_buf_ofs, dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity; tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size, (mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL, &dst_buf_size, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) { MZ_FREE(pBuf); *pOut_len = 0; return NULL; } src_buf_ofs += src_buf_size; *pOut_len += dst_buf_size; if (status == TINFL_STATUS_DONE) break; new_out_buf_capacity = out_buf_capacity * 2; if (new_out_buf_capacity < 128) new_out_buf_capacity = 128; pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity); if (!pNew_buf) { MZ_FREE(pBuf); *pOut_len = 0; return NULL; } pBuf = pNew_buf; out_buf_capacity = new_out_buf_capacity; } return pBuf; } size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags) { tinfl_decompressor decomp; tinfl_status status; tinfl_init(&decomp); status = tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len, (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF); return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED : out_buf_len; } int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { int result = 0; tinfl_decompressor decomp; mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE); size_t in_buf_ofs = 0, dict_ofs = 0; if (!pDict) return TINFL_STATUS_FAILED; tinfl_init(&decomp); for (;;) { size_t in_buf_size = *pIn_buf_size - in_buf_ofs, dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs; tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs, &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size, (flags & ~(TINFL_FLAG_HAS_MORE_INPUT | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))); in_buf_ofs += in_buf_size; if ((dst_buf_size) && (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user))) break; if (status != TINFL_STATUS_HAS_MORE_OUTPUT) { result = (status == TINFL_STATUS_DONE); break; } dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1); } MZ_FREE(pDict); *pIn_buf_size = in_buf_ofs; return result; } #ifndef MINIZ_NO_MALLOC tinfl_decompressor *tinfl_decompressor_alloc() { tinfl_decompressor *pDecomp = (tinfl_decompressor *)MZ_MALLOC(sizeof(tinfl_decompressor)); if (pDecomp) tinfl_init(pDecomp); return pDecomp; } void tinfl_decompressor_free(tinfl_decompressor *pDecomp) { MZ_FREE(pDecomp); } #endif #ifdef __cplusplus } #endif /************************************************************************** * * Copyright 2013-2014 RAD Game Tools and Valve Software * Copyright 2010-2014 Rich Geldreich and Tenacious Software LLC * Copyright 2016 Martin Raiber * All Rights Reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. * **************************************************************************/ #ifndef MINIZ_NO_ARCHIVE_APIS #ifdef __cplusplus extern "C" { #endif /* ------------------- .ZIP archive reading */ #ifdef MINIZ_NO_STDIO #define MZ_FILE void * #else #include <sys/stat.h> #if defined(_MSC_VER) || defined(__MINGW64__) static FILE *mz_fopen(const char *pFilename, const char *pMode) { FILE *pFile = NULL; fopen_s(&pFile, pFilename, pMode); return pFile; } static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) { FILE *pFile = NULL; if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL; return pFile; } #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FOPEN mz_fopen #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 _ftelli64 #define MZ_FSEEK64 _fseeki64 #define MZ_FILE_STAT_STRUCT _stat64 #define MZ_FILE_STAT _stat64 #define MZ_FFLUSH fflush #define MZ_FREOPEN mz_freopen #define MZ_DELETE_FILE remove #elif defined(__MINGW32__) #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello64 #define MZ_FSEEK64 fseeko64 #define MZ_FILE_STAT_STRUCT _stat #define MZ_FILE_STAT _stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #elif defined(__TINYC__) #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftell #define MZ_FSEEK64 fseek #define MZ_FILE_STAT_STRUCT stat #define MZ_FILE_STAT stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FOPEN(f, m) fopen64(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello64 #define MZ_FSEEK64 fseeko64 #define MZ_FILE_STAT_STRUCT stat64 #define MZ_FILE_STAT stat64 #define MZ_FFLUSH fflush #define MZ_FREOPEN(p, m, s) freopen64(p, m, s) #define MZ_DELETE_FILE remove #elif defined(__APPLE__) #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello #define MZ_FSEEK64 fseeko #define MZ_FILE_STAT_STRUCT stat #define 
MZ_FILE_STAT stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(p, m, s) freopen(p, m, s) #define MZ_DELETE_FILE remove #else #pragma message("Using fopen, ftello, fseeko, stat() etc. path for file I/O - this path may not support large files.") #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #ifdef __STRICT_ANSI__ #define MZ_FTELL64 ftell #define MZ_FSEEK64 fseek #else #define MZ_FTELL64 ftello #define MZ_FSEEK64 fseeko #endif #define MZ_FILE_STAT_STRUCT stat #define MZ_FILE_STAT stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #endif /* #ifdef _MSC_VER */ #endif /* #ifdef MINIZ_NO_STDIO */ #define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c)) /* Various ZIP archive enums. To completely avoid cross platform compiler alignment and platform endian issues, miniz.c doesn't use structs for any of this stuff. */ enum { /* ZIP archive identifiers and record sizes */ MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50, MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50, MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50, MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22, /* ZIP64 archive identifier and record sizes */ MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06064b50, MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIG = 0x07064b50, MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE = 56, MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE = 20, MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID = 0x0001, MZ_ZIP_DATA_DESCRIPTOR_ID = 0x08074b50, MZ_ZIP_DATA_DESCRIPTER_SIZE64 = 24, MZ_ZIP_DATA_DESCRIPTER_SIZE32 = 16, /* Central directory header record offsets */ MZ_ZIP_CDH_SIG_OFS = 0, MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4, MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6, MZ_ZIP_CDH_BIT_FLAG_OFS = 8, MZ_ZIP_CDH_METHOD_OFS = 10, MZ_ZIP_CDH_FILE_TIME_OFS = 12, MZ_ZIP_CDH_FILE_DATE_OFS = 14, MZ_ZIP_CDH_CRC32_OFS = 16, MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20, MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24, MZ_ZIP_CDH_FILENAME_LEN_OFS = 28, MZ_ZIP_CDH_EXTRA_LEN_OFS = 30, MZ_ZIP_CDH_COMMENT_LEN_OFS = 32, MZ_ZIP_CDH_DISK_START_OFS = 34, MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36, MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38, MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42, /* Local directory header offsets */ MZ_ZIP_LDH_SIG_OFS = 0, MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4, MZ_ZIP_LDH_BIT_FLAG_OFS = 6, MZ_ZIP_LDH_METHOD_OFS = 8, MZ_ZIP_LDH_FILE_TIME_OFS = 10, MZ_ZIP_LDH_FILE_DATE_OFS = 12, MZ_ZIP_LDH_CRC32_OFS = 14, MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18, MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22, MZ_ZIP_LDH_FILENAME_LEN_OFS = 26, MZ_ZIP_LDH_EXTRA_LEN_OFS = 28, MZ_ZIP_LDH_BIT_FLAG_HAS_LOCATOR = 1 << 3, /* End of central directory offsets */ MZ_ZIP_ECDH_SIG_OFS = 0, MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4, MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6, MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8, MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10, MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12, MZ_ZIP_ECDH_CDIR_OFS_OFS = 16, MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20, /* ZIP64 End of central directory locator offsets */ MZ_ZIP64_ECDL_SIG_OFS = 0, /* 4 bytes */ MZ_ZIP64_ECDL_NUM_DISK_CDIR_OFS = 4, /* 4 bytes */ MZ_ZIP64_ECDL_REL_OFS_TO_ZIP64_ECDR_OFS = 8, /* 8 bytes */ MZ_ZIP64_ECDL_TOTAL_NUMBER_OF_DISKS_OFS = 16, /* 4 bytes */ /* ZIP64 End of central directory header offsets */ MZ_ZIP64_ECDH_SIG_OFS = 0, /* 4 bytes */ MZ_ZIP64_ECDH_SIZE_OF_RECORD_OFS = 4, /* 8 bytes */ MZ_ZIP64_ECDH_VERSION_MADE_BY_OFS = 12, /* 2 bytes */ 
MZ_ZIP64_ECDH_VERSION_NEEDED_OFS = 14, /* 2 bytes */ MZ_ZIP64_ECDH_NUM_THIS_DISK_OFS = 16, /* 4 bytes */ MZ_ZIP64_ECDH_NUM_DISK_CDIR_OFS = 20, /* 4 bytes */ MZ_ZIP64_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 24, /* 8 bytes */ MZ_ZIP64_ECDH_CDIR_TOTAL_ENTRIES_OFS = 32, /* 8 bytes */ MZ_ZIP64_ECDH_CDIR_SIZE_OFS = 40, /* 8 bytes */ MZ_ZIP64_ECDH_CDIR_OFS_OFS = 48, /* 8 bytes */ MZ_ZIP_VERSION_MADE_BY_DOS_FILESYSTEM_ID = 0, MZ_ZIP_DOS_DIR_ATTRIBUTE_BITFLAG = 0x10, MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_IS_ENCRYPTED = 1, MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_COMPRESSED_PATCH_FLAG = 32, MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_USES_STRONG_ENCRYPTION = 64, MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_LOCAL_DIR_IS_MASKED = 8192, MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_UTF8 = 1 << 11 }; typedef struct { void *m_p; size_t m_size, m_capacity; mz_uint m_element_size; } mz_zip_array; struct mz_zip_internal_state_tag { mz_zip_array m_central_dir; mz_zip_array m_central_dir_offsets; mz_zip_array m_sorted_central_dir_offsets; /* The flags passed in when the archive is initially opened. */ uint32_t m_init_flags; /* MZ_TRUE if the archive has a zip64 end of central directory headers, etc. */ mz_bool m_zip64; /* MZ_TRUE if we found zip64 extended info in the central directory (m_zip64 will also be slammed to true too, even if we didn't find a zip64 end of central dir header, etc.) */ mz_bool m_zip64_has_extended_info_fields; /* These fields are used by the file, FILE, memory, and memory/heap read/write helpers. */ MZ_FILE *m_pFile; mz_uint64 m_file_archive_start_ofs; void *m_pMem; size_t m_mem_size; size_t m_mem_capacity; }; #define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) (array_ptr)->m_element_size = element_size #if defined(DEBUG) || defined(_DEBUG) || defined(NDEBUG) static MZ_FORCEINLINE mz_uint mz_zip_array_range_check(const mz_zip_array *pArray, mz_uint index) { MZ_ASSERT(index < pArray->m_size); return index; } #define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) ((element_type *)((array_ptr)->m_p))[mz_zip_array_range_check(array_ptr, index)] #else #define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) ((element_type *)((array_ptr)->m_p))[index] #endif static MZ_FORCEINLINE void mz_zip_array_init(mz_zip_array *pArray, mz_uint32 element_size) { memset(pArray, 0, sizeof(mz_zip_array)); pArray->m_element_size = element_size; } static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip, mz_zip_array *pArray) { pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p); memset(pArray, 0, sizeof(mz_zip_array)); } static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip, mz_zip_array *pArray, size_t min_new_capacity, mz_uint growing) { void *pNew_p; size_t new_capacity = min_new_capacity; MZ_ASSERT(pArray->m_element_size); if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE; if (growing) { new_capacity = MZ_MAX(1, pArray->m_capacity); while (new_capacity < min_new_capacity) new_capacity *= 2; } if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p, pArray->m_element_size, new_capacity))) return MZ_FALSE; pArray->m_p = pNew_p; pArray->m_capacity = new_capacity; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_capacity, mz_uint growing) { if (new_capacity > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing)) return MZ_FALSE; } return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_size, mz_uint growing) { if 
(new_size > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing)) return MZ_FALSE; } pArray->m_size = new_size; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip, mz_zip_array *pArray, size_t n) { return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE); } static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip, mz_zip_array *pArray, const void *pElements, size_t n) { size_t orig_size = pArray->m_size; if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE)) return MZ_FALSE; if (n > 0) memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size, pElements, n * pArray->m_element_size); return MZ_TRUE; } #ifndef MINIZ_NO_TIME static MZ_TIME_T mz_zip_dos_to_time_t(int dos_time, int dos_date) { struct tm tm; memset(&tm, 0, sizeof(tm)); tm.tm_isdst = -1; tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900; tm.tm_mon = ((dos_date >> 5) & 15) - 1; tm.tm_mday = dos_date & 31; tm.tm_hour = (dos_time >> 11) & 31; tm.tm_min = (dos_time >> 5) & 63; tm.tm_sec = (dos_time << 1) & 62; return mktime(&tm); } #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS static void mz_zip_time_t_to_dos_time(MZ_TIME_T time, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { #ifdef _MSC_VER struct tm tm_struct; struct tm *tm = &tm_struct; errno_t err = localtime_s(tm, &time); if (err) { *pDOS_date = 0; *pDOS_time = 0; return; } #else struct tm *tm = localtime(&time); #endif /* #ifdef _MSC_VER */ *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) + ((tm->tm_sec) >> 1)); *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) + ((tm->tm_mon + 1) << 5) + tm->tm_mday); } #endif /* MINIZ_NO_ARCHIVE_WRITING_APIS */ #ifndef MINIZ_NO_STDIO #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS static mz_bool mz_zip_get_file_modified_time(const char *pFilename, MZ_TIME_T *pTime) { struct MZ_FILE_STAT_STRUCT file_stat; /* On Linux with x86 glibc, this call will fail on large files (I think >= 0x80000000 bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh. 
*/ if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE; *pTime = file_stat.st_mtime; return MZ_TRUE; } #endif /* #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS*/ static mz_bool mz_zip_set_file_times(const char *pFilename, MZ_TIME_T access_time, MZ_TIME_T modified_time) { struct utimbuf t; memset(&t, 0, sizeof(t)); t.actime = access_time; t.modtime = modified_time; return !utime(pFilename, &t); } #endif /* #ifndef MINIZ_NO_STDIO */ #endif /* #ifndef MINIZ_NO_TIME */ static MZ_FORCEINLINE mz_bool mz_zip_set_error(mz_zip_archive *pZip, mz_zip_error err_num) { if (pZip) pZip->m_last_error = err_num; return MZ_FALSE; } static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip, mz_uint flags) { (void)flags; if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); if (!pZip->m_pAlloc) pZip->m_pAlloc = miniz_def_alloc_func; if (!pZip->m_pFree) pZip->m_pFree = miniz_def_free_func; if (!pZip->m_pRealloc) pZip->m_pRealloc = miniz_def_realloc_func; pZip->m_archive_size = 0; pZip->m_central_directory_file_ofs = 0; pZip->m_total_files = 0; pZip->m_last_error = MZ_ZIP_NO_ERROR; if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32)); pZip->m_pState->m_init_flags = flags; pZip->m_pState->m_zip64 = MZ_FALSE; pZip->m_pState->m_zip64_has_extended_info_fields = MZ_FALSE; pZip->m_zip_mode = MZ_ZIP_MODE_READING; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, mz_uint r_index) { const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE; const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index)); mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS), r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS); mz_uint8 l = 0, r = 0; pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pE = pL + MZ_MIN(l_len, r_len); while (pL < pE) { if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break; pL++; pR++; } return (pL == pE) ? (l_len < r_len) : (l < r); } #define MZ_SWAP_UINT32(a, b) \ do \ { \ mz_uint32 t = a; \ a = b; \ b = t; \ } \ MZ_MACRO_END /* Heap sort of lowercased filenames, used to help accelerate plain central directory searches by mz_zip_reader_locate_file(). (Could also use qsort(), but it could allocate memory.) 
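   Note that the sort below only permutes the 32-bit indices held in
   m_sorted_central_dir_offsets; the central directory bytes themselves never move.
   Keeping it an in-place heap sort avoids extra allocation and recursion, and it is
   what lets mz_zip_locate_file_binary_search() further down do an O(log n) lookup
   instead of a linear scan over every header.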
*/ static void mz_zip_reader_sort_central_dir_offsets_by_filename(mz_zip_archive *pZip) { mz_zip_internal_state *pState = pZip->m_pState; const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; const mz_zip_array *pCentral_dir = &pState->m_central_dir; mz_uint32 *pIndices; mz_uint32 start, end; const mz_uint32 size = pZip->m_total_files; if (size <= 1U) return; pIndices = &MZ_ZIP_ARRAY_ELEMENT(&pState->m_sorted_central_dir_offsets, mz_uint32, 0); start = (size - 2U) >> 1U; for (;;) { mz_uint64 child, root = start; for (;;) { if ((child = (root << 1U) + 1U) >= size) break; child += (((child + 1U) < size) && (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1U]))); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], pIndices[child]); root = child; } if (!start) break; start--; } end = size - 1; while (end > 0) { mz_uint64 child, root = 0; MZ_SWAP_UINT32(pIndices[end], pIndices[0]); for (;;) { if ((child = (root << 1U) + 1U) >= end) break; child += (((child + 1U) < end) && mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1U])); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], pIndices[child]); root = child; } end--; } } static mz_bool mz_zip_reader_locate_header_sig(mz_zip_archive *pZip, mz_uint32 record_sig, mz_uint32 record_size, mz_int64 *pOfs) { mz_int64 cur_file_ofs; mz_uint32 buf_u32[4096 / sizeof(mz_uint32)]; mz_uint8 *pBuf = (mz_uint8 *)buf_u32; /* Basic sanity checks - reject files which are too small */ if (pZip->m_archive_size < record_size) return MZ_FALSE; /* Find the record by scanning the file from the end towards the beginning. 
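   The end-of-central-directory record is the last structure in a .zip file, but it may
   be followed by an archive comment of up to 0xFFFF bytes, so we read backwards in
   sizeof(buf_u32) (4KB) chunks looking for the signature and give up once we have gone
   back roughly MZ_UINT16_MAX + record_size bytes without finding it.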
*/ cur_file_ofs = MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0); for (;;) { int i, n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n) return MZ_FALSE; for (i = n - 4; i >= 0; --i) { mz_uint s = MZ_READ_LE32(pBuf + i); if (s == record_sig) { if ((pZip->m_archive_size - (cur_file_ofs + i)) >= record_size) break; } } if (i >= 0) { cur_file_ofs += i; break; } /* Give up if we've searched the entire file, or we've gone back "too far" (~64kb) */ if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >= (MZ_UINT16_MAX + record_size))) return MZ_FALSE; cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0); } *pOfs = cur_file_ofs; return MZ_TRUE; } static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip, mz_uint flags) { mz_uint cdir_size = 0, cdir_entries_on_this_disk = 0, num_this_disk = 0, cdir_disk_index = 0; mz_uint64 cdir_ofs = 0; mz_int64 cur_file_ofs = 0; const mz_uint8 *p; mz_uint32 buf_u32[4096 / sizeof(mz_uint32)]; mz_uint8 *pBuf = (mz_uint8 *)buf_u32; mz_bool sort_central_dir = ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0); mz_uint32 zip64_end_of_central_dir_locator_u32[(MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pZip64_locator = (mz_uint8 *)zip64_end_of_central_dir_locator_u32; mz_uint32 zip64_end_of_central_dir_header_u32[(MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pZip64_end_of_central_dir = (mz_uint8 *)zip64_end_of_central_dir_header_u32; mz_uint64 zip64_end_of_central_dir_ofs = 0; /* Basic sanity checks - reject files which are too small, and check the first 4 bytes of the file to make sure a local header is there. */ if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return mz_zip_set_error(pZip, MZ_ZIP_NOT_AN_ARCHIVE); if (!mz_zip_reader_locate_header_sig(pZip, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE, &cur_file_ofs)) return mz_zip_set_error(pZip, MZ_ZIP_FAILED_FINDING_CENTRAL_DIR); /* Read and verify the end of central directory record. 
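   If the archive is zip64, a 20-byte zip64 end-of-central-directory locator sits
   immediately before this record; that is why the code below probes at cur_file_ofs
   minus MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE and, when the locator signature
   matches, follows it to the zip64 end-of-central-directory record proper.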
*/ if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED); if (MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) return mz_zip_set_error(pZip, MZ_ZIP_NOT_AN_ARCHIVE); if (cur_file_ofs >= (MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE + MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE)) { if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs - MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE, pZip64_locator, MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE) == MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE) { if (MZ_READ_LE32(pZip64_locator + MZ_ZIP64_ECDL_SIG_OFS) == MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIG) { zip64_end_of_central_dir_ofs = MZ_READ_LE64(pZip64_locator + MZ_ZIP64_ECDL_REL_OFS_TO_ZIP64_ECDR_OFS); if (zip64_end_of_central_dir_ofs > (pZip->m_archive_size - MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE)) return mz_zip_set_error(pZip, MZ_ZIP_NOT_AN_ARCHIVE); if (pZip->m_pRead(pZip->m_pIO_opaque, zip64_end_of_central_dir_ofs, pZip64_end_of_central_dir, MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE) == MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE) { if (MZ_READ_LE32(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_SIG_OFS) == MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIG) { pZip->m_pState->m_zip64 = MZ_TRUE; } } } } } pZip->m_total_files = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS); cdir_entries_on_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS); num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS); cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS); cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS); cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS); if (pZip->m_pState->m_zip64) { mz_uint32 zip64_total_num_of_disks = MZ_READ_LE32(pZip64_locator + MZ_ZIP64_ECDL_TOTAL_NUMBER_OF_DISKS_OFS); mz_uint64 zip64_cdir_total_entries = MZ_READ_LE64(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_CDIR_TOTAL_ENTRIES_OFS); mz_uint64 zip64_cdir_total_entries_on_this_disk = MZ_READ_LE64(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS); mz_uint64 zip64_size_of_end_of_central_dir_record = MZ_READ_LE64(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_SIZE_OF_RECORD_OFS); mz_uint64 zip64_size_of_central_directory = MZ_READ_LE64(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_CDIR_SIZE_OFS); if (zip64_size_of_end_of_central_dir_record < (MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE - 12)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); if (zip64_total_num_of_disks != 1U) return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_MULTIDISK); /* Check for miniz's practical limits */ if (zip64_cdir_total_entries > MZ_UINT32_MAX) return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES); pZip->m_total_files = (mz_uint32)zip64_cdir_total_entries; if (zip64_cdir_total_entries_on_this_disk > MZ_UINT32_MAX) return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES); cdir_entries_on_this_disk = (mz_uint32)zip64_cdir_total_entries_on_this_disk; /* Check for miniz's current practical limits (sorry, this should be enough for millions of files) */ if (zip64_size_of_central_directory > MZ_UINT32_MAX) return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_CDIR_SIZE); cdir_size = (mz_uint32)zip64_size_of_central_directory; num_this_disk = MZ_READ_LE32(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_NUM_THIS_DISK_OFS); cdir_disk_index = MZ_READ_LE32(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_NUM_DISK_CDIR_OFS); cdir_ofs = 
MZ_READ_LE64(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_CDIR_OFS_OFS); } if (pZip->m_total_files != cdir_entries_on_this_disk) return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_MULTIDISK); if (((num_this_disk | cdir_disk_index) != 0) && ((num_this_disk != 1) || (cdir_disk_index != 1))) return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_MULTIDISK); if (cdir_size < pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); pZip->m_central_directory_file_ofs = cdir_ofs; if (pZip->m_total_files) { mz_uint i, n; /* Read the entire central directory into a heap block, and allocate another heap block to hold the unsorted central dir file record offsets, and possibly another to hold the sorted indices. */ if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size, MZ_FALSE)) || (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets, pZip->m_total_files, MZ_FALSE))) return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); if (sort_central_dir) { if (!mz_zip_array_resize(pZip, &pZip->m_pState->m_sorted_central_dir_offsets, pZip->m_total_files, MZ_FALSE)) return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); } if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs, pZip->m_pState->m_central_dir.m_p, cdir_size) != cdir_size) return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED); /* Now create an index into the central directory file records, do some basic sanity checking on each record */ p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p; for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) { mz_uint total_header_size, disk_index, bit_flags, filename_size, ext_data_size; mz_uint64 comp_size, decomp_size, local_header_ofs; if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) || (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, i) = (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p); if (sort_central_dir) MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets, mz_uint32, i) = i; comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS); filename_size = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); ext_data_size = MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS); if ((!pZip->m_pState->m_zip64_has_extended_info_fields) && (ext_data_size) && (MZ_MAX(MZ_MAX(comp_size, decomp_size), local_header_ofs) == MZ_UINT32_MAX)) { /* Attempt to find zip64 extended information field in the entry's extra data */ mz_uint32 extra_size_remaining = ext_data_size; if (extra_size_remaining) { const mz_uint8 *pExtra_data; void* buf = NULL; if (MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + ext_data_size > n) { buf = MZ_MALLOC(ext_data_size); if(buf==NULL) return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size, buf, ext_data_size) != ext_data_size) { MZ_FREE(buf); return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED); } pExtra_data = (mz_uint8*)buf; } else { pExtra_data = p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size; } do { mz_uint32 field_id; mz_uint32 field_data_size; if (extra_size_remaining < (sizeof(mz_uint16) * 2)) { MZ_FREE(buf); 
return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); } field_id = MZ_READ_LE16(pExtra_data); field_data_size = MZ_READ_LE16(pExtra_data + sizeof(mz_uint16)); if ((field_data_size + sizeof(mz_uint16) * 2) > extra_size_remaining) { MZ_FREE(buf); return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); } if (field_id == MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID) { /* Ok, the archive didn't have any zip64 headers but it uses a zip64 extended information field so mark it as zip64 anyway (this can occur with infozip's zip util when it reads compresses files from stdin). */ pZip->m_pState->m_zip64 = MZ_TRUE; pZip->m_pState->m_zip64_has_extended_info_fields = MZ_TRUE; break; } pExtra_data += sizeof(mz_uint16) * 2 + field_data_size; extra_size_remaining = extra_size_remaining - sizeof(mz_uint16) * 2 - field_data_size; } while (extra_size_remaining); MZ_FREE(buf); } } /* I've seen archives that aren't marked as zip64 that uses zip64 ext data, argh */ if ((comp_size != MZ_UINT32_MAX) && (decomp_size != MZ_UINT32_MAX)) { if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) && (decomp_size != comp_size)) || (decomp_size && !comp_size)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); } disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS); if ((disk_index == MZ_UINT16_MAX) || ((disk_index != num_this_disk) && (disk_index != 1))) return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_MULTIDISK); if (comp_size != MZ_UINT32_MAX) { if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); } bit_flags = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); if (bit_flags & MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_LOCAL_DIR_IS_MASKED) return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_ENCRYPTION); if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) > n) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); n -= total_header_size; p += total_header_size; } } if (sort_central_dir) mz_zip_reader_sort_central_dir_offsets_by_filename(pZip); return MZ_TRUE; } void mz_zip_zero_struct(mz_zip_archive *pZip) { if (pZip) MZ_CLEAR_OBJ(*pZip); } static mz_bool mz_zip_reader_end_internal(mz_zip_archive *pZip, mz_bool set_last_error) { mz_bool status = MZ_TRUE; if (!pZip) return MZ_FALSE; if ((!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) { if (set_last_error) pZip->m_last_error = MZ_ZIP_INVALID_PARAMETER; return MZ_FALSE; } if (pZip->m_pState) { mz_zip_internal_state *pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { if (pZip->m_zip_type == MZ_ZIP_TYPE_FILE) { if (MZ_FCLOSE(pState->m_pFile) == EOF) { if (set_last_error) pZip->m_last_error = MZ_ZIP_FILE_CLOSE_FAILED; status = MZ_FALSE; } } pState->m_pFile = NULL; } #endif /* #ifndef MINIZ_NO_STDIO */ pZip->m_pFree(pZip->m_pAlloc_opaque, pState); } pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return status; } mz_bool mz_zip_reader_end(mz_zip_archive *pZip) { return mz_zip_reader_end_internal(pZip, MZ_TRUE); } mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint flags) { if ((!pZip) 
|| (!pZip->m_pRead)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_zip_type = MZ_ZIP_TYPE_USER; pZip->m_archive_size = size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end_internal(pZip, MZ_FALSE); return MZ_FALSE; } return MZ_TRUE; } static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; size_t s = (file_ofs >= pZip->m_archive_size) ? 0 : (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n); memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s); return s; } mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint flags) { if (!pMem) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); if (size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return mz_zip_set_error(pZip, MZ_ZIP_NOT_AN_ARCHIVE); if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_zip_type = MZ_ZIP_TYPE_MEMORY; pZip->m_archive_size = size; pZip->m_pRead = mz_zip_mem_read_func; pZip->m_pIO_opaque = pZip; pZip->m_pNeeds_keepalive = NULL; #ifdef __cplusplus pZip->m_pState->m_pMem = const_cast<void *>(pMem); #else pZip->m_pState->m_pMem = (void *)pMem; #endif pZip->m_pState->m_mem_size = size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end_internal(pZip, MZ_FALSE); return MZ_FALSE; } return MZ_TRUE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); file_ofs += pZip->m_pState->m_file_archive_start_ofs; if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) return 0; return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile); } mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags) { return mz_zip_reader_init_file_v2(pZip, pFilename, flags, 0, 0); } mz_bool mz_zip_reader_init_file_v2(mz_zip_archive *pZip, const char *pFilename, mz_uint flags, mz_uint64 file_start_ofs, mz_uint64 archive_size) { mz_uint64 file_size; MZ_FILE *pFile; if ((!pZip) || (!pFilename) || ((archive_size) && (archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE))) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); pFile = MZ_FOPEN(pFilename, "rb"); if (!pFile) return mz_zip_set_error(pZip, MZ_ZIP_FILE_OPEN_FAILED); file_size = archive_size; if (!file_size) { if (MZ_FSEEK64(pFile, 0, SEEK_END)) { MZ_FCLOSE(pFile); return mz_zip_set_error(pZip, MZ_ZIP_FILE_SEEK_FAILED); } file_size = MZ_FTELL64(pFile); } /* TODO: Better sanity check archive_size and the # of actual remaining bytes */ if (file_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) { MZ_FCLOSE(pFile); return mz_zip_set_error(pZip, MZ_ZIP_NOT_AN_ARCHIVE); } if (!mz_zip_reader_init_internal(pZip, flags)) { MZ_FCLOSE(pFile); return MZ_FALSE; } pZip->m_zip_type = MZ_ZIP_TYPE_FILE; pZip->m_pRead = mz_zip_file_read_func; pZip->m_pIO_opaque = pZip; pZip->m_pState->m_pFile = pFile; pZip->m_archive_size = file_size; pZip->m_pState->m_file_archive_start_ofs = file_start_ofs; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end_internal(pZip, MZ_FALSE); return MZ_FALSE; } return MZ_TRUE; } mz_bool mz_zip_reader_init_cfile(mz_zip_archive *pZip, MZ_FILE *pFile, mz_uint64 archive_size, mz_uint flags) { mz_uint64 cur_file_ofs; if 
((!pZip) || (!pFile)) return mz_zip_set_error(pZip, MZ_ZIP_FILE_OPEN_FAILED); cur_file_ofs = MZ_FTELL64(pFile); if (!archive_size) { if (MZ_FSEEK64(pFile, 0, SEEK_END)) return mz_zip_set_error(pZip, MZ_ZIP_FILE_SEEK_FAILED); archive_size = MZ_FTELL64(pFile) - cur_file_ofs; if (archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return mz_zip_set_error(pZip, MZ_ZIP_NOT_AN_ARCHIVE); } if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_zip_type = MZ_ZIP_TYPE_CFILE; pZip->m_pRead = mz_zip_file_read_func; pZip->m_pIO_opaque = pZip; pZip->m_pState->m_pFile = pFile; pZip->m_archive_size = archive_size; pZip->m_pState->m_file_archive_start_ofs = cur_file_ofs; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end_internal(pZip, MZ_FALSE); return MZ_FALSE; } return MZ_TRUE; } #endif /* #ifndef MINIZ_NO_STDIO */ static MZ_FORCEINLINE const mz_uint8 *mz_zip_get_cdh(mz_zip_archive *pZip, mz_uint file_index) { if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files)) return NULL; return &MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index)); } mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index) { mz_uint m_bit_flag; const mz_uint8 *p = mz_zip_get_cdh(pZip, file_index); if (!p) { mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); return MZ_FALSE; } m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); return (m_bit_flag & (MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_IS_ENCRYPTED | MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_USES_STRONG_ENCRYPTION)) != 0; } mz_bool mz_zip_reader_is_file_supported(mz_zip_archive *pZip, mz_uint file_index) { mz_uint bit_flag; mz_uint method; const mz_uint8 *p = mz_zip_get_cdh(pZip, file_index); if (!p) { mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); return MZ_FALSE; } method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS); bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); if ((method != 0) && (method != MZ_DEFLATED)) { mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_METHOD); return MZ_FALSE; } if (bit_flag & (MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_IS_ENCRYPTED | MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_USES_STRONG_ENCRYPTION)) { mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_ENCRYPTION); return MZ_FALSE; } if (bit_flag & MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_COMPRESSED_PATCH_FLAG) { mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_FEATURE); return MZ_FALSE; } return MZ_TRUE; } mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index) { mz_uint filename_len, attribute_mapping_id, external_attr; const mz_uint8 *p = mz_zip_get_cdh(pZip, file_index); if (!p) { mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); return MZ_FALSE; } filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); if (filename_len) { if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/') return MZ_TRUE; } /* Bugfix: This code was also checking if the internal attribute was non-zero, which wasn't correct. */ /* Most/all zip writers (hopefully) set DOS file/directory attributes in the low 16-bits, so check for the DOS directory flag and ignore the source OS ID in the created by field. */ /* FIXME: Remove this check? Is it necessary - we already check the filename. 
*/ attribute_mapping_id = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS) >> 8; (void)attribute_mapping_id; external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); if ((external_attr & MZ_ZIP_DOS_DIR_ATTRIBUTE_BITFLAG) != 0) { return MZ_TRUE; } return MZ_FALSE; } static mz_bool mz_zip_file_stat_internal(mz_zip_archive *pZip, mz_uint file_index, const mz_uint8 *pCentral_dir_header, mz_zip_archive_file_stat *pStat, mz_bool *pFound_zip64_extra_data) { mz_uint n; const mz_uint8 *p = pCentral_dir_header; if (pFound_zip64_extra_data) *pFound_zip64_extra_data = MZ_FALSE; if ((!p) || (!pStat)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); /* Extract fields from the central directory record. */ pStat->m_file_index = file_index; pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index); pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS); pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS); pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS); #ifndef MINIZ_NO_TIME pStat->m_time = mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS), MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS)); #endif pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS); pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS); pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS); /* Copy as much of the filename and comment as possible. */ n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1); memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); pStat->m_filename[n] = '\0'; n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS); n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1); pStat->m_comment_size = n; memcpy(pStat->m_comment, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS), n); pStat->m_comment[n] = '\0'; /* Set some flags for convienance */ pStat->m_is_directory = mz_zip_reader_is_file_a_directory(pZip, file_index); pStat->m_is_encrypted = mz_zip_reader_is_file_encrypted(pZip, file_index); pStat->m_is_supported = mz_zip_reader_is_file_supported(pZip, file_index); /* See if we need to read any zip64 extended information fields. */ /* Confusingly, these zip64 fields can be present even on non-zip64 archives (Debian zip on a huge files from stdin piped to stdout creates them). 
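   A 32-bit field holding MZ_UINT32_MAX (for the compressed size, uncompressed size or
   local header offset) is the signal that the real 64-bit value lives in the zip64
   extended information extra field (header ID 0x0001), which the loop below walks one
   field at a time.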
*/ if (MZ_MAX(MZ_MAX(pStat->m_comp_size, pStat->m_uncomp_size), pStat->m_local_header_ofs) == MZ_UINT32_MAX) { /* Attempt to find zip64 extended information field in the entry's extra data */ mz_uint32 extra_size_remaining = MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS); if (extra_size_remaining) { const mz_uint8 *pExtra_data = p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); do { mz_uint32 field_id; mz_uint32 field_data_size; if (extra_size_remaining < (sizeof(mz_uint16) * 2)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); field_id = MZ_READ_LE16(pExtra_data); field_data_size = MZ_READ_LE16(pExtra_data + sizeof(mz_uint16)); if ((field_data_size + sizeof(mz_uint16) * 2) > extra_size_remaining) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); if (field_id == MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID) { const mz_uint8 *pField_data = pExtra_data + sizeof(mz_uint16) * 2; mz_uint32 field_data_remaining = field_data_size; if (pFound_zip64_extra_data) *pFound_zip64_extra_data = MZ_TRUE; if (pStat->m_uncomp_size == MZ_UINT32_MAX) { if (field_data_remaining < sizeof(mz_uint64)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); pStat->m_uncomp_size = MZ_READ_LE64(pField_data); pField_data += sizeof(mz_uint64); field_data_remaining -= sizeof(mz_uint64); } if (pStat->m_comp_size == MZ_UINT32_MAX) { if (field_data_remaining < sizeof(mz_uint64)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); pStat->m_comp_size = MZ_READ_LE64(pField_data); pField_data += sizeof(mz_uint64); field_data_remaining -= sizeof(mz_uint64); } if (pStat->m_local_header_ofs == MZ_UINT32_MAX) { if (field_data_remaining < sizeof(mz_uint64)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); pStat->m_local_header_ofs = MZ_READ_LE64(pField_data); pField_data += sizeof(mz_uint64); field_data_remaining -= sizeof(mz_uint64); } break; } pExtra_data += sizeof(mz_uint16) * 2 + field_data_size; extra_size_remaining = extra_size_remaining - sizeof(mz_uint16) * 2 - field_data_size; } while (extra_size_remaining); } } return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_string_equal(const char *pA, const char *pB, mz_uint len, mz_uint flags) { mz_uint i; if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return 0 == memcmp(pA, pB, len); for (i = 0; i < len; ++i) if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) return MZ_FALSE; return MZ_TRUE; } static MZ_FORCEINLINE int mz_zip_filename_compare(const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR, mz_uint r_len) { const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE; mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS); mz_uint8 l = 0, r = 0; pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pE = pL + MZ_MIN(l_len, r_len); while (pL < pE) { if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break; pL++; pR++; } return (pL == pE) ? 
(int)(l_len - r_len) : (l - r); } static mz_bool mz_zip_locate_file_binary_search(mz_zip_archive *pZip, const char *pFilename, mz_uint32 *pIndex) { mz_zip_internal_state *pState = pZip->m_pState; const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; const mz_zip_array *pCentral_dir = &pState->m_central_dir; mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(&pState->m_sorted_central_dir_offsets, mz_uint32, 0); const uint32_t size = pZip->m_total_files; const mz_uint filename_len = (mz_uint)strlen(pFilename); if (pIndex) *pIndex = 0; if (size) { /* yes I could use uint32_t's, but then we would have to add some special case checks in the loop, argh, and */ /* honestly the major expense here on 32-bit CPU's will still be the filename compare */ mz_int64 l = 0, h = (mz_int64)size - 1; while (l <= h) { mz_int64 m = l + ((h - l) >> 1); uint32_t file_index = pIndices[(uint32_t)m]; int comp = mz_zip_filename_compare(pCentral_dir, pCentral_dir_offsets, file_index, pFilename, filename_len); if (!comp) { if (pIndex) *pIndex = file_index; return MZ_TRUE; } else if (comp < 0) l = m + 1; else h = m - 1; } } return mz_zip_set_error(pZip, MZ_ZIP_FILE_NOT_FOUND); } int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags) { mz_uint32 index; if (!mz_zip_reader_locate_file_v2(pZip, pName, pComment, flags, &index)) return -1; else return (int)index; } mz_bool mz_zip_reader_locate_file_v2(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags, mz_uint32 *pIndex) { mz_uint file_index; size_t name_len, comment_len; if (pIndex) *pIndex = 0; if ((!pZip) || (!pZip->m_pState) || (!pName)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); /* See if we can use a binary search */ if (((pZip->m_pState->m_init_flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0) && (pZip->m_zip_mode == MZ_ZIP_MODE_READING) && ((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) && (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size)) { return mz_zip_locate_file_binary_search(pZip, pName, pIndex); } /* Locate the entry by scanning the entire central directory */ name_len = strlen(pName); if (name_len > MZ_UINT16_MAX) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); comment_len = pComment ? 
strlen(pComment) : 0; if (comment_len > MZ_UINT16_MAX) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); for (file_index = 0; file_index < pZip->m_total_files; file_index++) { const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index)); mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS); const char *pFilename = (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; if (filename_len < name_len) continue; if (comment_len) { mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS), file_comment_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS); const char *pFile_comment = pFilename + filename_len + file_extra_len; if ((file_comment_len != comment_len) || (!mz_zip_string_equal(pComment, pFile_comment, file_comment_len, flags))) continue; } if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) { int ofs = filename_len - 1; do { if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') || (pFilename[ofs] == ':')) break; } while (--ofs >= 0); ofs++; pFilename += ofs; filename_len -= ofs; } if ((filename_len == name_len) && (mz_zip_string_equal(pName, pFilename, filename_len, flags))) { if (pIndex) *pIndex = file_index; return MZ_TRUE; } } return mz_zip_set_error(pZip, MZ_ZIP_FILE_NOT_FOUND); } mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) { int status = TINFL_STATUS_DONE; mz_uint64 needed_size, cur_file_ofs, comp_remaining, out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail; mz_zip_archive_file_stat file_stat; void *pRead_buf; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; tinfl_decompressor inflator; if ((!pZip) || (!pZip->m_pState) || ((buf_size) && (!pBuf)) || ((user_read_buf_size) && (!pUser_read_buf)) || (!pZip->m_pRead)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; /* A directory or zero length file */ if ((file_stat.m_is_directory) || (!file_stat.m_comp_size)) return MZ_TRUE; /* Encryption and patch files are not supported. */ if (file_stat.m_bit_flag & (MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_IS_ENCRYPTED | MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_USES_STRONG_ENCRYPTION | MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_COMPRESSED_PATCH_FLAG)) return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_ENCRYPTION); /* This function only supports decompressing stored and deflate. */ if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED)) return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_METHOD); /* Ensure supplied output buffer is large enough. */ needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size : file_stat.m_uncomp_size; if (buf_size < needed_size) return mz_zip_set_error(pZip, MZ_ZIP_BUF_TOO_SMALL); /* Read and parse the local directory entry. 
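   For reference, a typical caller reaches this point through the higher level reader
   API. A minimal sketch (archive_data, archive_size and "file.txt" are placeholder
   inputs):

       mz_zip_archive zip;
       mz_zip_zero_struct(&zip);
       if (mz_zip_reader_init_mem(&zip, archive_data, archive_size, 0))
       {
           size_t uncomp_len = 0;
           void *p = mz_zip_reader_extract_file_to_heap(&zip, "file.txt", &uncomp_len, 0);
           if (p)
               zip.m_pFree(zip.m_pAlloc_opaque, p);   (the buffer came from the archive's allocator)
           mz_zip_reader_end(&zip);
       }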
*/ cur_file_ofs = file_stat.m_local_header_ofs; if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED); if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) { /* The file is stored or the caller has requested the compressed data. */ if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, (size_t)needed_size) != needed_size) return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED); #ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) == 0) { if (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32) return mz_zip_set_error(pZip, MZ_ZIP_CRC_CHECK_FAILED); } #endif return MZ_TRUE; } /* Decompress the file either directly from memory or from a file input buffer. */ tinfl_init(&inflator); if (pZip->m_pState->m_pMem) { /* Read directly from the archive in memory. */ pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs; read_buf_size = read_buf_avail = file_stat.m_comp_size; comp_remaining = 0; } else if (pUser_read_buf) { /* Use a user provided read buffer. */ if (!user_read_buf_size) return MZ_FALSE; pRead_buf = (mz_uint8 *)pUser_read_buf; read_buf_size = user_read_buf_size; read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } else { /* Temporarily allocate a read buffer. */ read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint64)MZ_ZIP_MAX_IO_BUF_SIZE); if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF)) return mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR); if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size))) return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } do { /* The size_t cast here should be OK because we've verified that the output buffer is >= file_stat.m_uncomp_size above */ size_t in_buf_size, out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs); if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; mz_zip_set_error(pZip, MZ_ZIP_DECOMPRESSION_FAILED); break; } cur_file_ofs += read_buf_avail; comp_remaining -= read_buf_avail; read_buf_ofs = 0; } in_buf_size = (size_t)read_buf_avail; status = tinfl_decompress(&inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF | (comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0)); read_buf_avail -= in_buf_size; read_buf_ofs += in_buf_size; out_buf_ofs += out_buf_size; } while (status == TINFL_STATUS_NEEDS_MORE_INPUT); if (status == TINFL_STATUS_DONE) { /* Make sure the entire file was decompressed, and check its CRC. 
*/ if (out_buf_ofs != file_stat.m_uncomp_size) { mz_zip_set_error(pZip, MZ_ZIP_UNEXPECTED_DECOMPRESSED_SIZE); status = TINFL_STATUS_FAILED; } #ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS else if (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32) { mz_zip_set_error(pZip, MZ_ZIP_CRC_CHECK_FAILED); status = TINFL_STATUS_FAILED; } #endif } if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf)) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); return status == TINFL_STATUS_DONE; } mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) { mz_uint32 file_index; if (!mz_zip_reader_locate_file_v2(pZip, pFilename, NULL, flags, &file_index)) return MZ_FALSE; return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, pUser_read_buf, user_read_buf_size); } mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags) { return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, NULL, 0); } mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags) { return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf, buf_size, flags, NULL, 0); } void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, size_t *pSize, mz_uint flags) { mz_uint64 comp_size, uncomp_size, alloc_size; const mz_uint8 *p = mz_zip_get_cdh(pZip, file_index); void *pBuf; if (pSize) *pSize = 0; if (!p) { mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); return NULL; } comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? 
comp_size : uncomp_size; if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) { mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR); return NULL; } if (NULL == (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size))) { mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); return NULL; } if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size, flags)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return NULL; } if (pSize) *pSize = (size_t)alloc_size; return pBuf; } void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, const char *pFilename, size_t *pSize, mz_uint flags) { mz_uint32 file_index; if (!mz_zip_reader_locate_file_v2(pZip, pFilename, NULL, flags, &file_index)) { if (pSize) *pSize = 0; return MZ_FALSE; } return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags); } mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, mz_uint file_index, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) { int status = TINFL_STATUS_DONE; mz_uint file_crc32 = MZ_CRC32_INIT; mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining, out_buf_ofs = 0, cur_file_ofs; mz_zip_archive_file_stat file_stat; void *pRead_buf = NULL; void *pWrite_buf = NULL; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; if ((!pZip) || (!pZip->m_pState) || (!pCallback) || (!pZip->m_pRead)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; /* A directory or zero length file */ if ((file_stat.m_is_directory) || (!file_stat.m_comp_size)) return MZ_TRUE; /* Encryption and patch files are not supported. */ if (file_stat.m_bit_flag & (MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_IS_ENCRYPTED | MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_USES_STRONG_ENCRYPTION | MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_COMPRESSED_PATCH_FLAG)) return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_ENCRYPTION); /* This function only supports decompressing stored and deflate. */ if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED)) return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_METHOD); /* Read and do some minimal validation of the local directory entry (this doesn't crack the zip64 stuff, which we already have from the central dir) */ cur_file_ofs = file_stat.m_local_header_ofs; if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED); if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); /* Decompress the file either directly from memory or from a file input buffer. 
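   The caller's pCallback is handed each produced chunk in order; judging from the call
   sites below it must accept (opaque, file_ofs, buf, n) and return n on success. A
   minimal sketch of a callback that streams the output to a stdio file (pOpaque is
   assumed to be a MZ_FILE pointer supplied by the caller; the exact mz_file_write_func
   typedef lives in the header):

       static size_t write_to_file_callback(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n)
       {
           (void)file_ofs;   (output is produced sequentially here, so the offset is unused)
           return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque);
       }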
*/ if (pZip->m_pState->m_pMem) { pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs; read_buf_size = read_buf_avail = file_stat.m_comp_size; comp_remaining = 0; } else { read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint64)MZ_ZIP_MAX_IO_BUF_SIZE); if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size))) return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); read_buf_avail = 0; comp_remaining = file_stat.m_comp_size; } if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) { /* The file is stored or the caller has requested the compressed data. */ if (pZip->m_pState->m_pMem) { if (((sizeof(size_t) == sizeof(mz_uint32))) && (file_stat.m_comp_size > MZ_UINT32_MAX)) return mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR); if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)file_stat.m_comp_size) != file_stat.m_comp_size) { mz_zip_set_error(pZip, MZ_ZIP_WRITE_CALLBACK_FAILED); status = TINFL_STATUS_FAILED; } else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) { #ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS file_crc32 = (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf, (size_t)file_stat.m_comp_size); #endif } cur_file_ofs += file_stat.m_comp_size; out_buf_ofs += file_stat.m_comp_size; comp_remaining = 0; } else { while (comp_remaining) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED); status = TINFL_STATUS_FAILED; break; } #ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) { file_crc32 = (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail); } #endif if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { mz_zip_set_error(pZip, MZ_ZIP_WRITE_CALLBACK_FAILED); status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; out_buf_ofs += read_buf_avail; comp_remaining -= read_buf_avail; } } } else { tinfl_decompressor inflator; tinfl_init(&inflator); if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, TINFL_LZ_DICT_SIZE))) { mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); status = TINFL_STATUS_FAILED; } else { do { mz_uint8 *pWrite_buf_cur = (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); size_t in_buf_size, out_buf_size = TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED); status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; comp_remaining -= read_buf_avail; read_buf_ofs = 0; } in_buf_size = (size_t)read_buf_avail; status = tinfl_decompress(&inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size, comp_remaining ? 
TINFL_FLAG_HAS_MORE_INPUT : 0); read_buf_avail -= in_buf_size; read_buf_ofs += in_buf_size; if (out_buf_size) { if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) != out_buf_size) { mz_zip_set_error(pZip, MZ_ZIP_WRITE_CALLBACK_FAILED); status = TINFL_STATUS_FAILED; break; } #ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS file_crc32 = (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size); #endif if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) { mz_zip_set_error(pZip, MZ_ZIP_DECOMPRESSION_FAILED); status = TINFL_STATUS_FAILED; break; } } } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) || (status == TINFL_STATUS_HAS_MORE_OUTPUT)); } } if ((status == TINFL_STATUS_DONE) && (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) { /* Make sure the entire file was decompressed, and check its CRC. */ if (out_buf_ofs != file_stat.m_uncomp_size) { mz_zip_set_error(pZip, MZ_ZIP_UNEXPECTED_DECOMPRESSED_SIZE); status = TINFL_STATUS_FAILED; } #ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS else if (file_crc32 != file_stat.m_crc32) { mz_zip_set_error(pZip, MZ_ZIP_DECOMPRESSION_FAILED); status = TINFL_STATUS_FAILED; } #endif } if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf); return status == TINFL_STATUS_DONE; } mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip, const char *pFilename, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) { mz_uint32 file_index; if (!mz_zip_reader_locate_file_v2(pZip, pFilename, NULL, flags, &file_index)) return MZ_FALSE; return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque, flags); } mz_zip_reader_extract_iter_state* mz_zip_reader_extract_iter_new(mz_zip_archive *pZip, mz_uint file_index, mz_uint flags) { mz_zip_reader_extract_iter_state *pState; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; /* Argument sanity check */ if ((!pZip) || (!pZip->m_pState)) return NULL; /* Allocate an iterator status structure */ pState = (mz_zip_reader_extract_iter_state*)pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_reader_extract_iter_state)); if (!pState) { mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); return NULL; } /* Fetch file details */ if (!mz_zip_reader_file_stat(pZip, file_index, &pState->file_stat)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pState); return NULL; } /* Encryption and patch files are not supported. */ if (pState->file_stat.m_bit_flag & (MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_IS_ENCRYPTED | MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_USES_STRONG_ENCRYPTION | MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_COMPRESSED_PATCH_FLAG)) { mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_ENCRYPTION); pZip->m_pFree(pZip->m_pAlloc_opaque, pState); return NULL; } /* This function only supports decompressing stored and deflate. 
*/ if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (pState->file_stat.m_method != 0) && (pState->file_stat.m_method != MZ_DEFLATED)) { mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_METHOD); pZip->m_pFree(pZip->m_pAlloc_opaque, pState); return NULL; } /* Init state - save args */ pState->pZip = pZip; pState->flags = flags; /* Init state - reset variables to defaults */ pState->status = TINFL_STATUS_DONE; #ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS pState->file_crc32 = MZ_CRC32_INIT; #endif pState->read_buf_ofs = 0; pState->out_buf_ofs = 0; pState->pRead_buf = NULL; pState->pWrite_buf = NULL; pState->out_blk_remain = 0; /* Read and parse the local directory entry. */ pState->cur_file_ofs = pState->file_stat.m_local_header_ofs; if (pZip->m_pRead(pZip->m_pIO_opaque, pState->cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) { mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED); pZip->m_pFree(pZip->m_pAlloc_opaque, pState); return NULL; } if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) { mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); pZip->m_pFree(pZip->m_pAlloc_opaque, pState); return NULL; } pState->cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); if ((pState->cur_file_ofs + pState->file_stat.m_comp_size) > pZip->m_archive_size) { mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); pZip->m_pFree(pZip->m_pAlloc_opaque, pState); return NULL; } /* Decompress the file either directly from memory or from a file input buffer. */ if (pZip->m_pState->m_pMem) { pState->pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + pState->cur_file_ofs; pState->read_buf_size = pState->read_buf_avail = pState->file_stat.m_comp_size; pState->comp_remaining = pState->file_stat.m_comp_size; } else { if (!((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!pState->file_stat.m_method))) { /* Decompression required, therefore intermediate read buffer required */ pState->read_buf_size = MZ_MIN(pState->file_stat.m_comp_size, MZ_ZIP_MAX_IO_BUF_SIZE); if (NULL == (pState->pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)pState->read_buf_size))) { mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); pZip->m_pFree(pZip->m_pAlloc_opaque, pState); return NULL; } } else { /* Decompression not required - we will be reading directly into user buffer, no temp buf required */ pState->read_buf_size = 0; } pState->read_buf_avail = 0; pState->comp_remaining = pState->file_stat.m_comp_size; } if (!((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!pState->file_stat.m_method))) { /* Decompression required, init decompressor */ tinfl_init( &pState->inflator ); /* Allocate write buffer */ if (NULL == (pState->pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, TINFL_LZ_DICT_SIZE))) { mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); if (pState->pRead_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pState->pRead_buf); pZip->m_pFree(pZip->m_pAlloc_opaque, pState); return NULL; } } return pState; } mz_zip_reader_extract_iter_state* mz_zip_reader_extract_file_iter_new(mz_zip_archive *pZip, const char *pFilename, mz_uint flags) { mz_uint32 file_index; /* Locate file index by name */ if (!mz_zip_reader_locate_file_v2(pZip, pFilename, NULL, flags, &file_index)) return NULL; /* Construct iterator */ return mz_zip_reader_extract_iter_new(pZip, file_index, flags); } size_t mz_zip_reader_extract_iter_read(mz_zip_reader_extract_iter_state* pState, void* pvBuf, size_t buf_size) { size_t 
copied_to_caller = 0; /* Argument sanity check */ if ((!pState) || (!pState->pZip) || (!pState->pZip->m_pState) || (!pvBuf)) return 0; if ((pState->flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!pState->file_stat.m_method)) { /* The file is stored or the caller has requested the compressed data, calc amount to return. */ copied_to_caller = (size_t)MZ_MIN( buf_size, pState->comp_remaining ); /* Zip is in memory....or requires reading from a file? */ if (pState->pZip->m_pState->m_pMem) { /* Copy data to caller's buffer */ memcpy( pvBuf, pState->pRead_buf, copied_to_caller ); pState->pRead_buf = ((mz_uint8*)pState->pRead_buf) + copied_to_caller; } else { /* Read directly into caller's buffer */ if (pState->pZip->m_pRead(pState->pZip->m_pIO_opaque, pState->cur_file_ofs, pvBuf, copied_to_caller) != copied_to_caller) { /* Failed to read all that was asked for, flag failure and alert user */ mz_zip_set_error(pState->pZip, MZ_ZIP_FILE_READ_FAILED); pState->status = TINFL_STATUS_FAILED; copied_to_caller = 0; } } #ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS /* Compute CRC if not returning compressed data only */ if (!(pState->flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) pState->file_crc32 = (mz_uint32)mz_crc32(pState->file_crc32, (const mz_uint8 *)pvBuf, copied_to_caller); #endif /* Advance offsets, dec counters */ pState->cur_file_ofs += copied_to_caller; pState->out_buf_ofs += copied_to_caller; pState->comp_remaining -= copied_to_caller; } else { do { /* Calc ptr to write buffer - given current output pos and block size */ mz_uint8 *pWrite_buf_cur = (mz_uint8 *)pState->pWrite_buf + (pState->out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); /* Calc max output size - given current output pos and block size */ size_t in_buf_size, out_buf_size = TINFL_LZ_DICT_SIZE - (pState->out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); if (!pState->out_blk_remain) { /* Read more data from file if none available (and reading from file) */ if ((!pState->read_buf_avail) && (!pState->pZip->m_pState->m_pMem)) { /* Calc read size */ pState->read_buf_avail = MZ_MIN(pState->read_buf_size, pState->comp_remaining); if (pState->pZip->m_pRead(pState->pZip->m_pIO_opaque, pState->cur_file_ofs, pState->pRead_buf, (size_t)pState->read_buf_avail) != pState->read_buf_avail) { mz_zip_set_error(pState->pZip, MZ_ZIP_FILE_READ_FAILED); pState->status = TINFL_STATUS_FAILED; break; } /* Advance offsets, dec counters */ pState->cur_file_ofs += pState->read_buf_avail; pState->comp_remaining -= pState->read_buf_avail; pState->read_buf_ofs = 0; } /* Perform decompression */ in_buf_size = (size_t)pState->read_buf_avail; pState->status = tinfl_decompress(&pState->inflator, (const mz_uint8 *)pState->pRead_buf + pState->read_buf_ofs, &in_buf_size, (mz_uint8 *)pState->pWrite_buf, pWrite_buf_cur, &out_buf_size, pState->comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0); pState->read_buf_avail -= in_buf_size; pState->read_buf_ofs += in_buf_size; /* Update current output block size remaining */ pState->out_blk_remain = out_buf_size; } if (pState->out_blk_remain) { /* Calc amount to return. 
*/ size_t to_copy = MZ_MIN( (buf_size - copied_to_caller), pState->out_blk_remain ); /* Copy data to caller's buffer */ memcpy( (uint8_t*)pvBuf + copied_to_caller, pWrite_buf_cur, to_copy ); #ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS /* Perform CRC */ pState->file_crc32 = (mz_uint32)mz_crc32(pState->file_crc32, pWrite_buf_cur, to_copy); #endif /* Decrement data consumed from block */ pState->out_blk_remain -= to_copy; /* Inc output offset, while performing sanity check */ if ((pState->out_buf_ofs += to_copy) > pState->file_stat.m_uncomp_size) { mz_zip_set_error(pState->pZip, MZ_ZIP_DECOMPRESSION_FAILED); pState->status = TINFL_STATUS_FAILED; break; } /* Increment counter of data copied to caller */ copied_to_caller += to_copy; } } while ( (copied_to_caller < buf_size) && ((pState->status == TINFL_STATUS_NEEDS_MORE_INPUT) || (pState->status == TINFL_STATUS_HAS_MORE_OUTPUT)) ); } /* Return how many bytes were copied into user buffer */ return copied_to_caller; } mz_bool mz_zip_reader_extract_iter_free(mz_zip_reader_extract_iter_state* pState) { int status; /* Argument sanity check */ if ((!pState) || (!pState->pZip) || (!pState->pZip->m_pState)) return MZ_FALSE; /* Was decompression completed and requested? */ if ((pState->status == TINFL_STATUS_DONE) && (!(pState->flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) { /* Make sure the entire file was decompressed, and check its CRC. */ if (pState->out_buf_ofs != pState->file_stat.m_uncomp_size) { mz_zip_set_error(pState->pZip, MZ_ZIP_UNEXPECTED_DECOMPRESSED_SIZE); pState->status = TINFL_STATUS_FAILED; } #ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS else if (pState->file_crc32 != pState->file_stat.m_crc32) { mz_zip_set_error(pState->pZip, MZ_ZIP_DECOMPRESSION_FAILED); pState->status = TINFL_STATUS_FAILED; } #endif } /* Free buffers */ if (!pState->pZip->m_pState->m_pMem) pState->pZip->m_pFree(pState->pZip->m_pAlloc_opaque, pState->pRead_buf); if (pState->pWrite_buf) pState->pZip->m_pFree(pState->pZip->m_pAlloc_opaque, pState->pWrite_buf); /* Save status */ status = pState->status; /* Free context */ pState->pZip->m_pFree(pState->pZip->m_pAlloc_opaque, pState); return status == TINFL_STATUS_DONE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs, const void *pBuf, size_t n) { (void)ofs; return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque); } mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags) { mz_bool status; mz_zip_archive_file_stat file_stat; MZ_FILE *pFile; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; if ((file_stat.m_is_directory) || (!file_stat.m_is_supported)) return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_FEATURE); pFile = MZ_FOPEN(pDst_filename, "wb"); if (!pFile) return mz_zip_set_error(pZip, MZ_ZIP_FILE_OPEN_FAILED); status = mz_zip_reader_extract_to_callback(pZip, file_index, mz_zip_file_write_callback, pFile, flags); if (MZ_FCLOSE(pFile) == EOF) { if (status) mz_zip_set_error(pZip, MZ_ZIP_FILE_CLOSE_FAILED); status = MZ_FALSE; } #if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_STDIO) if (status) mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time); #endif return status; } mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags) { mz_uint32 file_index; if (!mz_zip_reader_locate_file_v2(pZip, pArchive_filename, NULL, flags, &file_index)) return MZ_FALSE; return mz_zip_reader_extract_to_file(pZip, 
file_index, pDst_filename, flags); } mz_bool mz_zip_reader_extract_to_cfile(mz_zip_archive *pZip, mz_uint file_index, MZ_FILE *pFile, mz_uint flags) { mz_zip_archive_file_stat file_stat; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; if ((file_stat.m_is_directory) || (!file_stat.m_is_supported)) return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_FEATURE); return mz_zip_reader_extract_to_callback(pZip, file_index, mz_zip_file_write_callback, pFile, flags); } mz_bool mz_zip_reader_extract_file_to_cfile(mz_zip_archive *pZip, const char *pArchive_filename, MZ_FILE *pFile, mz_uint flags) { mz_uint32 file_index; if (!mz_zip_reader_locate_file_v2(pZip, pArchive_filename, NULL, flags, &file_index)) return MZ_FALSE; return mz_zip_reader_extract_to_cfile(pZip, file_index, pFile, flags); } #endif /* #ifndef MINIZ_NO_STDIO */ static size_t mz_zip_compute_crc32_callback(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n) { mz_uint32 *p = (mz_uint32 *)pOpaque; (void)file_ofs; *p = (mz_uint32)mz_crc32(*p, (const mz_uint8 *)pBuf, n); return n; } mz_bool mz_zip_validate_file(mz_zip_archive *pZip, mz_uint file_index, mz_uint flags) { mz_zip_archive_file_stat file_stat; mz_zip_internal_state *pState; const mz_uint8 *pCentral_dir_header; mz_bool found_zip64_ext_data_in_cdir = MZ_FALSE; mz_bool found_zip64_ext_data_in_ldir = MZ_FALSE; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; mz_uint64 local_header_ofs = 0; mz_uint32 local_header_filename_len, local_header_extra_len, local_header_crc32; mz_uint64 local_header_comp_size, local_header_uncomp_size; mz_uint32 uncomp_crc32 = MZ_CRC32_INIT; mz_bool has_data_descriptor; mz_uint32 local_header_bit_flags; mz_zip_array file_data_array; mz_zip_array_init(&file_data_array, 1); if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || (!pZip->m_pRead)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); if (file_index > pZip->m_total_files) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); pState = pZip->m_pState; pCentral_dir_header = mz_zip_get_cdh(pZip, file_index); if (!mz_zip_file_stat_internal(pZip, file_index, pCentral_dir_header, &file_stat, &found_zip64_ext_data_in_cdir)) return MZ_FALSE; /* A directory or zero length file */ if ((file_stat.m_is_directory) || (!file_stat.m_uncomp_size)) return MZ_TRUE; /* Encryption and patch files are not supported. */ if (file_stat.m_is_encrypted) return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_ENCRYPTION); /* This function only supports stored and deflate. */ if ((file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED)) return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_METHOD); if (!file_stat.m_is_supported) return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_FEATURE); /* Read and parse the local directory entry. 
*/ local_header_ofs = file_stat.m_local_header_ofs; if (pZip->m_pRead(pZip->m_pIO_opaque, local_header_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED); if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); local_header_filename_len = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS); local_header_extra_len = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); local_header_comp_size = MZ_READ_LE32(pLocal_header + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS); local_header_uncomp_size = MZ_READ_LE32(pLocal_header + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS); local_header_crc32 = MZ_READ_LE32(pLocal_header + MZ_ZIP_LDH_CRC32_OFS); local_header_bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS); has_data_descriptor = (local_header_bit_flags & 8) != 0; if (local_header_filename_len != strlen(file_stat.m_filename)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); if ((local_header_ofs + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + local_header_filename_len + local_header_extra_len + file_stat.m_comp_size) > pZip->m_archive_size) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); if (!mz_zip_array_resize(pZip, &file_data_array, MZ_MAX(local_header_filename_len, local_header_extra_len), MZ_FALSE)) return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); if (local_header_filename_len) { if (pZip->m_pRead(pZip->m_pIO_opaque, local_header_ofs + MZ_ZIP_LOCAL_DIR_HEADER_SIZE, file_data_array.m_p, local_header_filename_len) != local_header_filename_len) { mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED); goto handle_failure; } /* I've seen 1 archive that had the same pathname, but used backslashes in the local dir and forward slashes in the central dir. Do we care about this? For now, this case will fail validation. 
*/ if (memcmp(file_stat.m_filename, file_data_array.m_p, local_header_filename_len) != 0) { mz_zip_set_error(pZip, MZ_ZIP_VALIDATION_FAILED); goto handle_failure; } } if ((local_header_extra_len) && ((local_header_comp_size == MZ_UINT32_MAX) || (local_header_uncomp_size == MZ_UINT32_MAX))) { mz_uint32 extra_size_remaining = local_header_extra_len; const mz_uint8 *pExtra_data = (const mz_uint8 *)file_data_array.m_p; if (pZip->m_pRead(pZip->m_pIO_opaque, local_header_ofs + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + local_header_filename_len, file_data_array.m_p, local_header_extra_len) != local_header_extra_len) { mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED); goto handle_failure; } do { mz_uint32 field_id, field_data_size, field_total_size; if (extra_size_remaining < (sizeof(mz_uint16) * 2)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); field_id = MZ_READ_LE16(pExtra_data); field_data_size = MZ_READ_LE16(pExtra_data + sizeof(mz_uint16)); field_total_size = field_data_size + sizeof(mz_uint16) * 2; if (field_total_size > extra_size_remaining) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); if (field_id == MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID) { const mz_uint8 *pSrc_field_data = pExtra_data + sizeof(mz_uint32); if (field_data_size < sizeof(mz_uint64) * 2) { mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); goto handle_failure; } local_header_uncomp_size = MZ_READ_LE64(pSrc_field_data); local_header_comp_size = MZ_READ_LE64(pSrc_field_data + sizeof(mz_uint64)); found_zip64_ext_data_in_ldir = MZ_TRUE; break; } pExtra_data += field_total_size; extra_size_remaining -= field_total_size; } while (extra_size_remaining); } /* TODO: parse local header extra data when local_header_comp_size is 0xFFFFFFFF! (big_descriptor.zip) */ /* I've seen zips in the wild with the data descriptor bit set, but proper local header values and bogus data descriptors */ if ((has_data_descriptor) && (!local_header_comp_size) && (!local_header_crc32)) { mz_uint8 descriptor_buf[32]; mz_bool has_id; const mz_uint8 *pSrc; mz_uint32 file_crc32; mz_uint64 comp_size = 0, uncomp_size = 0; mz_uint32 num_descriptor_uint32s = ((pState->m_zip64) || (found_zip64_ext_data_in_ldir)) ? 6 : 4; if (pZip->m_pRead(pZip->m_pIO_opaque, local_header_ofs + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + local_header_filename_len + local_header_extra_len + file_stat.m_comp_size, descriptor_buf, sizeof(mz_uint32) * num_descriptor_uint32s) != (sizeof(mz_uint32) * num_descriptor_uint32s)) { mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED); goto handle_failure; } has_id = (MZ_READ_LE32(descriptor_buf) == MZ_ZIP_DATA_DESCRIPTOR_ID); pSrc = has_id ? 
(descriptor_buf + sizeof(mz_uint32)) : descriptor_buf; file_crc32 = MZ_READ_LE32(pSrc); if ((pState->m_zip64) || (found_zip64_ext_data_in_ldir)) { comp_size = MZ_READ_LE64(pSrc + sizeof(mz_uint32)); uncomp_size = MZ_READ_LE64(pSrc + sizeof(mz_uint32) + sizeof(mz_uint64)); } else { comp_size = MZ_READ_LE32(pSrc + sizeof(mz_uint32)); uncomp_size = MZ_READ_LE32(pSrc + sizeof(mz_uint32) + sizeof(mz_uint32)); } if ((file_crc32 != file_stat.m_crc32) || (comp_size != file_stat.m_comp_size) || (uncomp_size != file_stat.m_uncomp_size)) { mz_zip_set_error(pZip, MZ_ZIP_VALIDATION_FAILED); goto handle_failure; } } else { if ((local_header_crc32 != file_stat.m_crc32) || (local_header_comp_size != file_stat.m_comp_size) || (local_header_uncomp_size != file_stat.m_uncomp_size)) { mz_zip_set_error(pZip, MZ_ZIP_VALIDATION_FAILED); goto handle_failure; } } mz_zip_array_clear(pZip, &file_data_array); if ((flags & MZ_ZIP_FLAG_VALIDATE_HEADERS_ONLY) == 0) { if (!mz_zip_reader_extract_to_callback(pZip, file_index, mz_zip_compute_crc32_callback, &uncomp_crc32, 0)) return MZ_FALSE; /* 1 more check to be sure, although the extract checks too. */ if (uncomp_crc32 != file_stat.m_crc32) { mz_zip_set_error(pZip, MZ_ZIP_VALIDATION_FAILED); return MZ_FALSE; } } return MZ_TRUE; handle_failure: mz_zip_array_clear(pZip, &file_data_array); return MZ_FALSE; } mz_bool mz_zip_validate_archive(mz_zip_archive *pZip, mz_uint flags) { mz_zip_internal_state *pState; uint32_t i; if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || (!pZip->m_pRead)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); pState = pZip->m_pState; /* Basic sanity checks */ if (!pState->m_zip64) { if (pZip->m_total_files > MZ_UINT16_MAX) return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE); if (pZip->m_archive_size > MZ_UINT32_MAX) return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE); } else { if (pZip->m_total_files >= MZ_UINT32_MAX) return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE); if (pState->m_central_dir.m_size >= MZ_UINT32_MAX) return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE); } for (i = 0; i < pZip->m_total_files; i++) { if (MZ_ZIP_FLAG_VALIDATE_LOCATE_FILE_FLAG & flags) { mz_uint32 found_index; mz_zip_archive_file_stat stat; if (!mz_zip_reader_file_stat(pZip, i, &stat)) return MZ_FALSE; if (!mz_zip_reader_locate_file_v2(pZip, stat.m_filename, NULL, 0, &found_index)) return MZ_FALSE; /* This check can fail if there are duplicate filenames in the archive (which we don't check for when writing - that's up to the user) */ if (found_index != i) return mz_zip_set_error(pZip, MZ_ZIP_VALIDATION_FAILED); } if (!mz_zip_validate_file(pZip, i, flags)) return MZ_FALSE; } return MZ_TRUE; } mz_bool mz_zip_validate_mem_archive(const void *pMem, size_t size, mz_uint flags, mz_zip_error *pErr) { mz_bool success = MZ_TRUE; mz_zip_archive zip; mz_zip_error actual_err = MZ_ZIP_NO_ERROR; if ((!pMem) || (!size)) { if (pErr) *pErr = MZ_ZIP_INVALID_PARAMETER; return MZ_FALSE; } mz_zip_zero_struct(&zip); if (!mz_zip_reader_init_mem(&zip, pMem, size, flags)) { if (pErr) *pErr = zip.m_last_error; return MZ_FALSE; } if (!mz_zip_validate_archive(&zip, flags)) { actual_err = zip.m_last_error; success = MZ_FALSE; } if (!mz_zip_reader_end_internal(&zip, success)) { if (!actual_err) actual_err = zip.m_last_error; success = MZ_FALSE; } if (pErr) *pErr = actual_err; return success; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_validate_file_archive(const char *pFilename, mz_uint flags, mz_zip_error *pErr) { mz_bool success = MZ_TRUE; 
mz_zip_archive zip; mz_zip_error actual_err = MZ_ZIP_NO_ERROR; if (!pFilename) { if (pErr) *pErr = MZ_ZIP_INVALID_PARAMETER; return MZ_FALSE; } mz_zip_zero_struct(&zip); if (!mz_zip_reader_init_file_v2(&zip, pFilename, flags, 0, 0)) { if (pErr) *pErr = zip.m_last_error; return MZ_FALSE; } if (!mz_zip_validate_archive(&zip, flags)) { actual_err = zip.m_last_error; success = MZ_FALSE; } if (!mz_zip_reader_end_internal(&zip, success)) { if (!actual_err) actual_err = zip.m_last_error; success = MZ_FALSE; } if (pErr) *pErr = actual_err; return success; } #endif /* #ifndef MINIZ_NO_STDIO */ /* ------------------- .ZIP archive writing */ #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS static MZ_FORCEINLINE void mz_write_le16(mz_uint8 *p, mz_uint16 v) { p[0] = (mz_uint8)v; p[1] = (mz_uint8)(v >> 8); } static MZ_FORCEINLINE void mz_write_le32(mz_uint8 *p, mz_uint32 v) { p[0] = (mz_uint8)v; p[1] = (mz_uint8)(v >> 8); p[2] = (mz_uint8)(v >> 16); p[3] = (mz_uint8)(v >> 24); } static MZ_FORCEINLINE void mz_write_le64(mz_uint8 *p, mz_uint64 v) { mz_write_le32(p, (mz_uint32)v); mz_write_le32(p + sizeof(mz_uint32), (mz_uint32)(v >> 32)); } #define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v)) #define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v)) #define MZ_WRITE_LE64(p, v) mz_write_le64((mz_uint8 *)(p), (mz_uint64)(v)) static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_zip_internal_state *pState = pZip->m_pState; mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size); if (!n) return 0; /* An allocation this big is likely to just fail on 32-bit systems, so don't even go there. */ if ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)) { mz_zip_set_error(pZip, MZ_ZIP_FILE_TOO_LARGE); return 0; } if (new_size > pState->m_mem_capacity) { void *pNew_block; size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity); while (new_capacity < new_size) new_capacity *= 2; if (NULL == (pNew_block = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity))) { mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); return 0; } pState->m_pMem = pNew_block; pState->m_mem_capacity = new_capacity; } memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n); pState->m_mem_size = (size_t)new_size; return n; } static mz_bool mz_zip_writer_end_internal(mz_zip_archive *pZip, mz_bool set_last_error) { mz_zip_internal_state *pState; mz_bool status = MZ_TRUE; if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) && (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED))) { if (set_last_error) mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); return MZ_FALSE; } pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { if (pZip->m_zip_type == MZ_ZIP_TYPE_FILE) { if (MZ_FCLOSE(pState->m_pFile) == EOF) { if (set_last_error) mz_zip_set_error(pZip, MZ_ZIP_FILE_CLOSE_FAILED); status = MZ_FALSE; } } pState->m_pFile = NULL; } #endif /* #ifndef MINIZ_NO_STDIO */ if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem); pState->m_pMem = NULL; } pZip->m_pFree(pZip->m_pAlloc_opaque, pState); pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return status; } 
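/* Illustrative usage sketch (not part of miniz, kept compiled out with #if 0): a round trip through the in-memory writer and reader paths implemented in this file. It builds a one-entry archive on the heap with mz_zip_writer_init_heap() / mz_zip_writer_add_mem() / mz_zip_writer_finalize_heap_archive(), then reads the entry back with mz_zip_reader_init_mem() and mz_zip_reader_extract_file_to_heap(). The function name and the entry name "hello.txt" are invented for the example; only public miniz APIs are used. */
#if 0
static mz_bool example_zip_round_trip(void)
{
    mz_zip_archive zip_writer, zip_reader;
    void *pArchive = NULL, *pExtracted = NULL;
    size_t archive_size = 0, extracted_size = 0;
    const char *pPayload = "hello miniz";
    mz_bool ok = MZ_FALSE;

    /* Build a heap-backed archive containing a single deflated entry. */
    mz_zip_zero_struct(&zip_writer);
    if (!mz_zip_writer_init_heap(&zip_writer, 0, 0))
        return MZ_FALSE;
    if (mz_zip_writer_add_mem(&zip_writer, "hello.txt", pPayload, strlen(pPayload), MZ_DEFAULT_LEVEL) &&
        mz_zip_writer_finalize_heap_archive(&zip_writer, &pArchive, &archive_size))
    {
        ok = MZ_TRUE; /* ownership of pArchive is transferred to the caller */
    }
    /* mz_zip_writer_end() releases the writer state but not the finalized buffer. */
    mz_zip_writer_end(&zip_writer);
    if (!ok)
        return MZ_FALSE;

    /* Re-open the finished archive from memory and pull the entry back out. */
    ok = MZ_FALSE;
    mz_zip_zero_struct(&zip_reader);
    if (mz_zip_reader_init_mem(&zip_reader, pArchive, archive_size, 0))
    {
        pExtracted = mz_zip_reader_extract_file_to_heap(&zip_reader, "hello.txt", &extracted_size, 0);
        ok = (pExtracted != NULL) &&
             (extracted_size == strlen(pPayload)) &&
             (memcmp(pExtracted, pPayload, extracted_size) == 0);
        mz_free(pExtracted); /* buffers from extract_to_heap are released with mz_free() */
        mz_zip_reader_end(&zip_reader);
    }

    mz_free(pArchive); /* buffer handed out by mz_zip_writer_finalize_heap_archive() */
    return ok;
}
#endif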
mz_bool mz_zip_writer_init_v2(mz_zip_archive *pZip, mz_uint64 existing_size, mz_uint flags) { mz_bool zip64 = (flags & MZ_ZIP_FLAG_WRITE_ZIP64) != 0; if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); if (flags & MZ_ZIP_FLAG_WRITE_ALLOW_READING) { if (!pZip->m_pRead) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); } if (pZip->m_file_offset_alignment) { /* Ensure user specified file offset alignment is a power of 2. */ if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); } if (!pZip->m_pAlloc) pZip->m_pAlloc = miniz_def_alloc_func; if (!pZip->m_pFree) pZip->m_pFree = miniz_def_free_func; if (!pZip->m_pRealloc) pZip->m_pRealloc = miniz_def_realloc_func; pZip->m_archive_size = existing_size; pZip->m_central_directory_file_ofs = 0; pZip->m_total_files = 0; if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32)); pZip->m_pState->m_zip64 = zip64; pZip->m_pState->m_zip64_has_extended_info_fields = zip64; pZip->m_zip_type = MZ_ZIP_TYPE_USER; pZip->m_zip_mode = MZ_ZIP_MODE_WRITING; return MZ_TRUE; } mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) { return mz_zip_writer_init_v2(pZip, existing_size, 0); } mz_bool mz_zip_writer_init_heap_v2(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size, mz_uint flags) { pZip->m_pWrite = mz_zip_heap_write_func; pZip->m_pNeeds_keepalive = NULL; if (flags & MZ_ZIP_FLAG_WRITE_ALLOW_READING) pZip->m_pRead = mz_zip_mem_read_func; pZip->m_pIO_opaque = pZip; if (!mz_zip_writer_init_v2(pZip, size_to_reserve_at_beginning, flags)) return MZ_FALSE; pZip->m_zip_type = MZ_ZIP_TYPE_HEAP; if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size, size_to_reserve_at_beginning))) { if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, initial_allocation_size))) { mz_zip_writer_end_internal(pZip, MZ_FALSE); return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); } pZip->m_pState->m_mem_capacity = initial_allocation_size; } return MZ_TRUE; } mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size) { return mz_zip_writer_init_heap_v2(pZip, size_to_reserve_at_beginning, initial_allocation_size, 0); } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); file_ofs += pZip->m_pState->m_file_archive_start_ofs; if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) { mz_zip_set_error(pZip, MZ_ZIP_FILE_SEEK_FAILED); return 0; } return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile); } mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning) { return mz_zip_writer_init_file_v2(pZip, 
pFilename, size_to_reserve_at_beginning, 0); } mz_bool mz_zip_writer_init_file_v2(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning, mz_uint flags) { MZ_FILE *pFile; pZip->m_pWrite = mz_zip_file_write_func; pZip->m_pNeeds_keepalive = NULL; if (flags & MZ_ZIP_FLAG_WRITE_ALLOW_READING) pZip->m_pRead = mz_zip_file_read_func; pZip->m_pIO_opaque = pZip; if (!mz_zip_writer_init_v2(pZip, size_to_reserve_at_beginning, flags)) return MZ_FALSE; if (NULL == (pFile = MZ_FOPEN(pFilename, (flags & MZ_ZIP_FLAG_WRITE_ALLOW_READING) ? "w+b" : "wb"))) { mz_zip_writer_end(pZip); return mz_zip_set_error(pZip, MZ_ZIP_FILE_OPEN_FAILED); } pZip->m_pState->m_pFile = pFile; pZip->m_zip_type = MZ_ZIP_TYPE_FILE; if (size_to_reserve_at_beginning) { mz_uint64 cur_ofs = 0; char buf[4096]; MZ_CLEAR_OBJ(buf); do { size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) { mz_zip_writer_end(pZip); return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED); } cur_ofs += n; size_to_reserve_at_beginning -= n; } while (size_to_reserve_at_beginning); } return MZ_TRUE; } mz_bool mz_zip_writer_init_cfile(mz_zip_archive *pZip, MZ_FILE *pFile, mz_uint flags) { pZip->m_pWrite = mz_zip_file_write_func; pZip->m_pNeeds_keepalive = NULL; if (flags & MZ_ZIP_FLAG_WRITE_ALLOW_READING) pZip->m_pRead = mz_zip_file_read_func; pZip->m_pIO_opaque = pZip; if (!mz_zip_writer_init_v2(pZip, 0, flags)) return MZ_FALSE; pZip->m_pState->m_pFile = pFile; pZip->m_pState->m_file_archive_start_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); pZip->m_zip_type = MZ_ZIP_TYPE_CFILE; return MZ_TRUE; } #endif /* #ifndef MINIZ_NO_STDIO */ mz_bool mz_zip_writer_init_from_reader_v2(mz_zip_archive *pZip, const char *pFilename, mz_uint flags) { mz_zip_internal_state *pState; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); if (flags & MZ_ZIP_FLAG_WRITE_ZIP64) { /* We don't support converting a non-zip64 file to zip64 - this seems like more trouble than it's worth. (What about the existing 32-bit data descriptors that could follow the compressed data?) */ if (!pZip->m_pState->m_zip64) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); } /* No sense in trying to write to an archive that's already at the support max size */ if (pZip->m_pState->m_zip64) { if (pZip->m_total_files == MZ_UINT32_MAX) return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES); } else { if (pZip->m_total_files == MZ_UINT16_MAX) return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES); if ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > MZ_UINT32_MAX) return mz_zip_set_error(pZip, MZ_ZIP_FILE_TOO_LARGE); } pState = pZip->m_pState; if (pState->m_pFile) { #ifdef MINIZ_NO_STDIO (void)pFilename; return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); #else if (pZip->m_pIO_opaque != pZip) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); if (pZip->m_zip_type == MZ_ZIP_TYPE_FILE) { if (!pFilename) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); /* Archive is being read from stdio and was originally opened only for reading. Try to reopen as writable. */ if (NULL == (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) { /* The mz_zip_archive is now in a bogus state because pState->m_pFile is NULL, so just close it. 
*/ mz_zip_reader_end_internal(pZip, MZ_FALSE); return mz_zip_set_error(pZip, MZ_ZIP_FILE_OPEN_FAILED); } } pZip->m_pWrite = mz_zip_file_write_func; pZip->m_pNeeds_keepalive = NULL; #endif /* #ifdef MINIZ_NO_STDIO */ } else if (pState->m_pMem) { /* Archive lives in a memory block. Assume it's from the heap that we can resize using the realloc callback. */ if (pZip->m_pIO_opaque != pZip) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); pState->m_mem_capacity = pState->m_mem_size; pZip->m_pWrite = mz_zip_heap_write_func; pZip->m_pNeeds_keepalive = NULL; } /* Archive is being read via a user provided read function - make sure the user has specified a write function too. */ else if (!pZip->m_pWrite) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); /* Start writing new files at the archive's current central directory location. */ /* TODO: We could add a flag that lets the user start writing immediately AFTER the existing central dir - this would be safer. */ pZip->m_archive_size = pZip->m_central_directory_file_ofs; pZip->m_central_directory_file_ofs = 0; /* Clear the sorted central dir offsets, they aren't useful or maintained now. */ /* Even though we're now in write mode, files can still be extracted and verified, but file locates will be slow. */ /* TODO: We could easily maintain the sorted central directory offsets. */ mz_zip_array_clear(pZip, &pZip->m_pState->m_sorted_central_dir_offsets); pZip->m_zip_mode = MZ_ZIP_MODE_WRITING; return MZ_TRUE; } mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename) { return mz_zip_writer_init_from_reader_v2(pZip, pFilename, 0); } /* TODO: pArchive_name is a terrible name here! */ mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags) { return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0, level_and_flags, 0, 0); } typedef struct { mz_zip_archive *m_pZip; mz_uint64 m_cur_archive_file_ofs; mz_uint64 m_comp_size; } mz_zip_writer_add_state; static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len, void *pUser) { mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser; if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque, pState->m_cur_archive_file_ofs, pBuf, len) != len) return MZ_FALSE; pState->m_cur_archive_file_ofs += len; pState->m_comp_size += len; return MZ_TRUE; } #define MZ_ZIP64_MAX_LOCAL_EXTRA_FIELD_SIZE (sizeof(mz_uint16) * 2 + sizeof(mz_uint64) * 2) #define MZ_ZIP64_MAX_CENTRAL_EXTRA_FIELD_SIZE (sizeof(mz_uint16) * 2 + sizeof(mz_uint64) * 3) static mz_uint32 mz_zip_writer_create_zip64_extra_data(mz_uint8 *pBuf, mz_uint64 *pUncomp_size, mz_uint64 *pComp_size, mz_uint64 *pLocal_header_ofs) { mz_uint8 *pDst = pBuf; mz_uint32 field_size = 0; MZ_WRITE_LE16(pDst + 0, MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID); MZ_WRITE_LE16(pDst + 2, 0); pDst += sizeof(mz_uint16) * 2; if (pUncomp_size) { MZ_WRITE_LE64(pDst, *pUncomp_size); pDst += sizeof(mz_uint64); field_size += sizeof(mz_uint64); } if (pComp_size) { MZ_WRITE_LE64(pDst, *pComp_size); pDst += sizeof(mz_uint64); field_size += sizeof(mz_uint64); } if (pLocal_header_ofs) { MZ_WRITE_LE64(pDst, *pLocal_header_ofs); pDst += sizeof(mz_uint64); field_size += sizeof(mz_uint64); } MZ_WRITE_LE16(pBuf + 2, field_size); return (mz_uint32)(pDst - pBuf); } static mz_bool mz_zip_writer_create_local_dir_header(mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 
comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date) { (void)pZip; memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, MZ_MIN(comp_size, MZ_UINT32_MAX)); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, MZ_MIN(uncomp_size, MZ_UINT32_MAX)); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size); return MZ_TRUE; } static mz_bool mz_zip_writer_create_central_dir_header(mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { (void)pZip; memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, MZ_MIN(comp_size, MZ_UINT32_MAX)); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, MZ_MIN(uncomp_size, MZ_UINT32_MAX)); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, MZ_MIN(local_header_ofs, MZ_UINT32_MAX)); return MZ_TRUE; } static mz_bool mz_zip_writer_add_to_central_dir(mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size, const void *pExtra, mz_uint16 extra_size, const void *pComment, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes, const char *user_extra_data, mz_uint user_extra_data_len) { mz_zip_internal_state *pState = pZip->m_pState; mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size; size_t orig_central_dir_size = pState->m_central_dir.m_size; mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; if (!pZip->m_pState->m_zip64) { if (local_header_ofs > 0xFFFFFFFF) return mz_zip_set_error(pZip, MZ_ZIP_FILE_TOO_LARGE); } /* miniz doesn't support central dirs >= MZ_UINT32_MAX bytes yet */ if (((mz_uint64)pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size + user_extra_data_len + comment_size) >= MZ_UINT32_MAX) return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_CDIR_SIZE); if (!mz_zip_writer_create_central_dir_header(pZip, central_dir_header, filename_size, 
(mz_uint16)(extra_size + user_extra_data_len), comment_size, uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time, dos_date, local_header_ofs, ext_attributes)) return mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR); if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename, filename_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra, extra_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, user_extra_data, user_extra_data_len)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment, comment_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &central_dir_ofs, 1))) { /* Try to resize the central directory array back into its original state. */ mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); } return MZ_TRUE; } static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) { /* Basic ZIP archive filename validity checks: Valid filenames cannot start with a forward slash, cannot contain a drive letter, and cannot use DOS-style backward slashes. */ if (*pArchive_name == '/') return MZ_FALSE; /* Making sure the name does not contain drive letters or DOS style backward slashes is the responsibility of the program using miniz*/ return MZ_TRUE; } static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(mz_zip_archive *pZip) { mz_uint32 n; if (!pZip->m_file_offset_alignment) return 0; n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1)); return (mz_uint)((pZip->m_file_offset_alignment - n) & (pZip->m_file_offset_alignment - 1)); } static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip, mz_uint64 cur_file_ofs, mz_uint32 n) { char buf[4096]; memset(buf, 0, MZ_MIN(sizeof(buf), n)); while (n) { mz_uint32 s = MZ_MIN(sizeof(buf), n); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s) return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED); cur_file_ofs += s; n -= s; } return MZ_TRUE; } mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32) { return mz_zip_writer_add_mem_ex_v2(pZip, pArchive_name, pBuf, buf_size, pComment, comment_size, level_and_flags, uncomp_size, uncomp_crc32, NULL, NULL, 0, NULL, 0); } mz_bool mz_zip_writer_add_mem_ex_v2(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32, MZ_TIME_T *last_modified, const char *user_extra_data, mz_uint user_extra_data_len, const char *user_extra_data_central, mz_uint user_extra_data_central_len) { mz_uint16 method = 0, dos_time = 0, dos_date = 0; mz_uint level, ext_attributes = 0, num_alignment_padding_bytes; mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; tdefl_compressor *pComp = NULL; mz_bool store_data_uncompressed; mz_zip_internal_state *pState; mz_uint8 *pExtra_data = NULL; mz_uint32 extra_size = 0; mz_uint8 extra_data[MZ_ZIP64_MAX_CENTRAL_EXTRA_FIELD_SIZE]; mz_uint16 bit_flags = 0; if ((int)level_and_flags < 0) 
level_and_flags = MZ_DEFAULT_LEVEL; if (uncomp_size || (buf_size && !(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) bit_flags |= MZ_ZIP_LDH_BIT_FLAG_HAS_LOCATOR; if (!(level_and_flags & MZ_ZIP_FLAG_ASCII_FILENAME)) bit_flags |= MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_UTF8; level = level_and_flags & 0xF; store_data_uncompressed = ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)); if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) || (!pArchive_name) || ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); pState = pZip->m_pState; if (pState->m_zip64) { if (pZip->m_total_files == MZ_UINT32_MAX) return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES); } else { if (pZip->m_total_files == MZ_UINT16_MAX) { pState->m_zip64 = MZ_TRUE; /*return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES); */ } if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) { pState->m_zip64 = MZ_TRUE; /*return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE); */ } } if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); if (!mz_zip_writer_validate_archive_name(pArchive_name)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_FILENAME); #ifndef MINIZ_NO_TIME if (last_modified != NULL) { mz_zip_time_t_to_dos_time(*last_modified, &dos_time, &dos_date); } else { MZ_TIME_T cur_time; time(&cur_time); mz_zip_time_t_to_dos_time(cur_time, &dos_time, &dos_date); } #endif /* #ifndef MINIZ_NO_TIME */ if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) { uncomp_crc32 = (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size); uncomp_size = buf_size; if (uncomp_size <= 3) { level = 0; store_data_uncompressed = MZ_TRUE; } } archive_name_size = strlen(pArchive_name); if (archive_name_size > MZ_UINT16_MAX) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_FILENAME); num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); /* miniz doesn't support central dirs >= MZ_UINT32_MAX bytes yet */ if (((mz_uint64)pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + MZ_ZIP64_MAX_CENTRAL_EXTRA_FIELD_SIZE + comment_size) >= MZ_UINT32_MAX) return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_CDIR_SIZE); if (!pState->m_zip64) { /* Bail early if the archive would obviously become too large */ if ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + archive_name_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size + user_extra_data_len + pState->m_central_dir.m_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE + user_extra_data_central_len + MZ_ZIP_DATA_DESCRIPTER_SIZE32) > 0xFFFFFFFF) { pState->m_zip64 = MZ_TRUE; /*return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE); */ } } if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) { /* Set DOS Subdirectory attribute bit. */ ext_attributes |= MZ_ZIP_DOS_DIR_ATTRIBUTE_BITFLAG; /* Subdirectories cannot contain data. */ if ((buf_size) || (uncomp_size)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); } /* Try to do any allocations before writing to the archive, so if an allocation fails the file remains unmodified. (A good idea if we're doing an in-place modification.) */ if ((!mz_zip_array_ensure_room(pZip, &pState->m_central_dir, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size + (pState->m_zip64 ? 
MZ_ZIP64_MAX_CENTRAL_EXTRA_FIELD_SIZE : 0))) || (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1))) return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); if ((!store_data_uncompressed) && (buf_size)) { if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)))) return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); } if (!mz_zip_writer_write_zeros(pZip, cur_archive_file_ofs, num_alignment_padding_bytes)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes; MZ_CLEAR_OBJ(local_dir_header); if (!store_data_uncompressed || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) { method = MZ_DEFLATED; } if (pState->m_zip64) { if (uncomp_size >= MZ_UINT32_MAX || local_dir_header_ofs >= MZ_UINT32_MAX) { pExtra_data = extra_data; extra_size = mz_zip_writer_create_zip64_extra_data(extra_data, (uncomp_size >= MZ_UINT32_MAX) ? &uncomp_size : NULL, (uncomp_size >= MZ_UINT32_MAX) ? &comp_size : NULL, (local_dir_header_ofs >= MZ_UINT32_MAX) ? &local_dir_header_ofs : NULL); } if (!mz_zip_writer_create_local_dir_header(pZip, local_dir_header, (mz_uint16)archive_name_size, (mz_uint16)(extra_size + user_extra_data_len), 0, 0, 0, method, bit_flags, dos_time, dos_date)) return mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR); if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED); cur_archive_file_ofs += sizeof(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED); } cur_archive_file_ofs += archive_name_size; if (pExtra_data != NULL) { if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, extra_data, extra_size) != extra_size) return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED); cur_archive_file_ofs += extra_size; } } else { if ((comp_size > MZ_UINT32_MAX) || (cur_archive_file_ofs > MZ_UINT32_MAX)) return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE); if (!mz_zip_writer_create_local_dir_header(pZip, local_dir_header, (mz_uint16)archive_name_size, (mz_uint16)user_extra_data_len, 0, 0, 0, method, bit_flags, dos_time, dos_date)) return mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR); if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED); cur_archive_file_ofs += sizeof(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED); } cur_archive_file_ofs += archive_name_size; } if (user_extra_data_len > 0) { if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, user_extra_data, user_extra_data_len) != user_extra_data_len) return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED); cur_archive_file_ofs += user_extra_data_len; } if (store_data_uncompressed) { if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf, buf_size) != buf_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); 
return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED); } cur_archive_file_ofs += buf_size; comp_size = buf_size; } else if (buf_size) { mz_zip_writer_add_state state; state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params(level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) || (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) != TDEFL_STATUS_DONE)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return mz_zip_set_error(pZip, MZ_ZIP_COMPRESSION_FAILED); } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pComp = NULL; if (uncomp_size) { mz_uint8 local_dir_footer[MZ_ZIP_DATA_DESCRIPTER_SIZE64]; mz_uint32 local_dir_footer_size = MZ_ZIP_DATA_DESCRIPTER_SIZE32; MZ_ASSERT(bit_flags & MZ_ZIP_LDH_BIT_FLAG_HAS_LOCATOR); MZ_WRITE_LE32(local_dir_footer + 0, MZ_ZIP_DATA_DESCRIPTOR_ID); MZ_WRITE_LE32(local_dir_footer + 4, uncomp_crc32); if (pExtra_data == NULL) { if (comp_size > MZ_UINT32_MAX) return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE); MZ_WRITE_LE32(local_dir_footer + 8, comp_size); MZ_WRITE_LE32(local_dir_footer + 12, uncomp_size); } else { MZ_WRITE_LE64(local_dir_footer + 8, comp_size); MZ_WRITE_LE64(local_dir_footer + 16, uncomp_size); local_dir_footer_size = MZ_ZIP_DATA_DESCRIPTER_SIZE64; } if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, local_dir_footer, local_dir_footer_size) != local_dir_footer_size) return MZ_FALSE; cur_archive_file_ofs += local_dir_footer_size; } if (pExtra_data != NULL) { extra_size = mz_zip_writer_create_zip64_extra_data(extra_data, (uncomp_size >= MZ_UINT32_MAX) ? &uncomp_size : NULL, (uncomp_size >= MZ_UINT32_MAX) ? &comp_size : NULL, (local_dir_header_ofs >= MZ_UINT32_MAX) ? 
&local_dir_header_ofs : NULL); } if (!mz_zip_writer_add_to_central_dir(pZip, pArchive_name, (mz_uint16)archive_name_size, pExtra_data, (mz_uint16)extra_size, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time, dos_date, local_dir_header_ofs, ext_attributes, user_extra_data_central, user_extra_data_central_len)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } mz_bool mz_zip_writer_add_read_buf_callback(mz_zip_archive *pZip, const char *pArchive_name, mz_file_read_func read_callback, void* callback_opaque, mz_uint64 size_to_add, const MZ_TIME_T *pFile_time, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, const char *user_extra_data, mz_uint user_extra_data_len, const char *user_extra_data_central, mz_uint user_extra_data_central_len) { mz_uint16 gen_flags = MZ_ZIP_LDH_BIT_FLAG_HAS_LOCATOR; mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes; mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0; mz_uint64 local_dir_header_ofs, cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = size_to_add, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; mz_uint8 *pExtra_data = NULL; mz_uint32 extra_size = 0; mz_uint8 extra_data[MZ_ZIP64_MAX_CENTRAL_EXTRA_FIELD_SIZE]; mz_zip_internal_state *pState; mz_uint64 file_ofs = 0; if (!(level_and_flags & MZ_ZIP_FLAG_ASCII_FILENAME)) gen_flags |= MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_UTF8; if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; level = level_and_flags & 0xF; /* Sanity checks */ if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) || ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); pState = pZip->m_pState; if ((!pState->m_zip64) && (uncomp_size > MZ_UINT32_MAX)) { /* Source file is too large for non-zip64 */ /*return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE); */ pState->m_zip64 = MZ_TRUE; } /* We could support this, but why? 
*/ if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); if (!mz_zip_writer_validate_archive_name(pArchive_name)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_FILENAME); if (pState->m_zip64) { if (pZip->m_total_files == MZ_UINT32_MAX) return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES); } else { if (pZip->m_total_files == MZ_UINT16_MAX) { pState->m_zip64 = MZ_TRUE; /*return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES); */ } } archive_name_size = strlen(pArchive_name); if (archive_name_size > MZ_UINT16_MAX) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_FILENAME); num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); /* miniz doesn't support central dirs >= MZ_UINT32_MAX bytes yet */ if (((mz_uint64)pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + MZ_ZIP64_MAX_CENTRAL_EXTRA_FIELD_SIZE + comment_size) >= MZ_UINT32_MAX) return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_CDIR_SIZE); if (!pState->m_zip64) { /* Bail early if the archive would obviously become too large */ if ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + archive_name_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size + user_extra_data_len + pState->m_central_dir.m_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE + 1024 + MZ_ZIP_DATA_DESCRIPTER_SIZE32 + user_extra_data_central_len) > 0xFFFFFFFF) { pState->m_zip64 = MZ_TRUE; /*return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE); */ } } #ifndef MINIZ_NO_TIME if (pFile_time) { mz_zip_time_t_to_dos_time(*pFile_time, &dos_time, &dos_date); } #endif if (uncomp_size <= 3) level = 0; if (!mz_zip_writer_write_zeros(pZip, cur_archive_file_ofs, num_alignment_padding_bytes)) { return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED); } cur_archive_file_ofs += num_alignment_padding_bytes; local_dir_header_ofs = cur_archive_file_ofs; if (pZip->m_file_offset_alignment) { MZ_ASSERT((cur_archive_file_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } if (uncomp_size && level) { method = MZ_DEFLATED; } MZ_CLEAR_OBJ(local_dir_header); if (pState->m_zip64) { if (uncomp_size >= MZ_UINT32_MAX || local_dir_header_ofs >= MZ_UINT32_MAX) { pExtra_data = extra_data; extra_size = mz_zip_writer_create_zip64_extra_data(extra_data, (uncomp_size >= MZ_UINT32_MAX) ? &uncomp_size : NULL, (uncomp_size >= MZ_UINT32_MAX) ? &comp_size : NULL, (local_dir_header_ofs >= MZ_UINT32_MAX) ? 
&local_dir_header_ofs : NULL); } if (!mz_zip_writer_create_local_dir_header(pZip, local_dir_header, (mz_uint16)archive_name_size, (mz_uint16)(extra_size + user_extra_data_len), 0, 0, 0, method, gen_flags, dos_time, dos_date)) return mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED); cur_archive_file_ofs += sizeof(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED); } cur_archive_file_ofs += archive_name_size; if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, extra_data, extra_size) != extra_size) return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED); cur_archive_file_ofs += extra_size; } else { if ((comp_size > MZ_UINT32_MAX) || (cur_archive_file_ofs > MZ_UINT32_MAX)) return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE); if (!mz_zip_writer_create_local_dir_header(pZip, local_dir_header, (mz_uint16)archive_name_size, (mz_uint16)user_extra_data_len, 0, 0, 0, method, gen_flags, dos_time, dos_date)) return mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED); cur_archive_file_ofs += sizeof(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED); } cur_archive_file_ofs += archive_name_size; } if (user_extra_data_len > 0) { if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, user_extra_data, user_extra_data_len) != user_extra_data_len) return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED); cur_archive_file_ofs += user_extra_data_len; } if (uncomp_size) { mz_uint64 uncomp_remaining = uncomp_size; void *pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE); if (!pRead_buf) { return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); } if (!level) { while (uncomp_remaining) { mz_uint n = (mz_uint)MZ_MIN((mz_uint64)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining); if ((read_callback(callback_opaque, file_ofs, pRead_buf, n) != n) || (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf, n) != n)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED); } file_ofs += n; uncomp_crc32 = (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n); uncomp_remaining -= n; cur_archive_file_ofs += n; } comp_size = uncomp_size; } else { mz_bool result = MZ_FALSE; mz_zip_writer_add_state state; tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)); if (!pComp) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); } state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params(level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); return mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR); } for (;;) { size_t in_buf_size = 
(mz_uint32)MZ_MIN(uncomp_remaining, (mz_uint64)MZ_ZIP_MAX_IO_BUF_SIZE); tdefl_status status; tdefl_flush flush = TDEFL_NO_FLUSH; if (read_callback(callback_opaque, file_ofs, pRead_buf, in_buf_size)!= in_buf_size) { mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED); break; } file_ofs += in_buf_size; uncomp_crc32 = (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size); uncomp_remaining -= in_buf_size; if (pZip->m_pNeeds_keepalive != NULL && pZip->m_pNeeds_keepalive(pZip->m_pIO_opaque)) flush = TDEFL_FULL_FLUSH; status = tdefl_compress_buffer(pComp, pRead_buf, in_buf_size, uncomp_remaining ? flush : TDEFL_FINISH); if (status == TDEFL_STATUS_DONE) { result = MZ_TRUE; break; } else if (status != TDEFL_STATUS_OKAY) { mz_zip_set_error(pZip, MZ_ZIP_COMPRESSION_FAILED); break; } } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); if (!result) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; } pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); } { mz_uint8 local_dir_footer[MZ_ZIP_DATA_DESCRIPTER_SIZE64]; mz_uint32 local_dir_footer_size = MZ_ZIP_DATA_DESCRIPTER_SIZE32; MZ_WRITE_LE32(local_dir_footer + 0, MZ_ZIP_DATA_DESCRIPTOR_ID); MZ_WRITE_LE32(local_dir_footer + 4, uncomp_crc32); if (pExtra_data == NULL) { if (comp_size > MZ_UINT32_MAX) return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE); MZ_WRITE_LE32(local_dir_footer + 8, comp_size); MZ_WRITE_LE32(local_dir_footer + 12, uncomp_size); } else { MZ_WRITE_LE64(local_dir_footer + 8, comp_size); MZ_WRITE_LE64(local_dir_footer + 16, uncomp_size); local_dir_footer_size = MZ_ZIP_DATA_DESCRIPTER_SIZE64; } if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, local_dir_footer, local_dir_footer_size) != local_dir_footer_size) return MZ_FALSE; cur_archive_file_ofs += local_dir_footer_size; } if (pExtra_data != NULL) { extra_size = mz_zip_writer_create_zip64_extra_data(extra_data, (uncomp_size >= MZ_UINT32_MAX) ? &uncomp_size : NULL, (uncomp_size >= MZ_UINT32_MAX) ? &comp_size : NULL, (local_dir_header_ofs >= MZ_UINT32_MAX) ? 
&local_dir_header_ofs : NULL); } if (!mz_zip_writer_add_to_central_dir(pZip, pArchive_name, (mz_uint16)archive_name_size, pExtra_data, (mz_uint16)extra_size, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, gen_flags, dos_time, dos_date, local_dir_header_ofs, ext_attributes, user_extra_data_central, user_extra_data_central_len)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #ifndef MINIZ_NO_STDIO static size_t mz_file_read_func_stdio(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { MZ_FILE *pSrc_file = (MZ_FILE *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pSrc_file); if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pSrc_file, (mz_int64)file_ofs, SEEK_SET)))) return 0; return MZ_FREAD(pBuf, 1, n, pSrc_file); } mz_bool mz_zip_writer_add_cfile(mz_zip_archive *pZip, const char *pArchive_name, MZ_FILE *pSrc_file, mz_uint64 size_to_add, const MZ_TIME_T *pFile_time, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, const char *user_extra_data, mz_uint user_extra_data_len, const char *user_extra_data_central, mz_uint user_extra_data_central_len) { return mz_zip_writer_add_read_buf_callback(pZip, pArchive_name, mz_file_read_func_stdio, pSrc_file, size_to_add, pFile_time, pComment, comment_size, level_and_flags, user_extra_data, user_extra_data_len, user_extra_data_central, user_extra_data_central_len); } mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { MZ_FILE *pSrc_file = NULL; mz_uint64 uncomp_size = 0; MZ_TIME_T file_modified_time; MZ_TIME_T *pFile_time = NULL; mz_bool status; memset(&file_modified_time, 0, sizeof(file_modified_time)); #if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_STDIO) pFile_time = &file_modified_time; if (!mz_zip_get_file_modified_time(pSrc_filename, &file_modified_time)) return mz_zip_set_error(pZip, MZ_ZIP_FILE_STAT_FAILED); #endif pSrc_file = MZ_FOPEN(pSrc_filename, "rb"); if (!pSrc_file) return mz_zip_set_error(pZip, MZ_ZIP_FILE_OPEN_FAILED); MZ_FSEEK64(pSrc_file, 0, SEEK_END); uncomp_size = MZ_FTELL64(pSrc_file); MZ_FSEEK64(pSrc_file, 0, SEEK_SET); status = mz_zip_writer_add_cfile(pZip, pArchive_name, pSrc_file, uncomp_size, pFile_time, pComment, comment_size, level_and_flags, NULL, 0, NULL, 0); MZ_FCLOSE(pSrc_file); return status; } #endif /* #ifndef MINIZ_NO_STDIO */ static mz_bool mz_zip_writer_update_zip64_extension_block(mz_zip_array *pNew_ext, mz_zip_archive *pZip, const mz_uint8 *pExt, uint32_t ext_len, mz_uint64 *pComp_size, mz_uint64 *pUncomp_size, mz_uint64 *pLocal_header_ofs, mz_uint32 *pDisk_start) { /* + 64 should be enough for any new zip64 data */ if (!mz_zip_array_reserve(pZip, pNew_ext, ext_len + 64, MZ_FALSE)) return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); mz_zip_array_resize(pZip, pNew_ext, 0, MZ_FALSE); if ((pUncomp_size) || (pComp_size) || (pLocal_header_ofs) || (pDisk_start)) { mz_uint8 new_ext_block[64]; mz_uint8 *pDst = new_ext_block; mz_write_le16(pDst, MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID); mz_write_le16(pDst + sizeof(mz_uint16), 0); pDst += sizeof(mz_uint16) * 2; if (pUncomp_size) { mz_write_le64(pDst, *pUncomp_size); pDst += sizeof(mz_uint64); } if (pComp_size) { mz_write_le64(pDst, *pComp_size); pDst += sizeof(mz_uint64); } if (pLocal_header_ofs) { mz_write_le64(pDst, *pLocal_header_ofs); pDst += sizeof(mz_uint64); } if (pDisk_start) { mz_write_le32(pDst, 
*pDisk_start); pDst += sizeof(mz_uint32); } mz_write_le16(new_ext_block + sizeof(mz_uint16), (mz_uint16)((pDst - new_ext_block) - sizeof(mz_uint16) * 2)); if (!mz_zip_array_push_back(pZip, pNew_ext, new_ext_block, pDst - new_ext_block)) return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); } if ((pExt) && (ext_len)) { mz_uint32 extra_size_remaining = ext_len; const mz_uint8 *pExtra_data = pExt; do { mz_uint32 field_id, field_data_size, field_total_size; if (extra_size_remaining < (sizeof(mz_uint16) * 2)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); field_id = MZ_READ_LE16(pExtra_data); field_data_size = MZ_READ_LE16(pExtra_data + sizeof(mz_uint16)); field_total_size = field_data_size + sizeof(mz_uint16) * 2; if (field_total_size > extra_size_remaining) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); if (field_id != MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID) { if (!mz_zip_array_push_back(pZip, pNew_ext, pExtra_data, field_total_size)) return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); } pExtra_data += field_total_size; extra_size_remaining -= field_total_size; } while (extra_size_remaining); } return MZ_TRUE; } /* TODO: This func is now pretty freakin complex due to zip64, split it up? */ mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint src_file_index) { mz_uint n, bit_flags, num_alignment_padding_bytes, src_central_dir_following_data_size; mz_uint64 src_archive_bytes_remaining, local_dir_header_ofs; mz_uint64 cur_src_file_ofs, cur_dst_file_ofs; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; mz_uint8 new_central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; size_t orig_central_dir_size; mz_zip_internal_state *pState; void *pBuf; const mz_uint8 *pSrc_central_header; mz_zip_archive_file_stat src_file_stat; mz_uint32 src_filename_len, src_comment_len, src_ext_len; mz_uint32 local_header_filename_size, local_header_extra_len; mz_uint64 local_header_comp_size, local_header_uncomp_size; mz_bool found_zip64_ext_data_in_ldir = MZ_FALSE; /* Sanity checks */ if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pSource_zip->m_pRead)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); pState = pZip->m_pState; /* Don't support copying files from zip64 archives to non-zip64, even though in some cases this is possible */ if ((pSource_zip->m_pState->m_zip64) && (!pZip->m_pState->m_zip64)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); /* Get pointer to the source central dir header and crack it */ if (NULL == (pSrc_central_header = mz_zip_get_cdh(pSource_zip, src_file_index))) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); if (MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_SIG_OFS) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); src_filename_len = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS); src_comment_len = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS); src_ext_len = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS); src_central_dir_following_data_size = src_filename_len + src_ext_len + src_comment_len; /* TODO: We don't support central dir's >= MZ_UINT32_MAX bytes right now (+32 fudge factor in case we need to add more extra data) */ if ((pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + src_central_dir_following_data_size + 32) >= 
MZ_UINT32_MAX) return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_CDIR_SIZE); num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); if (!pState->m_zip64) { if (pZip->m_total_files == MZ_UINT16_MAX) return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES); } else { /* TODO: Our zip64 support still has some 32-bit limits that may not be worth fixing. */ if (pZip->m_total_files == MZ_UINT32_MAX) return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES); } if (!mz_zip_file_stat_internal(pSource_zip, src_file_index, pSrc_central_header, &src_file_stat, NULL)) return MZ_FALSE; cur_src_file_ofs = src_file_stat.m_local_header_ofs; cur_dst_file_ofs = pZip->m_archive_size; /* Read the source archive's local dir header */ if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED); if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; /* Compute the total size we need to copy (filename+extra data+compressed data) */ local_header_filename_size = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS); local_header_extra_len = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); local_header_comp_size = MZ_READ_LE32(pLocal_header + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS); local_header_uncomp_size = MZ_READ_LE32(pLocal_header + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS); src_archive_bytes_remaining = local_header_filename_size + local_header_extra_len + src_file_stat.m_comp_size; /* Try to find a zip64 extended information field */ if ((local_header_extra_len) && ((local_header_comp_size == MZ_UINT32_MAX) || (local_header_uncomp_size == MZ_UINT32_MAX))) { mz_zip_array file_data_array; const mz_uint8 *pExtra_data; mz_uint32 extra_size_remaining = local_header_extra_len; mz_zip_array_init(&file_data_array, 1); if (!mz_zip_array_resize(pZip, &file_data_array, local_header_extra_len, MZ_FALSE)) { return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); } if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, src_file_stat.m_local_header_ofs + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + local_header_filename_size, file_data_array.m_p, local_header_extra_len) != local_header_extra_len) { mz_zip_array_clear(pZip, &file_data_array); return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED); } pExtra_data = (const mz_uint8 *)file_data_array.m_p; do { mz_uint32 field_id, field_data_size, field_total_size; if (extra_size_remaining < (sizeof(mz_uint16) * 2)) { mz_zip_array_clear(pZip, &file_data_array); return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); } field_id = MZ_READ_LE16(pExtra_data); field_data_size = MZ_READ_LE16(pExtra_data + sizeof(mz_uint16)); field_total_size = field_data_size + sizeof(mz_uint16) * 2; if (field_total_size > extra_size_remaining) { mz_zip_array_clear(pZip, &file_data_array); return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); } if (field_id == MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID) { const mz_uint8 *pSrc_field_data = pExtra_data + sizeof(mz_uint32); if (field_data_size < sizeof(mz_uint64) * 2) { mz_zip_array_clear(pZip, &file_data_array); return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); } local_header_uncomp_size = MZ_READ_LE64(pSrc_field_data); local_header_comp_size = MZ_READ_LE64(pSrc_field_data + sizeof(mz_uint64)); /* may be 0 if there's a descriptor */ 
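/* The 64-bit sizes read just above from the zip64 extended information field supersede the 32-bit size fields of the local header, which are written as 0xFFFFFFFF when the real values do not fit. */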
found_zip64_ext_data_in_ldir = MZ_TRUE; break; } pExtra_data += field_total_size; extra_size_remaining -= field_total_size; } while (extra_size_remaining); mz_zip_array_clear(pZip, &file_data_array); } if (!pState->m_zip64) { /* Try to detect if the new archive will most likely wind up too big and bail early (+(sizeof(mz_uint32) * 4) is for the optional descriptor which could be present, +64 is a fudge factor). */ /* We also check when the archive is finalized so this doesn't need to be perfect. */ mz_uint64 approx_new_archive_size = cur_dst_file_ofs + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + src_archive_bytes_remaining + (sizeof(mz_uint32) * 4) + pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + src_central_dir_following_data_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE + 64; if (approx_new_archive_size >= MZ_UINT32_MAX) return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE); } /* Write dest archive padding */ if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs, num_alignment_padding_bytes)) return MZ_FALSE; cur_dst_file_ofs += num_alignment_padding_bytes; local_dir_header_ofs = cur_dst_file_ofs; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } /* The original zip's local header+ext block doesn't change, even with zip64, so we can just copy it over to the dest zip */ if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED); cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; /* Copy over the source archive bytes to the dest archive, also ensure we have enough buf space to handle optional data descriptor */ if (NULL == (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)MZ_MAX(32U, MZ_MIN((mz_uint64)MZ_ZIP_MAX_IO_BUF_SIZE, src_archive_bytes_remaining))))) return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); while (src_archive_bytes_remaining) { n = (mz_uint)MZ_MIN((mz_uint64)MZ_ZIP_MAX_IO_BUF_SIZE, src_archive_bytes_remaining); if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED); } cur_src_file_ofs += n; if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED); } cur_dst_file_ofs += n; src_archive_bytes_remaining -= n; } /* Now deal with the optional data descriptor */ bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS); if (bit_flags & 8) { /* Copy data descriptor */ if ((pSource_zip->m_pState->m_zip64) || (found_zip64_ext_data_in_ldir)) { /* src is zip64, dest must be zip64 */ /* name uint32_t's */ /* id 1 (optional in zip64?) */ /* crc 1 */ /* comp_size 2 */ /* uncomp_size 2 */ if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, (sizeof(mz_uint32) * 6)) != (sizeof(mz_uint32) * 6)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED); } n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == MZ_ZIP_DATA_DESCRIPTOR_ID) ? 
6 : 5); } else { /* src is NOT zip64 */ mz_bool has_id; if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED); } has_id = (MZ_READ_LE32(pBuf) == MZ_ZIP_DATA_DESCRIPTOR_ID); if (pZip->m_pState->m_zip64) { /* dest is zip64, so upgrade the data descriptor */ const mz_uint32 *pSrc_descriptor = (const mz_uint32 *)((const mz_uint8 *)pBuf + (has_id ? sizeof(mz_uint32) : 0)); const mz_uint32 src_crc32 = pSrc_descriptor[0]; const mz_uint64 src_comp_size = pSrc_descriptor[1]; const mz_uint64 src_uncomp_size = pSrc_descriptor[2]; mz_write_le32((mz_uint8 *)pBuf, MZ_ZIP_DATA_DESCRIPTOR_ID); mz_write_le32((mz_uint8 *)pBuf + sizeof(mz_uint32) * 1, src_crc32); mz_write_le64((mz_uint8 *)pBuf + sizeof(mz_uint32) * 2, src_comp_size); mz_write_le64((mz_uint8 *)pBuf + sizeof(mz_uint32) * 4, src_uncomp_size); n = sizeof(mz_uint32) * 6; } else { /* dest is NOT zip64, just copy it as-is */ n = sizeof(mz_uint32) * (has_id ? 4 : 3); } } if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED); } cur_src_file_ofs += n; cur_dst_file_ofs += n; } pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); /* Finally, add the new central dir header */ orig_central_dir_size = pState->m_central_dir.m_size; memcpy(new_central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); if (pState->m_zip64) { /* This is the painful part: We need to write a new central dir header + ext block with updated zip64 fields, and ensure the old fields (if any) are not included. */ const mz_uint8 *pSrc_ext = pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + src_filename_len; mz_zip_array new_ext_block; mz_zip_array_init(&new_ext_block, sizeof(mz_uint8)); MZ_WRITE_LE32(new_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, MZ_UINT32_MAX); MZ_WRITE_LE32(new_central_header + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, MZ_UINT32_MAX); MZ_WRITE_LE32(new_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS, MZ_UINT32_MAX); if (!mz_zip_writer_update_zip64_extension_block(&new_ext_block, pZip, pSrc_ext, src_ext_len, &src_file_stat.m_comp_size, &src_file_stat.m_uncomp_size, &local_dir_header_ofs, NULL)) { mz_zip_array_clear(pZip, &new_ext_block); return MZ_FALSE; } MZ_WRITE_LE16(new_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS, new_ext_block.m_size); if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, new_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) { mz_zip_array_clear(pZip, &new_ext_block); return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); } if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, src_filename_len)) { mz_zip_array_clear(pZip, &new_ext_block); mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); } if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, new_ext_block.m_p, new_ext_block.m_size)) { mz_zip_array_clear(pZip, &new_ext_block); mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); } if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + src_filename_len + src_ext_len, src_comment_len)) { mz_zip_array_clear(pZip, &new_ext_block); mz_zip_array_resize(pZip, &pState->m_central_dir, 
orig_central_dir_size, MZ_FALSE); return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); } mz_zip_array_clear(pZip, &new_ext_block); } else { /* sanity checks */ if (cur_dst_file_ofs > MZ_UINT32_MAX) return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE); if (local_dir_header_ofs >= MZ_UINT32_MAX) return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE); MZ_WRITE_LE32(new_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_dir_header_ofs); if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, new_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, src_central_dir_following_data_size)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); } } /* This shouldn't trigger unless we screwed up during the initial sanity checks */ if (pState->m_central_dir.m_size >= MZ_UINT32_MAX) { /* TODO: Support central dirs >= 32-bits in size */ mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_CDIR_SIZE); } n = (mz_uint32)orig_central_dir_size; if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); } pZip->m_total_files++; pZip->m_archive_size = cur_dst_file_ofs; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_uint64 central_dir_ofs, central_dir_size; mz_uint8 hdr[256]; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); pState = pZip->m_pState; if (pState->m_zip64) { if ((pZip->m_total_files > MZ_UINT32_MAX) || (pState->m_central_dir.m_size >= MZ_UINT32_MAX)) return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES); } else { if ((pZip->m_total_files > MZ_UINT16_MAX) || ((pZip->m_archive_size + pState->m_central_dir.m_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > MZ_UINT32_MAX)) return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES); } central_dir_ofs = 0; central_dir_size = 0; if (pZip->m_total_files) { /* Write central directory */ central_dir_ofs = pZip->m_archive_size; central_dir_size = pState->m_central_dir.m_size; pZip->m_central_directory_file_ofs = central_dir_ofs; if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs, pState->m_central_dir.m_p, (size_t)central_dir_size) != central_dir_size) return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED); pZip->m_archive_size += central_dir_size; } if (pState->m_zip64) { /* Write zip64 end of central directory header */ mz_uint64 rel_ofs_to_zip64_ecdr = pZip->m_archive_size; MZ_CLEAR_OBJ(hdr); MZ_WRITE_LE32(hdr + MZ_ZIP64_ECDH_SIG_OFS, MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE64(hdr + MZ_ZIP64_ECDH_SIZE_OF_RECORD_OFS, MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE - sizeof(mz_uint32) - sizeof(mz_uint64)); MZ_WRITE_LE16(hdr + MZ_ZIP64_ECDH_VERSION_MADE_BY_OFS, 0x031E); /* TODO: always Unix */ MZ_WRITE_LE16(hdr + MZ_ZIP64_ECDH_VERSION_NEEDED_OFS, 0x002D); MZ_WRITE_LE64(hdr + MZ_ZIP64_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS, pZip->m_total_files); MZ_WRITE_LE64(hdr + MZ_ZIP64_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files); MZ_WRITE_LE64(hdr + MZ_ZIP64_ECDH_CDIR_SIZE_OFS, central_dir_size); MZ_WRITE_LE64(hdr + MZ_ZIP64_ECDH_CDIR_OFS_OFS, 
central_dir_ofs); if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr, MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE) != MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE) return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED); pZip->m_archive_size += MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE; /* Write zip64 end of central directory locator */ MZ_CLEAR_OBJ(hdr); MZ_WRITE_LE32(hdr + MZ_ZIP64_ECDL_SIG_OFS, MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIG); MZ_WRITE_LE64(hdr + MZ_ZIP64_ECDL_REL_OFS_TO_ZIP64_ECDR_OFS, rel_ofs_to_zip64_ecdr); MZ_WRITE_LE32(hdr + MZ_ZIP64_ECDL_TOTAL_NUMBER_OF_DISKS_OFS, 1); if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr, MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE) != MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE) return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED); pZip->m_archive_size += MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE; } /* Write end of central directory record */ MZ_CLEAR_OBJ(hdr); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS, MZ_MIN(MZ_UINT16_MAX, pZip->m_total_files)); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, MZ_MIN(MZ_UINT16_MAX, pZip->m_total_files)); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, MZ_MIN(MZ_UINT32_MAX, central_dir_size)); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, MZ_MIN(MZ_UINT32_MAX, central_dir_ofs)); if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED); #ifndef MINIZ_NO_STDIO if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return mz_zip_set_error(pZip, MZ_ZIP_FILE_CLOSE_FAILED); #endif /* #ifndef MINIZ_NO_STDIO */ pZip->m_archive_size += MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE; pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **ppBuf, size_t *pSize) { if ((!ppBuf) || (!pSize)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); *ppBuf = NULL; *pSize = 0; if ((!pZip) || (!pZip->m_pState)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); if (pZip->m_pWrite != mz_zip_heap_write_func) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE; *ppBuf = pZip->m_pState->m_pMem; *pSize = pZip->m_pState->m_mem_size; pZip->m_pState->m_pMem = NULL; pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0; return MZ_TRUE; } mz_bool mz_zip_writer_end(mz_zip_archive *pZip) { return mz_zip_writer_end_internal(pZip, MZ_TRUE); } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { return mz_zip_add_mem_to_archive_file_in_place_v2(pZip_filename, pArchive_name, pBuf, buf_size, pComment, comment_size, level_and_flags, NULL); } mz_bool mz_zip_add_mem_to_archive_file_in_place_v2(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_zip_error *pErr) { mz_bool status, created_new_archive = MZ_FALSE; mz_zip_archive zip_archive; struct MZ_FILE_STAT_STRUCT file_stat; mz_zip_error actual_err = MZ_ZIP_NO_ERROR; mz_zip_zero_struct(&zip_archive); if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; if 
((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) || ((comment_size) && (!pComment)) || ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION)) { if (pErr) *pErr = MZ_ZIP_INVALID_PARAMETER; return MZ_FALSE; } if (!mz_zip_writer_validate_archive_name(pArchive_name)) { if (pErr) *pErr = MZ_ZIP_INVALID_FILENAME; return MZ_FALSE; } /* Important: The regular non-64 bit version of stat() can fail here if the file is very large, which could cause the archive to be overwritten. */ /* So be sure to compile with _LARGEFILE64_SOURCE 1 */ if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) { /* Create a new archive. */ if (!mz_zip_writer_init_file_v2(&zip_archive, pZip_filename, 0, level_and_flags)) { if (pErr) *pErr = zip_archive.m_last_error; return MZ_FALSE; } created_new_archive = MZ_TRUE; } else { /* Append to an existing archive. */ if (!mz_zip_reader_init_file_v2(&zip_archive, pZip_filename, level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY, 0, 0)) { if (pErr) *pErr = zip_archive.m_last_error; return MZ_FALSE; } if (!mz_zip_writer_init_from_reader_v2(&zip_archive, pZip_filename, level_and_flags)) { if (pErr) *pErr = zip_archive.m_last_error; mz_zip_reader_end_internal(&zip_archive, MZ_FALSE); return MZ_FALSE; } } status = mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size, pComment, comment_size, level_and_flags, 0, 0); actual_err = zip_archive.m_last_error; /* Always finalize, even if adding failed for some reason, so we have a valid central directory. (This may not always succeed, but we can try.) */ if (!mz_zip_writer_finalize_archive(&zip_archive)) { if (!actual_err) actual_err = zip_archive.m_last_error; status = MZ_FALSE; } if (!mz_zip_writer_end_internal(&zip_archive, status)) { if (!actual_err) actual_err = zip_archive.m_last_error; status = MZ_FALSE; } if ((!status) && (created_new_archive)) { /* It's a new archive and something went wrong, so just delete it. */ int ignoredStatus = MZ_DELETE_FILE(pZip_filename); (void)ignoredStatus; } if (pErr) *pErr = actual_err; return status; } void *mz_zip_extract_archive_file_to_heap_v2(const char *pZip_filename, const char *pArchive_name, const char *pComment, size_t *pSize, mz_uint flags, mz_zip_error *pErr) { mz_uint32 file_index; mz_zip_archive zip_archive; void *p = NULL; if (pSize) *pSize = 0; if ((!pZip_filename) || (!pArchive_name)) { if (pErr) *pErr = MZ_ZIP_INVALID_PARAMETER; return NULL; } mz_zip_zero_struct(&zip_archive); if (!mz_zip_reader_init_file_v2(&zip_archive, pZip_filename, flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY, 0, 0)) { if (pErr) *pErr = zip_archive.m_last_error; return NULL; } if (mz_zip_reader_locate_file_v2(&zip_archive, pArchive_name, pComment, flags, &file_index)) { p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags); } mz_zip_reader_end_internal(&zip_archive, p != NULL); if (pErr) *pErr = zip_archive.m_last_error; return p; } void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint flags) { return mz_zip_extract_archive_file_to_heap_v2(pZip_filename, pArchive_name, NULL, pSize, flags, NULL); } #endif /* #ifndef MINIZ_NO_STDIO */ #endif /* #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS */ /* ------------------- Misc utils */ mz_zip_mode mz_zip_get_mode(mz_zip_archive *pZip) { return pZip ? pZip->m_zip_mode : MZ_ZIP_MODE_INVALID; } mz_zip_type mz_zip_get_type(mz_zip_archive *pZip) { return pZip ? 
pZip->m_zip_type : MZ_ZIP_TYPE_INVALID; } mz_zip_error mz_zip_set_last_error(mz_zip_archive *pZip, mz_zip_error err_num) { mz_zip_error prev_err; if (!pZip) return MZ_ZIP_INVALID_PARAMETER; prev_err = pZip->m_last_error; pZip->m_last_error = err_num; return prev_err; } mz_zip_error mz_zip_peek_last_error(mz_zip_archive *pZip) { if (!pZip) return MZ_ZIP_INVALID_PARAMETER; return pZip->m_last_error; } mz_zip_error mz_zip_clear_last_error(mz_zip_archive *pZip) { return mz_zip_set_last_error(pZip, MZ_ZIP_NO_ERROR); } mz_zip_error mz_zip_get_last_error(mz_zip_archive *pZip) { mz_zip_error prev_err; if (!pZip) return MZ_ZIP_INVALID_PARAMETER; prev_err = pZip->m_last_error; pZip->m_last_error = MZ_ZIP_NO_ERROR; return prev_err; } const char *mz_zip_get_error_string(mz_zip_error mz_err) { switch (mz_err) { case MZ_ZIP_NO_ERROR: return "no error"; case MZ_ZIP_UNDEFINED_ERROR: return "undefined error"; case MZ_ZIP_TOO_MANY_FILES: return "too many files"; case MZ_ZIP_FILE_TOO_LARGE: return "file too large"; case MZ_ZIP_UNSUPPORTED_METHOD: return "unsupported method"; case MZ_ZIP_UNSUPPORTED_ENCRYPTION: return "unsupported encryption"; case MZ_ZIP_UNSUPPORTED_FEATURE: return "unsupported feature"; case MZ_ZIP_FAILED_FINDING_CENTRAL_DIR: return "failed finding central directory"; case MZ_ZIP_NOT_AN_ARCHIVE: return "not a ZIP archive"; case MZ_ZIP_INVALID_HEADER_OR_CORRUPTED: return "invalid header or archive is corrupted"; case MZ_ZIP_UNSUPPORTED_MULTIDISK: return "unsupported multidisk archive"; case MZ_ZIP_DECOMPRESSION_FAILED: return "decompression failed or archive is corrupted"; case MZ_ZIP_COMPRESSION_FAILED: return "compression failed"; case MZ_ZIP_UNEXPECTED_DECOMPRESSED_SIZE: return "unexpected decompressed size"; case MZ_ZIP_CRC_CHECK_FAILED: return "CRC-32 check failed"; case MZ_ZIP_UNSUPPORTED_CDIR_SIZE: return "unsupported central directory size"; case MZ_ZIP_ALLOC_FAILED: return "allocation failed"; case MZ_ZIP_FILE_OPEN_FAILED: return "file open failed"; case MZ_ZIP_FILE_CREATE_FAILED: return "file create failed"; case MZ_ZIP_FILE_WRITE_FAILED: return "file write failed"; case MZ_ZIP_FILE_READ_FAILED: return "file read failed"; case MZ_ZIP_FILE_CLOSE_FAILED: return "file close failed"; case MZ_ZIP_FILE_SEEK_FAILED: return "file seek failed"; case MZ_ZIP_FILE_STAT_FAILED: return "file stat failed"; case MZ_ZIP_INVALID_PARAMETER: return "invalid parameter"; case MZ_ZIP_INVALID_FILENAME: return "invalid filename"; case MZ_ZIP_BUF_TOO_SMALL: return "buffer too small"; case MZ_ZIP_INTERNAL_ERROR: return "internal error"; case MZ_ZIP_FILE_NOT_FOUND: return "file not found"; case MZ_ZIP_ARCHIVE_TOO_LARGE: return "archive is too large"; case MZ_ZIP_VALIDATION_FAILED: return "validation failed"; case MZ_ZIP_WRITE_CALLBACK_FAILED: return "write callback failed"; default: break; } return "unknown error"; } /* Note: Just because the archive is not zip64 doesn't necessarily mean it doesn't have Zip64 extended information extra field, argh. */ mz_bool mz_zip_is_zip64(mz_zip_archive *pZip) { if ((!pZip) || (!pZip->m_pState)) return MZ_FALSE; return pZip->m_pState->m_zip64; } size_t mz_zip_get_central_dir_size(mz_zip_archive *pZip) { if ((!pZip) || (!pZip->m_pState)) return 0; return pZip->m_pState->m_central_dir.m_size; } mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) { return pZip ?
pZip->m_total_files : 0; } mz_uint64 mz_zip_get_archive_size(mz_zip_archive *pZip) { if (!pZip) return 0; return pZip->m_archive_size; } mz_uint64 mz_zip_get_archive_file_start_offset(mz_zip_archive *pZip) { if ((!pZip) || (!pZip->m_pState)) return 0; return pZip->m_pState->m_file_archive_start_ofs; } MZ_FILE *mz_zip_get_cfile(mz_zip_archive *pZip) { if ((!pZip) || (!pZip->m_pState)) return 0; return pZip->m_pState->m_pFile; } size_t mz_zip_read_archive_data(mz_zip_archive *pZip, mz_uint64 file_ofs, void *pBuf, size_t n) { if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pZip->m_pRead)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); return pZip->m_pRead(pZip->m_pIO_opaque, file_ofs, pBuf, n); } mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, char *pFilename, mz_uint filename_buf_size) { mz_uint n; const mz_uint8 *p = mz_zip_get_cdh(pZip, file_index); if (!p) { if (filename_buf_size) pFilename[0] = '\0'; mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER); return 0; } n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); if (filename_buf_size) { n = MZ_MIN(n, filename_buf_size - 1); memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n); pFilename[n] = '\0'; } return n + 1; } mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat) { return mz_zip_file_stat_internal(pZip, file_index, mz_zip_get_cdh(pZip, file_index), pStat, NULL); } mz_bool mz_zip_end(mz_zip_archive *pZip) { if (!pZip) return MZ_FALSE; if (pZip->m_zip_mode == MZ_ZIP_MODE_READING) return mz_zip_reader_end(pZip); #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS else if ((pZip->m_zip_mode == MZ_ZIP_MODE_WRITING) || (pZip->m_zip_mode == MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED)) return mz_zip_writer_end(pZip); #endif return MZ_FALSE; } #ifdef __cplusplus } #endif #endif /*#ifndef MINIZ_NO_ARCHIVE_APIS*/
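/* --------------------------------------------------------------------------
   Illustrative usage sketch (editorial addition, not part of the upstream
   miniz sources). It shows one way the in-place append helper and the heap
   extraction helper defined above could be combined. Assumptions: the
   archive, archive-writing and stdio APIs are compiled in (the HLSL build of
   this code defines MINIZ_NO_ARCHIVE_APIS and MINIZ_NO_STDIO, so the sketch
   is excluded from the build with #if 0), and the file/entry names below are
   purely hypothetical. */
#if 0
#include <stdio.h>
#include <string.h>

static int example_zip_roundtrip(void)
{
    const char *pZip_filename = "example.zip";   /* hypothetical archive path */
    const char *pArchive_name = "readme.txt";    /* hypothetical entry name */
    const char *pData = "hello, miniz";
    size_t extracted_size = 0;
    void *pExtracted;

    /* Create example.zip (or append to it if it already exists) and add one
       entry from memory, compressed at the default level. */
    if (!mz_zip_add_mem_to_archive_file_in_place(pZip_filename, pArchive_name,
                                                 pData, strlen(pData),
                                                 NULL, 0, MZ_DEFAULT_LEVEL))
        return 0;

    /* Read the same entry back into a heap block allocated by miniz. */
    pExtracted = mz_zip_extract_archive_file_to_heap(pZip_filename,
                                                     pArchive_name,
                                                     &extracted_size, 0);
    if (!pExtracted)
        return 0;

    printf("extracted %u bytes\n", (unsigned)extracted_size);
    mz_free(pExtracted);
    return 1;
}
#endif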
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxilCompression/miniz.h
/* miniz.c 2.1.0 - public domain deflate/inflate, zlib-subset, ZIP reading/writing/appending, PNG writing See "unlicense" statement at the end of this file. Rich Geldreich <[email protected]>, last updated Oct. 13, 2013 Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951: http://www.ietf.org/rfc/rfc1951.txt Most API's defined in miniz.c are optional. For example, to disable the archive related functions just define MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO (see the list below for more macros). * Low-level Deflate/Inflate implementation notes: Compression: Use the "tdefl" API's. The compressor supports raw, static, and dynamic blocks, lazy or greedy parsing, match length filtering, RLE-only, and Huffman-only streams. It performs and compresses approximately as well as zlib. Decompression: Use the "tinfl" API's. The entire decompressor is implemented as a single function coroutine: see tinfl_decompress(). It supports decompression into a 32KB (or larger power of 2) wrapping buffer, or into a memory block large enough to hold the entire file. The low-level tdefl/tinfl API's do not make any use of dynamic memory allocation. * zlib-style API notes: miniz.c implements a fairly large subset of zlib. There's enough functionality present for it to be a drop-in zlib replacement in many apps: The z_stream struct, optional memory allocation callbacks deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound inflateInit/inflateInit2/inflate/inflateReset/inflateEnd compress, compress2, compressBound, uncompress CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly routines. Supports raw deflate streams or standard zlib streams with adler-32 checking. Limitations: The callback API's are not implemented yet. No support for gzip headers or zlib static dictionaries. I've tried to closely emulate zlib's various flavors of stream flushing and return status codes, but there are no guarantees that miniz.c pulls this off perfectly. * PNG writing: See the tdefl_write_image_to_png_file_in_memory() function, originally written by Alex Evans. Supports 1-4 bytes/pixel images. * ZIP archive API notes: The ZIP archive API's where designed with simplicity and efficiency in mind, with just enough abstraction to get the job done with minimal fuss. There are simple API's to retrieve file information, read files from existing archives, create new archives, append new files to existing archives, or clone archive data from one archive to another. It supports archives located in memory or the heap, on disk (using stdio.h), or you can specify custom file read/write callbacks. - Archive reading: Just call this function to read a single file from a disk archive: void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); For more complex cases, use the "mz_zip_reader" functions. Upon opening an archive, the entire central directory is located and read as-is into memory, and subsequent file access only occurs when reading individual files. - Archives file scanning: The simple way is to use this function to scan a loaded archive for a specific file: int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags); The locate operation can optionally check file comments too, which (as one example) can be used to identify multiple versions of the same file in an archive. 
This function uses a simple linear search through the central directory, so it's not very fast. Alternately, you can iterate through all the files in an archive (using mz_zip_reader_get_num_files()) and retrieve detailed info on each file by calling mz_zip_reader_file_stat(). - Archive creation: Use the "mz_zip_writer" functions. The ZIP writer immediately writes compressed file data to disk and builds an exact image of the central directory in memory. The central directory image is written all at once at the end of the archive file when the archive is finalized. The archive writer can optionally align each file's local header and file data to any power of 2 alignment, which can be useful when the archive will be read from optical media. Also, the writer supports placing arbitrary data blobs at the very beginning of ZIP archives. Archives written using either feature are still readable by any ZIP tool. - Archive appending: The simple way to add a single file to an archive is to call this function: mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); The archive will be created if it doesn't already exist, otherwise it'll be appended to. Note the appending is done in-place and is not an atomic operation, so if something goes wrong during the operation it's possible the archive could be left without a central directory (although the local file headers and file data will be fine, so the archive will be recoverable). For more complex archive modification scenarios: 1. The safest way is to use a mz_zip_reader to read the existing archive, cloning only those bits you want to preserve into a new archive using using the mz_zip_writer_add_from_zip_reader() function (which compiles the compressed file data as-is). When you're done, delete the old archive and rename the newly written archive, and you're done. This is safe but requires a bunch of temporary disk space or heap memory. 2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using mz_zip_writer_init_from_reader(), append new files as needed, then finalize the archive which will write an updated central directory to the original archive. (This is basically what mz_zip_add_mem_to_archive_file_in_place() does.) There's a possibility that the archive's central directory could be lost with this method if anything goes wrong, though. - ZIP archive support limitations: No zip64 or spanning support. Extraction functions can only handle unencrypted, stored or deflated files. Requires streams capable of seeking. * This is a header file library, like stb_image.c. To get only a header file, either cut and paste the below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then include miniz.c from it. * Important: For best perf. be sure to customize the below macros for your target platform: #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_LITTLE_ENDIAN 1 #define MINIZ_HAS_64BIT_REGISTERS 1 * On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before including miniz.c to ensure miniz uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be able to process large files (i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes). */ #pragma once /* Defines to completely disable specific portions of miniz.c: If all macros here are defined the only functionality remaining will be CRC-32, adler-32, tinfl, and tdefl. 
*/ /* Define MINIZ_NO_STDIO to disable all usage and any functions which rely on * stdio for file I/O. */ /*#define MINIZ_NO_STDIO */ #define MINIZ_NO_STDIO // HLSL Change /* If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able * to get the current time, or */ /* get/set file times, and the C run-time funcs that get/set times won't be * called. */ /* The current downside is the times written to your archives will be from 1979. */ /*#define MINIZ_NO_TIME */ #define MINIZ_NO_TIME // HLSL Change /* Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's. */ /*#define MINIZ_NO_ARCHIVE_APIS */ #define MINIZ_NO_ARCHIVE_APIS // HLSL Change /* Define MINIZ_NO_ARCHIVE_WRITING_APIS to disable all writing related ZIP * archive API's. */ /*#define MINIZ_NO_ARCHIVE_WRITING_APIS */ /* Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression * API's. */ /*#define MINIZ_NO_ZLIB_APIS */ /* Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent * conflicts against stock zlib. */ /*#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES */ /* Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc. Note if MINIZ_NO_MALLOC is defined then the user must always provide custom user alloc/free/realloc callbacks to the zlib and archive API's, and a few stand-alone helper API's which don't provide custom user functions (such as tdefl_compress_mem_to_heap() and tinfl_decompress_mem_to_heap()) won't work. */ /*#define MINIZ_NO_MALLOC */ #define MINIZ_NO_MALLOC // HLSL Change #if defined(__TINYC__) && (defined(__linux) || defined(__linux__)) /* TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc * on Linux */ #define MINIZ_NO_TIME #endif #include <stddef.h> #if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS) #include <time.h> #endif #if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \ defined(__i386) || defined(__i486__) || defined(__i486) || \ defined(i386) || defined(__ia64__) || defined(__x86_64__) /* MINIZ_X86_OR_X64_CPU is only used to help set the below macros. */ #define MINIZ_X86_OR_X64_CPU 1 #else #define MINIZ_X86_OR_X64_CPU 0 #endif #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU /* Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. */ #define MINIZ_LITTLE_ENDIAN 1 #else #define MINIZ_LITTLE_ENDIAN 0 #endif /* Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES only if not set */ #if !defined(MINIZ_USE_UNALIGNED_LOADS_AND_STORES) #if MINIZ_X86_OR_X64_CPU /* Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient * integer loads and stores from unaligned addresses. */ #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_UNALIGNED_USE_MEMCPY #else #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 0 #endif #endif #if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \ defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \ defined(__x86_64__) /* Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are * reasonably fast (and don't involve compiler generated calls to helper * functions). */ #define MINIZ_HAS_64BIT_REGISTERS 1 #else #define MINIZ_HAS_64BIT_REGISTERS 0 #endif #ifdef __cplusplus extern "C" { #endif /* ------------------- zlib-style API Definitions. */ /* For more compatibility with zlib, miniz.c uses unsigned long for some * parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits! 
*/ typedef unsigned long mz_ulong; /* mz_free() internally uses the MZ_FREE() macro (which by default calls free() * unless you've modified the MZ_MALLOC macro) to release a block allocated from * the heap. */ void mz_free(void *p); #define MZ_ADLER32_INIT (1) /* mz_adler32() returns the initial adler-32 value to use when called with * ptr==NULL. */ mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len); #define MZ_CRC32_INIT (0) /* mz_crc32() returns the initial CRC-32 value to use when called with * ptr==NULL. */ mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len); /* Compression strategies. */ enum { MZ_DEFAULT_STRATEGY = 0, MZ_FILTERED = 1, MZ_HUFFMAN_ONLY = 2, MZ_RLE = 3, MZ_FIXED = 4 }; /* Method */ #define MZ_DEFLATED 8 /* Heap allocation callbacks. Note that mz_alloc_func parameter types purpsosely differ from zlib's: items/size is size_t, not unsigned long. */ typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size); typedef void (*mz_free_func)(void *opaque, void *address); typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items, size_t size); /* Compression levels: 0-9 are the standard zlib-style levels, 10 is best * possible compression (not zlib compatible, and may be very slow), * MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL. */ enum { MZ_NO_COMPRESSION = 0, MZ_BEST_SPEED = 1, MZ_BEST_COMPRESSION = 9, MZ_UBER_COMPRESSION = 10, MZ_DEFAULT_LEVEL = 6, MZ_DEFAULT_COMPRESSION = -1 }; #define MZ_VERSION "10.1.0" #define MZ_VERNUM 0xA100 #define MZ_VER_MAJOR 10 #define MZ_VER_MINOR 1 #define MZ_VER_REVISION 0 #define MZ_VER_SUBREVISION 0 #ifndef MINIZ_NO_ZLIB_APIS /* Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The * other values are for advanced use (refer to the zlib docs). */ enum { MZ_NO_FLUSH = 0, MZ_PARTIAL_FLUSH = 1, MZ_SYNC_FLUSH = 2, MZ_FULL_FLUSH = 3, MZ_FINISH = 4, MZ_BLOCK = 5 }; /* Return status codes. MZ_PARAM_ERROR is non-standard. */ enum { MZ_OK = 0, MZ_STREAM_END = 1, MZ_NEED_DICT = 2, MZ_ERRNO = -1, MZ_STREAM_ERROR = -2, MZ_DATA_ERROR = -3, MZ_MEM_ERROR = -4, MZ_BUF_ERROR = -5, MZ_VERSION_ERROR = -6, MZ_PARAM_ERROR = -10000 }; /* Window bits */ #define MZ_DEFAULT_WINDOW_BITS 15 struct mz_internal_state; /* Compression/decompression stream struct. */ typedef struct mz_stream_s { const unsigned char *next_in; /* pointer to next byte to read */ unsigned int avail_in; /* number of bytes available at next_in */ mz_ulong total_in; /* total number of bytes consumed so far */ unsigned char *next_out; /* pointer to next byte to write */ unsigned int avail_out; /* number of bytes that can be written to next_out */ mz_ulong total_out; /* total number of bytes produced so far */ char *msg; /* error msg (unused) */ struct mz_internal_state *state; /* internal state, allocated by zalloc/zfree */ mz_alloc_func zalloc; /* optional heap allocation function (defaults to malloc) */ mz_free_func zfree; /* optional heap free function (defaults to free) */ void *opaque; /* heap alloc function user pointer */ int data_type; /* data_type (unused) */ mz_ulong adler; /* adler32 of the source or uncompressed data */ mz_ulong reserved; /* not used */ } mz_stream; typedef mz_stream *mz_streamp; /* Returns the version string of miniz.c. */ const char *mz_version(void); /* mz_deflateInit() initializes a compressor with default options: */ /* Parameters: */ /* pStream must point to an initialized mz_stream struct. */ /* level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION]. 
*/ /* level 1 enables a specially optimized compression function that's been * optimized purely for performance, not ratio. */ /* (This special func. is currently only enabled when * MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.) */ /* Return values: */ /* MZ_OK on success. */ /* MZ_STREAM_ERROR if the stream is bogus. */ /* MZ_PARAM_ERROR if the input parameters are bogus. */ /* MZ_MEM_ERROR on out of memory. */ int mz_deflateInit(mz_streamp pStream, int level); /* mz_deflateInit2() is like mz_deflate(), except with more control: */ /* Additional parameters: */ /* method must be MZ_DEFLATED */ /* window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with * zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no * header or footer) */ /* mem_level must be between [1, 9] (it's checked but ignored by miniz.c) */ int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy); /* Quickly resets a compressor without having to reallocate anything. Same as * calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2(). */ int mz_deflateReset(mz_streamp pStream); /* mz_deflate() compresses the input to output, consuming as much of the input * and producing as much output as possible. */ /* Parameters: */ /* pStream is the stream to read from and write to. You must initialize/update * the next_in, avail_in, next_out, and avail_out members. */ /* flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or * MZ_FINISH. */ /* Return values: */ /* MZ_OK on success (when flushing, or if more input is needed but not * available, and/or there's more output to be written but the output buffer is * full). */ /* MZ_STREAM_END if all input has been consumed and all output bytes have been * written. Don't call mz_deflate() on the stream anymore. */ /* MZ_STREAM_ERROR if the stream is bogus. */ /* MZ_PARAM_ERROR if one of the parameters is invalid. */ /* MZ_BUF_ERROR if no forward progress is possible because the input and/or * output buffers are empty. (Fill up the input buffer or free up some output * space and try again.) */ int mz_deflate(mz_streamp pStream, int flush); /* mz_deflateEnd() deinitializes a compressor: */ /* Return values: */ /* MZ_OK on success. */ /* MZ_STREAM_ERROR if the stream is bogus. */ int mz_deflateEnd(mz_streamp pStream); /* mz_deflateBound() returns a (very) conservative upper bound on the amount of * data that could be generated by deflate(), assuming flush is set to only * MZ_NO_FLUSH or MZ_FINISH. */ mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len); /* Single-call compression functions mz_compress() and mz_compress2(): */ /* Returns MZ_OK on success, or one of the error codes from mz_deflate() on * failure. */ int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level); /* mz_compressBound() returns a (very) conservative upper bound on the amount of * data that could be generated by calling mz_compress(). */ mz_ulong mz_compressBound(mz_ulong source_len); /* Initializes a decompressor. 
*/ int mz_inflateInit(mz_streamp pStream); /* mz_inflateInit2() is like mz_inflateInit() with an additional option that * controls the window size and whether or not the stream has been wrapped with * a zlib header/footer: */ /* window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or * -MZ_DEFAULT_WINDOW_BITS (raw deflate). */ int mz_inflateInit2(mz_streamp pStream, int window_bits); /* Quickly resets a decompressor without having to reallocate anything. Same as * calling mz_inflateEnd() followed by mz_inflateInit()/mz_inflateInit2(). */ int mz_inflateReset(mz_streamp pStream); /* Decompresses the input stream to the output, consuming only as much of the * input as needed, and writing as much to the output as possible. */ /* Parameters: */ /* pStream is the stream to read from and write to. You must initialize/update * the next_in, avail_in, next_out, and avail_out members. */ /* flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH. */ /* On the first call, if flush is MZ_FINISH it's assumed the input and output * buffers are both sized large enough to decompress the entire stream in a * single call (this is slightly faster). */ /* MZ_FINISH implies that there are no more source bytes available beside * what's already in the input buffer, and that the output buffer is large * enough to hold the rest of the decompressed data. */ /* Return values: */ /* MZ_OK on success. Either more input is needed but not available, and/or * there's more output to be written but the output buffer is full. */ /* MZ_STREAM_END if all needed input has been consumed and all output bytes * have been written. For zlib streams, the adler-32 of the decompressed data * has also been verified. */ /* MZ_STREAM_ERROR if the stream is bogus. */ /* MZ_DATA_ERROR if the deflate stream is invalid. */ /* MZ_PARAM_ERROR if one of the parameters is invalid. */ /* MZ_BUF_ERROR if no forward progress is possible because the input buffer is * empty but the inflater needs more input to continue, or if the output buffer * is not large enough. Call mz_inflate() again */ /* with more input data, or with more room in the output buffer (except when * using single call decompression, described above). */ int mz_inflate(mz_streamp pStream, int flush); /* Deinitializes a decompressor. */ int mz_inflateEnd(mz_streamp pStream); /* Single-call decompression. */ /* Returns MZ_OK on success, or one of the error codes from mz_inflate() on * failure. */ int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); /* Returns a string description of the specified error code, or NULL if the * error code is invalid. */ const char *mz_error(int err); /* Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used * as a drop-in replacement for the subset of zlib that miniz.c supports. */ /* Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you * use zlib in the same project. 
*/ #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES typedef unsigned char Byte; typedef unsigned int uInt; typedef mz_ulong uLong; typedef Byte Bytef; typedef uInt uIntf; typedef char charf; typedef int intf; typedef void *voidpf; typedef uLong uLongf; typedef void *voidp; typedef void *const voidpc; #define Z_NULL 0 #define Z_NO_FLUSH MZ_NO_FLUSH #define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH #define Z_SYNC_FLUSH MZ_SYNC_FLUSH #define Z_FULL_FLUSH MZ_FULL_FLUSH #define Z_FINISH MZ_FINISH #define Z_BLOCK MZ_BLOCK #define Z_OK MZ_OK #define Z_STREAM_END MZ_STREAM_END #define Z_NEED_DICT MZ_NEED_DICT #define Z_ERRNO MZ_ERRNO #define Z_STREAM_ERROR MZ_STREAM_ERROR #define Z_DATA_ERROR MZ_DATA_ERROR #define Z_MEM_ERROR MZ_MEM_ERROR #define Z_BUF_ERROR MZ_BUF_ERROR #define Z_VERSION_ERROR MZ_VERSION_ERROR #define Z_PARAM_ERROR MZ_PARAM_ERROR #define Z_NO_COMPRESSION MZ_NO_COMPRESSION #define Z_BEST_SPEED MZ_BEST_SPEED #define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION #define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION #define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY #define Z_FILTERED MZ_FILTERED #define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY #define Z_RLE MZ_RLE #define Z_FIXED MZ_FIXED #define Z_DEFLATED MZ_DEFLATED #define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS #define alloc_func mz_alloc_func #define free_func mz_free_func #define internal_state mz_internal_state #define z_stream mz_stream #define deflateInit mz_deflateInit #define deflateInit2 mz_deflateInit2 #define deflateReset mz_deflateReset #define deflate mz_deflate #define deflateEnd mz_deflateEnd #define deflateBound mz_deflateBound #define compress mz_compress #define compress2 mz_compress2 #define compressBound mz_compressBound #define inflateInit mz_inflateInit #define inflateInit2 mz_inflateInit2 #define inflateReset mz_inflateReset #define inflate mz_inflate #define inflateEnd mz_inflateEnd #define uncompress mz_uncompress #define crc32 mz_crc32 #define adler32 mz_adler32 #define MAX_WBITS 15 #define MAX_MEM_LEVEL 9 #define zError mz_error #define ZLIB_VERSION MZ_VERSION #define ZLIB_VERNUM MZ_VERNUM #define ZLIB_VER_MAJOR MZ_VER_MAJOR #define ZLIB_VER_MINOR MZ_VER_MINOR #define ZLIB_VER_REVISION MZ_VER_REVISION #define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION #define zlibVersion mz_version #define zlib_version mz_version() #endif /* #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES */ #endif /* MINIZ_NO_ZLIB_APIS */ #ifdef __cplusplus } #endif #pragma once #include <assert.h> #include <stdint.h> #include <stdlib.h> #include <string.h> /* ------------------- Types and macros */ typedef unsigned char mz_uint8; typedef signed short mz_int16; typedef unsigned short mz_uint16; typedef unsigned int mz_uint32; typedef unsigned int mz_uint; typedef int64_t mz_int64; typedef uint64_t mz_uint64; typedef int mz_bool; #define MZ_FALSE (0) #define MZ_TRUE (1) /* Works around MSVC's spammy "warning C4127: conditional expression is * constant" message. 
*/ #ifdef _MSC_VER #define MZ_MACRO_END while (0, 0) #else #define MZ_MACRO_END while (0) #endif #ifdef MINIZ_NO_STDIO #define MZ_FILE void * #else #include <stdio.h> #define MZ_FILE FILE #endif /* #ifdef MINIZ_NO_STDIO */ #ifdef MINIZ_NO_TIME typedef struct mz_dummy_time_t_tag { int m_dummy; } mz_dummy_time_t; #define MZ_TIME_T mz_dummy_time_t #else #define MZ_TIME_T time_t #endif #define MZ_ASSERT(x) assert(x) #ifdef MINIZ_NO_MALLOC #define MZ_MALLOC(x) NULL #define MZ_FREE(x) (void)x, ((void)0) #define MZ_REALLOC(p, x) NULL #else #define MZ_MALLOC(x) malloc(x) #define MZ_FREE(x) free(x) #define MZ_REALLOC(p, x) realloc(p, x) #endif #define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b)) #define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b)) #define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj)) #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN #define MZ_READ_LE16(p) *((const mz_uint16 *)(p)) #define MZ_READ_LE32(p) *((const mz_uint32 *)(p)) #else #define MZ_READ_LE16(p) \ ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \ ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U)) #define MZ_READ_LE32(p) \ ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \ ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \ ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \ ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U)) #endif #define MZ_READ_LE64(p) \ (((mz_uint64)MZ_READ_LE32(p)) | \ (((mz_uint64)MZ_READ_LE32((const mz_uint8 *)(p) + sizeof(mz_uint32))) \ << 32U)) #ifdef _MSC_VER #define MZ_FORCEINLINE __forceinline #elif defined(__GNUC__) #define MZ_FORCEINLINE __inline__ __attribute__((__always_inline__)) #else #define MZ_FORCEINLINE inline #endif #ifdef __cplusplus extern "C" { #endif extern void *miniz_def_alloc_func(void *opaque, size_t items, size_t size); extern void miniz_def_free_func(void *opaque, void *address); extern void *miniz_def_realloc_func(void *opaque, void *address, size_t items, size_t size); #define MZ_UINT16_MAX (0xFFFFU) #define MZ_UINT32_MAX (0xFFFFFFFFU) #ifdef __cplusplus } #endif #pragma once #ifdef __cplusplus extern "C" { #endif /* ------------------- Low-level Compression API Definitions */ /* Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly * slower, and raw/dynamic blocks will be output more frequently). */ #define TDEFL_LESS_MEMORY 0 /* tdefl_init() compression flags logically OR'd together (low 12 bits contain * the max. number of probes per dictionary search): */ /* TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes * per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap * compression), 4095=Huffman+LZ (slowest/best compression). */ enum { TDEFL_HUFFMAN_ONLY = 0, TDEFL_DEFAULT_MAX_PROBES = 128, TDEFL_MAX_PROBES_MASK = 0xFFF }; /* TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before * the deflate data, and the Adler-32 of the source data at the end. Otherwise, * you'll get raw deflate data. */ /* TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even * when not writing zlib headers). */ /* TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more * efficient lazy parsing. */ /* TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's * initialization time to the minimum, but the output may vary from run to run * given the same input (depending on the contents of memory). */ /* TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1) */ /* TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled. 
*/ /* TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables. */ /* TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks. */ /* The low 12 bits are reserved to control the max # of hash probes per * dictionary lookup (see TDEFL_MAX_PROBES_MASK). */ enum { TDEFL_WRITE_ZLIB_HEADER = 0x01000, TDEFL_COMPUTE_ADLER32 = 0x02000, TDEFL_GREEDY_PARSING_FLAG = 0x04000, TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000, TDEFL_RLE_MATCHES = 0x10000, TDEFL_FILTER_MATCHES = 0x20000, TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000, TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000 }; /* High level compression functions: */ /* tdefl_compress_mem_to_heap() compresses a block in memory to a heap block * allocated via malloc(). */ /* On entry: */ /* pSrc_buf, src_buf_len: Pointer and size of source block to compress. */ /* flags: The max match finder probes (default is 128) logically OR'd against * the above flags. Higher probes are slower but improve compression. */ /* On return: */ /* Function returns a pointer to the compressed data, or NULL on failure. */ /* *pOut_len will be set to the compressed data's size, which could be larger * than src_buf_len on uncompressible data. */ /* The caller must free() the returned block when it's no longer needed. */ void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); /* tdefl_compress_mem_to_mem() compresses a block in memory to another block in * memory. */ /* Returns 0 on failure. */ size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); /* Compresses an image to a compressed PNG file in memory. */ /* On entry: */ /* pImage, w, h, and num_chans describe the image to compress. num_chans may be * 1, 2, 3, or 4. */ /* The image pitch in bytes per scanline will be w*num_chans. The leftmost * pixel on the top scanline is stored first in memory. */ /* level may range from [0,10], use MZ_NO_COMPRESSION, MZ_BEST_SPEED, * MZ_BEST_COMPRESSION, etc. or a decent default is MZ_DEFAULT_LEVEL */ /* If flip is true, the image will be flipped on the Y axis (useful for OpenGL * apps). */ /* On return: */ /* Function returns a pointer to the compressed data, or NULL on failure. */ /* *pLen_out will be set to the size of the PNG image file. */ /* The caller must mz_free() the returned heap block (which will typically be * larger than *pLen_out) when it's no longer needed. */ void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip); void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out); /* Output stream interface. The compressor uses this interface to write * compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time. */ typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); /* tdefl_compress_mem_to_output() compresses a block to an output stream. The * above helpers use this function internally. 
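   (In this build MINIZ_NO_MALLOC is defined above, so this helper and the other heap-based conveniences in this section depend on MZ_MALLOC, which expands to NULL, and will simply fail; the allocation-free tdefl_init()/tdefl_compress() path declared below is the one that remains usable.)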
*/ mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); enum { TDEFL_MAX_HUFF_TABLES = 3, TDEFL_MAX_HUFF_SYMBOLS_0 = 288, TDEFL_MAX_HUFF_SYMBOLS_1 = 32, TDEFL_MAX_HUFF_SYMBOLS_2 = 19, TDEFL_LZ_DICT_SIZE = 32768, TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1, TDEFL_MIN_MATCH_LEN = 3, TDEFL_MAX_MATCH_LEN = 258 }; /* TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed * output block (using static/fixed Huffman codes). */ #if TDEFL_LESS_MEMORY enum { TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 12, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS }; #else enum { TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 15, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS }; #endif /* The low-level tdefl functions below may be used directly if the above helper * functions aren't flexible enough. The low-level functions don't make any heap * allocations, unlike the above helper functions. */ typedef enum { TDEFL_STATUS_BAD_PARAM = -2, TDEFL_STATUS_PUT_BUF_FAILED = -1, TDEFL_STATUS_OKAY = 0, TDEFL_STATUS_DONE = 1 } tdefl_status; /* Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums */ typedef enum { TDEFL_NO_FLUSH = 0, TDEFL_SYNC_FLUSH = 2, TDEFL_FULL_FLUSH = 3, TDEFL_FINISH = 4 } tdefl_flush; /* tdefl's compression state structure. */ typedef struct { tdefl_put_buf_func_ptr m_pPut_buf_func; void *m_pPut_buf_user; mz_uint m_flags, m_max_probes[2]; int m_greedy_parsing; mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size; mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end; mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in, m_bit_buffer; mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit, m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index, m_wants_to_finish; tdefl_status m_prev_return_status; const void *m_pIn_buf; void *m_pOut_buf; size_t *m_pIn_buf_size, *m_pOut_buf_size; tdefl_flush m_flush; const mz_uint8 *m_pSrc; size_t m_src_buf_left, m_out_buf_ofs; mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1]; mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS]; mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS]; mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS]; mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE]; mz_uint16 m_next[TDEFL_LZ_DICT_SIZE]; mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE]; mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE]; } tdefl_compressor; /* Initializes the compressor. */ /* There is no corresponding deinit() function because the tdefl API's do not * dynamically allocate memory. */ /* pBut_buf_func: If NULL, output data will be supplied to the specified * callback. In this case, the user should call the tdefl_compress_buffer() API * for compression. */ /* If pBut_buf_func is NULL the user should always call the tdefl_compress() * API. */ /* flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER, * etc.) 
*/ tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); /* Compresses a block of data, consuming as much of the specified input buffer * as possible, and writing as much compressed data to the specified output * buffer as possible. */ tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush); /* tdefl_compress_buffer() is only usable when the tdefl_init() is called with a * non-NULL tdefl_put_buf_func_ptr. */ /* tdefl_compress_buffer() always consumes the entire input buffer. */ tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush); tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d); mz_uint32 tdefl_get_adler32(tdefl_compressor *d); /* Create tdefl_compress() flags given zlib-style compression parameters. */ /* level may range from [0,10] (where 10 is absolute max compression, but may be * much slower on some files) */ /* window_bits may be -15 (raw deflate) or 15 (zlib) */ /* strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY, * MZ_RLE, or MZ_FIXED */ mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy); #ifndef MINIZ_NO_MALLOC /* Allocate the tdefl_compressor structure in C so that */ /* non-C language bindings to tdefl_ API don't need to worry about */ /* structure size and allocation mechanism. */ tdefl_compressor *tdefl_compressor_alloc(void); void tdefl_compressor_free(tdefl_compressor *pComp); #endif #ifdef __cplusplus } #endif #pragma once /* ------------------- Low-level Decompression API Definitions */ #ifdef __cplusplus extern "C" { #endif /* Decompression flags used by tinfl_decompress(). */ /* TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and * ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the * input is a raw deflate stream. */ /* TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available * beyond the end of the supplied input buffer. If clear, the input buffer * contains all remaining input. */ /* TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large * enough to hold the entire decompressed stream. If clear, the output buffer is * at least the size of the dictionary (typically 32KB). */ /* TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the * decompressed bytes. */ enum { TINFL_FLAG_PARSE_ZLIB_HEADER = 1, TINFL_FLAG_HAS_MORE_INPUT = 2, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4, TINFL_FLAG_COMPUTE_ADLER32 = 8 }; /* High level decompression functions: */ /* tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block * allocated via malloc(). */ /* On entry: */ /* pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data * to decompress. */ /* On return: */ /* Function returns a pointer to the decompressed data, or NULL on failure. */ /* *pOut_len will be set to the decompressed data's size, which could be larger * than src_buf_len on uncompressible data. */ /* The caller must call mz_free() on the returned block when it's no longer * needed. */ void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); /* tinfl_decompress_mem_to_mem() decompresses a block in memory to another block * in memory. */ /* Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes * written on success. 
*/ #define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1)) size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); /* tinfl_decompress_mem_to_callback() decompresses a block in memory to an * internal 32KB buffer, and a user provided callback function will be called to * flush the buffer. */ /* Returns 1 on success or 0 on failure. */ typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); struct tinfl_decompressor_tag; typedef struct tinfl_decompressor_tag tinfl_decompressor; #ifndef MINIZ_NO_MALLOC /* Allocate the tinfl_decompressor structure in C so that */ /* non-C language bindings to tinfl_ API don't need to worry about */ /* structure size and allocation mechanism. */ tinfl_decompressor *tinfl_decompressor_alloc(void); void tinfl_decompressor_free(tinfl_decompressor *pDecomp); #endif /* Max size of LZ dictionary. */ #define TINFL_LZ_DICT_SIZE 32768 /* Return status. */ typedef enum { /* This flags indicates the inflator needs 1 or more input bytes to make forward progress, but the caller is indicating that no more are available. The compressed data */ /* is probably corrupted. If you call the inflator again with more bytes it'll try to continue processing the input but this is a BAD sign (either the data is corrupted or you called it incorrectly). */ /* If you call it again with no input you'll just get TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS again. */ TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS = -4, /* This flag indicates that one or more of the input parameters was obviously bogus. (You can try calling it again, but if you get this error the calling code is wrong.) */ TINFL_STATUS_BAD_PARAM = -3, /* This flags indicate the inflator is finished but the adler32 check of the uncompressed data didn't match. If you call it again it'll return TINFL_STATUS_DONE. */ TINFL_STATUS_ADLER32_MISMATCH = -2, /* This flags indicate the inflator has somehow failed (bad code, corrupted input, etc.). If you call it again without resetting via tinfl_init() it it'll just keep on returning the same status failure code. */ TINFL_STATUS_FAILED = -1, /* Any status code less than TINFL_STATUS_DONE must indicate a failure. */ /* This flag indicates the inflator has returned every byte of uncompressed data that it can, has consumed every byte that it needed, has successfully reached the end of the deflate stream, and */ /* if zlib headers and adler32 checking enabled that it has successfully checked the uncompressed data's adler32. If you call it again you'll just get TINFL_STATUS_DONE over and over again. */ TINFL_STATUS_DONE = 0, /* This flag indicates the inflator MUST have more input data (even 1 byte) before it can make any more forward progress, or you need to clear the TINFL_FLAG_HAS_MORE_INPUT */ /* flag on the next call if you don't have any more source data. If the source data was somehow corrupted it's also possible (but unlikely) for the inflator to keep on demanding input to */ /* proceed, so be sure to properly set the TINFL_FLAG_HAS_MORE_INPUT flag. */ TINFL_STATUS_NEEDS_MORE_INPUT = 1, /* This flag indicates the inflator definitely has 1 or more bytes of uncompressed data available, but it cannot write this data into the output buffer. 
*/ /* Note if the source compressed data was corrupted it's possible for the inflator to return a lot of uncompressed data to the caller. I've been assuming you know how much uncompressed data to expect */ /* (either exact or worst case) and will stop calling the inflator and fail after receiving too much. In pure streaming scenarios where you have no idea how many bytes to expect this may not be possible */ /* so I may need to add some code to address this. */ TINFL_STATUS_HAS_MORE_OUTPUT = 2 } tinfl_status; /* Initializes the decompressor to its initial state. */ #define tinfl_init(r) \ do { \ (r)->m_state = 0; \ } \ MZ_MACRO_END #define tinfl_get_adler32(r) (r)->m_check_adler32 /* Main low-level decompressor coroutine function. This is the only function * actually needed for decompression. All the other functions are just * high-level helpers for improved usability. */ /* This is a universal API, i.e. it can be used as a building block to build any * desired higher level decompression API. In the limit case, it can be called * once per every byte input or output. */ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags); /* Internal/private bits follow. */ enum { TINFL_MAX_HUFF_TABLES = 3, TINFL_MAX_HUFF_SYMBOLS_0 = 288, TINFL_MAX_HUFF_SYMBOLS_1 = 32, TINFL_MAX_HUFF_SYMBOLS_2 = 19, TINFL_FAST_LOOKUP_BITS = 10, TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS }; typedef struct { mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0]; mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE], m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2]; } tinfl_huff_table; #if MINIZ_HAS_64BIT_REGISTERS #define TINFL_USE_64BIT_BITBUF 1 #else #define TINFL_USE_64BIT_BITBUF 0 #endif #if TINFL_USE_64BIT_BITBUF typedef mz_uint64 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (64) #else typedef mz_uint32 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (32) #endif struct tinfl_decompressor_tag { mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type, m_check_adler32, m_dist, m_counter, m_num_extra, m_table_sizes[TINFL_MAX_HUFF_TABLES]; tinfl_bit_buf_t m_bit_buf; size_t m_dist_from_out_buf_start; tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES]; mz_uint8 m_raw_header[4], m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137]; }; #ifdef __cplusplus } #endif #pragma once /* ------------------- ZIP archive reading/writing */ #ifndef MINIZ_NO_ARCHIVE_APIS #ifdef __cplusplus extern "C" { #endif enum { /* Note: These enums can be reduced as needed to save memory or stack space - they are pretty conservative. */ MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 512, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 512 }; typedef struct { /* Central directory file index. */ mz_uint32 m_file_index; /* Byte offset of this entry in the archive's central directory. Note we * currently only support up to UINT_MAX or less bytes in the central dir. */ mz_uint64 m_central_dir_ofs; /* These fields are copied directly from the zip's central dir. */ mz_uint16 m_version_made_by; mz_uint16 m_version_needed; mz_uint16 m_bit_flag; mz_uint16 m_method; #ifndef MINIZ_NO_TIME MZ_TIME_T m_time; #endif /* CRC-32 of uncompressed data. */ mz_uint32 m_crc32; /* File's compressed size. */ mz_uint64 m_comp_size; /* File's uncompressed size. 
Note, I've seen some old archives where directory * entries had 512 bytes for their uncompressed sizes, but when you try to * unpack them you actually get 0 bytes. */ mz_uint64 m_uncomp_size; /* Zip internal and external file attributes. */ mz_uint16 m_internal_attr; mz_uint32 m_external_attr; /* Entry's local header file offset in bytes. */ mz_uint64 m_local_header_ofs; /* Size of comment in bytes. */ mz_uint32 m_comment_size; /* MZ_TRUE if the entry appears to be a directory. */ mz_bool m_is_directory; /* MZ_TRUE if the entry uses encryption/strong encryption (which miniz_zip * doesn't support) */ mz_bool m_is_encrypted; /* MZ_TRUE if the file is not encrypted, a patch file, and if it uses a * compression method we support. */ mz_bool m_is_supported; /* Filename. If string ends in '/' it's a subdirectory entry. */ /* Guaranteed to be zero terminated, may be truncated to fit. */ char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE]; /* Comment field. */ /* Guaranteed to be zero terminated, may be truncated to fit. */ char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE]; } mz_zip_archive_file_stat; typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n); typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n); typedef mz_bool (*mz_file_needs_keepalive)(void *pOpaque); struct mz_zip_internal_state_tag; typedef struct mz_zip_internal_state_tag mz_zip_internal_state; typedef enum { MZ_ZIP_MODE_INVALID = 0, MZ_ZIP_MODE_READING = 1, MZ_ZIP_MODE_WRITING = 2, MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3 } mz_zip_mode; typedef enum { MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100, MZ_ZIP_FLAG_IGNORE_PATH = 0x0200, MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400, MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800, MZ_ZIP_FLAG_VALIDATE_LOCATE_FILE_FLAG = 0x1000, /* if enabled, mz_zip_reader_locate_file() will be called on each file as its validated to ensure the func finds the file in the central dir (intended for testing) */ MZ_ZIP_FLAG_VALIDATE_HEADERS_ONLY = 0x2000, /* validate the local headers, but don't decompress the entire file and check the crc32 */ MZ_ZIP_FLAG_WRITE_ZIP64 = 0x4000, /* always use the zip64 file format, instead of the original zip file format with automatic switch to zip64. Use as flags parameter with mz_zip_writer_init*_v2 */ MZ_ZIP_FLAG_WRITE_ALLOW_READING = 0x8000, MZ_ZIP_FLAG_ASCII_FILENAME = 0x10000 } mz_zip_flags; typedef enum { MZ_ZIP_TYPE_INVALID = 0, MZ_ZIP_TYPE_USER, MZ_ZIP_TYPE_MEMORY, MZ_ZIP_TYPE_HEAP, MZ_ZIP_TYPE_FILE, MZ_ZIP_TYPE_CFILE, MZ_ZIP_TOTAL_TYPES } mz_zip_type; /* miniz error codes. Be sure to update mz_zip_get_error_string() if you add or * modify this enum. 
*/ typedef enum { MZ_ZIP_NO_ERROR = 0, MZ_ZIP_UNDEFINED_ERROR, MZ_ZIP_TOO_MANY_FILES, MZ_ZIP_FILE_TOO_LARGE, MZ_ZIP_UNSUPPORTED_METHOD, MZ_ZIP_UNSUPPORTED_ENCRYPTION, MZ_ZIP_UNSUPPORTED_FEATURE, MZ_ZIP_FAILED_FINDING_CENTRAL_DIR, MZ_ZIP_NOT_AN_ARCHIVE, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED, MZ_ZIP_UNSUPPORTED_MULTIDISK, MZ_ZIP_DECOMPRESSION_FAILED, MZ_ZIP_COMPRESSION_FAILED, MZ_ZIP_UNEXPECTED_DECOMPRESSED_SIZE, MZ_ZIP_CRC_CHECK_FAILED, MZ_ZIP_UNSUPPORTED_CDIR_SIZE, MZ_ZIP_ALLOC_FAILED, MZ_ZIP_FILE_OPEN_FAILED, MZ_ZIP_FILE_CREATE_FAILED, MZ_ZIP_FILE_WRITE_FAILED, MZ_ZIP_FILE_READ_FAILED, MZ_ZIP_FILE_CLOSE_FAILED, MZ_ZIP_FILE_SEEK_FAILED, MZ_ZIP_FILE_STAT_FAILED, MZ_ZIP_INVALID_PARAMETER, MZ_ZIP_INVALID_FILENAME, MZ_ZIP_BUF_TOO_SMALL, MZ_ZIP_INTERNAL_ERROR, MZ_ZIP_FILE_NOT_FOUND, MZ_ZIP_ARCHIVE_TOO_LARGE, MZ_ZIP_VALIDATION_FAILED, MZ_ZIP_WRITE_CALLBACK_FAILED, MZ_ZIP_TOTAL_ERRORS } mz_zip_error; typedef struct { mz_uint64 m_archive_size; mz_uint64 m_central_directory_file_ofs; /* We only support up to UINT32_MAX files in zip64 mode. */ mz_uint32 m_total_files; mz_zip_mode m_zip_mode; mz_zip_type m_zip_type; mz_zip_error m_last_error; mz_uint64 m_file_offset_alignment; mz_alloc_func m_pAlloc; mz_free_func m_pFree; mz_realloc_func m_pRealloc; void *m_pAlloc_opaque; mz_file_read_func m_pRead; mz_file_write_func m_pWrite; mz_file_needs_keepalive m_pNeeds_keepalive; void *m_pIO_opaque; mz_zip_internal_state *m_pState; } mz_zip_archive; typedef struct { mz_zip_archive *pZip; mz_uint flags; int status; #ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS mz_uint file_crc32; #endif mz_uint64 read_buf_size, read_buf_ofs, read_buf_avail, comp_remaining, out_buf_ofs, cur_file_ofs; mz_zip_archive_file_stat file_stat; void *pRead_buf; void *pWrite_buf; size_t out_blk_remain; tinfl_decompressor inflator; } mz_zip_reader_extract_iter_state; /* -------- ZIP reading */ /* Inits a ZIP archive reader. */ /* These functions read and validate the archive's central directory. */ mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint flags); mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint flags); #ifndef MINIZ_NO_STDIO /* Read a archive from a disk file. */ /* file_start_ofs is the file offset where the archive actually begins, or 0. */ /* actual_archive_size is the true total size of the archive, which may be * smaller than the file's actual size on disk. If zero the entire file is * treated as the archive. */ mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags); mz_bool mz_zip_reader_init_file_v2(mz_zip_archive *pZip, const char *pFilename, mz_uint flags, mz_uint64 file_start_ofs, mz_uint64 archive_size); /* Read an archive from an already opened FILE, beginning at the current file * position. */ /* The archive is assumed to be archive_size bytes long. If archive_size is < 0, * then the entire rest of the file is assumed to contain the archive. */ /* The FILE will NOT be closed when mz_zip_reader_end() is called. */ mz_bool mz_zip_reader_init_cfile(mz_zip_archive *pZip, MZ_FILE *pFile, mz_uint64 archive_size, mz_uint flags); #endif /* Ends archive reading, freeing all allocations, and closing the input archive * file if mz_zip_reader_init_file() was used. */ mz_bool mz_zip_reader_end(mz_zip_archive *pZip); /* -------- ZIP reading or writing */ /* Clears a mz_zip_archive struct to all zeros. */ /* Important: This must be done before passing the struct to any mz_zip * functions. 
*/ void mz_zip_zero_struct(mz_zip_archive *pZip); mz_zip_mode mz_zip_get_mode(mz_zip_archive *pZip); mz_zip_type mz_zip_get_type(mz_zip_archive *pZip); /* Returns the total number of files in the archive. */ mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip); mz_uint64 mz_zip_get_archive_size(mz_zip_archive *pZip); mz_uint64 mz_zip_get_archive_file_start_offset(mz_zip_archive *pZip); MZ_FILE *mz_zip_get_cfile(mz_zip_archive *pZip); /* Reads n bytes of raw archive data, starting at file offset file_ofs, to pBuf. */ size_t mz_zip_read_archive_data(mz_zip_archive *pZip, mz_uint64 file_ofs, void *pBuf, size_t n); /* All mz_zip funcs set the m_last_error field in the mz_zip_archive struct. * These functions retrieve/manipulate this field. */ /* Note that the m_last_error functionality is not thread safe. */ mz_zip_error mz_zip_set_last_error(mz_zip_archive *pZip, mz_zip_error err_num); mz_zip_error mz_zip_peek_last_error(mz_zip_archive *pZip); mz_zip_error mz_zip_clear_last_error(mz_zip_archive *pZip); mz_zip_error mz_zip_get_last_error(mz_zip_archive *pZip); const char *mz_zip_get_error_string(mz_zip_error mz_err); /* MZ_TRUE if the archive file entry is a directory entry. */ mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index); /* MZ_TRUE if the file is encrypted/strong encrypted. */ mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index); /* MZ_TRUE if the compression method is supported, and the file is not * encrypted, and the file is not a compressed patch file. */ mz_bool mz_zip_reader_is_file_supported(mz_zip_archive *pZip, mz_uint file_index); /* Retrieves the filename of an archive file entry. */ /* Returns the number of bytes written to pFilename, or if filename_buf_size is * 0 this function returns the number of bytes needed to fully store the * filename. */ mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, char *pFilename, mz_uint filename_buf_size); /* Attempts to locates a file in the archive's central directory. */ /* Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH */ /* Returns -1 if the file cannot be found. */ int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags); int mz_zip_reader_locate_file_v2(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags, mz_uint32 *file_index); /* Returns detailed information about an archive file entry. */ mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat); /* MZ_TRUE if the file is in zip64 format. */ /* A file is considered zip64 if it contained a zip64 end of central directory * marker, or if it contained any zip64 extended file information fields in the * central directory. */ mz_bool mz_zip_is_zip64(mz_zip_archive *pZip); /* Returns the total central directory size in bytes. */ /* The current max supported size is <= MZ_UINT32_MAX. */ size_t mz_zip_get_central_dir_size(mz_zip_archive *pZip); /* Extracts a archive file to a memory buffer using no memory allocation. */ /* There must be at least enough room on the stack to store the inflator's state * (~34KB or so). 
*/ mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size); mz_bool mz_zip_reader_extract_file_to_mem_no_alloc( mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size); /* Extracts a archive file to a memory buffer. */ mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags); mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags); /* Extracts a archive file to a dynamically allocated heap buffer. */ /* The memory will be allocated via the mz_zip_archive's alloc/realloc * functions. */ /* Returns NULL and sets the last error on failure. */ void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, size_t *pSize, mz_uint flags); void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, const char *pFilename, size_t *pSize, mz_uint flags); /* Extracts a archive file using a callback function to output the file's data. */ mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, mz_uint file_index, mz_file_write_func pCallback, void *pOpaque, mz_uint flags); mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip, const char *pFilename, mz_file_write_func pCallback, void *pOpaque, mz_uint flags); /* Extract a file iteratively */ mz_zip_reader_extract_iter_state * mz_zip_reader_extract_iter_new(mz_zip_archive *pZip, mz_uint file_index, mz_uint flags); mz_zip_reader_extract_iter_state * mz_zip_reader_extract_file_iter_new(mz_zip_archive *pZip, const char *pFilename, mz_uint flags); size_t mz_zip_reader_extract_iter_read(mz_zip_reader_extract_iter_state *pState, void *pvBuf, size_t buf_size); mz_bool mz_zip_reader_extract_iter_free(mz_zip_reader_extract_iter_state *pState); #ifndef MINIZ_NO_STDIO /* Extracts a archive file to a disk file and sets its last accessed and * modified times. */ /* This function only extracts files, not archive directory records. */ mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags); mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags); /* Extracts a archive file starting at the current position in the destination * FILE stream. 
*/ mz_bool mz_zip_reader_extract_to_cfile(mz_zip_archive *pZip, mz_uint file_index, MZ_FILE *File, mz_uint flags); mz_bool mz_zip_reader_extract_file_to_cfile(mz_zip_archive *pZip, const char *pArchive_filename, MZ_FILE *pFile, mz_uint flags); #endif #if 0 /* TODO */ typedef void *mz_zip_streaming_extract_state_ptr; mz_zip_streaming_extract_state_ptr mz_zip_streaming_extract_begin(mz_zip_archive *pZip, mz_uint file_index, mz_uint flags); uint64_t mz_zip_streaming_extract_get_size(mz_zip_archive *pZip, mz_zip_streaming_extract_state_ptr pState); uint64_t mz_zip_streaming_extract_get_cur_ofs(mz_zip_archive *pZip, mz_zip_streaming_extract_state_ptr pState); mz_bool mz_zip_streaming_extract_seek(mz_zip_archive *pZip, mz_zip_streaming_extract_state_ptr pState, uint64_t new_ofs); size_t mz_zip_streaming_extract_read(mz_zip_archive *pZip, mz_zip_streaming_extract_state_ptr pState, void *pBuf, size_t buf_size); mz_bool mz_zip_streaming_extract_end(mz_zip_archive *pZip, mz_zip_streaming_extract_state_ptr pState); #endif /* This function compares the archive's local headers, the optional local zip64 * extended information block, and the optional descriptor following the * compressed data vs. the data in the central directory. */ /* It also validates that each file can be successfully uncompressed unless the * MZ_ZIP_FLAG_VALIDATE_HEADERS_ONLY is specified. */ mz_bool mz_zip_validate_file(mz_zip_archive *pZip, mz_uint file_index, mz_uint flags); /* Validates an entire archive by calling mz_zip_validate_file() on each file. */ mz_bool mz_zip_validate_archive(mz_zip_archive *pZip, mz_uint flags); /* Misc utils/helpers, valid for ZIP reading or writing */ mz_bool mz_zip_validate_mem_archive(const void *pMem, size_t size, mz_uint flags, mz_zip_error *pErr); mz_bool mz_zip_validate_file_archive(const char *pFilename, mz_uint flags, mz_zip_error *pErr); /* Universal end function - calls either mz_zip_reader_end() or * mz_zip_writer_end(). */ mz_bool mz_zip_end(mz_zip_archive *pZip); /* -------- ZIP writing */ #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS /* Inits a ZIP archive writer. */ /*Set pZip->m_pWrite (and pZip->m_pIO_opaque) before calling mz_zip_writer_init * or mz_zip_writer_init_v2*/ /*The output is streamable, i.e. file_ofs in mz_file_write_func always increases * only by n*/ mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size); mz_bool mz_zip_writer_init_v2(mz_zip_archive *pZip, mz_uint64 existing_size, mz_uint flags); mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size); mz_bool mz_zip_writer_init_heap_v2(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size, mz_uint flags); #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning); mz_bool mz_zip_writer_init_file_v2(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning, mz_uint flags); mz_bool mz_zip_writer_init_cfile(mz_zip_archive *pZip, MZ_FILE *pFile, mz_uint flags); #endif /* Converts a ZIP archive reader object into a writer object, to allow efficient * in-place file appends to occur on an existing archive. */ /* For archives opened using mz_zip_reader_init_file, pFilename must be the * archive's filename so it can be reopened for writing. If the file can't be * reopened, mz_zip_reader_end() will be called. 
*/ /* For archives opened using mz_zip_reader_init_mem, the memory block must be * growable using the realloc callback (which defaults to realloc unless you've * overridden it). */ /* Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's * user provided m_pWrite function cannot be NULL. */ /* Note: In-place archive modification is not recommended unless you know what * you're doing, because if execution stops or something goes wrong before */ /* the archive is finalized the file's central directory will be hosed. */ mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename); mz_bool mz_zip_writer_init_from_reader_v2(mz_zip_archive *pZip, const char *pFilename, mz_uint flags); /* Adds the contents of a memory buffer to an archive. These functions record * the current local time into the archive. */ /* To add a directory entry, call this method with an archive name ending in a * forwardslash with an empty buffer. */ /* level_and_flags - compression level (0-10, see MZ_BEST_SPEED, * MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or * just set to MZ_DEFAULT_COMPRESSION. */ mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags); /* Like mz_zip_writer_add_mem(), except you can specify a file comment field, * and optionally supply the function with already compressed data. */ /* uncomp_size/uncomp_crc32 are only used if the MZ_ZIP_FLAG_COMPRESSED_DATA * flag is specified. */ mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32); mz_bool mz_zip_writer_add_mem_ex_v2( mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32, MZ_TIME_T *last_modified, const char *user_extra_data_local, mz_uint user_extra_data_local_len, const char *user_extra_data_central, mz_uint user_extra_data_central_len); /* Adds the contents of a file to an archive. This function also records the * disk file's modified time into the archive. */ /* File data is supplied via a read callback function. User * mz_zip_writer_add_(c)file to add a file directly.*/ mz_bool mz_zip_writer_add_read_buf_callback( mz_zip_archive *pZip, const char *pArchive_name, mz_file_read_func read_callback, void *callback_opaque, mz_uint64 size_to_add, const MZ_TIME_T *pFile_time, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, const char *user_extra_data_local, mz_uint user_extra_data_local_len, const char *user_extra_data_central, mz_uint user_extra_data_central_len); #ifndef MINIZ_NO_STDIO /* Adds the contents of a disk file to an archive. This function also records * the disk file's modified time into the archive. */ /* level_and_flags - compression level (0-10, see MZ_BEST_SPEED, * MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or * just set to MZ_DEFAULT_COMPRESSION. */ mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); /* Like mz_zip_writer_add_file(), except the file data is read from the * specified FILE stream. 
*/ mz_bool mz_zip_writer_add_cfile( mz_zip_archive *pZip, const char *pArchive_name, MZ_FILE *pSrc_file, mz_uint64 size_to_add, const MZ_TIME_T *pFile_time, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, const char *user_extra_data_local, mz_uint user_extra_data_local_len, const char *user_extra_data_central, mz_uint user_extra_data_central_len); #endif /* Adds a file to an archive by fully cloning the data from another archive. */ /* This function fully clones the source file's compressed data (no * recompression), along with its full filename, extra data (it may add or * modify the zip64 local header extra data field), and the optional descriptor * following the compressed data. */ mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint src_file_index); /* Finalizes the archive by writing the central directory records followed by * the end of central directory record. */ /* After an archive is finalized, the only valid call on the mz_zip_archive * struct is mz_zip_writer_end(). */ /* An archive must be manually finalized by calling this function for it to be * valid. */ mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip); /* Finalizes a heap archive, returning a pointer to the heap block and its size. */ /* The heap block will be allocated using the mz_zip_archive's alloc/realloc * callbacks. */ mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **ppBuf, size_t *pSize); /* Ends archive writing, freeing all allocations, and closing the output file if * mz_zip_writer_init_file() was used. */ /* Note for the archive to be valid, it *must* have been finalized before ending * (this function will not do it for you). */ mz_bool mz_zip_writer_end(mz_zip_archive *pZip); /* -------- Misc. high-level helper functions: */ /* mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically) * appends a memory blob to a ZIP archive. */ /* Note this is NOT a fully safe operation. If it crashes or dies in some way * your archive can be left in a screwed up state (without a central directory). */ /* level_and_flags - compression level (0-10, see MZ_BEST_SPEED, * MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or * just set to MZ_DEFAULT_COMPRESSION. */ /* TODO: Perhaps add an option to leave the existing central dir in place in * case the add dies? We could then truncate the file (so the old central dir * would be at the end) if something goes wrong. */ mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); mz_bool mz_zip_add_mem_to_archive_file_in_place_v2( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_zip_error *pErr); /* Reads a single file from an archive into a heap block. */ /* If pComment is not NULL, only the file with the specified comment will be * extracted. */ /* Returns NULL on failure. */ void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint flags); void *mz_zip_extract_archive_file_to_heap_v2(const char *pZip_filename, const char *pArchive_name, const char *pComment, size_t *pSize, mz_uint flags, mz_zip_error *pErr); #endif /* #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS */ #ifdef __cplusplus } #endif #endif /* MINIZ_NO_ARCHIVE_APIS */
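/* ------------------- Usage sketch. A minimal illustration (not part of the
   miniz API declared above) of driving the low-level, allocation-free
   tdefl/tinfl entry points, which are the ones that stay usable in this build
   where MINIZ_NO_MALLOC is defined and the *_to_heap / *_to_mem helpers cannot
   allocate their internal state. The function names (example_deflate,
   example_inflate) and the single-call buffer handling are illustrative
   assumptions, not functions provided by this header. */

/* The compressor state is roughly 300KB, so keep it out of the stack frame. */
static tdefl_compressor g_example_deflator;

/* Compresses src into dst in a single call; returns the compressed size, or 0
   on failure (for example, when dst_cap is too small). Emits a zlib header and
   an adler-32 footer, matching what example_inflate() below expects. */
static size_t example_deflate(const void *src, size_t src_len, void *dst,
                              size_t dst_cap) {
  size_t in_size = src_len, out_size = dst_cap;
  if (tdefl_init(&g_example_deflator, NULL, NULL,
                 TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES) !=
      TDEFL_STATUS_OKAY)
    return 0;
  if (tdefl_compress(&g_example_deflator, src, &in_size, dst, &out_size,
                     TDEFL_FINISH) != TDEFL_STATUS_DONE)
    return 0;
  return out_size;
}

/* Decompresses a zlib stream produced as above in a single call; returns the
   decompressed size, or 0 on failure. dst must be large enough to hold the
   entire result, which is why TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF is set. */
static size_t example_inflate(const void *src, size_t src_len, void *dst,
                              size_t dst_cap) {
  tinfl_decompressor inflator; /* ~11KB of state, fine on most stacks */
  size_t in_size = src_len, out_size = dst_cap;
  tinfl_init(&inflator);
  if (tinfl_decompress(&inflator, (const mz_uint8 *)src, &in_size,
                       (mz_uint8 *)dst, (mz_uint8 *)dst, &out_size,
                       TINFL_FLAG_PARSE_ZLIB_HEADER |
                           TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) !=
      TINFL_STATUS_DONE)
    return 0;
  return out_size;
}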
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxilCompression/LICENSE.TXT
Copyright 2013-2014 RAD Game Tools and Valve Software Copyright 2010-2014 Rich Geldreich and Tenacious Software LLC All Rights Reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxilCompression/CMakeLists.txt
# Copyright (C) Microsoft Corporation. All rights reserved. # This file is distributed under the University of Illinois Open Source License. # See LICENSE.TXT for details. add_llvm_library(LLVMDxilCompression DxilCompression.cpp miniz.c ADDITIONAL_HEADER_DIRS ) add_dependencies(LLVMDxilCompression intrinsics_gen)
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxilCompression/LLVMBuild.txt
; Copyright (C) Microsoft Corporation. All rights reserved. ; This file is distributed under the University of Illinois Open Source License. See LICENSE.TXT for details. ; ; This is an LLVMBuild description file for the components in this subdirectory. ; ; For more information on the LLVMBuild system, please see: ; ; http://llvm.org/docs/LLVMBuild.html ; ;===------------------------------------------------------------------------===; [component_0] type = Library name = DxilCompression parent = Libraries
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxilRootSignature/DxilRootSignatureValidator.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilRootSignature.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Provides support for manipulating root signature structures. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/DXIL/DxilConstants.h" #include "dxc/DxilRootSignature/DxilRootSignature.h" #include "dxc/Support/FileIOHelper.h" #include "dxc/Support/Global.h" #include "dxc/Support/WinFunctions.h" #include "dxc/Support/WinIncludes.h" #include "dxc/dxcapi.h" #include "llvm/IR/DiagnosticPrinter.h" #include "llvm/Support/raw_ostream.h" #include <algorithm> #include <ios> #include <set> #include <string> #include <utility> #include <vector> #include "dxc/DxilContainer/DxilPipelineStateValidation.h" #include <assert.h> // Needed for DxilPipelineStateValidation.h #include "DxilRootSignatureHelper.h" using namespace llvm; using std::string; namespace hlsl { using namespace root_sig_helper; ////////////////////////////////////////////////////////////////////////////// // Interval helper. template <typename T> class CIntervalCollection { private: std::set<T> m_set; public: const T *FindIntersectingInterval(const T &I) { auto it = m_set.find(I); if (it != m_set.end()) return &*it; return nullptr; } void Insert(const T &value) { auto result = m_set.insert(value); UNREFERENCED_PARAMETER(result); #if DBG DXASSERT(result.second, "otherwise interval collides with existing in collection"); #endif } }; ////////////////////////////////////////////////////////////////////////////// // Verifier classes. class DescriptorTableVerifier { public: void Verify(const DxilDescriptorRange1 *pRanges, unsigned NumRanges, unsigned iRTS, DiagnosticPrinter &DiagPrinter); }; class StaticSamplerVerifier { public: void Verify(const DxilStaticSamplerDesc *pDesc, DiagnosticPrinter &DiagPrinter); }; class RootSignatureVerifier { public: RootSignatureVerifier(); ~RootSignatureVerifier(); void AllowReservedRegisterSpace(bool bAllow); // Call this before calling VerifyShader, as it accumulates root signature // state. void VerifyRootSignature(const DxilVersionedRootSignatureDesc *pRootSignature, DiagnosticPrinter &DiagPrinter); void VerifyShader(DxilShaderVisibility VisType, const void *pPSVData, uint32_t PSVSize, DiagnosticPrinter &DiagPrinter); typedef enum NODE_TYPE { DESCRIPTOR_TABLE_ENTRY, ROOT_DESCRIPTOR, ROOT_CONSTANT, STATIC_SAMPLER } NODE_TYPE; private: static const unsigned kMinVisType = (unsigned)DxilShaderVisibility::All; static const unsigned kMaxVisType = (unsigned)DxilShaderVisibility::MaxValue; static const unsigned kMinDescType = (unsigned)DxilDescriptorRangeType::SRV; static const unsigned kMaxDescType = (unsigned)DxilDescriptorRangeType::MaxValue; struct RegisterRange { NODE_TYPE nt; unsigned space; unsigned lb; // inclusive lower bound unsigned ub; // inclusive upper bound unsigned iRP; unsigned iDTS; // Sort by space, then lower bound. bool operator<(const RegisterRange &other) const { return space < other.space || (space == other.space && ub < other.lb); } // Like a regular -1,0,1 comparison, but 0 indicates overlap. int overlap(const RegisterRange &other) const { if (space < other.space) return -1; if (space > other.space) return 1; if (ub < other.lb) return -1; if (lb > other.ub) return 1; return 0; } // Check containment. 
bool contains(const RegisterRange &other) const { return (space == other.space) && (lb <= other.lb && other.ub <= ub); } }; typedef CIntervalCollection<RegisterRange> RegisterRanges; void AddRegisterRange(unsigned iRTS, NODE_TYPE nt, unsigned iDTS, DxilDescriptorRangeType DescType, DxilShaderVisibility VisType, unsigned NumRegisters, unsigned BaseRegister, unsigned RegisterSpace, DiagnosticPrinter &DiagPrinter); const RegisterRange *FindCoveringInterval(DxilDescriptorRangeType RangeType, DxilShaderVisibility VisType, unsigned Num, unsigned LB, unsigned Space); RegisterRanges &GetRanges(DxilShaderVisibility VisType, DxilDescriptorRangeType DescType) { return RangeKinds[(unsigned)VisType][(unsigned)DescType]; } RegisterRanges RangeKinds[kMaxVisType + 1][kMaxDescType + 1]; bool m_bAllowReservedRegisterSpace; DxilRootSignatureFlags m_RootSignatureFlags; }; void DescriptorTableVerifier::Verify(const DxilDescriptorRange1 *pRanges, uint32_t NumRanges, uint32_t iRP, DiagnosticPrinter &DiagPrinter) { bool bHasSamplers = false; bool bHasResources = false; uint64_t iAppendStartSlot = 0; for (unsigned iDTS = 0; iDTS < NumRanges; iDTS++) { const DxilDescriptorRange1 *pRange = &pRanges[iDTS]; switch (pRange->RangeType) { case DxilDescriptorRangeType::SRV: case DxilDescriptorRangeType::UAV: case DxilDescriptorRangeType::CBV: bHasResources = true; break; case DxilDescriptorRangeType::Sampler: bHasSamplers = true; break; default: static_assert(DxilDescriptorRangeType::Sampler == DxilDescriptorRangeType::MaxValue, "otherwise, need to update cases here"); EAT(DiagPrinter << "Unsupported RangeType value " << (uint32_t)pRange->RangeType << " (descriptor table slot [" << iDTS << "], root parameter [" << iRP << "]).\n"); } // Samplers cannot be mixed with other resources. if (bHasResources && bHasSamplers) { EAT(DiagPrinter << "Samplers cannot be mixed with other " << "resource types in a descriptor table (root " << "parameter [" << iRP << "]).\n"); } // NumDescriptors is not 0. if (pRange->NumDescriptors == 0) { EAT(DiagPrinter << "NumDescriptors cannot be 0 (descriptor " << "table slot [" << iDTS << "], root parameter [" << iRP << "]).\n"); } // Range start. uint64_t iStartSlot = iAppendStartSlot; if (pRange->OffsetInDescriptorsFromTableStart != DxilDescriptorRangeOffsetAppend) { iStartSlot = pRange->OffsetInDescriptorsFromTableStart; } if (iStartSlot > UINT_MAX) { EAT(DiagPrinter << "Cannot append range with implicit lower " << "bound after an unbounded range (descriptor " << "table slot [" << iDTS << "], root parameter [" << iRP << "]).\n"); } // Descriptor range and shader register range overlow. if (pRange->NumDescriptors != UINT_MAX) { // Bounded range. uint64_t ub1 = (uint64_t)pRange->BaseShaderRegister + (uint64_t)pRange->NumDescriptors - 1ull; if (ub1 > UINT_MAX) { EAT(DiagPrinter << "Overflow for shader register range: " << "BaseShaderRegister=" << pRange->BaseShaderRegister << ", NumDescriptor=" << pRange->NumDescriptors << "; (descriptor table slot [" << iDTS << "], root parameter [" << iRP << "]).\n"); } uint64_t ub2 = (uint64_t)iStartSlot + (uint64_t)pRange->NumDescriptors - 1ull; if (ub2 > UINT_MAX) { EAT(DiagPrinter << "Overflow for descriptor range (descriptor " << "table slot [" << iDTS << "], root parameter [" << iRP << "])\n"); } iAppendStartSlot = iStartSlot + (uint64_t)pRange->NumDescriptors; } else { // Unbounded range. 
iAppendStartSlot = 1ull + (uint64_t)UINT_MAX; } } } RootSignatureVerifier::RootSignatureVerifier() { m_RootSignatureFlags = DxilRootSignatureFlags::None; m_bAllowReservedRegisterSpace = false; } RootSignatureVerifier::~RootSignatureVerifier() {} void RootSignatureVerifier::AllowReservedRegisterSpace(bool bAllow) { m_bAllowReservedRegisterSpace = bAllow; } const char *RangeTypeString(DxilDescriptorRangeType rt) { static const char *RangeType[] = {"SRV", "UAV", "CBV", "SAMPLER"}; static_assert(_countof(RangeType) == ((unsigned)DxilDescriptorRangeType::MaxValue + 1), "otherwise, need to update name array"); return (rt <= DxilDescriptorRangeType::MaxValue) ? RangeType[(unsigned)rt] : "unknown"; } const char *VisTypeString(DxilShaderVisibility vis) { static const char *Vis[] = {"ALL", "VERTEX", "HULL", "DOMAIN", "GEOMETRY", "PIXEL", "AMPLIFICATION", "MESH"}; static_assert(_countof(Vis) == ((unsigned)DxilShaderVisibility::MaxValue + 1), "otherwise, need to update name array"); unsigned idx = (unsigned)vis; return vis <= DxilShaderVisibility::MaxValue ? Vis[idx] : "unknown"; } static bool IsDxilShaderVisibility(DxilShaderVisibility v) { return v <= DxilShaderVisibility::MaxValue; } void RootSignatureVerifier::AddRegisterRange( unsigned iRP, NODE_TYPE nt, unsigned iDTS, DxilDescriptorRangeType DescType, DxilShaderVisibility VisType, unsigned NumRegisters, unsigned BaseRegister, unsigned RegisterSpace, DiagnosticPrinter &DiagPrinter) { RegisterRange interval; interval.space = RegisterSpace; interval.lb = BaseRegister; interval.ub = (NumRegisters != UINT_MAX) ? BaseRegister + NumRegisters - 1 : UINT_MAX; interval.nt = nt; interval.iDTS = iDTS; interval.iRP = iRP; if (!m_bAllowReservedRegisterSpace && (RegisterSpace >= DxilSystemReservedRegisterSpaceValuesStart) && (RegisterSpace <= DxilSystemReservedRegisterSpaceValuesEnd)) { if (nt == DESCRIPTOR_TABLE_ENTRY) { EAT(DiagPrinter << "Root parameter [" << iRP << "] descriptor table entry [" << iDTS << "] specifies RegisterSpace=" << std::hex << RegisterSpace << ", which is invalid since RegisterSpace values in the range " << "[" << std::hex << DxilSystemReservedRegisterSpaceValuesStart << "," << std::hex << DxilSystemReservedRegisterSpaceValuesEnd << "] are reserved for system use.\n"); } else { EAT(DiagPrinter << "Root parameter [" << iRP << "] specifies RegisterSpace=" << std::hex << RegisterSpace << ", which is invalid since RegisterSpace values in the range " << "[" << std::hex << DxilSystemReservedRegisterSpaceValuesStart << "," << std::hex << DxilSystemReservedRegisterSpaceValuesEnd << "] are reserved for system use.\n"); } } const RegisterRange *pNode = nullptr; DxilShaderVisibility NodeVis = VisType; if (VisType == DxilShaderVisibility::All) { // Check for overlap with each visibility type. for (unsigned iVT = kMinVisType; iVT <= kMaxVisType; iVT++) { pNode = GetRanges((DxilShaderVisibility)iVT, DescType) .FindIntersectingInterval(interval); if (pNode != nullptr) break; } } else { // Check for overlap with the same visibility. pNode = GetRanges(VisType, DescType).FindIntersectingInterval(interval); // Check for overlap with ALL visibility. 
if (pNode == nullptr) { pNode = GetRanges(DxilShaderVisibility::All, DescType) .FindIntersectingInterval(interval); NodeVis = DxilShaderVisibility::All; } } if (pNode != nullptr) { const int strSize = 132; char testString[strSize]; char nodeString[strSize]; switch (nt) { case DESCRIPTOR_TABLE_ENTRY: StringCchPrintfA( testString, strSize, "(root parameter [%u], visibility %s, descriptor table slot [%u])", iRP, VisTypeString(VisType), iDTS); break; case ROOT_DESCRIPTOR: case ROOT_CONSTANT: StringCchPrintfA(testString, strSize, "(root parameter [%u], visibility %s)", iRP, VisTypeString(VisType)); break; case STATIC_SAMPLER: StringCchPrintfA(testString, strSize, "(static sampler [%u], visibility %s)", iRP, VisTypeString(VisType)); break; default: DXASSERT_NOMSG(false); break; } switch (pNode->nt) { case DESCRIPTOR_TABLE_ENTRY: StringCchPrintfA( nodeString, strSize, "(root parameter[%u], visibility %s, descriptor table slot [%u])", pNode->iRP, VisTypeString(NodeVis), pNode->iDTS); break; case ROOT_DESCRIPTOR: case ROOT_CONSTANT: StringCchPrintfA(nodeString, strSize, "(root parameter [%u], visibility %s)", pNode->iRP, VisTypeString(NodeVis)); break; case STATIC_SAMPLER: StringCchPrintfA(nodeString, strSize, "(static sampler [%u], visibility %s)", pNode->iRP, VisTypeString(NodeVis)); break; default: DXASSERT_NOMSG(false); break; } EAT(DiagPrinter << "Shader register range of type " << RangeTypeString(DescType) << " " << testString << " overlaps with another " << "shader register range " << nodeString << ".\n"); } // Insert node. GetRanges(VisType, DescType).Insert(interval); } const RootSignatureVerifier::RegisterRange * RootSignatureVerifier::FindCoveringInterval(DxilDescriptorRangeType RangeType, DxilShaderVisibility VisType, unsigned Num, unsigned LB, unsigned Space) { RegisterRange RR; RR.space = Space; RR.lb = LB; RR.ub = LB + Num - 1; const RootSignatureVerifier::RegisterRange *pRange = GetRanges(DxilShaderVisibility::All, RangeType) .FindIntersectingInterval(RR); if (!pRange && VisType != DxilShaderVisibility::All) { pRange = GetRanges(VisType, RangeType).FindIntersectingInterval(RR); } if (pRange && !pRange->contains(RR)) { pRange = nullptr; } return pRange; } static DxilDescriptorRangeType GetRangeType(DxilRootParameterType RPT) { switch (RPT) { case DxilRootParameterType::CBV: return DxilDescriptorRangeType::CBV; case DxilRootParameterType::SRV: return DxilDescriptorRangeType::SRV; case DxilRootParameterType::UAV: return DxilDescriptorRangeType::UAV; default: static_assert(DxilRootParameterType::UAV == DxilRootParameterType::MaxValue, "otherwise, need to add cases here."); break; } DXASSERT_NOMSG(false); return DxilDescriptorRangeType::SRV; } void RootSignatureVerifier::VerifyRootSignature( const DxilVersionedRootSignatureDesc *pVersionedRootSignature, DiagnosticPrinter &DiagPrinter) { const DxilVersionedRootSignatureDesc *pUpconvertedRS = nullptr; // Up-convert root signature to the latest RS version. ConvertRootSignature(pVersionedRootSignature, DxilRootSignatureVersion::Version_1_1, &pUpconvertedRS); DXASSERT_NOMSG(pUpconvertedRS->Version == DxilRootSignatureVersion::Version_1_1); // Ensure this gets deleted as necessary. 
struct SigGuard { const DxilVersionedRootSignatureDesc *Orig, *Guard; SigGuard(const DxilVersionedRootSignatureDesc *pOrig, const DxilVersionedRootSignatureDesc *pGuard) : Orig(pOrig), Guard(pGuard) {} ~SigGuard() { if (Orig != Guard) { DeleteRootSignature(Guard); } } }; SigGuard S(pVersionedRootSignature, pUpconvertedRS); const DxilRootSignatureDesc1 *pRootSignature = &pUpconvertedRS->Desc_1_1; // Flags (assume they are bits that can be combined with OR). if ((pRootSignature->Flags & ~DxilRootSignatureFlags::ValidFlags) != DxilRootSignatureFlags::None) { EAT(DiagPrinter << "Unsupported bit-flag set (root signature flags " << std::hex << (uint32_t)pRootSignature->Flags << ").\n"); } m_RootSignatureFlags = pRootSignature->Flags; for (unsigned iRP = 0; iRP < pRootSignature->NumParameters; iRP++) { const DxilRootParameter1 *pSlot = &pRootSignature->pParameters[iRP]; // Shader visibility. DxilShaderVisibility Visibility = pSlot->ShaderVisibility; if (!IsDxilShaderVisibility(Visibility)) { EAT(DiagPrinter << "Unsupported ShaderVisibility value " << (uint32_t)Visibility << " (root parameter [" << iRP << "]).\n"); } DxilRootParameterType ParameterType = pSlot->ParameterType; switch (ParameterType) { case DxilRootParameterType::DescriptorTable: { DescriptorTableVerifier DTV; DTV.Verify(pSlot->DescriptorTable.pDescriptorRanges, pSlot->DescriptorTable.NumDescriptorRanges, iRP, DiagPrinter); for (unsigned iDTS = 0; iDTS < pSlot->DescriptorTable.NumDescriptorRanges; iDTS++) { const DxilDescriptorRange1 *pRange = &pSlot->DescriptorTable.pDescriptorRanges[iDTS]; unsigned RangeFlags = (unsigned)pRange->Flags; // Verify range flags. if (RangeFlags & ~(unsigned)DxilDescriptorRangeFlags::ValidFlags) { EAT(DiagPrinter << "Unsupported bit-flag set (descriptor range flags " << (uint32_t)pRange->Flags << ").\n"); } switch (pRange->RangeType) { case DxilDescriptorRangeType::Sampler: { if (RangeFlags & (unsigned)(DxilDescriptorRangeFlags::DataVolatile | DxilDescriptorRangeFlags::DataStatic | DxilDescriptorRangeFlags:: DataStaticWhileSetAtExecute)) { EAT(DiagPrinter << "Sampler descriptor ranges can't specify DATA_* flags " << "since there is no data pointed to by samplers " << "(descriptor range flags " << (uint32_t)pRange->Flags << ").\n"); } break; } default: { unsigned NumDataFlags = 0; if (RangeFlags & (unsigned)DxilDescriptorRangeFlags::DataVolatile) { NumDataFlags++; } if (RangeFlags & (unsigned)DxilDescriptorRangeFlags::DataStatic) { NumDataFlags++; } if (RangeFlags & (unsigned)DxilDescriptorRangeFlags::DataStaticWhileSetAtExecute) { NumDataFlags++; } if (NumDataFlags > 1) { EAT(DiagPrinter << "Descriptor range flags cannot specify more " "than one DATA_* flag " << "at a time (descriptor range flags " << (uint32_t)pRange->Flags << ").\n"); } if ((RangeFlags & (unsigned)DxilDescriptorRangeFlags::DataStatic) && (RangeFlags & (unsigned)DxilDescriptorRangeFlags::DescriptorsVolatile)) { EAT(DiagPrinter << "Descriptor range flags cannot specify DESCRIPTORS_VOLATILE " "with the DATA_STATIC flag at the same time (descriptor " "range flags " << (uint32_t)pRange->Flags << "). " << "DATA_STATIC_WHILE_SET_AT_EXECUTE is fine to combine with " "DESCRIPTORS_VOLATILE, since DESCRIPTORS_VOLATILE still " "requires descriptors don't change during execution. 
\n"); } break; } } AddRegisterRange(iRP, DESCRIPTOR_TABLE_ENTRY, iDTS, pRange->RangeType, Visibility, pRange->NumDescriptors, pRange->BaseShaderRegister, pRange->RegisterSpace, DiagPrinter); } break; } case DxilRootParameterType::Constants32Bit: AddRegisterRange(iRP, ROOT_CONSTANT, (unsigned)-1, DxilDescriptorRangeType::CBV, Visibility, 1, pSlot->Constants.ShaderRegister, pSlot->Constants.RegisterSpace, DiagPrinter); break; case DxilRootParameterType::CBV: case DxilRootParameterType::SRV: case DxilRootParameterType::UAV: { // Verify root descriptor flags. unsigned Flags = (unsigned)pSlot->Descriptor.Flags; if (Flags & ~(unsigned)DxilRootDescriptorFlags::ValidFlags) { EAT(DiagPrinter << "Unsupported bit-flag set (root descriptor flags " << std::hex << Flags << ").\n"); } unsigned NumDataFlags = 0; if (Flags & (unsigned)DxilRootDescriptorFlags::DataVolatile) { NumDataFlags++; } if (Flags & (unsigned)DxilRootDescriptorFlags::DataStatic) { NumDataFlags++; } if (Flags & (unsigned)DxilRootDescriptorFlags::DataStaticWhileSetAtExecute) { NumDataFlags++; } if (NumDataFlags > 1) { EAT(DiagPrinter << "Root descriptor flags cannot specify more " << "than one DATA_* flag at a time (root " << "descriptor flags " << NumDataFlags << ").\n"); } AddRegisterRange(iRP, ROOT_DESCRIPTOR, (unsigned)-1, GetRangeType(ParameterType), Visibility, 1, pSlot->Descriptor.ShaderRegister, pSlot->Descriptor.RegisterSpace, DiagPrinter); break; } default: static_assert(DxilRootParameterType::UAV == DxilRootParameterType::MaxValue, "otherwise, need to add cases here."); EAT(DiagPrinter << "Unsupported ParameterType value " << (uint32_t)ParameterType << " (root parameter " << iRP << ")\n"); } } for (unsigned iSS = 0; iSS < pRootSignature->NumStaticSamplers; iSS++) { const DxilStaticSamplerDesc *pSS = &pRootSignature->pStaticSamplers[iSS]; // Shader visibility. 
DxilShaderVisibility Visibility = pSS->ShaderVisibility; if (!IsDxilShaderVisibility(Visibility)) { EAT(DiagPrinter << "Unsupported ShaderVisibility value " << (uint32_t)Visibility << " (static sampler [" << iSS << "]).\n"); } StaticSamplerVerifier SSV; SSV.Verify(pSS, DiagPrinter); AddRegisterRange(iSS, STATIC_SAMPLER, (unsigned)-1, DxilDescriptorRangeType::Sampler, Visibility, 1, pSS->ShaderRegister, pSS->RegisterSpace, DiagPrinter); } } void RootSignatureVerifier::VerifyShader(DxilShaderVisibility VisType, const void *pPSVData, uint32_t PSVSize, DiagnosticPrinter &DiagPrinter) { DxilPipelineStateValidation PSV; IFTBOOL(PSV.InitFromPSV0(pPSVData, PSVSize), E_INVALIDARG); bool bShaderDeniedByRootSig = false; switch (VisType) { case DxilShaderVisibility::Vertex: if ((m_RootSignatureFlags & DxilRootSignatureFlags::DenyVertexShaderRootAccess) != DxilRootSignatureFlags::None) { bShaderDeniedByRootSig = true; } break; case DxilShaderVisibility::Hull: if ((m_RootSignatureFlags & DxilRootSignatureFlags::DenyHullShaderRootAccess) != DxilRootSignatureFlags::None) { bShaderDeniedByRootSig = true; } break; case DxilShaderVisibility::Domain: if ((m_RootSignatureFlags & DxilRootSignatureFlags::DenyDomainShaderRootAccess) != DxilRootSignatureFlags::None) { bShaderDeniedByRootSig = true; } break; case DxilShaderVisibility::Geometry: if ((m_RootSignatureFlags & DxilRootSignatureFlags::DenyGeometryShaderRootAccess) != DxilRootSignatureFlags::None) { bShaderDeniedByRootSig = true; } break; case DxilShaderVisibility::Pixel: if ((m_RootSignatureFlags & DxilRootSignatureFlags::DenyPixelShaderRootAccess) != DxilRootSignatureFlags::None) { bShaderDeniedByRootSig = true; } break; case DxilShaderVisibility::Amplification: if ((m_RootSignatureFlags & DxilRootSignatureFlags::DenyAmplificationShaderRootAccess) != DxilRootSignatureFlags::None) { bShaderDeniedByRootSig = true; } break; case DxilShaderVisibility::Mesh: if ((m_RootSignatureFlags & DxilRootSignatureFlags::DenyMeshShaderRootAccess) != DxilRootSignatureFlags::None) { bShaderDeniedByRootSig = true; } break; default: break; } bool bShaderHasRootBindings = false; for (unsigned iResource = 0; iResource < PSV.GetBindCount(); iResource++) { const PSVResourceBindInfo0 *pBindInfo0 = PSV.GetPSVResourceBindInfo0(iResource); DXASSERT_NOMSG(pBindInfo0); unsigned Space = pBindInfo0->Space; unsigned LB = pBindInfo0->LowerBound; unsigned UB = pBindInfo0->UpperBound; unsigned Num = (UB != UINT_MAX) ? (UB - LB + 1) : 1; PSVResourceType ResType = (PSVResourceType)pBindInfo0->ResType; switch (ResType) { case PSVResourceType::Sampler: { bShaderHasRootBindings = true; auto pCoveringRange = FindCoveringInterval( DxilDescriptorRangeType::Sampler, VisType, Num, LB, Space); if (!pCoveringRange) { EAT(DiagPrinter << "Shader sampler descriptor range (RegisterSpace=" << Space << ", NumDescriptors=" << Num << ", BaseShaderRegister=" << LB << ") is not fully bound in root signature.\n"); } break; } case PSVResourceType::SRVTyped: case PSVResourceType::SRVRaw: case PSVResourceType::SRVStructured: { bShaderHasRootBindings = true; auto pCoveringRange = FindCoveringInterval(DxilDescriptorRangeType::SRV, VisType, Num, LB, Space); if (pCoveringRange) { if (pCoveringRange->nt == ROOT_DESCRIPTOR && ResType == PSVResourceType::SRVTyped) { EAT(DiagPrinter << "A Shader is declaring a resource object as a texture using " << "a register mapped to a root descriptor SRV (RegisterSpace=" << Space << ", ShaderRegister=" << LB << "). 
" << "SRV or UAV root descriptors can only be Raw or Structured " "buffers.\n"); } } else { EAT(DiagPrinter << "Shader SRV descriptor range (RegisterSpace=" << Space << ", NumDescriptors=" << Num << ", BaseShaderRegister=" << LB << ") is not fully bound in root signature.\n"); } break; } case PSVResourceType::UAVTyped: case PSVResourceType::UAVRaw: case PSVResourceType::UAVStructured: case PSVResourceType::UAVStructuredWithCounter: { bShaderHasRootBindings = true; auto pCoveringRange = FindCoveringInterval(DxilDescriptorRangeType::UAV, VisType, Num, LB, Space); if (pCoveringRange) { if (pCoveringRange->nt == ROOT_DESCRIPTOR) { if (ResType == PSVResourceType::UAVTyped) { EAT(DiagPrinter << "A shader is declaring a typed UAV using a register mapped " << "to a root descriptor UAV (RegisterSpace=" << Space << ", ShaderRegister=" << LB << "). " << "SRV or UAV root descriptors can only be Raw or Structured " "buffers.\n"); } if (ResType == PSVResourceType::UAVStructuredWithCounter) { EAT(DiagPrinter << "A Shader is declaring a structured UAV with counter using " << "a register mapped to a root descriptor UAV (RegisterSpace=" << Space << ", ShaderRegister=" << LB << "). " << "SRV or UAV root descriptors can only be Raw or Structured " "buffers.\n"); } } } else { EAT(DiagPrinter << "Shader UAV descriptor range (RegisterSpace=" << Space << ", NumDescriptors=" << Num << ", BaseShaderRegister=" << LB << ") is not fully bound in root signature.\n"); } break; } case PSVResourceType::CBV: { bShaderHasRootBindings = true; auto pCoveringRange = FindCoveringInterval(DxilDescriptorRangeType::CBV, VisType, Num, LB, Space); if (!pCoveringRange) { EAT(DiagPrinter << "Shader CBV descriptor range (RegisterSpace=" << Space << ", NumDescriptors=" << Num << ", BaseShaderRegister=" << LB << ") is not fully bound in root signature.\n"); } break; } default: break; } } if (bShaderHasRootBindings && bShaderDeniedByRootSig) { EAT(DiagPrinter << "Shader has root bindings but root signature uses a DENY flag " << "to disallow root binding access to the shader stage.\n"); } } BOOL isNaN(const float &a) { static const unsigned exponentMask = 0x7f800000; static const unsigned mantissaMask = 0x007fffff; unsigned u = *(const unsigned *)&a; return (((u & exponentMask) == exponentMask) && (u & mantissaMask)); // NaN } static bool IsDxilTextureAddressMode(DxilTextureAddressMode v) { return DxilTextureAddressMode::Wrap <= v && v <= DxilTextureAddressMode::MirrorOnce; } static bool IsDxilComparisonFunc(DxilComparisonFunc v) { return DxilComparisonFunc::Never <= v && v <= DxilComparisonFunc::Always; } // This validation closely mirrors CCreateSamplerStateValidator's checks void StaticSamplerVerifier::Verify(const DxilStaticSamplerDesc *pDesc, DiagnosticPrinter &DiagPrinter) { if (!pDesc) { EAT(DiagPrinter << "Static sampler: A nullptr pSamplerDesc was specified.\n"); } bool bIsComparison = false; switch (pDesc->Filter) { case DxilFilter::MINIMUM_MIN_MAG_MIP_POINT: case DxilFilter::MINIMUM_MIN_MAG_POINT_MIP_LINEAR: case DxilFilter::MINIMUM_MIN_POINT_MAG_LINEAR_MIP_POINT: case DxilFilter::MINIMUM_MIN_POINT_MAG_MIP_LINEAR: case DxilFilter::MINIMUM_MIN_LINEAR_MAG_MIP_POINT: case DxilFilter::MINIMUM_MIN_LINEAR_MAG_POINT_MIP_LINEAR: case DxilFilter::MINIMUM_MIN_MAG_LINEAR_MIP_POINT: case DxilFilter::MINIMUM_MIN_MAG_MIP_LINEAR: case DxilFilter::MINIMUM_ANISOTROPIC: case DxilFilter::MAXIMUM_MIN_MAG_MIP_POINT: case DxilFilter::MAXIMUM_MIN_MAG_POINT_MIP_LINEAR: case DxilFilter::MAXIMUM_MIN_POINT_MAG_LINEAR_MIP_POINT: case 
DxilFilter::MAXIMUM_MIN_POINT_MAG_MIP_LINEAR: case DxilFilter::MAXIMUM_MIN_LINEAR_MAG_MIP_POINT: case DxilFilter::MAXIMUM_MIN_LINEAR_MAG_POINT_MIP_LINEAR: case DxilFilter::MAXIMUM_MIN_MAG_LINEAR_MIP_POINT: case DxilFilter::MAXIMUM_MIN_MAG_MIP_LINEAR: case DxilFilter::MAXIMUM_ANISOTROPIC: break; case DxilFilter::MIN_MAG_MIP_POINT: case DxilFilter::MIN_MAG_POINT_MIP_LINEAR: case DxilFilter::MIN_POINT_MAG_LINEAR_MIP_POINT: case DxilFilter::MIN_POINT_MAG_MIP_LINEAR: case DxilFilter::MIN_LINEAR_MAG_MIP_POINT: case DxilFilter::MIN_LINEAR_MAG_POINT_MIP_LINEAR: case DxilFilter::MIN_MAG_LINEAR_MIP_POINT: case DxilFilter::MIN_MAG_MIP_LINEAR: case DxilFilter::ANISOTROPIC: break; case DxilFilter::COMPARISON_MIN_MAG_MIP_POINT: case DxilFilter::COMPARISON_MIN_MAG_POINT_MIP_LINEAR: case DxilFilter::COMPARISON_MIN_POINT_MAG_LINEAR_MIP_POINT: case DxilFilter::COMPARISON_MIN_POINT_MAG_MIP_LINEAR: case DxilFilter::COMPARISON_MIN_LINEAR_MAG_MIP_POINT: case DxilFilter::COMPARISON_MIN_LINEAR_MAG_POINT_MIP_LINEAR: case DxilFilter::COMPARISON_MIN_MAG_LINEAR_MIP_POINT: case DxilFilter::COMPARISON_MIN_MAG_MIP_LINEAR: case DxilFilter::COMPARISON_ANISOTROPIC: bIsComparison = true; break; default: EAT(DiagPrinter << "Static sampler: Filter unrecognized.\n"); } if (!IsDxilTextureAddressMode(pDesc->AddressU)) { EAT(DiagPrinter << "Static sampler: AddressU unrecognized.\n"); } if (!IsDxilTextureAddressMode(pDesc->AddressV)) { EAT(DiagPrinter << "Static sampler: AddressV unrecognized.\n"); } if (!IsDxilTextureAddressMode(pDesc->AddressW)) { EAT(DiagPrinter << "Static sampler: AddressW unrecognized.\n"); } if (isNaN(pDesc->MipLODBias) || (pDesc->MipLODBias < DxilMipLodBiaxMin) || (pDesc->MipLODBias > DxilMipLodBiaxMax)) { EAT(DiagPrinter << "Static sampler: MipLODBias must be in the " << "range [" << DxilMipLodBiaxMin << " to " << DxilMipLodBiaxMax << "]. " << pDesc->MipLODBias << "specified.\n"); } if (pDesc->MaxAnisotropy > DxilMapAnisotropy) { EAT(DiagPrinter << "Static sampler: MaxAnisotropy must be in " << "the range [0 to " << DxilMapAnisotropy << "]. " << pDesc->MaxAnisotropy << " specified.\n"); } if (bIsComparison && !IsDxilComparisonFunc(pDesc->ComparisonFunc)) { EAT(DiagPrinter << "Static sampler: ComparisonFunc unrecognized."); } if (isNaN(pDesc->MinLOD)) { EAT(DiagPrinter << "Static sampler: MinLOD be in the range [-INF to +INF]. " << pDesc->MinLOD << " specified.\n"); } if (isNaN(pDesc->MaxLOD)) { EAT(DiagPrinter << "Static sampler: MaxLOD be in the range [-INF to +INF]. 
" << pDesc->MaxLOD << " specified.\n"); } } static DxilShaderVisibility GetVisibilityType(DXIL::ShaderKind ShaderKind) { switch (ShaderKind) { case DXIL::ShaderKind::Pixel: return DxilShaderVisibility::Pixel; case DXIL::ShaderKind::Vertex: return DxilShaderVisibility::Vertex; case DXIL::ShaderKind::Geometry: return DxilShaderVisibility::Geometry; case DXIL::ShaderKind::Hull: return DxilShaderVisibility::Hull; case DXIL::ShaderKind::Domain: return DxilShaderVisibility::Domain; case DXIL::ShaderKind::Amplification: return DxilShaderVisibility::Amplification; case DXIL::ShaderKind::Mesh: return DxilShaderVisibility::Mesh; default: return DxilShaderVisibility::All; } } bool VerifyRootSignatureWithShaderPSV( const DxilVersionedRootSignatureDesc *pDesc, DXIL::ShaderKind ShaderKind, const void *pPSVData, uint32_t PSVSize, llvm::raw_ostream &DiagStream) { try { RootSignatureVerifier RSV; DiagnosticPrinterRawOStream DiagPrinter(DiagStream); RSV.VerifyRootSignature(pDesc, DiagPrinter); RSV.VerifyShader(GetVisibilityType(ShaderKind), pPSVData, PSVSize, DiagPrinter); } catch (...) { return false; } return true; } bool VerifyRootSignature(const DxilVersionedRootSignatureDesc *pDesc, llvm::raw_ostream &DiagStream, bool bAllowReservedRegisterSpace) { try { RootSignatureVerifier RSV; RSV.AllowReservedRegisterSpace(bAllowReservedRegisterSpace); DiagnosticPrinterRawOStream DiagPrinter(DiagStream); RSV.VerifyRootSignature(pDesc, DiagPrinter); } catch (...) { return false; } return true; } } // namespace hlsl
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxilRootSignature/DxilRootSignature.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilRootSignature.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Provides support for manipulating root signature structures. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/DxilRootSignature/DxilRootSignature.h" #include "dxc/DXIL/DxilConstants.h" #include "dxc/Support/FileIOHelper.h" #include "dxc/Support/Global.h" #include "dxc/Support/WinFunctions.h" #include "dxc/Support/WinIncludes.h" #include "dxc/dxcapi.h" #include "llvm/IR/DiagnosticPrinter.h" #include "llvm/Support/raw_ostream.h" #include <algorithm> #include <set> #include <string> #include <utility> #include <vector> #include "DxilRootSignatureHelper.h" using namespace llvm; using std::string; namespace hlsl { ////////////////////////////////////////////////////////////////////////////// // Root signature handler. RootSignatureHandle::RootSignatureHandle(RootSignatureHandle &&other) { m_pDesc = nullptr; m_pSerialized = nullptr; std::swap(m_pDesc, other.m_pDesc); std::swap(m_pSerialized, other.m_pSerialized); } void RootSignatureHandle::Assign(const DxilVersionedRootSignatureDesc *pDesc, IDxcBlob *pSerialized) { Clear(); m_pDesc = pDesc; m_pSerialized = pSerialized; if (m_pSerialized) m_pSerialized->AddRef(); } void RootSignatureHandle::Clear() { hlsl::DeleteRootSignature(m_pDesc); m_pDesc = nullptr; if (m_pSerialized != nullptr) { m_pSerialized->Release(); m_pSerialized = nullptr; } } const uint8_t *RootSignatureHandle::GetSerializedBytes() const { DXASSERT_NOMSG(m_pSerialized != nullptr); return (uint8_t *)m_pSerialized->GetBufferPointer(); } unsigned RootSignatureHandle::GetSerializedSize() const { DXASSERT_NOMSG(m_pSerialized != nullptr); return m_pSerialized->GetBufferSize(); } void RootSignatureHandle::EnsureSerializedAvailable() { DXASSERT_NOMSG(!IsEmpty()); if (m_pSerialized == nullptr) { CComPtr<IDxcBlob> pResult; hlsl::SerializeRootSignature(m_pDesc, &pResult, nullptr, false); IFTBOOL(pResult != nullptr, E_FAIL); m_pSerialized = pResult.Detach(); } } void RootSignatureHandle::Deserialize() { DXASSERT_NOMSG(m_pSerialized && !m_pDesc); DeserializeRootSignature((uint8_t *)m_pSerialized->GetBufferPointer(), (uint32_t)m_pSerialized->GetBufferSize(), &m_pDesc); } void RootSignatureHandle::LoadSerialized(const uint8_t *pData, unsigned length) { DXASSERT_NOMSG(IsEmpty()); IDxcBlob *pCreated; IFT(DxcCreateBlobOnHeapCopy(pData, length, &pCreated)); m_pSerialized = pCreated; } ////////////////////////////////////////////////////////////////////////////// namespace root_sig_helper { // GetFlags/SetFlags overloads. DxilRootDescriptorFlags GetFlags(const DxilRootDescriptor &) { // Upconvert root parameter flags to be volatile. return DxilRootDescriptorFlags::DataVolatile; } void SetFlags(DxilRootDescriptor &, DxilRootDescriptorFlags) { // Drop the flags; none existed in rs_1_0. } DxilRootDescriptorFlags GetFlags(const DxilRootDescriptor1 &D) { return D.Flags; } void SetFlags(DxilRootDescriptor1 &D, DxilRootDescriptorFlags Flags) { D.Flags = Flags; } void SetFlags(DxilContainerRootDescriptor1 &D, DxilRootDescriptorFlags Flags) { D.Flags = (uint32_t)Flags; } DxilDescriptorRangeFlags GetFlags(const DxilDescriptorRange &D) { // Upconvert range flags to be volatile. 
DxilDescriptorRangeFlags Flags = DxilDescriptorRangeFlags::DescriptorsVolatile; // Sampler does not have data. if (D.RangeType != DxilDescriptorRangeType::Sampler) Flags = (DxilDescriptorRangeFlags)((unsigned)Flags | (unsigned) DxilDescriptorRangeFlags::DataVolatile); return Flags; } void SetFlags(DxilDescriptorRange &, DxilDescriptorRangeFlags) {} DxilDescriptorRangeFlags GetFlags(const DxilContainerDescriptorRange &D) { // Upconvert range flags to be volatile. DxilDescriptorRangeFlags Flags = DxilDescriptorRangeFlags::DescriptorsVolatile; // Sampler does not have data. if (D.RangeType != (uint32_t)DxilDescriptorRangeType::Sampler) Flags |= DxilDescriptorRangeFlags::DataVolatile; return Flags; } void SetFlags(DxilContainerDescriptorRange &, DxilDescriptorRangeFlags) {} DxilDescriptorRangeFlags GetFlags(const DxilDescriptorRange1 &D) { return D.Flags; } void SetFlags(DxilDescriptorRange1 &D, DxilDescriptorRangeFlags Flags) { D.Flags = Flags; } DxilDescriptorRangeFlags GetFlags(const DxilContainerDescriptorRange1 &D) { return (DxilDescriptorRangeFlags)D.Flags; } void SetFlags(DxilContainerDescriptorRange1 &D, DxilDescriptorRangeFlags Flags) { D.Flags = (uint32_t)Flags; } } // namespace root_sig_helper ////////////////////////////////////////////////////////////////////////////// template <typename T> void DeleteRootSignatureTemplate(const T &RS) { for (unsigned i = 0; i < RS.NumParameters; i++) { const auto &P = RS.pParameters[i]; if (P.ParameterType == DxilRootParameterType::DescriptorTable) { delete[] P.DescriptorTable.pDescriptorRanges; } } delete[] RS.pParameters; delete[] RS.pStaticSamplers; } void DeleteRootSignature(const DxilVersionedRootSignatureDesc *pRootSignature) { if (pRootSignature == nullptr) return; switch (pRootSignature->Version) { case DxilRootSignatureVersion::Version_1_0: DeleteRootSignatureTemplate<DxilRootSignatureDesc>( pRootSignature->Desc_1_0); break; case DxilRootSignatureVersion::Version_1_1: default: DXASSERT(pRootSignature->Version == DxilRootSignatureVersion::Version_1_1, "else version is incorrect"); DeleteRootSignatureTemplate<DxilRootSignatureDesc1>( pRootSignature->Desc_1_1); break; } delete pRootSignature; } namespace { // Dump root sig. 
void printRootSigFlags(DxilRootSignatureFlags Flags, raw_ostream &os) { if (Flags == DxilRootSignatureFlags::None) return; unsigned UFlags = (unsigned)Flags; std::pair<unsigned, std::string> FlagTable[] = { {unsigned(DxilRootSignatureFlags::AllowInputAssemblerInputLayout), "ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT"}, {unsigned(DxilRootSignatureFlags::DenyVertexShaderRootAccess), "DenyVertexShaderRootAccess"}, {unsigned(DxilRootSignatureFlags::DenyHullShaderRootAccess), "DenyHullShaderRootAccess"}, {unsigned(DxilRootSignatureFlags::DenyDomainShaderRootAccess), "DenyDomainShaderRootAccess"}, {unsigned(DxilRootSignatureFlags::DenyGeometryShaderRootAccess), "DenyGeometryShaderRootAccess"}, {unsigned(DxilRootSignatureFlags::DenyPixelShaderRootAccess), "DenyPixelShaderRootAccess"}, {unsigned(DxilRootSignatureFlags::AllowStreamOutput), "AllowStreamOutput"}, {unsigned(DxilRootSignatureFlags::LocalRootSignature), "LocalRootSignature"}, {unsigned(DxilRootSignatureFlags::DenyAmplificationShaderRootAccess), "DenyAmplificationShaderRootAccess"}, {unsigned(DxilRootSignatureFlags::DenyMeshShaderRootAccess), "DenyMeshShaderRootAccess"}, {unsigned(DxilRootSignatureFlags::CBVSRVUAVHeapDirectlyIndexed), "CBV_SRV_UAV_HEAP_DIRECTLY_INDEXED"}, {unsigned(DxilRootSignatureFlags::SamplerHeapDirectlyIndexed), "SAMPLER_HEAP_DIRECTLY_INDEXED"}, {unsigned(DxilRootSignatureFlags::AllowLowTierReservedHwCbLimit), "AllowLowTierReservedHwCbLimit"}, }; os << "RootFlags("; SmallVector<std::string, 4> FlagStrs; for (auto &f : FlagTable) { if (UFlags & f.first) FlagStrs.emplace_back(f.second); } auto it = FlagStrs.begin(); os << *(it++); for (; it != FlagStrs.end(); it++) { os << "|" << *it; } os << "),"; } void printDesc(unsigned Reg, unsigned Space, unsigned Size, raw_ostream &os) { os << Reg; if (Space) os << ", space=" << Space; if (Size && Size != 1) os << ", numDescriptors =" << Size; } void printDescType(DxilDescriptorRangeType Ty, raw_ostream &os) { switch (Ty) { case DxilDescriptorRangeType::CBV: { os << "CBV(b"; } break; case DxilDescriptorRangeType::Sampler: { os << "Sampler(s"; } break; case DxilDescriptorRangeType::UAV: { os << "UAV(u"; } break; case DxilDescriptorRangeType::SRV: { os << "SRV(t"; } break; } } template <typename RangeTy> void printDescRange(RangeTy &R, raw_ostream &os) { printDescType(R.RangeType, os); printDesc(R.BaseShaderRegister, R.RegisterSpace, R.NumDescriptors, os); os << ")"; } template <typename TableTy> void printDescTable(TableTy &Tab, raw_ostream &os) { for (unsigned i = 0; i < Tab.NumDescriptorRanges; i++) { auto *pRange = Tab.pDescriptorRanges + i; printDescRange(*pRange, os); os << ","; } } void printVisibility(DxilShaderVisibility v, raw_ostream &os) { switch (v) { default: break; case DxilShaderVisibility::Amplification: os << ",visibility=SHADER_VISIBILITY_AMPLIFICATION"; break; case DxilShaderVisibility::Domain: os << ",visibility=SHADER_VISIBILITY_DOMAIN"; break; case DxilShaderVisibility::Geometry: os << ",visibility=SHADER_VISIBILITY_GEOMETRY"; break; case DxilShaderVisibility::Hull: os << ",visibility=SHADER_VISIBILITY_HULL"; break; case DxilShaderVisibility::Mesh: os << ",visibility=SHADER_VISIBILITY_MESH"; break; case DxilShaderVisibility::Pixel: os << ",visibility=SHADER_VISIBILITY_PIXEL"; break; case DxilShaderVisibility::Vertex: os << ",visibility=SHADER_VISIBILITY_VERTEX"; break; } } template <typename ParamTy> void printRootParam(ParamTy &Param, raw_ostream &os) { switch (Param.ParameterType) { case DxilRootParameterType::CBV: printDescType(DxilDescriptorRangeType::CBV, os); 
    printDesc(Param.Descriptor.ShaderRegister, Param.Descriptor.RegisterSpace,
              0, os);
    break;
  case DxilRootParameterType::SRV:
    printDescType(DxilDescriptorRangeType::SRV, os);
    printDesc(Param.Descriptor.ShaderRegister, Param.Descriptor.RegisterSpace,
              0, os);
    break;
  case DxilRootParameterType::UAV:
    printDescType(DxilDescriptorRangeType::UAV, os);
    printDesc(Param.Descriptor.ShaderRegister, Param.Descriptor.RegisterSpace,
              0, os);
    break;
  case DxilRootParameterType::Constants32Bit:
    os << "RootConstants(num32BitConstants=" << Param.Constants.Num32BitValues
       << "b";
    printDesc(Param.Constants.ShaderRegister, Param.Constants.RegisterSpace, 0,
              os);
    break;
  case DxilRootParameterType::DescriptorTable:
    os << "DescriptorTable(";
    printDescTable(Param.DescriptorTable, os);
    break;
  }
  printVisibility(Param.ShaderVisibility, os);
  os << ")";
}

void printSampler(DxilStaticSamplerDesc &Sampler, raw_ostream &os) {
  // StaticSampler(s4, filter=FILTER_MIN_MAG_MIP_LINEAR)
  os << "StaticSampler(s" << Sampler.ShaderRegister
     << ", space=" << Sampler.RegisterSpace;
  // TODO: set the fields.
  printVisibility(Sampler.ShaderVisibility, os);
  os << ")";
}

template <typename DescTy> void printRootSig(DescTy &RS, raw_ostream &os) {
  printRootSigFlags(RS.Flags, os);
  for (unsigned i = 0; i < RS.NumParameters; i++) {
    auto *pParam = RS.pParameters + i;
    printRootParam(*pParam, os);
    os << ",";
  }
  for (unsigned i = 0; i < RS.NumStaticSamplers; i++) {
    auto *pSampler = RS.pStaticSamplers + i;
    printSampler(*pSampler, os);
    os << ",";
  }
}

} // namespace

void printRootSignature(const DxilVersionedRootSignatureDesc &RS,
                        raw_ostream &os) {
  switch (RS.Version) {
  case DxilRootSignatureVersion::Version_1_0:
    printRootSig(RS.Desc_1_0, os);
    break;
  case DxilRootSignatureVersion::Version_1_1:
  default:
    DXASSERT(RS.Version == DxilRootSignatureVersion::Version_1_1,
             "else version is incorrect");
    printRootSig(RS.Desc_1_1, os);
    break;
  }
  os.flush();
}

} // namespace hlsl
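printRootSigFlags above turns a bit mask into the textual RootFlags(...) clause by walking a table of (bit, name) pairs and joining the names of the set bits with '|'. A minimal self-contained sketch of that pattern follows; the flag values and names below are placeholders, not the DXIL constants.

#include <cstdio>
#include <string>
#include <utility>

// Join the names of all set bits with '|', mirroring the table walk above.
static std::string flagsToString(unsigned f) {
  static const std::pair<unsigned, const char *> table[] = {
      {0x1, "ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT"},
      {0x2, "DENY_VERTEX_SHADER_ROOT_ACCESS"},
      {0x8, "ALLOW_STREAM_OUTPUT"},
  };
  std::string out;
  for (const auto &e : table) {
    if (f & e.first) {
      if (!out.empty())
        out += "|";
      out += e.second;
    }
  }
  return out.empty() ? std::string() : "RootFlags(" + out + "),";
}

int main() {
  // Prints: RootFlags(ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT|ALLOW_STREAM_OUTPUT),
  std::printf("%s\n", flagsToString(0x1 | 0x8).c_str());
  return 0;
}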
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxilRootSignature/DxilRootSignatureConvert.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilRootSignatureConvert.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Convert root signature structures. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/DXIL/DxilConstants.h" #include "dxc/DxilRootSignature/DxilRootSignature.h" #include "dxc/Support/FileIOHelper.h" #include "dxc/Support/Global.h" #include "dxc/Support/WinFunctions.h" #include "dxc/Support/WinIncludes.h" #include "dxc/dxcapi.h" #include "llvm/IR/DiagnosticPrinter.h" #include "llvm/Support/raw_ostream.h" #include <algorithm> #include <set> #include <string> #include <utility> #include <vector> #include "DxilRootSignatureHelper.h" using namespace llvm; using std::string; namespace hlsl { using namespace root_sig_helper; ////////////////////////////////////////////////////////////////////////////// template <typename IN_DXIL_ROOT_SIGNATURE_DESC, typename OUT_DXIL_ROOT_SIGNATURE_DESC, typename OUT_DXIL_ROOT_PARAMETER, typename OUT_DXIL_ROOT_DESCRIPTOR, typename OUT_DXIL_DESCRIPTOR_RANGE> void ConvertRootSignatureTemplate(const IN_DXIL_ROOT_SIGNATURE_DESC &DescIn, DxilRootSignatureVersion DescVersionOut, OUT_DXIL_ROOT_SIGNATURE_DESC &DescOut) { const IN_DXIL_ROOT_SIGNATURE_DESC *pDescIn = &DescIn; OUT_DXIL_ROOT_SIGNATURE_DESC *pDescOut = &DescOut; // Root signature descriptor. pDescOut->Flags = pDescIn->Flags; pDescOut->NumParameters = 0; pDescOut->NumStaticSamplers = 0; // Intialize all pointers early so that clean up works properly. pDescOut->pParameters = nullptr; pDescOut->pStaticSamplers = nullptr; // Root signature parameters. 
if (pDescIn->NumParameters > 0) { pDescOut->pParameters = new OUT_DXIL_ROOT_PARAMETER[pDescIn->NumParameters]; pDescOut->NumParameters = pDescIn->NumParameters; memset((void *)pDescOut->pParameters, 0, pDescOut->NumParameters * sizeof(OUT_DXIL_ROOT_PARAMETER)); } for (unsigned iRP = 0; iRP < pDescIn->NumParameters; iRP++) { const auto &ParamIn = pDescIn->pParameters[iRP]; OUT_DXIL_ROOT_PARAMETER &ParamOut = (OUT_DXIL_ROOT_PARAMETER &)pDescOut->pParameters[iRP]; ParamOut.ParameterType = ParamIn.ParameterType; ParamOut.ShaderVisibility = ParamIn.ShaderVisibility; switch (ParamIn.ParameterType) { case DxilRootParameterType::DescriptorTable: { ParamOut.DescriptorTable.pDescriptorRanges = nullptr; unsigned NumRanges = ParamIn.DescriptorTable.NumDescriptorRanges; if (NumRanges > 0) { ParamOut.DescriptorTable.pDescriptorRanges = new OUT_DXIL_DESCRIPTOR_RANGE[NumRanges]; ParamOut.DescriptorTable.NumDescriptorRanges = NumRanges; } for (unsigned i = 0; i < NumRanges; i++) { const auto &RangeIn = ParamIn.DescriptorTable.pDescriptorRanges[i]; OUT_DXIL_DESCRIPTOR_RANGE &RangeOut = (OUT_DXIL_DESCRIPTOR_RANGE &) ParamOut.DescriptorTable.pDescriptorRanges[i]; RangeOut.RangeType = RangeIn.RangeType; RangeOut.NumDescriptors = RangeIn.NumDescriptors; RangeOut.BaseShaderRegister = RangeIn.BaseShaderRegister; RangeOut.RegisterSpace = RangeIn.RegisterSpace; RangeOut.OffsetInDescriptorsFromTableStart = RangeIn.OffsetInDescriptorsFromTableStart; DxilDescriptorRangeFlags Flags = GetFlags(RangeIn); SetFlags(RangeOut, Flags); } break; } case DxilRootParameterType::Constants32Bit: { ParamOut.Constants.Num32BitValues = ParamIn.Constants.Num32BitValues; ParamOut.Constants.ShaderRegister = ParamIn.Constants.ShaderRegister; ParamOut.Constants.RegisterSpace = ParamIn.Constants.RegisterSpace; break; } case DxilRootParameterType::CBV: case DxilRootParameterType::SRV: case DxilRootParameterType::UAV: { ParamOut.Descriptor.ShaderRegister = ParamIn.Descriptor.ShaderRegister; ParamOut.Descriptor.RegisterSpace = ParamIn.Descriptor.RegisterSpace; DxilRootDescriptorFlags Flags = GetFlags(ParamIn.Descriptor); SetFlags(ParamOut.Descriptor, Flags); break; } default: IFT(E_FAIL); } } // Static samplers. if (pDescIn->NumStaticSamplers > 0) { pDescOut->pStaticSamplers = new DxilStaticSamplerDesc[pDescIn->NumStaticSamplers]; pDescOut->NumStaticSamplers = pDescIn->NumStaticSamplers; memcpy((void *)pDescOut->pStaticSamplers, pDescIn->pStaticSamplers, pDescOut->NumStaticSamplers * sizeof(DxilStaticSamplerDesc)); } } void ConvertRootSignature( const DxilVersionedRootSignatureDesc *pRootSignatureIn, DxilRootSignatureVersion RootSignatureVersionOut, const DxilVersionedRootSignatureDesc **ppRootSignatureOut) { IFTBOOL(pRootSignatureIn != nullptr && ppRootSignatureOut != nullptr, E_INVALIDARG); *ppRootSignatureOut = nullptr; if (pRootSignatureIn->Version == RootSignatureVersionOut) { // No conversion. Return the original root signature pointer; no cloning. *ppRootSignatureOut = pRootSignatureIn; return; } DxilVersionedRootSignatureDesc *pRootSignatureOut = nullptr; try { pRootSignatureOut = new DxilVersionedRootSignatureDesc(); memset(pRootSignatureOut, 0, sizeof(*pRootSignatureOut)); // Convert root signature. 
    switch (RootSignatureVersionOut) {
    case DxilRootSignatureVersion::Version_1_0:
      switch (pRootSignatureIn->Version) {
      case DxilRootSignatureVersion::Version_1_1:
        pRootSignatureOut->Version = DxilRootSignatureVersion::Version_1_0;
        ConvertRootSignatureTemplate<DxilRootSignatureDesc1,
                                     DxilRootSignatureDesc, DxilRootParameter,
                                     DxilRootDescriptor, DxilDescriptorRange>(
            pRootSignatureIn->Desc_1_1, DxilRootSignatureVersion::Version_1_0,
            pRootSignatureOut->Desc_1_0);
        break;
      default:
        IFT(E_INVALIDARG);
      }
      break;
    case DxilRootSignatureVersion::Version_1_1:
      switch (pRootSignatureIn->Version) {
      case DxilRootSignatureVersion::Version_1_0:
        pRootSignatureOut->Version = DxilRootSignatureVersion::Version_1_1;
        ConvertRootSignatureTemplate<DxilRootSignatureDesc,
                                     DxilRootSignatureDesc1, DxilRootParameter1,
                                     DxilRootDescriptor1, DxilDescriptorRange1>(
            pRootSignatureIn->Desc_1_0, DxilRootSignatureVersion::Version_1_1,
            pRootSignatureOut->Desc_1_1);
        break;
      default:
        IFT(E_INVALIDARG);
      }
      break;
    default:
      IFT(E_INVALIDARG);
      break;
    }
  } catch (...) {
    DeleteRootSignature(pRootSignatureOut);
    throw;
  }
  *ppRootSignatureOut = pRootSignatureOut;
}

} // namespace hlsl
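The conversion templates above are symmetric except for flags: a 1.0 root signature carries no per-range or per-descriptor flags, so GetFlags supplies conservative defaults when up-converting to 1.1 (descriptors volatile, plus data volatile for anything that is not a sampler), while SetFlags simply drops the flags on the way back down to 1.0. A small sketch of that default-flag policy, with invented enum values standing in for the real types:

#include <cstdio>

enum RangeType { SRV, UAV, CBV, Sampler };
enum RangeFlags : unsigned {
  DescriptorsVolatile = 0x1,
  DataVolatile = 0x2,
};

// Default flags assumed for a version 1.0 descriptor range.
static unsigned defaultFlagsFor10Range(RangeType t) {
  unsigned f = DescriptorsVolatile;
  if (t != Sampler) // samplers reference no data
    f |= DataVolatile;
  return f;
}

int main() {
  std::printf("SRV range flags:     0x%x\n", defaultFlagsFor10Range(SRV));     // 0x3
  std::printf("Sampler range flags: 0x%x\n", defaultFlagsFor10Range(Sampler)); // 0x1
  return 0;
}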
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxilRootSignature/CMakeLists.txt
# Copyright (C) Microsoft Corporation. All rights reserved.
# This file is distributed under the University of Illinois Open Source License. See LICENSE.TXT for details.

add_llvm_library(LLVMDxilRootSignature
  DxilRootSignature.cpp
  DxilRootSignatureConvert.cpp
  DxilRootSignatureSerializer.cpp
  DxilRootSignatureValidator.cpp

  ADDITIONAL_HEADER_DIRS
  ${LLVM_MAIN_INCLUDE_DIR}/llvm/IR
)

add_dependencies(LLVMDxilRootSignature intrinsics_gen)
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxilRootSignature/LLVMBuild.txt
; Copyright (C) Microsoft Corporation. All rights reserved.
; This file is distributed under the University of Illinois Open Source License. See LICENSE.TXT for details.
;
; This is an LLVMBuild description file for the components in this subdirectory.
;
; For more information on the LLVMBuild system, please see:
;
;   http://llvm.org/docs/LLVMBuild.html
;
;===------------------------------------------------------------------------===;

[component_0]
type = Library
name = DxilRootSignature
parent = Libraries
required_libraries = BitReader Core DXIL DxilContainer DxcSupport IPA Support
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxilRootSignature/DxilRootSignatureSerializer.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilRootSignatureSerializer.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Serializer for root signature structures. // // // /////////////////////////////////////////////////////////////////////////////// #include "dxc/DXIL/DxilConstants.h" #include "dxc/DxilRootSignature/DxilRootSignature.h" #include "dxc/Support/FileIOHelper.h" #include "dxc/Support/Global.h" #include "dxc/Support/WinFunctions.h" #include "dxc/Support/WinIncludes.h" #include "dxc/dxcapi.h" #include "llvm/IR/DiagnosticPrinter.h" #include "llvm/Support/raw_ostream.h" #include <algorithm> #include <set> #include <string> #include <utility> #include <vector> #include "DxilRootSignatureHelper.h" using namespace llvm; using std::string; namespace hlsl { using namespace root_sig_helper; ////////////////////////////////////////////////////////////////////////////// // Simple serializer. class SimpleSerializer { struct Segment { void *pData; unsigned cbSize; bool bOwner; unsigned Offset; Segment *pNext; }; public: SimpleSerializer(); ~SimpleSerializer(); HRESULT AddBlock(void *pData, unsigned cbSize, unsigned *pOffset); HRESULT ReserveBlock(void **ppData, unsigned cbSize, unsigned *pOffset); HRESULT Compact(char *pData, unsigned cbSize); unsigned GetSize(); protected: unsigned m_cbSegments; Segment *m_pSegment; Segment **m_ppSegment; }; SimpleSerializer::SimpleSerializer() { m_cbSegments = 0; m_pSegment = nullptr; m_ppSegment = &m_pSegment; } SimpleSerializer::~SimpleSerializer() { while (m_pSegment) { Segment *pSegment = m_pSegment; m_pSegment = pSegment->pNext; if (pSegment->bOwner) { delete[](char *) pSegment->pData; } delete pSegment; } } HRESULT SimpleSerializer::AddBlock(void *pData, unsigned cbSize, unsigned *pOffset) { Segment *pSegment = nullptr; IFRBOOL(!(cbSize != 0 && pData == nullptr), E_FAIL); IFROOM(pSegment = new (std::nothrow) Segment); pSegment->pData = pData; m_cbSegments = (m_cbSegments + 3) & ~3; pSegment->Offset = m_cbSegments; pSegment->cbSize = cbSize; pSegment->bOwner = false; pSegment->pNext = nullptr; m_cbSegments += pSegment->cbSize; *m_ppSegment = pSegment; m_ppSegment = &pSegment->pNext; if (pOffset != nullptr) { *pOffset = pSegment->Offset; } return S_OK; } HRESULT SimpleSerializer::ReserveBlock(void **ppData, unsigned cbSize, unsigned *pOffset) { HRESULT hr = S_OK; Segment *pSegment = nullptr; void *pClonedData = nullptr; IFCOOM(pSegment = new (std::nothrow) Segment); pSegment->pData = nullptr; IFCOOM(pClonedData = new (std::nothrow) char[cbSize]); pSegment->pData = pClonedData; m_cbSegments = (m_cbSegments + 3) & ~3; pSegment->Offset = m_cbSegments; pSegment->cbSize = cbSize; pSegment->bOwner = true; pSegment->pNext = nullptr; m_cbSegments += pSegment->cbSize; *m_ppSegment = pSegment; m_ppSegment = &pSegment->pNext; *ppData = pClonedData; if (pOffset) { memcpy(pOffset, &pSegment->Offset, sizeof(pSegment->Offset)); } Cleanup: if (FAILED(hr)) { delete[](char *) pClonedData; delete pSegment; } return hr; } HRESULT SimpleSerializer::Compact(char *pData, unsigned cbSize) { unsigned cb = GetSize(); IFRBOOL(cb <= cbSize, E_FAIL); DXASSERT_NOMSG(cb <= UINT32_MAX / 2); char *p = (char *)pData; cb = 0; for (Segment *pSegment = m_pSegment; pSegment; pSegment = pSegment->pNext) { unsigned cbAlign = ((cb + 3) & ~3) - cb; assert(p + cbAlign <= pData + cbSize); memset(p, 
0xab, cbAlign); p += cbAlign; cb += cbAlign; assert(p + pSegment->cbSize <= pData + cbSize); memcpy(p, pSegment->pData, pSegment->cbSize); p += pSegment->cbSize; cb += pSegment->cbSize; } // Trailing zeros assert(p + cbSize - cb <= pData + cbSize); memset(p, 0xab, cbSize - cb); return S_OK; } unsigned SimpleSerializer::GetSize() { // Round up to 4==sizeof(unsigned). return ((m_cbSegments + 3) >> 2) * 4; } template <typename T_ROOT_SIGNATURE_DESC, typename T_ROOT_PARAMETER, typename T_ROOT_DESCRIPTOR_INTERNAL, typename T_DESCRIPTOR_RANGE_INTERNAL> void SerializeRootSignatureTemplate(const T_ROOT_SIGNATURE_DESC *pRootSignature, DxilRootSignatureVersion DescVersion, IDxcBlob **ppBlob, DiagnosticPrinter &DiagPrinter, bool bAllowReservedRegisterSpace) { DxilContainerRootSignatureDesc RS; uint32_t Offset; SimpleSerializer Serializer; IFT(Serializer.AddBlock(&RS, sizeof(RS), &Offset)); IFTBOOL(Offset == 0, E_FAIL); const T_ROOT_SIGNATURE_DESC *pRS = pRootSignature; RS.Version = (uint32_t)DescVersion; RS.Flags = (uint32_t)pRS->Flags; RS.NumParameters = pRS->NumParameters; RS.NumStaticSamplers = pRS->NumStaticSamplers; DxilContainerRootParameter *pRP; IFT(Serializer.ReserveBlock( (void **)&pRP, sizeof(DxilContainerRootParameter) * RS.NumParameters, &RS.RootParametersOffset)); for (uint32_t iRP = 0; iRP < RS.NumParameters; iRP++) { const T_ROOT_PARAMETER *pInRP = &pRS->pParameters[iRP]; DxilContainerRootParameter *pOutRP = &pRP[iRP]; pOutRP->ParameterType = (uint32_t)pInRP->ParameterType; pOutRP->ShaderVisibility = (uint32_t)pInRP->ShaderVisibility; switch (pInRP->ParameterType) { case DxilRootParameterType::DescriptorTable: { DxilContainerRootDescriptorTable *p1; IFT(Serializer.ReserveBlock((void **)&p1, sizeof(DxilContainerRootDescriptorTable), &pOutRP->PayloadOffset)); p1->NumDescriptorRanges = pInRP->DescriptorTable.NumDescriptorRanges; T_DESCRIPTOR_RANGE_INTERNAL *p2; IFT(Serializer.ReserveBlock((void **)&p2, sizeof(T_DESCRIPTOR_RANGE_INTERNAL) * p1->NumDescriptorRanges, &p1->DescriptorRangesOffset)); for (uint32_t i = 0; i < p1->NumDescriptorRanges; i++) { p2[i].RangeType = (uint32_t)pInRP->DescriptorTable.pDescriptorRanges[i].RangeType; p2[i].NumDescriptors = pInRP->DescriptorTable.pDescriptorRanges[i].NumDescriptors; p2[i].BaseShaderRegister = pInRP->DescriptorTable.pDescriptorRanges[i].BaseShaderRegister; p2[i].RegisterSpace = pInRP->DescriptorTable.pDescriptorRanges[i].RegisterSpace; p2[i].OffsetInDescriptorsFromTableStart = pInRP->DescriptorTable.pDescriptorRanges[i] .OffsetInDescriptorsFromTableStart; DxilDescriptorRangeFlags Flags = GetFlags(pInRP->DescriptorTable.pDescriptorRanges[i]); SetFlags(p2[i], Flags); } break; } case DxilRootParameterType::Constants32Bit: { DxilRootConstants *p; IFT(Serializer.ReserveBlock((void **)&p, sizeof(DxilRootConstants), &pOutRP->PayloadOffset)); p->Num32BitValues = pInRP->Constants.Num32BitValues; p->ShaderRegister = pInRP->Constants.ShaderRegister; p->RegisterSpace = pInRP->Constants.RegisterSpace; break; } case DxilRootParameterType::CBV: case DxilRootParameterType::SRV: case DxilRootParameterType::UAV: { T_ROOT_DESCRIPTOR_INTERNAL *p; IFT(Serializer.ReserveBlock((void **)&p, sizeof(T_ROOT_DESCRIPTOR_INTERNAL), &pOutRP->PayloadOffset)); p->ShaderRegister = pInRP->Descriptor.ShaderRegister; p->RegisterSpace = pInRP->Descriptor.RegisterSpace; DxilRootDescriptorFlags Flags = GetFlags(pInRP->Descriptor); SetFlags(*p, Flags); break; } default: EAT(DiagPrinter << "D3DSerializeRootSignature: unknown root parameter type (" << (uint32_t)pInRP->ParameterType 
<< ")\n"); } } DxilStaticSamplerDesc *pSS; unsigned StaticSamplerSize = sizeof(DxilStaticSamplerDesc) * RS.NumStaticSamplers; IFT(Serializer.ReserveBlock((void **)&pSS, StaticSamplerSize, &RS.StaticSamplersOffset)); if (StaticSamplerSize > 0) memcpy(pSS, pRS->pStaticSamplers, StaticSamplerSize); // Create the result blob. CDxcMallocHeapPtr<char> bytes(DxcGetThreadMallocNoRef()); CComPtr<IDxcBlob> pBlob; unsigned cb = Serializer.GetSize(); DXASSERT_NOMSG((cb & 0x3) == 0); IFTBOOL(bytes.Allocate(cb), E_OUTOFMEMORY); IFT(Serializer.Compact(bytes.m_pData, cb)); IFT(DxcCreateBlobOnMalloc(bytes.m_pData, bytes.GetMallocNoRef(), cb, ppBlob)); bytes.Detach(); // Ownership transfered to ppBlob. } void SerializeRootSignature( const DxilVersionedRootSignatureDesc *pRootSignature, IDxcBlob **ppBlob, IDxcBlobEncoding **ppErrorBlob, bool bAllowReservedRegisterSpace) { DXASSERT_NOMSG(pRootSignature != nullptr); DXASSERT_NOMSG(ppBlob != nullptr); DXASSERT_NOMSG(ppErrorBlob != nullptr); *ppBlob = nullptr; *ppErrorBlob = nullptr; // TODO: change SerializeRootSignature to take raw_ostream& string DiagString; raw_string_ostream DiagStream(DiagString); DiagnosticPrinterRawOStream DiagPrinter(DiagStream); // Verify root signature. if (!VerifyRootSignature(pRootSignature, DiagStream, bAllowReservedRegisterSpace)) { DiagStream.flush(); DxcCreateBlobWithEncodingOnHeapCopy(DiagString.c_str(), DiagString.size(), CP_UTF8, ppErrorBlob); return; } try { switch (pRootSignature->Version) { case DxilRootSignatureVersion::Version_1_0: SerializeRootSignatureTemplate<DxilRootSignatureDesc, DxilRootParameter, DxilRootDescriptor, DxilContainerDescriptorRange>( &pRootSignature->Desc_1_0, DxilRootSignatureVersion::Version_1_0, ppBlob, DiagPrinter, bAllowReservedRegisterSpace); break; case DxilRootSignatureVersion::Version_1_1: default: DXASSERT(pRootSignature->Version == DxilRootSignatureVersion::Version_1_1, "else VerifyRootSignature didn't validate"); SerializeRootSignatureTemplate<DxilRootSignatureDesc1, DxilRootParameter1, DxilContainerRootDescriptor1, DxilContainerDescriptorRange1>( &pRootSignature->Desc_1_1, DxilRootSignatureVersion::Version_1_1, ppBlob, DiagPrinter, bAllowReservedRegisterSpace); break; } } catch (...) { DiagStream.flush(); DxcCreateBlobWithEncodingOnHeapCopy(DiagString.c_str(), DiagString.size(), CP_UTF8, ppErrorBlob); } } template <typename T_ROOT_SIGNATURE_DESC, typename T_ROOT_PARAMETER, typename T_ROOT_DESCRIPTOR, typename T_ROOT_DESCRIPTOR_INTERNAL, typename T_DESCRIPTOR_RANGE, typename T_DESCRIPTOR_RANGE_INTERNAL> void DeserializeRootSignatureTemplate( const void *pSrcData, uint32_t SrcDataSizeInBytes, DxilRootSignatureVersion DescVersion, T_ROOT_SIGNATURE_DESC &RootSignatureDesc) { // Note that in case of failure, outside code must deallocate memory. T_ROOT_SIGNATURE_DESC *pRootSignature = &RootSignatureDesc; const char *pData = (const char *)pSrcData; const char *pMaxPtr = pData + SrcDataSizeInBytes; UNREFERENCED_PARAMETER(DescVersion); DXASSERT_NOMSG(((const uint32_t *)pData)[0] == (uint32_t)DescVersion); // Root signature. IFTBOOL(pData + sizeof(DxilContainerRootSignatureDesc) <= pMaxPtr, E_FAIL); const DxilContainerRootSignatureDesc *pRS = (const DxilContainerRootSignatureDesc *)pData; pRootSignature->Flags = (DxilRootSignatureFlags)pRS->Flags; pRootSignature->NumParameters = pRS->NumParameters; pRootSignature->NumStaticSamplers = pRS->NumStaticSamplers; // Intialize all pointers early so that clean up works properly. 
pRootSignature->pParameters = nullptr; pRootSignature->pStaticSamplers = nullptr; size_t s = sizeof(DxilContainerRootParameter) * pRS->NumParameters; const DxilContainerRootParameter *pInRTS = (const DxilContainerRootParameter *)(pData + pRS->RootParametersOffset); IFTBOOL(((const char *)pInRTS) + s <= pMaxPtr, E_FAIL); if (pRootSignature->NumParameters) { pRootSignature->pParameters = new T_ROOT_PARAMETER[pRootSignature->NumParameters]; memset((void *)pRootSignature->pParameters, 0, pRootSignature->NumParameters * sizeof(T_ROOT_PARAMETER)); } for (unsigned iRP = 0; iRP < pRootSignature->NumParameters; iRP++) { DxilRootParameterType ParameterType = (DxilRootParameterType)pInRTS[iRP].ParameterType; T_ROOT_PARAMETER *pOutRTS = (T_ROOT_PARAMETER *)&pRootSignature->pParameters[iRP]; pOutRTS->ParameterType = ParameterType; pOutRTS->ShaderVisibility = (DxilShaderVisibility)pInRTS[iRP].ShaderVisibility; switch (ParameterType) { case DxilRootParameterType::DescriptorTable: { const DxilContainerRootDescriptorTable *p1 = (const DxilContainerRootDescriptorTable *)(pData + pInRTS[iRP].PayloadOffset); IFTBOOL((const char *)p1 + sizeof(DxilContainerRootDescriptorTable) <= pMaxPtr, E_FAIL); pOutRTS->DescriptorTable.NumDescriptorRanges = p1->NumDescriptorRanges; pOutRTS->DescriptorTable.pDescriptorRanges = nullptr; const T_DESCRIPTOR_RANGE_INTERNAL *p2 = (const T_DESCRIPTOR_RANGE_INTERNAL *)(pData + p1->DescriptorRangesOffset); IFTBOOL((const char *)p2 + sizeof(T_DESCRIPTOR_RANGE_INTERNAL) <= pMaxPtr, E_FAIL); if (p1->NumDescriptorRanges) { pOutRTS->DescriptorTable.pDescriptorRanges = new T_DESCRIPTOR_RANGE[p1->NumDescriptorRanges]; } for (unsigned i = 0; i < p1->NumDescriptorRanges; i++) { T_DESCRIPTOR_RANGE *p3 = (T_DESCRIPTOR_RANGE *)&pOutRTS->DescriptorTable .pDescriptorRanges[i]; p3->RangeType = (DxilDescriptorRangeType)p2[i].RangeType; p3->NumDescriptors = p2[i].NumDescriptors; p3->BaseShaderRegister = p2[i].BaseShaderRegister; p3->RegisterSpace = p2[i].RegisterSpace; p3->OffsetInDescriptorsFromTableStart = p2[i].OffsetInDescriptorsFromTableStart; DxilDescriptorRangeFlags Flags = GetFlags(p2[i]); SetFlags(*p3, Flags); } break; } case DxilRootParameterType::Constants32Bit: { const DxilRootConstants *p = (const DxilRootConstants *)(pData + pInRTS[iRP].PayloadOffset); IFTBOOL((const char *)p + sizeof(DxilRootConstants) <= pMaxPtr, E_FAIL); pOutRTS->Constants.Num32BitValues = p->Num32BitValues; pOutRTS->Constants.ShaderRegister = p->ShaderRegister; pOutRTS->Constants.RegisterSpace = p->RegisterSpace; break; } case DxilRootParameterType::CBV: case DxilRootParameterType::SRV: case DxilRootParameterType::UAV: { const T_ROOT_DESCRIPTOR *p = (const T_ROOT_DESCRIPTOR *)(pData + pInRTS[iRP].PayloadOffset); IFTBOOL((const char *)p + sizeof(T_ROOT_DESCRIPTOR) <= pMaxPtr, E_FAIL); pOutRTS->Descriptor.ShaderRegister = p->ShaderRegister; pOutRTS->Descriptor.RegisterSpace = p->RegisterSpace; DxilRootDescriptorFlags Flags = GetFlags(*p); SetFlags(pOutRTS->Descriptor, Flags); break; } default: IFT(E_FAIL); } } s = sizeof(DxilStaticSamplerDesc) * pRS->NumStaticSamplers; const DxilStaticSamplerDesc *pInSS = (const DxilStaticSamplerDesc *)(pData + pRS->StaticSamplersOffset); IFTBOOL(((const char *)pInSS) + s <= pMaxPtr, E_FAIL); if (pRootSignature->NumStaticSamplers) { pRootSignature->pStaticSamplers = new DxilStaticSamplerDesc[pRootSignature->NumStaticSamplers]; memcpy((void *)pRootSignature->pStaticSamplers, pInSS, s); } } void DeserializeRootSignature( const void *pSrcData, uint32_t SrcDataSizeInBytes, const 
DxilVersionedRootSignatureDesc **ppRootSignature) { DxilVersionedRootSignatureDesc *pRootSignature = nullptr; IFTBOOL(pSrcData != nullptr && SrcDataSizeInBytes != 0 && ppRootSignature != nullptr, E_INVALIDARG); IFTBOOL(*ppRootSignature == nullptr, E_INVALIDARG); const char *pData = (const char *)pSrcData; IFTBOOL(pData + sizeof(uint32_t) < pData + SrcDataSizeInBytes, E_FAIL); const DxilRootSignatureVersion Version = (const DxilRootSignatureVersion)((const uint32_t *)pData)[0]; pRootSignature = new DxilVersionedRootSignatureDesc(); try { switch (Version) { case DxilRootSignatureVersion::Version_1_0: pRootSignature->Version = DxilRootSignatureVersion::Version_1_0; DeserializeRootSignatureTemplate<DxilRootSignatureDesc, DxilRootParameter, DxilRootDescriptor, DxilRootDescriptor, DxilDescriptorRange, DxilContainerDescriptorRange>( pSrcData, SrcDataSizeInBytes, DxilRootSignatureVersion::Version_1_0, pRootSignature->Desc_1_0); break; case DxilRootSignatureVersion::Version_1_1: pRootSignature->Version = DxilRootSignatureVersion::Version_1_1; DeserializeRootSignatureTemplate< DxilRootSignatureDesc1, DxilRootParameter1, DxilRootDescriptor1, DxilContainerRootDescriptor1, DxilDescriptorRange1, DxilContainerDescriptorRange1>(pSrcData, SrcDataSizeInBytes, DxilRootSignatureVersion::Version_1_1, pRootSignature->Desc_1_1); break; default: IFT(E_FAIL); break; } } catch (...) { DeleteRootSignature(pRootSignature); throw; } *ppRootSignature = pRootSignature; } } // namespace hlsl
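// A minimal usage sketch for the entry points above, assuming the DXC root
// signature and support headers are available to the caller; the helper name
// and the error handling below are illustrative. SerializeRootSignature
// reports verification failures through ppErrorBlob, while
// DeserializeRootSignature throws on malformed input (IFT-style).
#include <cstdio>

static bool RoundTripRootSignature(
    const hlsl::DxilVersionedRootSignatureDesc *pDesc) {
  CComPtr<IDxcBlob> pBlob;
  CComPtr<IDxcBlobEncoding> pErrors;
  hlsl::SerializeRootSignature(pDesc, &pBlob, &pErrors,
                               /*bAllowReservedRegisterSpace*/ false);
  if (pBlob == nullptr) {
    if (pErrors != nullptr) // UTF-8 diagnostic text from the verifier.
      fprintf(stderr, "%s\n", (const char *)pErrors->GetBufferPointer());
    return false;
  }
  // Deserialize back into a heap-allocated versioned descriptor.
  const hlsl::DxilVersionedRootSignatureDesc *pRoundTripped = nullptr;
  hlsl::DeserializeRootSignature(pBlob->GetBufferPointer(),
                                 (uint32_t)pBlob->GetBufferSize(),
                                 &pRoundTripped);
  bool SameVersion = pRoundTripped->Version == pDesc->Version;
  hlsl::DeleteRootSignature(pRoundTripped); // caller owns the deserialized copy
  return SameVersion;
}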
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxilRootSignature/DxilRootSignatureHelper.h
///////////////////////////////////////////////////////////////////////////////
//                                                                           //
// DxilRootSignatureHelper.h                                                 //
// Copyright (C) Microsoft Corporation. All rights reserved.                 //
// This file is distributed under the University of Illinois Open Source     //
// License. See LICENSE.TXT for details.                                     //
//                                                                           //
// Provides support for manipulating root signature structures.              //
//                                                                           //
///////////////////////////////////////////////////////////////////////////////

#pragma once

#include "dxc/DxilRootSignature/DxilRootSignature.h"
#include "dxc/Support/Global.h"
#include "dxc/Support/WinIncludes.h"

namespace hlsl {

DEFINE_ENUM_FLAG_OPERATORS(DxilRootSignatureFlags)
DEFINE_ENUM_FLAG_OPERATORS(DxilRootDescriptorFlags)
DEFINE_ENUM_FLAG_OPERATORS(DxilDescriptorRangeType)
DEFINE_ENUM_FLAG_OPERATORS(DxilDescriptorRangeFlags)

// Execute (error) and throw.
#define EAT(x)                                                                 \
  {                                                                            \
    (x);                                                                       \
    throw ::hlsl::Exception(E_FAIL);                                           \
  }

namespace root_sig_helper {
// GetFlags/SetFlags overloads.
DxilRootDescriptorFlags GetFlags(const DxilRootDescriptor &);
void SetFlags(DxilRootDescriptor &, DxilRootDescriptorFlags);
DxilRootDescriptorFlags GetFlags(const DxilRootDescriptor1 &D);
void SetFlags(DxilRootDescriptor1 &D, DxilRootDescriptorFlags Flags);
void SetFlags(DxilContainerRootDescriptor1 &D, DxilRootDescriptorFlags Flags);
DxilDescriptorRangeFlags GetFlags(const DxilDescriptorRange &D);
void SetFlags(DxilDescriptorRange &, DxilDescriptorRangeFlags);
DxilDescriptorRangeFlags GetFlags(const DxilContainerDescriptorRange &D);
void SetFlags(DxilContainerDescriptorRange &, DxilDescriptorRangeFlags);
DxilDescriptorRangeFlags GetFlags(const DxilDescriptorRange1 &D);
void SetFlags(DxilDescriptorRange1 &D, DxilDescriptorRangeFlags Flags);
DxilDescriptorRangeFlags GetFlags(const DxilContainerDescriptorRange1 &D);
void SetFlags(DxilContainerDescriptorRange1 &D, DxilDescriptorRangeFlags Flags);
} // namespace root_sig_helper
} // namespace hlsl
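// A minimal usage sketch, assuming only the declarations above: the
// GetFlags/SetFlags overloads are how callers move flag bits between an API
// range struct and its serialized container form without touching the other
// fields, mirroring how the root signature (de)serializer pairs these calls.
// The helper name below is illustrative.
template <typename SerializedRangeT, typename ApiRangeT>
void CopyRangeFlags(const SerializedRangeT &Src, ApiRangeT &Dst) {
  // Read flags from the serialized form, then write them onto the API form.
  hlsl::root_sig_helper::SetFlags(Dst, hlsl::root_sig_helper::GetFlags(Src));
}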
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Linker/LinkModules.cpp
//===- lib/Linker/LinkModules.cpp - Module Linker Implementation ----------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the LLVM module linker. // //===----------------------------------------------------------------------===// #include "llvm/Linker/Linker.h" #include "llvm-c/Linker.h" #include "llvm/ADT/Hashing.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/Statistic.h" #include "llvm/ADT/Triple.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/DiagnosticInfo.h" #include "llvm/IR/DiagnosticPrinter.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/IR/TypeFinder.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/Cloning.h" #include <cctype> #include <tuple> using namespace llvm; //===----------------------------------------------------------------------===// // TypeMap implementation. //===----------------------------------------------------------------------===// namespace { class TypeMapTy : public ValueMapTypeRemapper { /// This is a mapping from a source type to a destination type to use. DenseMap<Type*, Type*> MappedTypes; /// When checking to see if two subgraphs are isomorphic, we speculatively /// add types to MappedTypes, but keep track of them here in case we need to /// roll back. SmallVector<Type*, 16> SpeculativeTypes; SmallVector<StructType*, 16> SpeculativeDstOpaqueTypes; /// This is a list of non-opaque structs in the source module that are mapped /// to an opaque struct in the destination module. SmallVector<StructType*, 16> SrcDefinitionsToResolve; /// This is the set of opaque types in the destination modules who are /// getting a body from the source module. SmallPtrSet<StructType*, 16> DstResolvedOpaqueTypes; public: TypeMapTy(Linker::IdentifiedStructTypeSet &DstStructTypesSet) : DstStructTypesSet(DstStructTypesSet) {} Linker::IdentifiedStructTypeSet &DstStructTypesSet; /// Indicate that the specified type in the destination module is conceptually /// equivalent to the specified type in the source module. void addTypeMapping(Type *DstTy, Type *SrcTy); /// Produce a body for an opaque type in the dest module from a type /// definition in the source module. void linkDefinedTypeBodies(); /// Return the mapped type to use for the specified input type from the /// source module. Type *get(Type *SrcTy); Type *get(Type *SrcTy, SmallPtrSet<StructType *, 8> &Visited); void finishType(StructType *DTy, StructType *STy, ArrayRef<Type *> ETypes); FunctionType *get(FunctionType *T) { return cast<FunctionType>(get((Type *)T)); } /// Dump out the type map for debugging purposes. void dump() const { for (auto &Pair : MappedTypes) { dbgs() << "TypeMap: "; Pair.first->print(dbgs()); dbgs() << " => "; Pair.second->print(dbgs()); dbgs() << '\n'; } } private: Type *remapType(Type *SrcTy) override { return get(SrcTy); } bool areTypesIsomorphic(Type *DstTy, Type *SrcTy); }; } void TypeMapTy::addTypeMapping(Type *DstTy, Type *SrcTy) { assert(SpeculativeTypes.empty()); assert(SpeculativeDstOpaqueTypes.empty()); // Check to see if these types are recursively isomorphic and establish a // mapping between them if so. 
if (!areTypesIsomorphic(DstTy, SrcTy)) { // Oops, they aren't isomorphic. Just discard this request by rolling out // any speculative mappings we've established. for (Type *Ty : SpeculativeTypes) MappedTypes.erase(Ty); SrcDefinitionsToResolve.resize(SrcDefinitionsToResolve.size() - SpeculativeDstOpaqueTypes.size()); for (StructType *Ty : SpeculativeDstOpaqueTypes) DstResolvedOpaqueTypes.erase(Ty); } else { for (Type *Ty : SpeculativeTypes) if (auto *STy = dyn_cast<StructType>(Ty)) if (STy->hasName()) STy->setName(""); } SpeculativeTypes.clear(); SpeculativeDstOpaqueTypes.clear(); } /// Recursively walk this pair of types, returning true if they are isomorphic, /// false if they are not. bool TypeMapTy::areTypesIsomorphic(Type *DstTy, Type *SrcTy) { // Two types with differing kinds are clearly not isomorphic. if (DstTy->getTypeID() != SrcTy->getTypeID()) return false; // If we have an entry in the MappedTypes table, then we have our answer. Type *&Entry = MappedTypes[SrcTy]; if (Entry) return Entry == DstTy; // Two identical types are clearly isomorphic. Remember this // non-speculatively. if (DstTy == SrcTy) { Entry = DstTy; return true; } // Okay, we have two types with identical kinds that we haven't seen before. // If this is an opaque struct type, special case it. if (StructType *SSTy = dyn_cast<StructType>(SrcTy)) { // Mapping an opaque type to any struct, just keep the dest struct. if (SSTy->isOpaque()) { Entry = DstTy; SpeculativeTypes.push_back(SrcTy); return true; } // Mapping a non-opaque source type to an opaque dest. If this is the first // type that we're mapping onto this destination type then we succeed. Keep // the dest, but fill it in later. If this is the second (different) type // that we're trying to map onto the same opaque type then we fail. if (cast<StructType>(DstTy)->isOpaque()) { // We can only map one source type onto the opaque destination type. if (!DstResolvedOpaqueTypes.insert(cast<StructType>(DstTy)).second) return false; SrcDefinitionsToResolve.push_back(SSTy); SpeculativeTypes.push_back(SrcTy); SpeculativeDstOpaqueTypes.push_back(cast<StructType>(DstTy)); Entry = DstTy; return true; } } // If the number of subtypes disagree between the two types, then we fail. if (SrcTy->getNumContainedTypes() != DstTy->getNumContainedTypes()) return false; // Fail if any of the extra properties (e.g. array size) of the type disagree. if (isa<IntegerType>(DstTy)) return false; // bitwidth disagrees. if (PointerType *PT = dyn_cast<PointerType>(DstTy)) { if (PT->getAddressSpace() != cast<PointerType>(SrcTy)->getAddressSpace()) return false; } else if (FunctionType *FT = dyn_cast<FunctionType>(DstTy)) { if (FT->isVarArg() != cast<FunctionType>(SrcTy)->isVarArg()) return false; } else if (StructType *DSTy = dyn_cast<StructType>(DstTy)) { StructType *SSTy = cast<StructType>(SrcTy); if (DSTy->isLiteral() != SSTy->isLiteral() || DSTy->isPacked() != SSTy->isPacked()) return false; } else if (ArrayType *DATy = dyn_cast<ArrayType>(DstTy)) { if (DATy->getNumElements() != cast<ArrayType>(SrcTy)->getNumElements()) return false; } else if (VectorType *DVTy = dyn_cast<VectorType>(DstTy)) { if (DVTy->getNumElements() != cast<VectorType>(SrcTy)->getNumElements()) return false; } // Otherwise, we speculate that these two types will line up and recursively // check the subelements. 
Entry = DstTy; SpeculativeTypes.push_back(SrcTy); for (unsigned I = 0, E = SrcTy->getNumContainedTypes(); I != E; ++I) if (!areTypesIsomorphic(DstTy->getContainedType(I), SrcTy->getContainedType(I))) return false; // If everything seems to have lined up, then everything is great. return true; } void TypeMapTy::linkDefinedTypeBodies() { SmallVector<Type*, 16> Elements; for (StructType *SrcSTy : SrcDefinitionsToResolve) { StructType *DstSTy = cast<StructType>(MappedTypes[SrcSTy]); assert(DstSTy->isOpaque()); // Map the body of the source type over to a new body for the dest type. Elements.resize(SrcSTy->getNumElements()); for (unsigned I = 0, E = Elements.size(); I != E; ++I) Elements[I] = get(SrcSTy->getElementType(I)); DstSTy->setBody(Elements, SrcSTy->isPacked()); DstStructTypesSet.switchToNonOpaque(DstSTy); } SrcDefinitionsToResolve.clear(); DstResolvedOpaqueTypes.clear(); } void TypeMapTy::finishType(StructType *DTy, StructType *STy, ArrayRef<Type *> ETypes) { DTy->setBody(ETypes, STy->isPacked()); // Steal STy's name. if (STy->hasName()) { SmallString<16> TmpName = STy->getName(); STy->setName(""); DTy->setName(TmpName); } DstStructTypesSet.addNonOpaque(DTy); } Type *TypeMapTy::get(Type *Ty) { SmallPtrSet<StructType *, 8> Visited; return get(Ty, Visited); } Type *TypeMapTy::get(Type *Ty, SmallPtrSet<StructType *, 8> &Visited) { // If we already have an entry for this type, return it. Type **Entry = &MappedTypes[Ty]; if (*Entry) return *Entry; // These are types that LLVM itself will unique. bool IsUniqued = !isa<StructType>(Ty) || cast<StructType>(Ty)->isLiteral(); #ifndef NDEBUG if (!IsUniqued) { for (auto &Pair : MappedTypes) { assert(!(Pair.first != Ty && Pair.second == Ty) && "mapping to a source type"); } } #endif if (!IsUniqued && !Visited.insert(cast<StructType>(Ty)).second) { StructType *DTy = StructType::create(Ty->getContext()); return *Entry = DTy; } // If this is not a recursive type, then just map all of the elements and // then rebuild the type from inside out. SmallVector<Type *, 4> ElementTypes; // If there are no element types to map, then the type is itself. This is // true for the anonymous {} struct, things like 'float', integers, etc. if (Ty->getNumContainedTypes() == 0 && IsUniqued) return *Entry = Ty; // Remap all of the elements, keeping track of whether any of them change. bool AnyChange = false; ElementTypes.resize(Ty->getNumContainedTypes()); for (unsigned I = 0, E = Ty->getNumContainedTypes(); I != E; ++I) { ElementTypes[I] = get(Ty->getContainedType(I), Visited); AnyChange |= ElementTypes[I] != Ty->getContainedType(I); } // If we found our type while recursively processing stuff, just use it. Entry = &MappedTypes[Ty]; if (*Entry) { if (auto *DTy = dyn_cast<StructType>(*Entry)) { if (DTy->isOpaque()) { auto *STy = cast<StructType>(Ty); finishType(DTy, STy, ElementTypes); } } return *Entry; } // If all of the element types mapped directly over and the type is not // a named struct, then the type is usable as-is. if (!AnyChange && IsUniqued) return *Entry = Ty; // Otherwise, rebuild a modified type.
switch (Ty->getTypeID()) { default: llvm_unreachable("unknown derived type to remap"); case Type::ArrayTyID: return *Entry = ArrayType::get(ElementTypes[0], cast<ArrayType>(Ty)->getNumElements()); case Type::VectorTyID: return *Entry = VectorType::get(ElementTypes[0], cast<VectorType>(Ty)->getNumElements()); case Type::PointerTyID: return *Entry = PointerType::get(ElementTypes[0], cast<PointerType>(Ty)->getAddressSpace()); case Type::FunctionTyID: return *Entry = FunctionType::get(ElementTypes[0], makeArrayRef(ElementTypes).slice(1), cast<FunctionType>(Ty)->isVarArg()); case Type::StructTyID: { auto *STy = cast<StructType>(Ty); bool IsPacked = STy->isPacked(); if (IsUniqued) return *Entry = StructType::get(Ty->getContext(), ElementTypes, IsPacked); // If the type is opaque, we can just use it directly. if (STy->isOpaque()) { DstStructTypesSet.addOpaque(STy); return *Entry = Ty; } if (StructType *OldT = DstStructTypesSet.findNonOpaque(ElementTypes, IsPacked)) { STy->setName(""); return *Entry = OldT; } if (!AnyChange) { DstStructTypesSet.addNonOpaque(STy); return *Entry = Ty; } StructType *DTy = StructType::create(Ty->getContext()); finishType(DTy, STy, ElementTypes); return *Entry = DTy; } } } //===----------------------------------------------------------------------===// // ModuleLinker implementation. //===----------------------------------------------------------------------===// namespace { class ModuleLinker; /// Creates prototypes for functions that are lazily linked on the fly. This /// speeds up linking for modules with many lazily linked functions of which /// few get used. class ValueMaterializerTy : public ValueMaterializer { TypeMapTy &TypeMap; Module *DstM; std::vector<GlobalValue *> &LazilyLinkGlobalValues; public: ValueMaterializerTy(TypeMapTy &TypeMap, Module *DstM, std::vector<GlobalValue *> &LazilyLinkGlobalValues) : ValueMaterializer(), TypeMap(TypeMap), DstM(DstM), LazilyLinkGlobalValues(LazilyLinkGlobalValues) {} Value *materializeValueFor(Value *V) override; }; class LinkDiagnosticInfo : public DiagnosticInfo { const Twine &Msg; public: LinkDiagnosticInfo(DiagnosticSeverity Severity, const Twine &Msg); void print(DiagnosticPrinter &DP) const override; }; LinkDiagnosticInfo::LinkDiagnosticInfo(DiagnosticSeverity Severity, const Twine &Msg) : DiagnosticInfo(DK_Linker, Severity), Msg(Msg) {} void LinkDiagnosticInfo::print(DiagnosticPrinter &DP) const { DP << Msg; } /// This is an implementation class for the LinkModules function, which is the /// entrypoint for this file. class ModuleLinker { Module *DstM, *SrcM; TypeMapTy TypeMap; ValueMaterializerTy ValMaterializer; /// Mapping of values from what they used to be in Src, to what they are now /// in DstM. ValueToValueMapTy is a ValueMap, which involves some overhead /// due to the use of Value handles which the Linker doesn't actually need, /// but this allows us to reuse the ValueMapper code. ValueToValueMapTy ValueMap; struct AppendingVarInfo { GlobalVariable *NewGV; // New aggregate global in dest module. const Constant *DstInit; // Old initializer from dest module. const Constant *SrcInit; // Old initializer from src module. }; std::vector<AppendingVarInfo> AppendingVars; // Set of items not to link in from source. SmallPtrSet<const Value *, 16> DoNotLinkFromSource; // Vector of GlobalValues to lazily link in. std::vector<GlobalValue *> LazilyLinkGlobalValues; /// Functions that have replaced other functions.
SmallPtrSet<const Function *, 16> OverridingFunctions; DiagnosticHandlerFunction DiagnosticHandler; /// For symbol clashes, prefer those from Src. bool OverrideFromSrc; public: ModuleLinker(Module *dstM, Linker::IdentifiedStructTypeSet &Set, Module *srcM, DiagnosticHandlerFunction DiagnosticHandler, bool OverrideFromSrc) : DstM(dstM), SrcM(srcM), TypeMap(Set), ValMaterializer(TypeMap, DstM, LazilyLinkGlobalValues), DiagnosticHandler(DiagnosticHandler), OverrideFromSrc(OverrideFromSrc) { } bool run(); private: bool shouldLinkFromSource(bool &LinkFromSrc, const GlobalValue &Dest, const GlobalValue &Src); /// Helper method for setting a message and returning an error code. bool emitError(const Twine &Message) { DiagnosticHandler(LinkDiagnosticInfo(DS_Error, Message)); return true; } void emitWarning(const Twine &Message) { DiagnosticHandler(LinkDiagnosticInfo(DS_Warning, Message)); } bool getComdatLeader(Module *M, StringRef ComdatName, const GlobalVariable *&GVar); bool computeResultingSelectionKind(StringRef ComdatName, Comdat::SelectionKind Src, Comdat::SelectionKind Dst, Comdat::SelectionKind &Result, bool &LinkFromSrc); std::map<const Comdat *, std::pair<Comdat::SelectionKind, bool>> ComdatsChosen; bool getComdatResult(const Comdat *SrcC, Comdat::SelectionKind &SK, bool &LinkFromSrc); /// Given a global in the source module, return the global in the /// destination module that is being linked to, if any. GlobalValue *getLinkedToGlobal(const GlobalValue *SrcGV) { // If the source has no name it can't link. If it has local linkage, // there is no name match-up going on. if (!SrcGV->hasName() || SrcGV->hasLocalLinkage()) return nullptr; // Otherwise see if we have a match in the destination module's symtab. GlobalValue *DGV = DstM->getNamedValue(SrcGV->getName()); if (!DGV) return nullptr; // If we found a global with the same name in the dest module, but it has // internal linkage, we are really not doing any linkage here. if (DGV->hasLocalLinkage()) return nullptr; // Otherwise, we do in fact link to the destination global. return DGV; } void computeTypeMapping(); void upgradeMismatchedGlobalArray(StringRef Name); void upgradeMismatchedGlobals(); bool linkAppendingVarProto(GlobalVariable *DstGV, const GlobalVariable *SrcGV); bool linkGlobalValueProto(GlobalValue *GV); bool linkModuleFlagsMetadata(); void linkAppendingVarInit(const AppendingVarInfo &AVI); void linkGlobalInit(GlobalVariable &Dst, GlobalVariable &Src); bool linkFunctionBody(Function &Dst, Function &Src); void linkAliasBody(GlobalAlias &Dst, GlobalAlias &Src); bool linkGlobalValueBody(GlobalValue &Src); void linkNamedMDNodes(); void stripReplacedSubprograms(); }; } /// The LLVM SymbolTable class autorenames globals that conflict in the symbol /// table. This is good for all clients except for us. Go through the trouble /// to force this back. static void forceRenaming(GlobalValue *GV, StringRef Name) { // If the global doesn't force its name or if it already has the right name, // there is nothing for us to do. if (GV->hasLocalLinkage() || GV->getName() == Name) return; Module *M = GV->getParent(); // If there is a conflict, rename the conflict. 
if (GlobalValue *ConflictGV = M->getNamedValue(Name)) { GV->takeName(ConflictGV); ConflictGV->setName(Name); // This will cause ConflictGV to get renamed assert(ConflictGV->getName() != Name && "forceRenaming didn't work"); } else { GV->setName(Name); // Force the name back } } /// copy additional attributes (those not needed to construct a GlobalValue) /// from the SrcGV to the DestGV. static void copyGVAttributes(GlobalValue *DestGV, const GlobalValue *SrcGV) { DestGV->copyAttributesFrom(SrcGV); forceRenaming(DestGV, SrcGV->getName()); } static bool isLessConstraining(GlobalValue::VisibilityTypes a, GlobalValue::VisibilityTypes b) { if (a == GlobalValue::HiddenVisibility) return false; if (b == GlobalValue::HiddenVisibility) return true; if (a == GlobalValue::ProtectedVisibility) return false; if (b == GlobalValue::ProtectedVisibility) return true; return false; } /// Loop through the global variables in the src module and merge them into the /// dest module. static GlobalVariable *copyGlobalVariableProto(TypeMapTy &TypeMap, Module &DstM, const GlobalVariable *SGVar) { // No linking to be performed or linking from the source: simply create an // identical version of the symbol over in the dest module... the // initializer will be filled in later by LinkGlobalInits. GlobalVariable *NewDGV = new GlobalVariable( DstM, TypeMap.get(SGVar->getType()->getElementType()), SGVar->isConstant(), SGVar->getLinkage(), /*init*/ nullptr, SGVar->getName(), /*insertbefore*/ nullptr, SGVar->getThreadLocalMode(), SGVar->getType()->getAddressSpace()); return NewDGV; } /// Link the function in the source module into the destination module if /// needed, setting up mapping information. static Function *copyFunctionProto(TypeMapTy &TypeMap, Module &DstM, const Function *SF) { // If there is no linkage to be performed or we are linking from the source, // bring SF over. return Function::Create(TypeMap.get(SF->getFunctionType()), SF->getLinkage(), SF->getName(), &DstM); } /// Set up prototypes for any aliases that come over from the source module. static GlobalAlias *copyGlobalAliasProto(TypeMapTy &TypeMap, Module &DstM, const GlobalAlias *SGA) { // If there is no linkage to be performed or we're linking from the source, // bring over SGA. auto *PTy = cast<PointerType>(TypeMap.get(SGA->getType())); return GlobalAlias::create(PTy, SGA->getLinkage(), SGA->getName(), &DstM); } static GlobalValue *copyGlobalValueProto(TypeMapTy &TypeMap, Module &DstM, const GlobalValue *SGV) { GlobalValue *NewGV; if (auto *SGVar = dyn_cast<GlobalVariable>(SGV)) NewGV = copyGlobalVariableProto(TypeMap, DstM, SGVar); else if (auto *SF = dyn_cast<Function>(SGV)) NewGV = copyFunctionProto(TypeMap, DstM, SF); else NewGV = copyGlobalAliasProto(TypeMap, DstM, cast<GlobalAlias>(SGV)); copyGVAttributes(NewGV, SGV); return NewGV; } Value *ValueMaterializerTy::materializeValueFor(Value *V) { auto *SGV = dyn_cast<GlobalValue>(V); if (!SGV) return nullptr; GlobalValue *DGV = copyGlobalValueProto(TypeMap, *DstM, SGV); if (Comdat *SC = SGV->getComdat()) { if (auto *DGO = dyn_cast<GlobalObject>(DGV)) { Comdat *DC = DstM->getOrInsertComdat(SC->getName()); DGO->setComdat(DC); } } LazilyLinkGlobalValues.push_back(SGV); return DGV; } bool ModuleLinker::getComdatLeader(Module *M, StringRef ComdatName, const GlobalVariable *&GVar) { const GlobalValue *GVal = M->getNamedValue(ComdatName); if (const auto *GA = dyn_cast_or_null<GlobalAlias>(GVal)) { GVal = GA->getBaseObject(); if (!GVal) // We cannot resolve the size of the aliasee yet. 
return emitError("Linking COMDATs named '" + ComdatName + "': COMDAT key involves incomputable alias size."); } GVar = dyn_cast_or_null<GlobalVariable>(GVal); if (!GVar) return emitError( "Linking COMDATs named '" + ComdatName + "': GlobalVariable required for data dependent selection!"); return false; } bool ModuleLinker::computeResultingSelectionKind(StringRef ComdatName, Comdat::SelectionKind Src, Comdat::SelectionKind Dst, Comdat::SelectionKind &Result, bool &LinkFromSrc) { // The ability to mix Comdat::SelectionKind::Any with // Comdat::SelectionKind::Largest is a behavior that comes from COFF. bool DstAnyOrLargest = Dst == Comdat::SelectionKind::Any || Dst == Comdat::SelectionKind::Largest; bool SrcAnyOrLargest = Src == Comdat::SelectionKind::Any || Src == Comdat::SelectionKind::Largest; if (DstAnyOrLargest && SrcAnyOrLargest) { if (Dst == Comdat::SelectionKind::Largest || Src == Comdat::SelectionKind::Largest) Result = Comdat::SelectionKind::Largest; else Result = Comdat::SelectionKind::Any; } else if (Src == Dst) { Result = Dst; } else { return emitError("Linking COMDATs named '" + ComdatName + "': invalid selection kinds!"); } switch (Result) { case Comdat::SelectionKind::Any: // Go with Dst. LinkFromSrc = false; break; case Comdat::SelectionKind::NoDuplicates: return emitError("Linking COMDATs named '" + ComdatName + "': noduplicates has been violated!"); case Comdat::SelectionKind::ExactMatch: case Comdat::SelectionKind::Largest: case Comdat::SelectionKind::SameSize: { const GlobalVariable *DstGV; const GlobalVariable *SrcGV; if (getComdatLeader(DstM, ComdatName, DstGV) || getComdatLeader(SrcM, ComdatName, SrcGV)) return true; const DataLayout &DstDL = DstM->getDataLayout(); const DataLayout &SrcDL = SrcM->getDataLayout(); uint64_t DstSize = DstDL.getTypeAllocSize(DstGV->getType()->getPointerElementType()); uint64_t SrcSize = SrcDL.getTypeAllocSize(SrcGV->getType()->getPointerElementType()); if (Result == Comdat::SelectionKind::ExactMatch) { if (SrcGV->getInitializer() != DstGV->getInitializer()) return emitError("Linking COMDATs named '" + ComdatName + "': ExactMatch violated!"); LinkFromSrc = false; } else if (Result == Comdat::SelectionKind::Largest) { LinkFromSrc = SrcSize > DstSize; } else if (Result == Comdat::SelectionKind::SameSize) { if (SrcSize != DstSize) return emitError("Linking COMDATs named '" + ComdatName + "': SameSize violated!"); LinkFromSrc = false; } else { llvm_unreachable("unknown selection kind"); } break; } } return false; } bool ModuleLinker::getComdatResult(const Comdat *SrcC, Comdat::SelectionKind &Result, bool &LinkFromSrc) { Comdat::SelectionKind SSK = SrcC->getSelectionKind(); StringRef ComdatName = SrcC->getName(); Module::ComdatSymTabType &ComdatSymTab = DstM->getComdatSymbolTable(); Module::ComdatSymTabType::iterator DstCI = ComdatSymTab.find(ComdatName); if (DstCI == ComdatSymTab.end()) { // Use the comdat if it is only available in one of the modules. LinkFromSrc = true; Result = SSK; return false; } const Comdat *DstC = &DstCI->second; Comdat::SelectionKind DSK = DstC->getSelectionKind(); return computeResultingSelectionKind(ComdatName, SSK, DSK, Result, LinkFromSrc); } bool ModuleLinker::shouldLinkFromSource(bool &LinkFromSrc, const GlobalValue &Dest, const GlobalValue &Src) { // Should we unconditionally use the Src? if (OverrideFromSrc) { LinkFromSrc = true; return false; } // We always have to add Src if it has appending linkage. 
if (Src.hasAppendingLinkage()) { LinkFromSrc = true; return false; } bool SrcIsDeclaration = Src.isDeclarationForLinker(); bool DestIsDeclaration = Dest.isDeclarationForLinker(); if (SrcIsDeclaration) { // If Src is external or if both Src & Dest are external.. Just link the // external globals, we aren't adding anything. if (Src.hasDLLImportStorageClass()) { // If one of GVs is marked as DLLImport, result should be dllimport'ed. LinkFromSrc = DestIsDeclaration; return false; } // If the Dest is weak, use the source linkage. LinkFromSrc = Dest.hasExternalWeakLinkage(); return false; } if (DestIsDeclaration) { // If Dest is external but Src is not: LinkFromSrc = true; return false; } if (Src.hasCommonLinkage()) { if (Dest.hasLinkOnceLinkage() || Dest.hasWeakLinkage()) { LinkFromSrc = true; return false; } if (!Dest.hasCommonLinkage()) { LinkFromSrc = false; return false; } const DataLayout &DL = Dest.getParent()->getDataLayout(); uint64_t DestSize = DL.getTypeAllocSize(Dest.getType()->getElementType()); uint64_t SrcSize = DL.getTypeAllocSize(Src.getType()->getElementType()); LinkFromSrc = SrcSize > DestSize; return false; } if (Src.isWeakForLinker()) { assert(!Dest.hasExternalWeakLinkage()); assert(!Dest.hasAvailableExternallyLinkage()); if (Dest.hasLinkOnceLinkage() && Src.hasWeakLinkage()) { LinkFromSrc = true; return false; } LinkFromSrc = false; return false; } if (Dest.isWeakForLinker()) { assert(Src.hasExternalLinkage()); LinkFromSrc = true; return false; } assert(!Src.hasExternalWeakLinkage()); assert(!Dest.hasExternalWeakLinkage()); assert(Dest.hasExternalLinkage() && Src.hasExternalLinkage() && "Unexpected linkage type!"); return emitError("Linking globals named '" + Src.getName() + "': symbol multiply defined!"); } /// Loop over all of the linked values to compute type mappings. For example, /// if we link "extern Foo *x" and "Foo *x = NULL", then we have two struct /// types 'Foo' but one got renamed when the module was loaded into the same /// LLVMContext. void ModuleLinker::computeTypeMapping() { for (GlobalValue &SGV : SrcM->globals()) { GlobalValue *DGV = getLinkedToGlobal(&SGV); if (!DGV) continue; if (!DGV->hasAppendingLinkage() || !SGV.hasAppendingLinkage()) { TypeMap.addTypeMapping(DGV->getType(), SGV.getType()); continue; } // Unify the element type of appending arrays. ArrayType *DAT = cast<ArrayType>(DGV->getType()->getElementType()); ArrayType *SAT = cast<ArrayType>(SGV.getType()->getElementType()); TypeMap.addTypeMapping(DAT->getElementType(), SAT->getElementType()); } for (GlobalValue &SGV : *SrcM) { if (GlobalValue *DGV = getLinkedToGlobal(&SGV)) TypeMap.addTypeMapping(DGV->getType(), SGV.getType()); } for (GlobalValue &SGV : SrcM->aliases()) { if (GlobalValue *DGV = getLinkedToGlobal(&SGV)) TypeMap.addTypeMapping(DGV->getType(), SGV.getType()); } // Incorporate types by name, scanning all the types in the source module. // At this point, the destination module may have a type "%foo = { i32 }" for // example. When the source module got loaded into the same LLVMContext, if // it had the same type, it would have been renamed to "%foo.42 = { i32 }". std::vector<StructType *> Types = SrcM->getIdentifiedStructTypes(); for (StructType *ST : Types) { if (!ST->hasName()) continue; // Check to see if there is a dot in the name followed by a digit. size_t DotPos = ST->getName().rfind('.'); if (DotPos == 0 || DotPos == StringRef::npos || ST->getName().back() == '.' 
|| !isdigit(static_cast<unsigned char>(ST->getName()[DotPos + 1]))) continue; // Check to see if the destination module has a struct with the prefix name. StructType *DST = DstM->getTypeByName(ST->getName().substr(0, DotPos)); if (!DST) continue; // Don't use it if this actually came from the source module. They're in // the same LLVMContext after all. Also don't use it unless the type is // actually used in the destination module. This can happen in situations // like this: // // Module A Module B // -------- -------- // %Z = type { %A } %B = type { %C.1 } // %A = type { %B.1, [7 x i8] } %C.1 = type { i8* } // %B.1 = type { %C } %A.2 = type { %B.3, [5 x i8] } // %C = type { i8* } %B.3 = type { %C.1 } // // When we link Module B with Module A, the '%B' in Module B is // used. However, that would then use '%C.1'. But when we process '%C.1', // we prefer to take the '%C' version. So we are then left with both // '%C.1' and '%C' being used for the same types. This leads to some // variables using one type and some using the other. if (TypeMap.DstStructTypesSet.hasType(DST)) TypeMap.addTypeMapping(DST, ST); } // Now that we have discovered all of the type equivalences, get a body for // any 'opaque' types in the dest module that are now resolved. TypeMap.linkDefinedTypeBodies(); } static void upgradeGlobalArray(GlobalVariable *GV) { ArrayType *ATy = cast<ArrayType>(GV->getType()->getElementType()); StructType *OldTy = cast<StructType>(ATy->getElementType()); assert(OldTy->getNumElements() == 2 && "Expected to upgrade from 2 elements"); // Get the upgraded 3 element type. PointerType *VoidPtrTy = Type::getInt8Ty(GV->getContext())->getPointerTo(); Type *Tys[3] = {OldTy->getElementType(0), OldTy->getElementType(1), VoidPtrTy}; StructType *NewTy = StructType::get(GV->getContext(), Tys, false); // Build new constants with a null third field filled in. Constant *OldInitC = GV->getInitializer(); ConstantArray *OldInit = dyn_cast<ConstantArray>(OldInitC); if (!OldInit && !isa<ConstantAggregateZero>(OldInitC)) // Invalid initializer; give up. return; std::vector<Constant *> Initializers; if (OldInit && OldInit->getNumOperands()) { Value *Null = Constant::getNullValue(VoidPtrTy); for (Use &U : OldInit->operands()) { ConstantStruct *Init = cast<ConstantStruct>(U.get()); Initializers.push_back(ConstantStruct::get( NewTy, Init->getOperand(0), Init->getOperand(1), Null, nullptr)); } } assert(Initializers.size() == ATy->getNumElements() && "Failed to copy all array elements"); // Replace the old GV with a new one. ATy = ArrayType::get(NewTy, Initializers.size()); Constant *NewInit = ConstantArray::get(ATy, Initializers); GlobalVariable *NewGV = new GlobalVariable( *GV->getParent(), ATy, GV->isConstant(), GV->getLinkage(), NewInit, "", GV, GV->getThreadLocalMode(), GV->getType()->getAddressSpace(), GV->isExternallyInitialized()); NewGV->copyAttributesFrom(GV); NewGV->takeName(GV); assert(GV->use_empty() && "program cannot use initializer list"); GV->eraseFromParent(); } void ModuleLinker::upgradeMismatchedGlobalArray(StringRef Name) { // Look for the global arrays. auto *DstGV = dyn_cast_or_null<GlobalVariable>(DstM->getNamedValue(Name)); if (!DstGV) return; auto *SrcGV = dyn_cast_or_null<GlobalVariable>(SrcM->getNamedValue(Name)); if (!SrcGV) return; // Check if the types already match. auto *DstTy = cast<ArrayType>(DstGV->getType()->getElementType()); auto *SrcTy = cast<ArrayType>(TypeMap.get(SrcGV->getType()->getElementType())); if (DstTy == SrcTy) return; // Grab the element types. 
We can only upgrade an array of a two-field // struct. Only bother if the other one has three fields. auto *DstEltTy = cast<StructType>(DstTy->getElementType()); auto *SrcEltTy = cast<StructType>(SrcTy->getElementType()); if (DstEltTy->getNumElements() == 2 && SrcEltTy->getNumElements() == 3) { upgradeGlobalArray(DstGV); return; } if (DstEltTy->getNumElements() == 3 && SrcEltTy->getNumElements() == 2) upgradeGlobalArray(SrcGV); // We can't upgrade any other differences. } void ModuleLinker::upgradeMismatchedGlobals() { upgradeMismatchedGlobalArray("llvm.global_ctors"); upgradeMismatchedGlobalArray("llvm.global_dtors"); } /// If there were any appending global variables, link them together now. /// Return true on error. bool ModuleLinker::linkAppendingVarProto(GlobalVariable *DstGV, const GlobalVariable *SrcGV) { if (!SrcGV->hasAppendingLinkage() || !DstGV->hasAppendingLinkage()) return emitError("Linking globals named '" + SrcGV->getName() + "': can only link appending global with another appending global!"); ArrayType *DstTy = cast<ArrayType>(DstGV->getType()->getElementType()); ArrayType *SrcTy = cast<ArrayType>(TypeMap.get(SrcGV->getType()->getElementType())); Type *EltTy = DstTy->getElementType(); // Check to see that the two arrays agree on type. if (EltTy != SrcTy->getElementType()) return emitError("Appending variables with different element types!"); if (DstGV->isConstant() != SrcGV->isConstant()) return emitError("Appending variables linked with different const'ness!"); if (DstGV->getAlignment() != SrcGV->getAlignment()) return emitError( "Appending variables with different alignment need to be linked!"); if (DstGV->getVisibility() != SrcGV->getVisibility()) return emitError( "Appending variables with different visibility need to be linked!"); if (DstGV->hasUnnamedAddr() != SrcGV->hasUnnamedAddr()) return emitError( "Appending variables with different unnamed_addr need to be linked!"); if (StringRef(DstGV->getSection()) != SrcGV->getSection()) return emitError( "Appending variables with different section name need to be linked!"); uint64_t NewSize = DstTy->getNumElements() + SrcTy->getNumElements(); ArrayType *NewType = ArrayType::get(EltTy, NewSize); // Create the new global variable. GlobalVariable *NG = new GlobalVariable(*DstGV->getParent(), NewType, SrcGV->isConstant(), DstGV->getLinkage(), /*init*/nullptr, /*name*/"", DstGV, DstGV->getThreadLocalMode(), DstGV->getType()->getAddressSpace()); // Propagate alignment, visibility and section info. copyGVAttributes(NG, DstGV); AppendingVarInfo AVI; AVI.NewGV = NG; AVI.DstInit = DstGV->getInitializer(); AVI.SrcInit = SrcGV->getInitializer(); AppendingVars.push_back(AVI); // Replace any uses of the two global variables with uses of the new // global. ValueMap[SrcGV] = ConstantExpr::getBitCast(NG, TypeMap.get(SrcGV->getType())); DstGV->replaceAllUsesWith(ConstantExpr::getBitCast(NG, DstGV->getType())); DstGV->eraseFromParent(); // Track the source variable so we don't try to link it. DoNotLinkFromSource.insert(SrcGV); return false; } bool ModuleLinker::linkGlobalValueProto(GlobalValue *SGV) { GlobalValue *DGV = getLinkedToGlobal(SGV); // Handle the ultra special appending linkage case first.
if (DGV && DGV->hasAppendingLinkage()) return linkAppendingVarProto(cast<GlobalVariable>(DGV), cast<GlobalVariable>(SGV)); bool LinkFromSrc = true; Comdat *C = nullptr; GlobalValue::VisibilityTypes Visibility = SGV->getVisibility(); bool HasUnnamedAddr = SGV->hasUnnamedAddr(); if (const Comdat *SC = SGV->getComdat()) { Comdat::SelectionKind SK; std::tie(SK, LinkFromSrc) = ComdatsChosen[SC]; C = DstM->getOrInsertComdat(SC->getName()); C->setSelectionKind(SK); } else if (DGV) { if (shouldLinkFromSource(LinkFromSrc, *DGV, *SGV)) return true; } if (!LinkFromSrc) { // Track the source global so that we don't attempt to copy it over when // processing global initializers. DoNotLinkFromSource.insert(SGV); if (DGV) // Make sure to remember this mapping. ValueMap[SGV] = ConstantExpr::getBitCast(DGV, TypeMap.get(SGV->getType())); } if (DGV) { Visibility = isLessConstraining(Visibility, DGV->getVisibility()) ? DGV->getVisibility() : Visibility; HasUnnamedAddr = HasUnnamedAddr && DGV->hasUnnamedAddr(); } if (!LinkFromSrc && !DGV) return false; GlobalValue *NewGV; if (!LinkFromSrc) { NewGV = DGV; } else { // If the GV is to be lazily linked, don't create it just yet. // The ValueMaterializerTy will deal with creating it if it's used. if (!DGV && !OverrideFromSrc && (SGV->hasLocalLinkage() || SGV->hasLinkOnceLinkage() || SGV->hasAvailableExternallyLinkage())) { DoNotLinkFromSource.insert(SGV); return false; } NewGV = copyGlobalValueProto(TypeMap, *DstM, SGV); if (DGV && isa<Function>(DGV)) if (auto *NewF = dyn_cast<Function>(NewGV)) OverridingFunctions.insert(NewF); } NewGV->setUnnamedAddr(HasUnnamedAddr); NewGV->setVisibility(Visibility); if (auto *NewGO = dyn_cast<GlobalObject>(NewGV)) { if (C) NewGO->setComdat(C); if (DGV && DGV->hasCommonLinkage() && SGV->hasCommonLinkage()) NewGO->setAlignment(std::max(DGV->getAlignment(), SGV->getAlignment())); } if (auto *NewGVar = dyn_cast<GlobalVariable>(NewGV)) { auto *DGVar = dyn_cast_or_null<GlobalVariable>(DGV); auto *SGVar = dyn_cast<GlobalVariable>(SGV); if (DGVar && SGVar && DGVar->isDeclaration() && SGVar->isDeclaration() && (!DGVar->isConstant() || !SGVar->isConstant())) NewGVar->setConstant(false); } // Make sure to remember this mapping. if (NewGV != DGV) { if (DGV) { DGV->replaceAllUsesWith(ConstantExpr::getBitCast(NewGV, DGV->getType())); DGV->eraseFromParent(); } ValueMap[SGV] = NewGV; } return false; } static void getArrayElements(const Constant *C, SmallVectorImpl<Constant *> &Dest) { unsigned NumElements = cast<ArrayType>(C->getType())->getNumElements(); for (unsigned i = 0; i != NumElements; ++i) Dest.push_back(C->getAggregateElement(i)); } void ModuleLinker::linkAppendingVarInit(const AppendingVarInfo &AVI) { // Merge the initializer. 
SmallVector<Constant *, 16> DstElements; getArrayElements(AVI.DstInit, DstElements); SmallVector<Constant *, 16> SrcElements; getArrayElements(AVI.SrcInit, SrcElements); ArrayType *NewType = cast<ArrayType>(AVI.NewGV->getType()->getElementType()); StringRef Name = AVI.NewGV->getName(); bool IsNewStructor = (Name == "llvm.global_ctors" || Name == "llvm.global_dtors") && cast<StructType>(NewType->getElementType())->getNumElements() == 3; for (auto *V : SrcElements) { if (IsNewStructor) { Constant *Key = V->getAggregateElement(2); if (DoNotLinkFromSource.count(Key)) continue; } DstElements.push_back( MapValue(V, ValueMap, RF_None, &TypeMap, &ValMaterializer)); } if (IsNewStructor) { NewType = ArrayType::get(NewType->getElementType(), DstElements.size()); AVI.NewGV->mutateType(PointerType::get(NewType, 0)); } AVI.NewGV->setInitializer(ConstantArray::get(NewType, DstElements)); } /// Update the initializers in the Dest module now that all globals that may be /// referenced are in Dest. void ModuleLinker::linkGlobalInit(GlobalVariable &Dst, GlobalVariable &Src) { // Figure out what the initializer looks like in the dest module. Dst.setInitializer(MapValue(Src.getInitializer(), ValueMap, RF_None, &TypeMap, &ValMaterializer)); } /// Copy the source function over into the dest function and fix up references /// to values. At this point we know that Dest is an external function, and /// that Src is not. bool ModuleLinker::linkFunctionBody(Function &Dst, Function &Src) { assert(Dst.isDeclaration() && !Src.isDeclaration()); // Materialize if needed. if (std::error_code EC = Src.materialize()) return emitError(EC.message()); // Link in the prefix data. if (Src.hasPrefixData()) Dst.setPrefixData(MapValue(Src.getPrefixData(), ValueMap, RF_None, &TypeMap, &ValMaterializer)); // Link in the prologue data. if (Src.hasPrologueData()) Dst.setPrologueData(MapValue(Src.getPrologueData(), ValueMap, RF_None, &TypeMap, &ValMaterializer)); // Link in the personality function. if (Src.hasPersonalityFn()) Dst.setPersonalityFn(MapValue(Src.getPersonalityFn(), ValueMap, RF_None, &TypeMap, &ValMaterializer)); // Go through and convert function arguments over, remembering the mapping. Function::arg_iterator DI = Dst.arg_begin(); for (Argument &Arg : Src.args()) { DI->setName(Arg.getName()); // Copy the name over. // Add a mapping to our mapping. ValueMap[&Arg] = DI; ++DI; } // Copy over the metadata attachments. SmallVector<std::pair<unsigned, MDNode *>, 8> MDs; Src.getAllMetadata(MDs); for (const auto &I : MDs) Dst.setMetadata(I.first, MapMetadata(I.second, ValueMap, RF_None, &TypeMap, &ValMaterializer)); // Splice the body of the source function into the dest function. Dst.getBasicBlockList().splice(Dst.end(), Src.getBasicBlockList()); // At this point, all of the instructions and values of the function are now // copied over. The only problem is that they are still referencing values in // the Source function as operands. Loop through all of the operands of the // functions and patch them up to point to the local versions. for (BasicBlock &BB : Dst) for (Instruction &I : BB) RemapInstruction(&I, ValueMap, RF_IgnoreMissingEntries, &TypeMap, &ValMaterializer); // There is no need to map the arguments anymore. 
for (Argument &Arg : Src.args()) ValueMap.erase(&Arg); Src.dematerialize(); return false; } void ModuleLinker::linkAliasBody(GlobalAlias &Dst, GlobalAlias &Src) { Constant *Aliasee = Src.getAliasee(); Constant *Val = MapValue(Aliasee, ValueMap, RF_None, &TypeMap, &ValMaterializer); Dst.setAliasee(Val); } bool ModuleLinker::linkGlobalValueBody(GlobalValue &Src) { Value *Dst = ValueMap[&Src]; assert(Dst); if (auto *F = dyn_cast<Function>(&Src)) return linkFunctionBody(cast<Function>(*Dst), *F); if (auto *GVar = dyn_cast<GlobalVariable>(&Src)) { linkGlobalInit(cast<GlobalVariable>(*Dst), *GVar); return false; } linkAliasBody(cast<GlobalAlias>(*Dst), cast<GlobalAlias>(Src)); return false; } /// Insert all of the named MDNodes in Src into the Dest module. void ModuleLinker::linkNamedMDNodes() { const NamedMDNode *SrcModFlags = SrcM->getModuleFlagsMetadata(); for (const NamedMDNode &NMD : SrcM->named_metadata()) { // Don't link module flags here. Do them separately. if (&NMD == SrcModFlags) continue; NamedMDNode *DestNMD = DstM->getOrInsertNamedMetadata(NMD.getName()); // Add Src elements into Dest node. for (const MDNode *op : NMD.operands()) DestNMD->addOperand( MapMetadata(op, ValueMap, RF_None, &TypeMap, &ValMaterializer)); } } /// Drop DISubprograms that have been superseded. /// /// FIXME: this creates an asymmetric result: we strip functions from losing /// subprograms in DstM, but leave losing subprograms in SrcM. /// TODO: Remove this logic once the backend can correctly determine canonical /// subprograms. void ModuleLinker::stripReplacedSubprograms() { // Avoid quadratic runtime by returning early when there's nothing to do. if (OverridingFunctions.empty()) return; // Move the functions now, so the set gets cleared even on early returns. auto Functions = std::move(OverridingFunctions); OverridingFunctions.clear(); // Drop functions from subprograms if they've been overridden by the new // compile unit. NamedMDNode *CompileUnits = DstM->getNamedMetadata("llvm.dbg.cu"); if (!CompileUnits) return; for (unsigned I = 0, E = CompileUnits->getNumOperands(); I != E; ++I) { auto *CU = cast<DICompileUnit>(CompileUnits->getOperand(I)); assert(CU && "Expected valid compile unit"); for (DISubprogram *SP : CU->getSubprograms()) { if (!SP || !SP->getFunction() || !Functions.count(SP->getFunction())) continue; // Prevent DebugInfoFinder from tagging this as the canonical subprogram, // since the canonical one is in the incoming module. SP->replaceFunction(nullptr); } } } /// Merge the linker flags in Src into the Dest module. bool ModuleLinker::linkModuleFlagsMetadata() { // If the source module has no module flags, we are done. const NamedMDNode *SrcModFlags = SrcM->getModuleFlagsMetadata(); if (!SrcModFlags) return false; // If the destination module doesn't have module flags yet, then just copy // over the source module's flags. NamedMDNode *DstModFlags = DstM->getOrInsertModuleFlagsMetadata(); if (DstModFlags->getNumOperands() == 0) { for (unsigned I = 0, E = SrcModFlags->getNumOperands(); I != E; ++I) DstModFlags->addOperand(SrcModFlags->getOperand(I)); return false; } // First build a map of the existing module flags and requirements. 
DenseMap<MDString *, std::pair<MDNode *, unsigned>> Flags; SmallSetVector<MDNode*, 16> Requirements; for (unsigned I = 0, E = DstModFlags->getNumOperands(); I != E; ++I) { MDNode *Op = DstModFlags->getOperand(I); ConstantInt *Behavior = mdconst::extract<ConstantInt>(Op->getOperand(0)); MDString *ID = cast<MDString>(Op->getOperand(1)); if (Behavior->getZExtValue() == Module::Require) { Requirements.insert(cast<MDNode>(Op->getOperand(2))); } else { Flags[ID] = std::make_pair(Op, I); } } // Merge in the flags from the source module, and also collect its set of // requirements. bool HasErr = false; for (unsigned I = 0, E = SrcModFlags->getNumOperands(); I != E; ++I) { MDNode *SrcOp = SrcModFlags->getOperand(I); ConstantInt *SrcBehavior = mdconst::extract<ConstantInt>(SrcOp->getOperand(0)); MDString *ID = cast<MDString>(SrcOp->getOperand(1)); MDNode *DstOp; unsigned DstIndex; std::tie(DstOp, DstIndex) = Flags.lookup(ID); unsigned SrcBehaviorValue = SrcBehavior->getZExtValue(); // If this is a requirement, add it and continue. if (SrcBehaviorValue == Module::Require) { // If the destination module does not already have this requirement, add // it. if (Requirements.insert(cast<MDNode>(SrcOp->getOperand(2)))) { DstModFlags->addOperand(SrcOp); } continue; } // If there is no existing flag with this ID, just add it. if (!DstOp) { Flags[ID] = std::make_pair(SrcOp, DstModFlags->getNumOperands()); DstModFlags->addOperand(SrcOp); continue; } // Otherwise, perform a merge. ConstantInt *DstBehavior = mdconst::extract<ConstantInt>(DstOp->getOperand(0)); unsigned DstBehaviorValue = DstBehavior->getZExtValue(); // If either flag has override behavior, handle it first. if (DstBehaviorValue == Module::Override) { // Diagnose inconsistent flags which both have override behavior. if (SrcBehaviorValue == Module::Override && SrcOp->getOperand(2) != DstOp->getOperand(2)) { HasErr |= emitError("linking module flags '" + ID->getString() + "': IDs have conflicting override values"); } continue; } else if (SrcBehaviorValue == Module::Override) { // Update the destination flag to that of the source. DstModFlags->setOperand(DstIndex, SrcOp); Flags[ID].first = SrcOp; continue; } // Diagnose inconsistent merge behavior types. if (SrcBehaviorValue != DstBehaviorValue) { HasErr |= emitError("linking module flags '" + ID->getString() + "': IDs have conflicting behaviors"); continue; } auto replaceDstValue = [&](MDNode *New) { Metadata *FlagOps[] = {DstOp->getOperand(0), ID, New}; MDNode *Flag = MDNode::get(DstM->getContext(), FlagOps); DstModFlags->setOperand(DstIndex, Flag); Flags[ID].first = Flag; }; // Perform the merge for standard behavior types. switch (SrcBehaviorValue) { case Module::Require: case Module::Override: llvm_unreachable("not possible"); case Module::Error: { // Emit an error if the values differ. if (SrcOp->getOperand(2) != DstOp->getOperand(2)) { HasErr |= emitError("linking module flags '" + ID->getString() + "': IDs have conflicting values"); } continue; } case Module::Warning: { // Emit a warning if the values differ. 
if (SrcOp->getOperand(2) != DstOp->getOperand(2)) { emitWarning("linking module flags '" + ID->getString() + "': IDs have conflicting values"); } continue; } case Module::Append: { MDNode *DstValue = cast<MDNode>(DstOp->getOperand(2)); MDNode *SrcValue = cast<MDNode>(SrcOp->getOperand(2)); SmallVector<Metadata *, 8> MDs; MDs.reserve(DstValue->getNumOperands() + SrcValue->getNumOperands()); MDs.append(DstValue->op_begin(), DstValue->op_end()); MDs.append(SrcValue->op_begin(), SrcValue->op_end()); replaceDstValue(MDNode::get(DstM->getContext(), MDs)); break; } case Module::AppendUnique: { SmallSetVector<Metadata *, 16> Elts; MDNode *DstValue = cast<MDNode>(DstOp->getOperand(2)); MDNode *SrcValue = cast<MDNode>(SrcOp->getOperand(2)); Elts.insert(DstValue->op_begin(), DstValue->op_end()); Elts.insert(SrcValue->op_begin(), SrcValue->op_end()); replaceDstValue(MDNode::get(DstM->getContext(), makeArrayRef(Elts.begin(), Elts.end()))); break; } } } // Check all of the requirements. for (unsigned I = 0, E = Requirements.size(); I != E; ++I) { MDNode *Requirement = Requirements[I]; MDString *Flag = cast<MDString>(Requirement->getOperand(0)); Metadata *ReqValue = Requirement->getOperand(1); MDNode *Op = Flags[Flag].first; if (!Op || Op->getOperand(2) != ReqValue) { HasErr |= emitError("linking module flags '" + Flag->getString() + "': does not have the required value"); continue; } } return HasErr; } // This function returns true if the triples match. static bool triplesMatch(const Triple &T0, const Triple &T1) { // If vendor is apple, ignore the version number. if (T0.getVendor() == Triple::Apple) return T0.getArch() == T1.getArch() && T0.getSubArch() == T1.getSubArch() && T0.getVendor() == T1.getVendor() && T0.getOS() == T1.getOS(); return T0 == T1; } // This function returns the merged triple. static std::string mergeTriples(const Triple &SrcTriple, const Triple &DstTriple) { // If vendor is apple, pick the triple with the larger version number. if (SrcTriple.getVendor() == Triple::Apple) if (DstTriple.isOSVersionLT(SrcTriple)) return SrcTriple.str(); return DstTriple.str(); } bool ModuleLinker::run() { assert(DstM && "Null destination module"); assert(SrcM && "Null source module"); // Inherit the target data from the source module if the destination module // doesn't have one already. if (DstM->getDataLayout().isDefault()) DstM->setDataLayout(SrcM->getDataLayout()); if (SrcM->getDataLayout() != DstM->getDataLayout()) { emitWarning("Linking two modules of different data layouts: '" + SrcM->getModuleIdentifier() + "' is '" + SrcM->getDataLayoutStr() + "' whereas '" + DstM->getModuleIdentifier() + "' is '" + DstM->getDataLayoutStr() + "'\n"); } // Copy the target triple from the source to dest if the dest's is empty. if (DstM->getTargetTriple().empty() && !SrcM->getTargetTriple().empty()) DstM->setTargetTriple(SrcM->getTargetTriple()); Triple SrcTriple(SrcM->getTargetTriple()), DstTriple(DstM->getTargetTriple()); if (!SrcM->getTargetTriple().empty() && !triplesMatch(SrcTriple, DstTriple)) emitWarning("Linking two modules of different target triples: " + SrcM->getModuleIdentifier() + "' is '" + SrcM->getTargetTriple() + "' whereas '" + DstM->getModuleIdentifier() + "' is '" + DstM->getTargetTriple() + "'\n"); DstM->setTargetTriple(mergeTriples(SrcTriple, DstTriple)); // Append the module inline asm string. 
if (!SrcM->getModuleInlineAsm().empty()) { if (DstM->getModuleInlineAsm().empty()) DstM->setModuleInlineAsm(SrcM->getModuleInlineAsm()); else DstM->setModuleInlineAsm(DstM->getModuleInlineAsm()+"\n"+ SrcM->getModuleInlineAsm()); } // Loop over all of the linked values to compute type mappings. computeTypeMapping(); ComdatsChosen.clear(); for (const auto &SMEC : SrcM->getComdatSymbolTable()) { const Comdat &C = SMEC.getValue(); if (ComdatsChosen.count(&C)) continue; Comdat::SelectionKind SK; bool LinkFromSrc; if (getComdatResult(&C, SK, LinkFromSrc)) return true; ComdatsChosen[&C] = std::make_pair(SK, LinkFromSrc); } // Upgrade mismatched global arrays. upgradeMismatchedGlobals(); // Insert all of the globals in src into the DstM module... without linking // initializers (which could refer to functions not yet mapped over). for (GlobalVariable &GV : SrcM->globals()) if (linkGlobalValueProto(&GV)) return true; // Link the functions together between the two modules, without doing function // bodies... this just adds external function prototypes to the DstM // function... We do this so that when we begin processing function bodies, // all of the global values that may be referenced are available in our // ValueMap. for (Function &F :*SrcM) if (linkGlobalValueProto(&F)) return true; // If there were any aliases, link them now. for (GlobalAlias &GA : SrcM->aliases()) if (linkGlobalValueProto(&GA)) return true; for (const AppendingVarInfo &AppendingVar : AppendingVars) linkAppendingVarInit(AppendingVar); for (const auto &Entry : DstM->getComdatSymbolTable()) { const Comdat &C = Entry.getValue(); if (C.getSelectionKind() == Comdat::Any) continue; const GlobalValue *GV = SrcM->getNamedValue(C.getName()); if (GV) MapValue(GV, ValueMap, RF_None, &TypeMap, &ValMaterializer); } // Strip replaced subprograms before mapping any metadata -- so that we're // not changing metadata from the source module (note that // linkGlobalValueBody() eventually calls RemapInstruction() and therefore // MapMetadata()) -- but after linking global value protocols -- so that // OverridingFunctions has been built. stripReplacedSubprograms(); // Link in the function bodies that are defined in the source module into // DstM. for (Function &SF : *SrcM) { // Skip if no body (function is external). if (SF.isDeclaration()) continue; // Skip if not linking from source. if (DoNotLinkFromSource.count(&SF)) continue; if (linkGlobalValueBody(SF)) return true; } // Resolve all uses of aliases with aliasees. for (GlobalAlias &Src : SrcM->aliases()) { if (DoNotLinkFromSource.count(&Src)) continue; linkGlobalValueBody(Src); } // Remap all of the named MDNodes in Src into the DstM module. We do this // after linking GlobalValues so that MDNodes that reference GlobalValues // are properly remapped. linkNamedMDNodes(); // Merge the module flags into the DstM module. if (linkModuleFlagsMetadata()) return true; // Update the initializers in the DstM module now that all globals that may // be referenced are in DstM. for (GlobalVariable &Src : SrcM->globals()) { // Only process initialized GV's or ones not already in dest. if (!Src.hasInitializer() || DoNotLinkFromSource.count(&Src)) continue; linkGlobalValueBody(Src); } // Process vector of lazily linked in functions. 
while (!LazilyLinkGlobalValues.empty()) { GlobalValue *SGV = LazilyLinkGlobalValues.back(); LazilyLinkGlobalValues.pop_back(); assert(!SGV->isDeclaration() && "users should not pass down decls"); if (linkGlobalValueBody(*SGV)) return true; } return false; } Linker::StructTypeKeyInfo::KeyTy::KeyTy(ArrayRef<Type *> E, bool P) : ETypes(E), IsPacked(P) {} Linker::StructTypeKeyInfo::KeyTy::KeyTy(const StructType *ST) : ETypes(ST->elements()), IsPacked(ST->isPacked()) {} bool Linker::StructTypeKeyInfo::KeyTy::operator==(const KeyTy &That) const { if (IsPacked != That.IsPacked) return false; if (ETypes != That.ETypes) return false; return true; } bool Linker::StructTypeKeyInfo::KeyTy::operator!=(const KeyTy &That) const { return !this->operator==(That); } StructType *Linker::StructTypeKeyInfo::getEmptyKey() { return DenseMapInfo<StructType *>::getEmptyKey(); } StructType *Linker::StructTypeKeyInfo::getTombstoneKey() { return DenseMapInfo<StructType *>::getTombstoneKey(); } unsigned Linker::StructTypeKeyInfo::getHashValue(const KeyTy &Key) { return hash_combine(hash_combine_range(Key.ETypes.begin(), Key.ETypes.end()), Key.IsPacked); } unsigned Linker::StructTypeKeyInfo::getHashValue(const StructType *ST) { return getHashValue(KeyTy(ST)); } bool Linker::StructTypeKeyInfo::isEqual(const KeyTy &LHS, const StructType *RHS) { if (RHS == getEmptyKey() || RHS == getTombstoneKey()) return false; return LHS == KeyTy(RHS); } bool Linker::StructTypeKeyInfo::isEqual(const StructType *LHS, const StructType *RHS) { if (RHS == getEmptyKey()) return LHS == getEmptyKey(); if (RHS == getTombstoneKey()) return LHS == getTombstoneKey(); return KeyTy(LHS) == KeyTy(RHS); } void Linker::IdentifiedStructTypeSet::addNonOpaque(StructType *Ty) { assert(!Ty->isOpaque()); NonOpaqueStructTypes.insert(Ty); } void Linker::IdentifiedStructTypeSet::switchToNonOpaque(StructType *Ty) { assert(!Ty->isOpaque()); NonOpaqueStructTypes.insert(Ty); bool Removed = OpaqueStructTypes.erase(Ty); (void)Removed; assert(Removed); } void Linker::IdentifiedStructTypeSet::addOpaque(StructType *Ty) { assert(Ty->isOpaque()); OpaqueStructTypes.insert(Ty); } StructType * Linker::IdentifiedStructTypeSet::findNonOpaque(ArrayRef<Type *> ETypes, bool IsPacked) { Linker::StructTypeKeyInfo::KeyTy Key(ETypes, IsPacked); auto I = NonOpaqueStructTypes.find_as(Key); if (I == NonOpaqueStructTypes.end()) return nullptr; return *I; } bool Linker::IdentifiedStructTypeSet::hasType(StructType *Ty) { if (Ty->isOpaque()) return OpaqueStructTypes.count(Ty); auto I = NonOpaqueStructTypes.find(Ty); if (I == NonOpaqueStructTypes.end()) return false; return *I == Ty; } void Linker::init(Module *M, DiagnosticHandlerFunction DiagnosticHandler) { this->Composite = M; this->DiagnosticHandler = DiagnosticHandler; TypeFinder StructTypes; StructTypes.run(*M, true); for (StructType *Ty : StructTypes) { if (Ty->isOpaque()) IdentifiedStructTypes.addOpaque(Ty); else IdentifiedStructTypes.addNonOpaque(Ty); } } Linker::Linker(Module *M, DiagnosticHandlerFunction DiagnosticHandler) { init(M, DiagnosticHandler); } Linker::Linker(Module *M) { init(M, [this](const DiagnosticInfo &DI) { Composite->getContext().diagnose(DI); }); } Linker::~Linker() { } void Linker::deleteModule() { delete Composite; Composite = nullptr; } bool Linker::linkInModule(Module *Src, bool OverrideSymbols) { ModuleLinker TheLinker(Composite, IdentifiedStructTypes, Src, DiagnosticHandler, OverrideSymbols); bool RetCode = TheLinker.run(); Composite->dropTriviallyDeadConstantArrays(); return RetCode; } void 
Linker::setModule(Module *Dst) { init(Dst, DiagnosticHandler); } //===----------------------------------------------------------------------===// // LinkModules entrypoint. //===----------------------------------------------------------------------===// /// This function links two modules together, with the resulting Dest module /// modified to be the composite of the two input modules. If an error occurs, /// true is returned and ErrorMsg (if not null) is set to indicate the problem. /// Upon failure, the Dest module could be in a modified state, and shouldn't be /// relied on to be consistent. bool Linker::LinkModules(Module *Dest, Module *Src, DiagnosticHandlerFunction DiagnosticHandler) { Linker L(Dest, DiagnosticHandler); return L.linkInModule(Src); } bool Linker::LinkModules(Module *Dest, Module *Src) { Linker L(Dest); return L.linkInModule(Src); } //===----------------------------------------------------------------------===// // C API. //===----------------------------------------------------------------------===// LLVMBool LLVMLinkModules(LLVMModuleRef Dest, LLVMModuleRef Src, LLVMLinkerMode Unused, char **OutMessages) { Module *D = unwrap(Dest); std::string Message; raw_string_ostream Stream(Message); DiagnosticPrinterRawOStream DP(Stream); LLVMBool Result = Linker::LinkModules( D, unwrap(Src), [&](const DiagnosticInfo &DI) { DI.print(DP); }); if (OutMessages && Result) { Stream.flush(); #ifdef _WIN32 *OutMessages = _strdup(Message.c_str()); // HLSL Change for strdup #else *OutMessages = strdup(Message.c_str()); #endif } return Result; }
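// Illustrative sketch: a hypothetical caller for the linking entry points
// defined above. It assumes `Dst` and `Src` were created or parsed elsewhere
// (e.g. with parseIRFile) and uses the boolean convention of
// Linker::LinkModules, where `true` means failure. Names below are examples,
// not part of this file.
#include "llvm/IR/Module.h"
#include "llvm/Linker/Linker.h"

bool linkExample(llvm::Module *Dst, llvm::Module *Src) {
  // On success, Dst becomes the composite of both modules; diagnostics go
  // through the destination context's default handler.
  return llvm::Linker::LinkModules(Dst, Src);
}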
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Linker/CMakeLists.txt
add_llvm_library(LLVMLinker
  LinkModules.cpp

  ADDITIONAL_HEADER_DIRS
  ${LLVM_MAIN_INCLUDE_DIR}/llvm/Linker

  DEPENDS
  intrinsics_gen
  )
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Linker/LLVMBuild.txt
;===- ./lib/Linker/LLVMBuild.txt -------------------------------*- Conf -*--===;
;
;                     The LLVM Compiler Infrastructure
;
; This file is distributed under the University of Illinois Open Source
; License. See LICENSE.TXT for details.
;
;===------------------------------------------------------------------------===;
;
; This is an LLVMBuild description file for the components in this subdirectory.
;
; For more information on the LLVMBuild system, please see:
;
;   http://llvm.org/docs/LLVMBuild.html
;
;===------------------------------------------------------------------------===;

[component_0]
type = Library
name = Linker
parent = Libraries
required_libraries = Core Support TransformUtils
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Target/TargetMachineC.cpp
//===-- TargetMachine.cpp -------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the LLVM-C part of TargetMachine.h // //===----------------------------------------------------------------------===// #include "llvm-c/TargetMachine.h" #include "llvm-c/Core.h" #include "llvm-c/Target.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Module.h" #include "llvm/IR/LegacyPassManager.h" #include "llvm/Support/CodeGen.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/FormattedStream.h" #include "llvm/Support/Host.h" #include "llvm/Support/TargetRegistry.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetSubtargetInfo.h" #include <cassert> #include <cstdlib> #include <cstring> using namespace llvm; inline TargetMachine *unwrap(LLVMTargetMachineRef P) { return reinterpret_cast<TargetMachine*>(P); } inline Target *unwrap(LLVMTargetRef P) { return reinterpret_cast<Target*>(P); } inline LLVMTargetMachineRef wrap(const TargetMachine *P) { return reinterpret_cast<LLVMTargetMachineRef>(const_cast<TargetMachine*>(P)); } inline LLVMTargetRef wrap(const Target * P) { return reinterpret_cast<LLVMTargetRef>(const_cast<Target*>(P)); } LLVMTargetRef LLVMGetFirstTarget() { if (TargetRegistry::targets().begin() == TargetRegistry::targets().end()) { return nullptr; } const Target *target = &*TargetRegistry::targets().begin(); return wrap(target); } LLVMTargetRef LLVMGetNextTarget(LLVMTargetRef T) { return wrap(unwrap(T)->getNext()); } LLVMTargetRef LLVMGetTargetFromName(const char *Name) { StringRef NameRef = Name; auto I = std::find_if( TargetRegistry::targets().begin(), TargetRegistry::targets().end(), [&](const Target &T) { return T.getName() == NameRef; }); return I != TargetRegistry::targets().end() ? 
wrap(&*I) : nullptr; } LLVMBool LLVMGetTargetFromTriple(const char* TripleStr, LLVMTargetRef *T, char **ErrorMessage) { std::string Error; *T = wrap(TargetRegistry::lookupTarget(TripleStr, Error)); if (!*T) { if (ErrorMessage) *ErrorMessage = strdup(Error.c_str()); return 1; } return 0; } const char * LLVMGetTargetName(LLVMTargetRef T) { return unwrap(T)->getName(); } const char * LLVMGetTargetDescription(LLVMTargetRef T) { return unwrap(T)->getShortDescription(); } LLVMBool LLVMTargetHasJIT(LLVMTargetRef T) { return unwrap(T)->hasJIT(); } LLVMBool LLVMTargetHasTargetMachine(LLVMTargetRef T) { return unwrap(T)->hasTargetMachine(); } LLVMBool LLVMTargetHasAsmBackend(LLVMTargetRef T) { return unwrap(T)->hasMCAsmBackend(); } LLVMTargetMachineRef LLVMCreateTargetMachine(LLVMTargetRef T, const char* Triple, const char* CPU, const char* Features, LLVMCodeGenOptLevel Level, LLVMRelocMode Reloc, LLVMCodeModel CodeModel) { Reloc::Model RM; switch (Reloc){ case LLVMRelocStatic: RM = Reloc::Static; break; case LLVMRelocPIC: RM = Reloc::PIC_; break; case LLVMRelocDynamicNoPic: RM = Reloc::DynamicNoPIC; break; default: RM = Reloc::Default; break; } CodeModel::Model CM = unwrap(CodeModel); CodeGenOpt::Level OL; switch (Level) { case LLVMCodeGenLevelNone: OL = CodeGenOpt::None; break; case LLVMCodeGenLevelLess: OL = CodeGenOpt::Less; break; case LLVMCodeGenLevelAggressive: OL = CodeGenOpt::Aggressive; break; default: OL = CodeGenOpt::Default; break; } TargetOptions opt; return wrap(unwrap(T)->createTargetMachine(Triple, CPU, Features, opt, RM, CM, OL)); } void LLVMDisposeTargetMachine(LLVMTargetMachineRef T) { delete unwrap(T); } LLVMTargetRef LLVMGetTargetMachineTarget(LLVMTargetMachineRef T) { const Target* target = &(unwrap(T)->getTarget()); return wrap(target); } char* LLVMGetTargetMachineTriple(LLVMTargetMachineRef T) { std::string StringRep = unwrap(T)->getTargetTriple().str(); return strdup(StringRep.c_str()); } char* LLVMGetTargetMachineCPU(LLVMTargetMachineRef T) { std::string StringRep = unwrap(T)->getTargetCPU(); return strdup(StringRep.c_str()); } char* LLVMGetTargetMachineFeatureString(LLVMTargetMachineRef T) { std::string StringRep = unwrap(T)->getTargetFeatureString(); return strdup(StringRep.c_str()); } LLVMTargetDataRef LLVMGetTargetMachineData(LLVMTargetMachineRef T) { return wrap(unwrap(T)->getDataLayout()); } void LLVMSetTargetMachineAsmVerbosity(LLVMTargetMachineRef T, LLVMBool VerboseAsm) { unwrap(T)->Options.MCOptions.AsmVerbose = VerboseAsm; } static LLVMBool LLVMTargetMachineEmit(LLVMTargetMachineRef T, LLVMModuleRef M, raw_pwrite_stream &OS, LLVMCodeGenFileType codegen, char **ErrorMessage) { TargetMachine* TM = unwrap(T); Module* Mod = unwrap(M); legacy::PassManager pass; std::string error; const DataLayout *td = TM->getDataLayout(); if (!td) { error = "No DataLayout in TargetMachine"; *ErrorMessage = strdup(error.c_str()); return true; } Mod->setDataLayout(*td); TargetMachine::CodeGenFileType ft; switch (codegen) { case LLVMAssemblyFile: ft = TargetMachine::CGFT_AssemblyFile; break; default: ft = TargetMachine::CGFT_ObjectFile; break; } if (TM->addPassesToEmitFile(pass, OS, ft)) { error = "TargetMachine can't emit a file of this type"; *ErrorMessage = strdup(error.c_str()); return true; } pass.run(*Mod); OS.flush(); return false; } LLVMBool LLVMTargetMachineEmitToFile(LLVMTargetMachineRef T, LLVMModuleRef M, char* Filename, LLVMCodeGenFileType codegen, char** ErrorMessage) { std::error_code EC; raw_fd_ostream dest(Filename, EC, sys::fs::F_None); if (EC) { *ErrorMessage = 
strdup(EC.message().c_str()); return true; } bool Result = LLVMTargetMachineEmit(T, M, dest, codegen, ErrorMessage); dest.flush(); return Result; } LLVMBool LLVMTargetMachineEmitToMemoryBuffer(LLVMTargetMachineRef T, LLVMModuleRef M, LLVMCodeGenFileType codegen, char** ErrorMessage, LLVMMemoryBufferRef *OutMemBuf) { SmallString<0> CodeString; raw_svector_ostream OStream(CodeString); bool Result = LLVMTargetMachineEmit(T, M, OStream, codegen, ErrorMessage); OStream.flush(); StringRef Data = OStream.str(); *OutMemBuf = LLVMCreateMemoryBufferWithMemoryRangeCopy(Data.data(), Data.size(), ""); return Result; } #if 0 // HLSL Change Starts - DXIL has a fixed triple char *LLVMGetDefaultTargetTriple(void) { return strdup(sys::getDefaultTargetTriple().c_str()); } #endif // HLSL Change Ends void LLVMAddAnalysisPasses(LLVMTargetMachineRef T, LLVMPassManagerRef PM) { unwrap(PM)->add( createTargetTransformInfoWrapperPass(unwrap(T)->getTargetIRAnalysis())); }
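// Illustrative sketch: a hypothetical caller for the C API above, assuming
// the requested target and its MC layer were registered elsewhere (e.g. via
// the LLVMInitializeAll* entry points) and that `M` is a valid module.
// `triple` and `out` are placeholder parameters, not values from this file;
// on error, the caller should release `err` with LLVMDisposeMessage.
static int emitObjectFileExample(LLVMModuleRef M, const char *triple,
                                 const char *out) {
  char *err = nullptr;
  LLVMTargetRef target;
  if (LLVMGetTargetFromTriple(triple, &target, &err))
    return 1; // `err` now holds the lookup failure message.

  LLVMTargetMachineRef tm = LLVMCreateTargetMachine(
      target, triple, "generic", "", LLVMCodeGenLevelDefault, LLVMRelocDefault,
      LLVMCodeModelDefault);

  LLVMBool failed = LLVMTargetMachineEmitToFile(
      tm, M, const_cast<char *>(out), LLVMObjectFile, &err);
  LLVMDisposeTargetMachine(tm);
  return failed ? 1 : 0;
}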
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Target/TargetRecip.cpp
//===-------------------------- TargetRecip.cpp ---------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This class is used to customize machine-specific reciprocal estimate code // generation in a target-independent way. // If a target does not support operations in this specification, then code // generation will default to using supported operations. // //===----------------------------------------------------------------------===// #include "llvm/ADT/StringRef.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Target/TargetRecip.h" #include <map> using namespace llvm; // These are the names of the individual reciprocal operations. These are // the key strings for queries and command-line inputs. // In addition, the command-line interface recognizes the global parameters // "all", "none", and "default". static const char *RecipOps[] = { "divd", "divf", "vec-divd", "vec-divf", "sqrtd", "sqrtf", "vec-sqrtd", "vec-sqrtf", }; // The uninitialized state is needed for the enabled settings and refinement // steps because custom settings may arrive via the command-line before target // defaults are set. TargetRecip::TargetRecip() { unsigned NumStrings = llvm::array_lengthof(RecipOps); for (unsigned i = 0; i < NumStrings; ++i) RecipMap.insert(std::make_pair(RecipOps[i], RecipParams())); } static bool parseRefinementStep(const StringRef &In, size_t &Position, uint8_t &Value) { const char RefStepToken = ':'; Position = In.find(RefStepToken); if (Position == StringRef::npos) return false; StringRef RefStepString = In.substr(Position + 1); // Allow exactly one numeric character for the additional refinement // step parameter. if (RefStepString.size() == 1) { char RefStepChar = RefStepString[0]; if (RefStepChar >= '0' && RefStepChar <= '9') { Value = RefStepChar - '0'; return true; } } report_fatal_error("Invalid refinement step for -recip."); } bool TargetRecip::parseGlobalParams(const std::string &Arg) { StringRef ArgSub = Arg; // Look for an optional setting of the number of refinement steps needed // for this type of reciprocal operation. size_t RefPos; uint8_t RefSteps; StringRef RefStepString; if (parseRefinementStep(ArgSub, RefPos, RefSteps)) { // Split the string for further processing. RefStepString = ArgSub.substr(RefPos + 1); ArgSub = ArgSub.substr(0, RefPos); } bool Enable; bool UseDefaults; if (ArgSub == "all") { UseDefaults = false; Enable = true; } else if (ArgSub == "none") { UseDefaults = false; Enable = false; } else if (ArgSub == "default") { UseDefaults = true; } else { // Any other string is invalid or an individual setting. return false; } // All enable values will be initialized to target defaults if 'default' was // specified. if (!UseDefaults) for (auto &KV : RecipMap) KV.second.Enabled = Enable; // Custom refinement count was specified with all, none, or default. if (!RefStepString.empty()) for (auto &KV : RecipMap) KV.second.RefinementSteps = RefSteps; return true; } void TargetRecip::parseIndividualParams(const std::vector<std::string> &Args) { static const char DisabledPrefix = '!'; unsigned NumArgs = Args.size(); for (unsigned i = 0; i != NumArgs; ++i) { StringRef Val = Args[i]; bool IsDisabled = Val[0] == DisabledPrefix; // Ignore the disablement token for string matching. 
if (IsDisabled) Val = Val.substr(1); size_t RefPos; uint8_t RefSteps; StringRef RefStepString; if (parseRefinementStep(Val, RefPos, RefSteps)) { // Split the string for further processing. RefStepString = Val.substr(RefPos + 1); Val = Val.substr(0, RefPos); } RecipIter Iter = RecipMap.find(Val); if (Iter == RecipMap.end()) { // Try again specifying float suffix. Iter = RecipMap.find(Val.str() + 'f'); if (Iter == RecipMap.end()) { Iter = RecipMap.find(Val.str() + 'd'); assert(Iter == RecipMap.end() && "Float entry missing from map"); report_fatal_error("Invalid option for -recip."); } // The option was specified without a float or double suffix. if (RecipMap[Val.str() + 'd'].Enabled != Uninitialized) { // Make sure that the double entry was not already specified. // The float entry will be checked below. report_fatal_error("Duplicate option for -recip."); } } if (Iter->second.Enabled != Uninitialized) report_fatal_error("Duplicate option for -recip."); // Mark the matched option as found. Do not allow duplicate specifiers. Iter->second.Enabled = !IsDisabled; if (!RefStepString.empty()) Iter->second.RefinementSteps = RefSteps; // If the precision was not specified, the double entry is also initialized. if (Val.back() != 'f' && Val.back() != 'd') { RecipMap[Val.str() + 'd'].Enabled = !IsDisabled; if (!RefStepString.empty()) RecipMap[Val.str() + 'd'].RefinementSteps = RefSteps; } } } TargetRecip::TargetRecip(const std::vector<std::string> &Args) : TargetRecip() { unsigned NumArgs = Args.size(); // Check if "all", "default", or "none" was specified. if (NumArgs == 1 && parseGlobalParams(Args[0])) return; parseIndividualParams(Args); } bool TargetRecip::isEnabled(const StringRef &Key) const { ConstRecipIter Iter = RecipMap.find(Key); assert(Iter != RecipMap.end() && "Unknown name for reciprocal map"); assert(Iter->second.Enabled != Uninitialized && "Enablement setting was not initialized"); return Iter->second.Enabled; } unsigned TargetRecip::getRefinementSteps(const StringRef &Key) const { ConstRecipIter Iter = RecipMap.find(Key); assert(Iter != RecipMap.end() && "Unknown name for reciprocal map"); assert(Iter->second.RefinementSteps != Uninitialized && "Refinement step setting was not initialized"); return Iter->second.RefinementSteps; } /// Custom settings (previously initialized values) override target defaults. void TargetRecip::setDefaults(const StringRef &Key, bool Enable, unsigned RefSteps) { if (Key == "all") { for (auto &KV : RecipMap) { RecipParams &RP = KV.second; if (RP.Enabled == Uninitialized) RP.Enabled = Enable; if (RP.RefinementSteps == Uninitialized) RP.RefinementSteps = RefSteps; } } else { RecipParams &RP = RecipMap[Key]; if (RP.Enabled == Uninitialized) RP.Enabled = Enable; if (RP.RefinementSteps == Uninitialized) RP.RefinementSteps = RefSteps; } } bool TargetRecip::operator==(const TargetRecip &Other) const { for (const auto &KV : RecipMap) { const StringRef &Op = KV.first; const RecipParams &RP = KV.second; const RecipParams &OtherRP = Other.RecipMap.find(Op)->second; if (RP.RefinementSteps != OtherRP.RefinementSteps) return false; if (RP.Enabled != OtherRP.Enabled) return false; } return true; }
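// Illustrative sketch: how the parsing above behaves for a -recip option that
// has already been split into individual strings. The argument values are
// hypothetical; the keys match the RecipOps table at the top of this file.
static void recipParsingExample() {
  std::vector<std::string> Args = {"divf:2", "!sqrtd"};
  TargetRecip Recips(Args);            // takes the parseIndividualParams path
  Recips.setDefaults("all", false, 1); // fill any still-uninitialized entries

  bool DivF = Recips.isEnabled("divf");                    // true
  unsigned DivFSteps = Recips.getRefinementSteps("divf");  // 2 (from "divf:2")
  bool SqrtD = Recips.isEnabled("sqrtd");                  // false ('!' prefix)
  (void)DivF; (void)DivFSteps; (void)SqrtD;
}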
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Target/TargetSubtargetInfo.cpp
//===-- TargetSubtargetInfo.cpp - General Target Information ---------------==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the general parts of a Subtarget.
//
//===----------------------------------------------------------------------===//

#include "llvm/Support/CommandLine.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;

//---------------------------------------------------------------------------
// TargetSubtargetInfo Class
//
TargetSubtargetInfo::TargetSubtargetInfo(
    const Triple &TT, StringRef CPU, StringRef FS,
    ArrayRef<SubtargetFeatureKV> PF, ArrayRef<SubtargetFeatureKV> PD,
    const SubtargetInfoKV *ProcSched, const MCWriteProcResEntry *WPR,
    const MCWriteLatencyEntry *WL, const MCReadAdvanceEntry *RA,
    const InstrStage *IS, const unsigned *OC, const unsigned *FP)
    : MCSubtargetInfo(TT, CPU, FS, PF, PD, ProcSched, WPR, WL, RA, IS, OC, FP) {
}

TargetSubtargetInfo::~TargetSubtargetInfo() {}

bool TargetSubtargetInfo::enableAtomicExpand() const {
  return true;
}

bool TargetSubtargetInfo::enableMachineScheduler() const {
  return false;
}

bool TargetSubtargetInfo::enableJoinGlobalCopies() const {
  return enableMachineScheduler();
}

bool TargetSubtargetInfo::enableRALocalReassignment(
    CodeGenOpt::Level OptLevel) const {
  return true;
}

bool TargetSubtargetInfo::enablePostRAScheduler() const {
  return getSchedModel().PostRAScheduler;
}

bool TargetSubtargetInfo::useAA() const {
  return false;
}
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Target/TargetMachine.cpp
//===-- TargetMachine.cpp - General Target Information ---------------------==// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file describes the general parts of a Target machine. // //===----------------------------------------------------------------------===// #include "llvm/Target/TargetMachine.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/IR/Function.h" #include "llvm/IR/GlobalAlias.h" #include "llvm/IR/GlobalValue.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/Mangler.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCCodeGenInfo.h" #include "llvm/MC/MCContext.h" #include "llvm/MC/MCInstrInfo.h" #include "llvm/MC/MCSectionMachO.h" #include "llvm/MC/MCTargetOptions.h" #include "llvm/MC/SectionKind.h" #include "llvm/IR/LegacyPassManager.h" #include "llvm/Support/CommandLine.h" #include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Target/TargetSubtargetInfo.h" using namespace llvm; //--------------------------------------------------------------------------- // TargetMachine Class // TargetMachine::TargetMachine(const Target &T, StringRef DataLayoutString, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options) : TheTarget(T), DL(DataLayoutString), TargetTriple(TT), TargetCPU(CPU), TargetFS(FS), CodeGenInfo(nullptr), AsmInfo(nullptr), MRI(nullptr), MII(nullptr), STI(nullptr), RequireStructuredCFG(false), Options(Options) {} TargetMachine::~TargetMachine() { delete CodeGenInfo; delete AsmInfo; delete MRI; delete MII; delete STI; } /// \brief Reset the target options based on the function's attributes. // FIXME: This function needs to go away for a number of reasons: // a) global state on the TargetMachine is terrible in general, // b) there's no default state here to keep, // c) these target options should be passed only on the function // and not on the TargetMachine (via TargetOptions) at all. void TargetMachine::resetTargetOptions(const Function &F) const { #define RESET_OPTION(X, Y) \ do { \ if (F.hasFnAttribute(Y)) \ Options.X = (F.getFnAttribute(Y).getValueAsString() == "true"); \ } while (0) RESET_OPTION(LessPreciseFPMADOption, "less-precise-fpmad"); RESET_OPTION(UnsafeFPMath, "unsafe-fp-math"); RESET_OPTION(NoInfsFPMath, "no-infs-fp-math"); RESET_OPTION(NoNaNsFPMath, "no-nans-fp-math"); } /// getRelocationModel - Returns the code generation relocation model. The /// choices are static, PIC, and dynamic-no-pic, and target default. Reloc::Model TargetMachine::getRelocationModel() const { if (!CodeGenInfo) return Reloc::Default; return CodeGenInfo->getRelocationModel(); } /// getCodeModel - Returns the code model. The choices are small, kernel, /// medium, large, and target default. CodeModel::Model TargetMachine::getCodeModel() const { if (!CodeGenInfo) return CodeModel::Default; return CodeGenInfo->getCodeModel(); } /// Get the IR-specified TLS model for Var. 
static TLSModel::Model getSelectedTLSModel(const GlobalValue *GV) { switch (GV->getThreadLocalMode()) { case GlobalVariable::NotThreadLocal: llvm_unreachable("getSelectedTLSModel for non-TLS variable"); break; case GlobalVariable::GeneralDynamicTLSModel: return TLSModel::GeneralDynamic; case GlobalVariable::LocalDynamicTLSModel: return TLSModel::LocalDynamic; case GlobalVariable::InitialExecTLSModel: return TLSModel::InitialExec; case GlobalVariable::LocalExecTLSModel: return TLSModel::LocalExec; } llvm_unreachable("invalid TLS model"); } TLSModel::Model TargetMachine::getTLSModel(const GlobalValue *GV) const { bool isLocal = GV->hasLocalLinkage(); bool isDeclaration = GV->isDeclaration(); bool isPIC = getRelocationModel() == Reloc::PIC_; bool isPIE = Options.PositionIndependentExecutable; // FIXME: what should we do for protected and internal visibility? // For variables, is internal different from hidden? bool isHidden = GV->hasHiddenVisibility(); TLSModel::Model Model; if (isPIC && !isPIE) { if (isLocal || isHidden) Model = TLSModel::LocalDynamic; else Model = TLSModel::GeneralDynamic; } else { if (!isDeclaration || isHidden) Model = TLSModel::LocalExec; else Model = TLSModel::InitialExec; } // If the user specified a more specific model, use that. TLSModel::Model SelectedModel = getSelectedTLSModel(GV); if (SelectedModel > Model) return SelectedModel; return Model; } /// getOptLevel - Returns the optimization level: None, Less, /// Default, or Aggressive. CodeGenOpt::Level TargetMachine::getOptLevel() const { if (!CodeGenInfo) return CodeGenOpt::Default; return CodeGenInfo->getOptLevel(); } void TargetMachine::setOptLevel(CodeGenOpt::Level Level) const { if (CodeGenInfo) CodeGenInfo->setOptLevel(Level); } TargetIRAnalysis TargetMachine::getTargetIRAnalysis() { return TargetIRAnalysis([](Function &F) { return TargetTransformInfo(F.getParent()->getDataLayout()); }); } static bool canUsePrivateLabel(const MCAsmInfo &AsmInfo, const MCSection &Section) { if (!AsmInfo.isSectionAtomizableBySymbols(Section)) return true; // If it is not dead stripped, it is safe to use private labels. const MCSectionMachO &SMO = cast<MCSectionMachO>(Section); if (SMO.hasAttribute(MachO::S_ATTR_NO_DEAD_STRIP)) return true; return false; } void TargetMachine::getNameWithPrefix(SmallVectorImpl<char> &Name, const GlobalValue *GV, Mangler &Mang, bool MayAlwaysUsePrivate) const { if (MayAlwaysUsePrivate || !GV->hasPrivateLinkage()) { // Simple case: If GV is not private, it is not important to find out if // private labels are legal in this case or not. Mang.getNameWithPrefix(Name, GV, false); return; } SectionKind GVKind = TargetLoweringObjectFile::getKindForGlobal(GV, *this); const TargetLoweringObjectFile *TLOF = getObjFileLowering(); const MCSection *TheSection = TLOF->SectionForGlobal(GV, GVKind, Mang, *this); bool CannotUsePrivateLabel = !canUsePrivateLabel(*AsmInfo, *TheSection); TLOF->getNameWithPrefix(Name, GV, CannotUsePrivateLabel, Mang, *this); } MCSymbol *TargetMachine::getSymbol(const GlobalValue *GV, Mangler &Mang) const { SmallString<128> NameStr; getNameWithPrefix(NameStr, GV, Mang); const TargetLoweringObjectFile *TLOF = getObjFileLowering(); return TLOF->getContext().getOrCreateSymbol(NameStr); }
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Target/Target.cpp
//===-- Target.cpp --------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the common infrastructure (including C bindings) for // libLLVMTarget.a, which implements target information. // //===----------------------------------------------------------------------===// #include "llvm-c/Target.h" #include "llvm-c/Initialization.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Value.h" #include "llvm/InitializePasses.h" #include "llvm/IR/LegacyPassManager.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include <cstring> using namespace llvm; inline TargetLibraryInfoImpl *unwrap(LLVMTargetLibraryInfoRef P) { return reinterpret_cast<TargetLibraryInfoImpl*>(P); } inline LLVMTargetLibraryInfoRef wrap(const TargetLibraryInfoImpl *P) { TargetLibraryInfoImpl *X = const_cast<TargetLibraryInfoImpl*>(P); return reinterpret_cast<LLVMTargetLibraryInfoRef>(X); } void llvm::initializeTarget(PassRegistry &Registry) { initializeTargetLibraryInfoWrapperPassPass(Registry); initializeTargetTransformInfoWrapperPassPass(Registry); } void LLVMInitializeTarget(LLVMPassRegistryRef R) { initializeTarget(*unwrap(R)); } LLVMTargetDataRef LLVMCreateTargetData(const char *StringRep) { return wrap(new DataLayout(StringRep)); } void LLVMAddTargetData(LLVMTargetDataRef TD, LLVMPassManagerRef PM) { } void LLVMAddTargetLibraryInfo(LLVMTargetLibraryInfoRef TLI, LLVMPassManagerRef PM) { unwrap(PM)->add(new TargetLibraryInfoWrapperPass(*unwrap(TLI))); } char *LLVMCopyStringRepOfTargetData(LLVMTargetDataRef TD) { std::string StringRep = unwrap(TD)->getStringRepresentation(); return _strdup(StringRep.c_str()); // HLSL Change strdup to _strdup } LLVMByteOrdering LLVMByteOrder(LLVMTargetDataRef TD) { return unwrap(TD)->isLittleEndian() ? 
LLVMLittleEndian : LLVMBigEndian; } unsigned LLVMPointerSize(LLVMTargetDataRef TD) { return unwrap(TD)->getPointerSize(0); } unsigned LLVMPointerSizeForAS(LLVMTargetDataRef TD, unsigned AS) { return unwrap(TD)->getPointerSize(AS); } LLVMTypeRef LLVMIntPtrType(LLVMTargetDataRef TD) { return wrap(unwrap(TD)->getIntPtrType(getGlobalContext())); } LLVMTypeRef LLVMIntPtrTypeForAS(LLVMTargetDataRef TD, unsigned AS) { return wrap(unwrap(TD)->getIntPtrType(getGlobalContext(), AS)); } LLVMTypeRef LLVMIntPtrTypeInContext(LLVMContextRef C, LLVMTargetDataRef TD) { return wrap(unwrap(TD)->getIntPtrType(*unwrap(C))); } LLVMTypeRef LLVMIntPtrTypeForASInContext(LLVMContextRef C, LLVMTargetDataRef TD, unsigned AS) { return wrap(unwrap(TD)->getIntPtrType(*unwrap(C), AS)); } unsigned long long LLVMSizeOfTypeInBits(LLVMTargetDataRef TD, LLVMTypeRef Ty) { return unwrap(TD)->getTypeSizeInBits(unwrap(Ty)); } unsigned long long LLVMStoreSizeOfType(LLVMTargetDataRef TD, LLVMTypeRef Ty) { return unwrap(TD)->getTypeStoreSize(unwrap(Ty)); } unsigned long long LLVMABISizeOfType(LLVMTargetDataRef TD, LLVMTypeRef Ty) { return unwrap(TD)->getTypeAllocSize(unwrap(Ty)); } unsigned LLVMABIAlignmentOfType(LLVMTargetDataRef TD, LLVMTypeRef Ty) { return unwrap(TD)->getABITypeAlignment(unwrap(Ty)); } unsigned LLVMCallFrameAlignmentOfType(LLVMTargetDataRef TD, LLVMTypeRef Ty) { return unwrap(TD)->getABITypeAlignment(unwrap(Ty)); } unsigned LLVMPreferredAlignmentOfType(LLVMTargetDataRef TD, LLVMTypeRef Ty) { return unwrap(TD)->getPrefTypeAlignment(unwrap(Ty)); } unsigned LLVMPreferredAlignmentOfGlobal(LLVMTargetDataRef TD, LLVMValueRef GlobalVar) { return unwrap(TD)->getPreferredAlignment(unwrap<GlobalVariable>(GlobalVar)); } unsigned LLVMElementAtOffset(LLVMTargetDataRef TD, LLVMTypeRef StructTy, unsigned long long Offset) { StructType *STy = unwrap<StructType>(StructTy); return unwrap(TD)->getStructLayout(STy)->getElementContainingOffset(Offset); } unsigned long long LLVMOffsetOfElement(LLVMTargetDataRef TD, LLVMTypeRef StructTy, unsigned Element) { StructType *STy = unwrap<StructType>(StructTy); return unwrap(TD)->getStructLayout(STy)->getElementOffset(Element); } void LLVMDisposeTargetData(LLVMTargetDataRef TD) { delete unwrap(TD); }
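// Illustrative sketch: exercising the target-data bindings defined above with
// a hypothetical data-layout string (real strings normally come from a
// TargetMachine). Assumes `C` is a live LLVMContextRef.
static void targetDataExample(LLVMContextRef C) {
  LLVMTargetDataRef TD = LLVMCreateTargetData("e-m:e-i64:64-n8:16:32:64");
  LLVMTypeRef IntPtr = LLVMIntPtrTypeInContext(C, TD); // pointer-sized integer
  unsigned long long Bits = LLVMSizeOfTypeInBits(TD, IntPtr);
  unsigned ABIAlign = LLVMABIAlignmentOfType(TD, IntPtr);
  (void)Bits; (void)ABIAlign;
  LLVMDisposeTargetData(TD);
}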
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Target/CMakeLists.txt
list(APPEND LLVM_COMMON_DEPENDS intrinsics_gen)

add_llvm_library(LLVMTarget
  Target.cpp
  TargetIntrinsicInfo.cpp
  TargetLoweringObjectFile.cpp
  TargetMachine.cpp
  TargetMachineC.cpp
  TargetRecip.cpp
  TargetSubtargetInfo.cpp

  ADDITIONAL_HEADER_DIRS
  ${LLVM_MAIN_INCLUDE_DIR}/llvm/Target
  )

foreach(t ${LLVM_TARGETS_TO_BUILD})
  if (NOT t STREQUAL "None") # HLSL Change
    message(STATUS "Targeting ${t}")
    add_subdirectory(${t})
  endif() # HLSL Change
endforeach()
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Target/LLVMBuild.txt
;===- ./lib/Target/LLVMBuild.txt -------------------------------*- Conf -*--===;
;
;                     The LLVM Compiler Infrastructure
;
; This file is distributed under the University of Illinois Open Source
; License. See LICENSE.TXT for details.
;
;===------------------------------------------------------------------------===;
;
; This is an LLVMBuild description file for the components in this subdirectory.
;
; For more information on the LLVMBuild system, please see:
;
;   http://llvm.org/docs/LLVMBuild.html
;
;===------------------------------------------------------------------------===;

; Please keep these as one per line so that out-of-tree merges
; will typically require only insertion of a line.
[common]
subdirectories = # HLSL Change: remove ARM, AArch64, BPF, CppBackend, Hexagon, MSP430, Mips, PowerPC, Sparc, SystemZ, WebAssembly, X86, XCore, NVPTX, AMDGPU

; This is a special group whose required libraries are extended (by llvm-build)
; with the best execution engine (the native JIT, if available, or the
; interpreter).
[component_0]
type = LibraryGroup
name = Engine
parent = Libraries

; This is a special group whose required libraries are extended (by llvm-build)
; with the configured native target, if any.
[component_1]
type = LibraryGroup
name = Native
parent = Libraries

; This is a special group whose required libraries are extended (by llvm-build)
; with the configured native code generator, if any.
[component_2]
type = LibraryGroup
name = NativeCodeGen
parent = Libraries

; The component for the actual target library itself.
[component_3]
type = Library
name = Target
parent = Libraries
required_libraries = Analysis Core Support ; MC - HLSL Change

; This is a special group whose required libraries are extended (by llvm-build)
; with every built target, which makes it easy for tools to include every
; target.
[component_4]
type = LibraryGroup
name = all-targets
parent = Libraries
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Target/README.txt
Target Independent Opportunities: //===---------------------------------------------------------------------===// We should recognized various "overflow detection" idioms and translate them into llvm.uadd.with.overflow and similar intrinsics. Here is a multiply idiom: unsigned int mul(unsigned int a,unsigned int b) { if ((unsigned long long)a*b>0xffffffff) exit(0); return a*b; } The legalization code for mul-with-overflow needs to be made more robust before this can be implemented though. //===---------------------------------------------------------------------===// Get the C front-end to expand hypot(x,y) -> llvm.sqrt(x*x+y*y) when errno and precision don't matter (ffastmath). Misc/mandel will like this. :) This isn't safe in general, even on darwin. See the libm implementation of hypot for examples (which special case when x/y are exactly zero to get signed zeros etc right). //===---------------------------------------------------------------------===// On targets with expensive 64-bit multiply, we could LSR this: for (i = ...; ++i) { x = 1ULL << i; into: long long tmp = 1; for (i = ...; ++i, tmp+=tmp) x = tmp; This would be a win on ppc32, but not x86 or ppc64. //===---------------------------------------------------------------------===// Shrink: (setlt (loadi32 P), 0) -> (setlt (loadi8 Phi), 0) //===---------------------------------------------------------------------===// Reassociate should turn things like: int factorial(int X) { return X*X*X*X*X*X*X*X; } into llvm.powi calls, allowing the code generator to produce balanced multiplication trees. First, the intrinsic needs to be extended to support integers, and second the code generator needs to be enhanced to lower these to multiplication trees. //===---------------------------------------------------------------------===// Interesting? testcase for add/shift/mul reassoc: int bar(int x, int y) { return x*x*x+y+x*x*x*x*x*y*y*y*y; } int foo(int z, int n) { return bar(z, n) + bar(2*z, 2*n); } This is blocked on not handling X*X*X -> powi(X, 3) (see note above). The issue is that we end up getting t = 2*X s = t*t and don't turn this into 4*X*X, which is the same number of multiplies and is canonical, because the 2*X has multiple uses. Here's a simple example: define i32 @test15(i32 %X1) { %B = mul i32 %X1, 47 ; X1*47 %C = mul i32 %B, %B ret i32 %C } //===---------------------------------------------------------------------===// Reassociate should handle the example in GCC PR16157: extern int a0, a1, a2, a3, a4; extern int b0, b1, b2, b3, b4; void f () { /* this can be optimized to four additions... */ b4 = a4 + a3 + a2 + a1 + a0; b3 = a3 + a2 + a1 + a0; b2 = a2 + a1 + a0; b1 = a1 + a0; } This requires reassociating to forms of expressions that are already available, something that reassoc doesn't think about yet. //===---------------------------------------------------------------------===// These two functions should generate the same code on big-endian systems: int g(int *j,int *l) { return memcmp(j,l,4); } int h(int *j, int *l) { return *j - *l; } this could be done in SelectionDAGISel.cpp, along with other special cases, for 1,2,4,8 bytes. //===---------------------------------------------------------------------===// It would be nice to revert this patch: http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20060213/031986.html And teach the dag combiner enough to simplify the code expanded before legalize. It seems plausible that this knowledge would let it simplify other stuff too. 
//===---------------------------------------------------------------------===// For vector types, DataLayout.cpp::getTypeInfo() returns alignment that is equal to the type size. It works but can be overly conservative as the alignment of specific vector types are target dependent. //===---------------------------------------------------------------------===// We should produce an unaligned load from code like this: v4sf example(float *P) { return (v4sf){P[0], P[1], P[2], P[3] }; } //===---------------------------------------------------------------------===// Add support for conditional increments, and other related patterns. Instead of: movl 136(%esp), %eax cmpl $0, %eax je LBB16_2 #cond_next LBB16_1: #cond_true incl _foo LBB16_2: #cond_next emit: movl _foo, %eax cmpl $1, %edi sbbl $-1, %eax movl %eax, _foo //===---------------------------------------------------------------------===// Combine: a = sin(x), b = cos(x) into a,b = sincos(x). Expand these to calls of sin/cos and stores: double sincos(double x, double *sin, double *cos); float sincosf(float x, float *sin, float *cos); long double sincosl(long double x, long double *sin, long double *cos); Doing so could allow SROA of the destination pointers. See also: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=17687 This is now easily doable with MRVs. We could even make an intrinsic for this if anyone cared enough about sincos. //===---------------------------------------------------------------------===// quantum_sigma_x in 462.libquantum contains the following loop: for(i=0; i<reg->size; i++) { /* Flip the target bit of each basis state */ reg->node[i].state ^= ((MAX_UNSIGNED) 1 << target); } Where MAX_UNSIGNED/state is a 64-bit int. On a 32-bit platform it would be just so cool to turn it into something like: long long Res = ((MAX_UNSIGNED) 1 << target); if (target < 32) { for(i=0; i<reg->size; i++) reg->node[i].state ^= Res & 0xFFFFFFFFULL; } else { for(i=0; i<reg->size; i++) reg->node[i].state ^= Res & 0xFFFFFFFF00000000ULL } ... which would only do one 32-bit XOR per loop iteration instead of two. It would also be nice to recognize the reg->size doesn't alias reg->node[i], but this requires TBAA. //===---------------------------------------------------------------------===// This isn't recognized as bswap by instcombine (yes, it really is bswap): unsigned long reverse(unsigned v) { unsigned t; t = v ^ ((v << 16) | (v >> 16)); t &= ~0xff0000; v = (v << 24) | (v >> 8); return v ^ (t >> 8); } //===---------------------------------------------------------------------===// [LOOP DELETION] We don't delete this output free loop, because trip count analysis doesn't realize that it is finite (if it were infinite, it would be undefined). Not having this blocks Loop Idiom from matching strlen and friends. void foo(char *C) { int x = 0; while (*C) ++x,++C; } //===---------------------------------------------------------------------===// [LOOP RECOGNITION] These idioms should be recognized as popcount (see PR1488): unsigned countbits_slow(unsigned v) { unsigned c; for (c = 0; v; v >>= 1) c += v & 1; return c; } unsigned int popcount(unsigned int input) { unsigned int count = 0; for (unsigned int i = 0; i < 4 * 8; i++) count += (input >> i) & i; return count; } This should be recognized as CLZ: rdar://8459039 unsigned clz_a(unsigned a) { int i; for (i=0;i<32;i++) if (a & (1<<(31-i))) return i; return 32; } This sort of thing should be added to the loop idiom pass. 
//===---------------------------------------------------------------------===// These should turn into single 16-bit (unaligned?) loads on little/big endian processors. unsigned short read_16_le(const unsigned char *adr) { return adr[0] | (adr[1] << 8); } unsigned short read_16_be(const unsigned char *adr) { return (adr[0] << 8) | adr[1]; } //===---------------------------------------------------------------------===// -instcombine should handle this transform: icmp pred (sdiv X / C1 ), C2 when X, C1, and C2 are unsigned. Similarly for udiv and signed operands. Currently InstCombine avoids this transform but will do it when the signs of the operands and the sign of the divide match. See the FIXME in InstructionCombining.cpp in the visitSetCondInst method after the switch case for Instruction::UDiv (around line 4447) for more details. The SingleSource/Benchmarks/Shootout-C++/hash and hash2 tests have examples of this construct. //===---------------------------------------------------------------------===// [LOOP OPTIMIZATION] SingleSource/Benchmarks/Misc/dt.c shows several interesting optimization opportunities in its double_array_divs_variable function: it needs loop interchange, memory promotion (which LICM already does), vectorization and variable trip count loop unrolling (since it has a constant trip count). ICC apparently produces this very nice code with -ffast-math: ..B1.70: # Preds ..B1.70 ..B1.69 mulpd %xmm0, %xmm1 #108.2 mulpd %xmm0, %xmm1 #108.2 mulpd %xmm0, %xmm1 #108.2 mulpd %xmm0, %xmm1 #108.2 addl $8, %edx # cmpl $131072, %edx #108.2 jb ..B1.70 # Prob 99% #108.2 It would be better to count down to zero, but this is a lot better than what we do. //===---------------------------------------------------------------------===// Consider: typedef unsigned U32; typedef unsigned long long U64; int test (U32 *inst, U64 *regs) { U64 effective_addr2; U32 temp = *inst; int r1 = (temp >> 20) & 0xf; int b2 = (temp >> 16) & 0xf; effective_addr2 = temp & 0xfff; if (b2) effective_addr2 += regs[b2]; b2 = (temp >> 12) & 0xf; if (b2) effective_addr2 += regs[b2]; effective_addr2 &= regs[4]; if ((effective_addr2 & 3) == 0) return 1; return 0; } Note that only the low 2 bits of effective_addr2 are used. On 32-bit systems, we don't eliminate the computation of the top half of effective_addr2 because we don't have whole-function selection dags. On x86, this means we use one extra register for the function when effective_addr2 is declared as U64 than when it is declared U32. PHI Slicing could be extended to do this. //===---------------------------------------------------------------------===// Tail call elim should be more aggressive, checking to see if the call is followed by an uncond branch to an exit block. ; This testcase is due to tail-duplication not wanting to copy the return ; instruction into the terminating blocks because there was other code ; optimized out of the function after the taildup happened. 
; RUN: llvm-as < %s | opt -tailcallelim | llvm-dis | not grep call define i32 @t4(i32 %a) { entry: %tmp.1 = and i32 %a, 1 ; <i32> [#uses=1] %tmp.2 = icmp ne i32 %tmp.1, 0 ; <i1> [#uses=1] br i1 %tmp.2, label %then.0, label %else.0 then.0: ; preds = %entry %tmp.5 = add i32 %a, -1 ; <i32> [#uses=1] %tmp.3 = call i32 @t4( i32 %tmp.5 ) ; <i32> [#uses=1] br label %return else.0: ; preds = %entry %tmp.7 = icmp ne i32 %a, 0 ; <i1> [#uses=1] br i1 %tmp.7, label %then.1, label %return then.1: ; preds = %else.0 %tmp.11 = add i32 %a, -2 ; <i32> [#uses=1] %tmp.9 = call i32 @t4( i32 %tmp.11 ) ; <i32> [#uses=1] br label %return return: ; preds = %then.1, %else.0, %then.0 %result.0 = phi i32 [ 0, %else.0 ], [ %tmp.3, %then.0 ], [ %tmp.9, %then.1 ] ret i32 %result.0 } //===---------------------------------------------------------------------===// Tail recursion elimination should handle: int pow2m1(int n) { if (n == 0) return 0; return 2 * pow2m1 (n - 1) + 1; } Also, multiplies can be turned into SHL's, so they should be handled as if they were associative. "return foo() << 1" can be tail recursion eliminated. //===---------------------------------------------------------------------===// Argument promotion should promote arguments for recursive functions, like this: ; RUN: llvm-as < %s | opt -argpromotion | llvm-dis | grep x.val define internal i32 @foo(i32* %x) { entry: %tmp = load i32* %x ; <i32> [#uses=0] %tmp.foo = call i32 @foo( i32* %x ) ; <i32> [#uses=1] ret i32 %tmp.foo } define i32 @bar(i32* %x) { entry: %tmp3 = call i32 @foo( i32* %x ) ; <i32> [#uses=1] ret i32 %tmp3 } //===---------------------------------------------------------------------===// We should investigate an instruction sinking pass. Consider this silly example in pic mode: #include <assert.h> void foo(int x) { assert(x); //... } we compile this to: _foo: subl $28, %esp call "L1$pb" "L1$pb": popl %eax cmpl $0, 32(%esp) je LBB1_2 # cond_true LBB1_1: # return # ... addl $28, %esp ret LBB1_2: # cond_true ... The PIC base computation (call+popl) is only used on one path through the code, but is currently always computed in the entry block. It would be better to sink the picbase computation down into the block for the assertion, as it is the only one that uses it. This happens for a lot of code with early outs. Another example is loads of arguments, which are usually emitted into the entry block on targets like x86. If not used in all paths through a function, they should be sunk into the ones that do. In this case, whole-function-isel would also handle this. //===---------------------------------------------------------------------===// Investigate lowering of sparse switch statements into perfect hash tables: http://burtleburtle.net/bob/hash/perfect.html //===---------------------------------------------------------------------===// We should turn things like "load+fabs+store" and "load+fneg+store" into the corresponding integer operations. On a yonah, this loop: double a[256]; void foo() { int i, b; for (b = 0; b < 10000000; b++) for (i = 0; i < 256; i++) a[i] = -a[i]; } is twice as slow as this loop: long long a[256]; void foo() { int i, b; for (b = 0; b < 10000000; b++) for (i = 0; i < 256; i++) a[i] ^= (1ULL << 63); } and I suspect other processors are similar. On X86 in particular this is a big win because doing this with integers allows the use of read/modify/write instructions. 
//===---------------------------------------------------------------------===// DAG Combiner should try to combine small loads into larger loads when profitable. For example, we compile this C++ example: struct THotKey { short Key; bool Control; bool Shift; bool Alt; }; extern THotKey m_HotKey; THotKey GetHotKey () { return m_HotKey; } into (-m64 -O3 -fno-exceptions -static -fomit-frame-pointer): __Z9GetHotKeyv: ## @_Z9GetHotKeyv movq _m_HotKey@GOTPCREL(%rip), %rax movzwl (%rax), %ecx movzbl 2(%rax), %edx shlq $16, %rdx orq %rcx, %rdx movzbl 3(%rax), %ecx shlq $24, %rcx orq %rdx, %rcx movzbl 4(%rax), %eax shlq $32, %rax orq %rcx, %rax ret //===---------------------------------------------------------------------===// We should add an FRINT node to the DAG to model targets that have legal implementations of ceil/floor/rint. //===---------------------------------------------------------------------===// Consider: int test() { long long input[8] = {1,0,1,0,1,0,1,0}; foo(input); } Clang compiles this into: call void @llvm.memset.p0i8.i64(i8* %tmp, i8 0, i64 64, i32 16, i1 false) %0 = getelementptr [8 x i64]* %input, i64 0, i64 0 store i64 1, i64* %0, align 16 %1 = getelementptr [8 x i64]* %input, i64 0, i64 2 store i64 1, i64* %1, align 16 %2 = getelementptr [8 x i64]* %input, i64 0, i64 4 store i64 1, i64* %2, align 16 %3 = getelementptr [8 x i64]* %input, i64 0, i64 6 store i64 1, i64* %3, align 16 Which gets codegen'd into: pxor %xmm0, %xmm0 movaps %xmm0, -16(%rbp) movaps %xmm0, -32(%rbp) movaps %xmm0, -48(%rbp) movaps %xmm0, -64(%rbp) movq $1, -64(%rbp) movq $1, -48(%rbp) movq $1, -32(%rbp) movq $1, -16(%rbp) It would be better to have 4 movq's of 0 instead of the movaps's. //===---------------------------------------------------------------------===// http://llvm.org/PR717: The following code should compile into "ret int undef". Instead, LLVM produces "ret int 0": int f() { int x = 4; int y; if (x == 3) y = 0; return y; } //===---------------------------------------------------------------------===// The loop unroller should partially unroll loops (instead of peeling them) when code growth isn't too bad and when an unroll count allows simplification of some code within the loop. One trivial example is: #include <stdio.h> int main() { int nRet = 17; int nLoop; for ( nLoop = 0; nLoop < 1000; nLoop++ ) { if ( nLoop & 1 ) nRet += 2; else nRet -= 1; } return nRet; } Unrolling by 2 would eliminate the '&1' in both copies, leading to a net reduction in code size. The resultant code would then also be suitable for exit value computation. //===---------------------------------------------------------------------===// We miss a bunch of rotate opportunities on various targets, including ppc, x86, etc. On X86, we miss a bunch of 'rotate by variable' cases because the rotate matching code in dag combine doesn't look through truncates aggressively enough. 
Here are some testcases reduces from GCC PR17886: unsigned long long f5(unsigned long long x, unsigned long long y) { return (x << 8) | ((y >> 48) & 0xffull); } unsigned long long f6(unsigned long long x, unsigned long long y, int z) { switch(z) { case 1: return (x << 8) | ((y >> 48) & 0xffull); case 2: return (x << 16) | ((y >> 40) & 0xffffull); case 3: return (x << 24) | ((y >> 32) & 0xffffffull); case 4: return (x << 32) | ((y >> 24) & 0xffffffffull); default: return (x << 40) | ((y >> 16) & 0xffffffffffull); } } //===---------------------------------------------------------------------===// This (and similar related idioms): unsigned int foo(unsigned char i) { return i | (i<<8) | (i<<16) | (i<<24); } compiles into: define i32 @foo(i8 zeroext %i) nounwind readnone ssp noredzone { entry: %conv = zext i8 %i to i32 %shl = shl i32 %conv, 8 %shl5 = shl i32 %conv, 16 %shl9 = shl i32 %conv, 24 %or = or i32 %shl9, %conv %or6 = or i32 %or, %shl5 %or10 = or i32 %or6, %shl ret i32 %or10 } it would be better as: unsigned int bar(unsigned char i) { unsigned int j=i | (i << 8); return j | (j<<16); } aka: define i32 @bar(i8 zeroext %i) nounwind readnone ssp noredzone { entry: %conv = zext i8 %i to i32 %shl = shl i32 %conv, 8 %or = or i32 %shl, %conv %shl5 = shl i32 %or, 16 %or6 = or i32 %shl5, %or ret i32 %or6 } or even i*0x01010101, depending on the speed of the multiplier. The best way to handle this is to canonicalize it to a multiply in IR and have codegen handle lowering multiplies to shifts on cpus where shifts are faster. //===---------------------------------------------------------------------===// We do a number of simplifications in simplify libcalls to strength reduce standard library functions, but we don't currently merge them together. For example, it is useful to merge memcpy(a,b,strlen(b)) -> strcpy. This can only be done safely if "b" isn't modified between the strlen and memcpy of course. //===---------------------------------------------------------------------===// We compile this program: (from GCC PR11680) http://gcc.gnu.org/bugzilla/attachment.cgi?id=4487 Into code that runs the same speed in fast/slow modes, but both modes run 2x slower than when compile with GCC (either 4.0 or 4.2): $ llvm-g++ perf.cpp -O3 -fno-exceptions $ time ./a.out fast 1.821u 0.003s 0:01.82 100.0% 0+0k 0+0io 0pf+0w $ g++ perf.cpp -O3 -fno-exceptions $ time ./a.out fast 0.821u 0.001s 0:00.82 100.0% 0+0k 0+0io 0pf+0w It looks like we are making the same inlining decisions, so this may be raw codegen badness or something else (haven't investigated). //===---------------------------------------------------------------------===// Divisibility by constant can be simplified (according to GCC PR12849) from being a mulhi to being a mul lo (cheaper). Testcase: void bar(unsigned n) { if (n % 3 == 0) true(); } This is equivalent to the following, where 2863311531 is the multiplicative inverse of 3, and 1431655766 is ((2^32)-1)/3+1: void bar(unsigned n) { if (n * 2863311531U < 1431655766U) true(); } The same transformation can work with an even modulo with the addition of a rotate: rotate the result of the multiply to the right by the number of bits which need to be zero for the condition to be true, and shrink the compare RHS by the same amount. Unless the target supports rotates, though, that transformation probably isn't worthwhile. The transformation can also easily be made to work with non-zero equality comparisons: just transform, for example, "n % 3 == 1" to "(n-1) % 3 == 0". 
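As a concrete sketch of the even-modulo case described above (an illustration,
not an existing testcase), "n % 6 == 0" becomes a multiply by the inverse of
the odd factor (3), a rotate right by one (the power-of-two part of 6), and a
compare against (2^32-1)/6:

int divisible_by_6(unsigned n) {
  unsigned t = n * 2863311531U;   /* n * inverse(3) mod 2^32 */
  t = (t >> 1) | (t << 31);       /* rotate right by the power-of-two count */
  return t <= 715827882U;         /* (2^32-1)/6 */
}

(This assumes a 32-bit unsigned; the constants change for other widths.)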
//===---------------------------------------------------------------------===// Better mod/ref analysis for scanf would allow us to eliminate the vtable and a bunch of other stuff from this example (see PR1604): #include <cstdio> struct test { int val; virtual ~test() {} }; int main() { test t; std::scanf("%d", &t.val); std::printf("%d\n", t.val); } //===---------------------------------------------------------------------===// These functions perform the same computation, but produce different assembly. define i8 @select(i8 %x) readnone nounwind { %A = icmp ult i8 %x, 250 %B = select i1 %A, i8 0, i8 1 ret i8 %B } define i8 @addshr(i8 %x) readnone nounwind { %A = zext i8 %x to i9 %B = add i9 %A, 6 ;; 256 - 250 == 6 %C = lshr i9 %B, 8 %D = trunc i9 %C to i8 ret i8 %D } //===---------------------------------------------------------------------===// From gcc bug 24696: int f (unsigned long a, unsigned long b, unsigned long c) { return ((a & (c - 1)) != 0) || ((b & (c - 1)) != 0); } int f (unsigned long a, unsigned long b, unsigned long c) { return ((a & (c - 1)) != 0) | ((b & (c - 1)) != 0); } Both should combine to ((a|b) & (c-1)) != 0. Currently not optimized with "clang -emit-llvm-bc | opt -O3". //===---------------------------------------------------------------------===// From GCC Bug 20192: #define PMD_MASK (~((1UL << 23) - 1)) void clear_pmd_range(unsigned long start, unsigned long end) { if (!(start & ~PMD_MASK) && !(end & ~PMD_MASK)) f(); } The expression should optimize to something like "!((start|end)&~PMD_MASK). Currently not optimized with "clang -emit-llvm-bc | opt -O3". //===---------------------------------------------------------------------===// unsigned int f(unsigned int i, unsigned int n) {++i; if (i == n) ++i; return i;} unsigned int f2(unsigned int i, unsigned int n) {++i; i += i == n; return i;} These should combine to the same thing. Currently, the first function produces better code on X86. //===---------------------------------------------------------------------===// From GCC Bug 15784: #define abs(x) x>0?x:-x int f(int x, int y) { return (abs(x)) >= 0; } This should optimize to x == INT_MIN. (With -fwrapv.) Currently not optimized with "clang -emit-llvm-bc | opt -O3". //===---------------------------------------------------------------------===// From GCC Bug 14753: void rotate_cst (unsigned int a) { a = (a << 10) | (a >> 22); if (a == 123) bar (); } void minus_cst (unsigned int a) { unsigned int tem; tem = 20 - a; if (tem == 5) bar (); } void mask_gt (unsigned int a) { /* This is equivalent to a > 15. */ if ((a & ~7) > 8) bar (); } void rshift_gt (unsigned int a) { /* This is equivalent to a > 23. */ if ((a >> 2) > 5) bar (); } All should simplify to a single comparison. All of these are currently not optimized with "clang -emit-llvm-bc | opt -O3". //===---------------------------------------------------------------------===// From GCC Bug 32605: int c(int* x) {return (char*)x+2 == (char*)x;} Should combine to 0. Currently not optimized with "clang -emit-llvm-bc | opt -O3" (although llc can optimize it). //===---------------------------------------------------------------------===// int a(unsigned b) {return ((b << 31) | (b << 30)) >> 31;} Should be combined to "((b >> 1) | b) & 1". Currently not optimized with "clang -emit-llvm-bc | opt -O3". //===---------------------------------------------------------------------===// unsigned a(unsigned x, unsigned y) { return x | (y & 1) | (y & 2);} Should combine to "x | (y & 3)". 
Currently not optimized with "clang -emit-llvm-bc | opt -O3". //===---------------------------------------------------------------------===// int a(int a, int b, int c) {return (~a & c) | ((c|a) & b);} Should fold to "(~a & c) | (a & b)". Currently not optimized with "clang -emit-llvm-bc | opt -O3". //===---------------------------------------------------------------------===// int a(int a,int b) {return (~(a|b))|a;} Should fold to "a|~b". Currently not optimized with "clang -emit-llvm-bc | opt -O3". //===---------------------------------------------------------------------===// int a(int a, int b) {return (a&&b) || (a&&!b);} Should fold to "a". Currently not optimized with "clang -emit-llvm-bc | opt -O3". //===---------------------------------------------------------------------===// int a(int a, int b, int c) {return (a&&b) || (!a&&c);} Should fold to "a ? b : c", or at least something sane. Currently not optimized with "clang -emit-llvm-bc | opt -O3". //===---------------------------------------------------------------------===// int a(int a, int b, int c) {return (a&&b) || (a&&c) || (a&&b&&c);} Should fold to a && (b || c). Currently not optimized with "clang -emit-llvm-bc | opt -O3". //===---------------------------------------------------------------------===// int a(int x) {return x | ((x & 8) ^ 8);} Should combine to x | 8. Currently not optimized with "clang -emit-llvm-bc | opt -O3". //===---------------------------------------------------------------------===// int a(int x) {return x ^ ((x & 8) ^ 8);} Should also combine to x | 8. Currently not optimized with "clang -emit-llvm-bc | opt -O3". //===---------------------------------------------------------------------===// int a(int x) {return ((x | -9) ^ 8) & x;} Should combine to x & -9. Currently not optimized with "clang -emit-llvm-bc | opt -O3". //===---------------------------------------------------------------------===// unsigned a(unsigned a) {return a * 0x11111111 >> 28 & 1;} Should combine to "a * 0x88888888 >> 31". Currently not optimized with "clang -emit-llvm-bc | opt -O3". //===---------------------------------------------------------------------===// unsigned a(char* x) {if ((*x & 32) == 0) return b();} There's an unnecessary zext in the generated code with "clang -emit-llvm-bc | opt -O3". //===---------------------------------------------------------------------===// unsigned a(unsigned long long x) {return 40 * (x >> 1);} Should combine to "20 * (((unsigned)x) & -2)". Currently not optimized with "clang -emit-llvm-bc | opt -O3". //===---------------------------------------------------------------------===// int g(int x) { return (x - 10) < 0; } Should combine to "x <= 9" (the sub has nsw). Currently not optimized with "clang -emit-llvm-bc | opt -O3". //===---------------------------------------------------------------------===// int g(int x) { return (x + 10) < 0; } Should combine to "x < -10" (the add has nsw). Currently not optimized with "clang -emit-llvm-bc | opt -O3". //===---------------------------------------------------------------------===// int f(int i, int j) { return i < j + 1; } int g(int i, int j) { return j > i - 1; } Should combine to "i <= j" (the add/sub has nsw). Currently not optimized with "clang -emit-llvm-bc | opt -O3". //===---------------------------------------------------------------------===// unsigned f(unsigned x) { return ((x & 7) + 1) & 15; } The & 15 part should be optimized away, it doesn't change the result. Currently not optimized with "clang -emit-llvm-bc | opt -O3". 
//===---------------------------------------------------------------------===// This was noticed in the entryblock for grokdeclarator in 403.gcc: %tmp = icmp eq i32 %decl_context, 4 %decl_context_addr.0 = select i1 %tmp, i32 3, i32 %decl_context %tmp1 = icmp eq i32 %decl_context_addr.0, 1 %decl_context_addr.1 = select i1 %tmp1, i32 0, i32 %decl_context_addr.0 tmp1 should be simplified to something like: (!tmp || decl_context == 1) This allows recursive simplifications, tmp1 is used all over the place in the function, e.g. by: %tmp23 = icmp eq i32 %decl_context_addr.1, 0 ; <i1> [#uses=1] %tmp24 = xor i1 %tmp1, true ; <i1> [#uses=1] %or.cond8 = and i1 %tmp23, %tmp24 ; <i1> [#uses=1] later. //===---------------------------------------------------------------------===// [STORE SINKING] Store sinking: This code: void f (int n, int *cond, int *res) { int i; *res = 0; for (i = 0; i < n; i++) if (*cond) *res ^= 234; /* (*) */ } On this function GVN hoists the fully redundant value of *res, but nothing moves the store out. This gives us this code: bb: ; preds = %bb2, %entry %.rle = phi i32 [ 0, %entry ], [ %.rle6, %bb2 ] %i.05 = phi i32 [ 0, %entry ], [ %indvar.next, %bb2 ] %1 = load i32* %cond, align 4 %2 = icmp eq i32 %1, 0 br i1 %2, label %bb2, label %bb1 bb1: ; preds = %bb %3 = xor i32 %.rle, 234 store i32 %3, i32* %res, align 4 br label %bb2 bb2: ; preds = %bb, %bb1 %.rle6 = phi i32 [ %3, %bb1 ], [ %.rle, %bb ] %indvar.next = add i32 %i.05, 1 %exitcond = icmp eq i32 %indvar.next, %n br i1 %exitcond, label %return, label %bb DSE should sink partially dead stores to get the store out of the loop. Here's another partial dead case: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12395 //===---------------------------------------------------------------------===// Scalar PRE hoists the mul in the common block up to the else: int test (int a, int b, int c, int g) { int d, e; if (a) d = b * c; else d = b - c; e = b * c + g; return d + e; } It would be better to do the mul once to reduce codesize above the if. This is GCC PR38204. //===---------------------------------------------------------------------===// This simple function from 179.art: int winner, numf2s; struct { double y; int reset; } *Y; void find_match() { int i; winner = 0; for (i=0;i<numf2s;i++) if (Y[i].y > Y[winner].y) winner =i; } Compiles into (with clang TBAA): for.body: ; preds = %for.inc, %bb.nph %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.inc ] %i.01718 = phi i32 [ 0, %bb.nph ], [ %i.01719, %for.inc ] %tmp4 = getelementptr inbounds %struct.anon* %tmp3, i64 %indvar, i32 0 %tmp5 = load double* %tmp4, align 8, !tbaa !4 %idxprom7 = sext i32 %i.01718 to i64 %tmp10 = getelementptr inbounds %struct.anon* %tmp3, i64 %idxprom7, i32 0 %tmp11 = load double* %tmp10, align 8, !tbaa !4 %cmp12 = fcmp ogt double %tmp5, %tmp11 br i1 %cmp12, label %if.then, label %for.inc if.then: ; preds = %for.body %i.017 = trunc i64 %indvar to i32 br label %for.inc for.inc: ; preds = %for.body, %if.then %i.01719 = phi i32 [ %i.01718, %for.body ], [ %i.017, %if.then ] %indvar.next = add i64 %indvar, 1 %exitcond = icmp eq i64 %indvar.next, %tmp22 br i1 %exitcond, label %for.cond.for.end_crit_edge, label %for.body It is good that we hoisted the reloads of numf2's, and Y out of the loop and sunk the store to winner out. However, this is awful on several levels: the conditional truncate in the loop (-indvars at fault? why can't we completely promote the IV to i64?). 
Beyond that, we have a partially redundant load in the loop: if "winner" (aka %i.01718) isn't updated, we reload Y[winner].y the next time through the loop. Similarly, the addressing that feeds it (including the sext) is redundant. In the end we get this generated assembly: LBB0_2: ## %for.body ## =>This Inner Loop Header: Depth=1 movsd (%rdi), %xmm0 movslq %edx, %r8 shlq $4, %r8 ucomisd (%rcx,%r8), %xmm0 jbe LBB0_4 movl %esi, %edx LBB0_4: ## %for.inc addq $16, %rdi incq %rsi cmpq %rsi, %rax jne LBB0_2 All things considered this isn't too bad, but we shouldn't need the movslq or the shlq instruction, or the load folded into ucomisd every time through the loop. On an x86-specific topic, if the loop can't be restructure, the movl should be a cmov. //===---------------------------------------------------------------------===// [STORE SINKING] GCC PR37810 is an interesting case where we should sink load/store reload into the if block and outside the loop, so we don't reload/store it on the non-call path. for () { *P += 1; if () call(); else ... -> tmp = *P for () { tmp += 1; if () { *P = tmp; call(); tmp = *P; } else ... } *P = tmp; We now hoist the reload after the call (Transforms/GVN/lpre-call-wrap.ll), but we don't sink the store. We need partially dead store sinking. //===---------------------------------------------------------------------===// [LOAD PRE CRIT EDGE SPLITTING] GCC PR37166: Sinking of loads prevents SROA'ing the "g" struct on the stack leading to excess stack traffic. This could be handled by GVN with some crazy symbolic phi translation. The code we get looks like (g is on the stack): bb2: ; preds = %bb1 .. %9 = getelementptr %struct.f* %g, i32 0, i32 0 store i32 %8, i32* %9, align bel %bb3 bb3: ; preds = %bb1, %bb2, %bb %c_addr.0 = phi %struct.f* [ %g, %bb2 ], [ %c, %bb ], [ %c, %bb1 ] %b_addr.0 = phi %struct.f* [ %b, %bb2 ], [ %g, %bb ], [ %b, %bb1 ] %10 = getelementptr %struct.f* %c_addr.0, i32 0, i32 0 %11 = load i32* %10, align 4 %11 is partially redundant, an in BB2 it should have the value %8. GCC PR33344 and PR35287 are similar cases. //===---------------------------------------------------------------------===// [LOAD PRE] There are many load PRE testcases in testsuite/gcc.dg/tree-ssa/loadpre* in the GCC testsuite, ones we don't get yet are (checked through loadpre25): [CRIT EDGE BREAKING] predcom-4.c [PRE OF READONLY CALL] loadpre5.c [TURN SELECT INTO BRANCH] loadpre14.c loadpre15.c actually a conditional increment: loadpre18.c loadpre19.c //===---------------------------------------------------------------------===// [LOAD PRE / STORE SINKING / SPEC HACK] This is a chunk of code from 456.hmmer: int f(int M, int *mc, int *mpp, int *tpmm, int *ip, int *tpim, int *dpp, int *tpdm, int xmb, int *bp, int *ms) { int k, sc; for (k = 1; k <= M; k++) { mc[k] = mpp[k-1] + tpmm[k-1]; if ((sc = ip[k-1] + tpim[k-1]) > mc[k]) mc[k] = sc; if ((sc = dpp[k-1] + tpdm[k-1]) > mc[k]) mc[k] = sc; if ((sc = xmb + bp[k]) > mc[k]) mc[k] = sc; mc[k] += ms[k]; } } It is very profitable for this benchmark to turn the conditional stores to mc[k] into a conditional move (select instr in IR) and allow the final store to do the store. See GCC PR27313 for more details. Note that this is valid to xform even with the new C++ memory model, since mc[k] is previously loaded and later stored. //===---------------------------------------------------------------------===// [SCALAR PRE] There are many PRE testcases in testsuite/gcc.dg/tree-ssa/ssa-pre-*.c in the GCC testsuite. 
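Returning to the 456.hmmer loop in the [LOAD PRE / STORE SINKING / SPEC HACK] entry above, this is roughly the source-level shape the select-based form would take. An illustrative C++ sketch (assuming mc does not alias the other arrays), not output from the optimizer:

void f_rewritten(int M, int *mc, int *mpp, int *tpmm, int *ip, int *tpim,
                 int *dpp, int *tpdm, int xmb, int *bp, int *ms) {
  for (int k = 1; k <= M; k++) {
    int m = mpp[k - 1] + tpmm[k - 1];
    int sc = ip[k - 1] + tpim[k - 1];
    m = sc > m ? sc : m;          // select instead of a conditional store
    sc = dpp[k - 1] + tpdm[k - 1];
    m = sc > m ? sc : m;
    sc = xmb + bp[k];
    m = sc > m ? sc : m;
    mc[k] = m + ms[k];            // single unconditional store at the end
  }
}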
//===---------------------------------------------------------------------===// There are some interesting cases in testsuite/gcc.dg/tree-ssa/pred-comm* in the GCC testsuite. For example, we get the first example in predcom-1.c, but miss the second one: unsigned fib[1000]; unsigned avg[1000]; __attribute__ ((noinline)) void count_averages(int n) { int i; for (i = 1; i < n; i++) avg[i] = (((unsigned long) fib[i - 1] + fib[i] + fib[i + 1]) / 3) & 0xffff; } which compiles into two loads instead of one in the loop. predcom-2.c is the same as predcom-1.c predcom-3.c is very similar but needs loads feeding each other instead of store->load. //===---------------------------------------------------------------------===// [ALIAS ANALYSIS] Type based alias analysis: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=14705 We should do better analysis of posix_memalign. At the least it should no-capture its pointer argument, at best, we should know that the out-value result doesn't point to anything (like malloc). One example of this is in SingleSource/Benchmarks/Misc/dt.c //===---------------------------------------------------------------------===// Interesting missed case because of control flow flattening (should be 2 loads): http://gcc.gnu.org/bugzilla/show_bug.cgi?id=26629 With: llvm-gcc t2.c -S -o - -O0 -emit-llvm | llvm-as | opt -mem2reg -gvn -instcombine | llvm-dis we miss it because we need 1) CRIT EDGE 2) MULTIPLE DIFFERENT VALS PRODUCED BY ONE BLOCK OVER DIFFERENT PATHS //===---------------------------------------------------------------------===// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19633 We could eliminate the branch condition here, loading from null is undefined: struct S { int w, x, y, z; }; struct T { int r; struct S s; }; void bar (struct S, int); void foo (int a, struct T b) { struct S *c = 0; if (a) c = &b.s; bar (*c, a); } //===---------------------------------------------------------------------===// simplifylibcalls should do several optimizations for strspn/strcspn: strcspn(x, "a") -> inlined loop for up to 3 letters (similarly for strspn): size_t __strcspn_c3 (__const char *__s, int __reject1, int __reject2, int __reject3) { register size_t __result = 0; while (__s[__result] != '\0' && __s[__result] != __reject1 && __s[__result] != __reject2 && __s[__result] != __reject3) ++__result; return __result; } This should turn into a switch on the character. See PR3253 for some notes on codegen. 456.hmmer apparently uses strcspn and strspn a lot. 471.omnetpp uses strspn. //===---------------------------------------------------------------------===// simplifylibcalls should turn these snprintf idioms into memcpy (GCC PR47917) char buf1[6], buf2[6], buf3[4], buf4[4]; int i; int foo (void) { int ret = snprintf (buf1, sizeof buf1, "abcde"); ret += snprintf (buf2, sizeof buf2, "abcdef") * 16; ret += snprintf (buf3, sizeof buf3, "%s", i++ < 6 ? "abc" : "def") * 256; ret += snprintf (buf4, sizeof buf4, "%s", i++ > 10 ? "abcde" : "defgh")*4096; return ret; } //===---------------------------------------------------------------------===// "gas" uses this idiom: else if (strchr ("+-/*%|&^:[]()~", *intel_parser.op_string)) .. else if (strchr ("<>", *intel_parser.op_string) Those should be turned into a switch. SimplifyLibCalls only gets the second case. 
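For reference, this is the kind of switch the strchr idiom above should become. An illustrative C++ sketch, not taken from gas or SimplifyLibCalls; note that strchr also matches the terminating '\0', which a real transform has to account for:

#include <cstring>

// strchr form; the extra check excludes the terminator so the two forms agree.
static bool is_op_char_strchr(char c) {
  return std::strchr("<>", c) != nullptr && c != '\0';
}

// Desired lowering: a switch over the constant character set.
static bool is_op_char_switch(char c) {
  switch (c) {
  case '<':
  case '>':
    return true;
  default:
    return false;
  }
}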
//===---------------------------------------------------------------------===// 252.eon contains this interesting code: %3072 = getelementptr [100 x i8]* %tempString, i32 0, i32 0 %3073 = call i8* @strcpy(i8* %3072, i8* %3071) nounwind %strlen = call i32 @strlen(i8* %3072) ; uses = 1 %endptr = getelementptr [100 x i8]* %tempString, i32 0, i32 %strlen call void @llvm.memcpy.i32(i8* %endptr, i8* getelementptr ([5 x i8]* @"\01LC42", i32 0, i32 0), i32 5, i32 1) %3074 = call i32 @strlen(i8* %endptr) nounwind readonly This is interesting for a couple reasons. First, in this: The memcpy+strlen strlen can be replaced with: %3074 = call i32 @strlen([5 x i8]* @"\01LC42") nounwind readonly Because the destination was just copied into the specified memory buffer. This, in turn, can be constant folded to "4". In other code, it contains: %endptr6978 = bitcast i8* %endptr69 to i32* store i32 7107374, i32* %endptr6978, align 1 %3167 = call i32 @strlen(i8* %endptr69) nounwind readonly Which could also be constant folded. Whatever is producing this should probably be fixed to leave this as a memcpy from a string. Further, eon also has an interesting partially redundant strlen call: bb8: ; preds = %_ZN18eonImageCalculatorC1Ev.exit %682 = getelementptr i8** %argv, i32 6 ; <i8**> [#uses=2] %683 = load i8** %682, align 4 ; <i8*> [#uses=4] %684 = load i8* %683, align 1 ; <i8> [#uses=1] %685 = icmp eq i8 %684, 0 ; <i1> [#uses=1] br i1 %685, label %bb10, label %bb9 bb9: ; preds = %bb8 %686 = call i32 @strlen(i8* %683) nounwind readonly %687 = icmp ugt i32 %686, 254 ; <i1> [#uses=1] br i1 %687, label %bb10, label %bb11 bb10: ; preds = %bb9, %bb8 %688 = call i32 @strlen(i8* %683) nounwind readonly This could be eliminated by doing the strlen once in bb8, saving code size and improving perf on the bb8->9->10 path. //===---------------------------------------------------------------------===// I see an interesting fully redundant call to strlen left in 186.crafty:InputMove which looks like: %movetext11 = getelementptr [128 x i8]* %movetext, i32 0, i32 0 bb62: ; preds = %bb55, %bb53 %promote.0 = phi i32 [ %169, %bb55 ], [ 0, %bb53 ] %171 = call i32 @strlen(i8* %movetext11) nounwind readonly align 1 %172 = add i32 %171, -1 ; <i32> [#uses=1] %173 = getelementptr [128 x i8]* %movetext, i32 0, i32 %172 ... no stores ... br i1 %or.cond, label %bb65, label %bb72 bb65: ; preds = %bb62 store i8 0, i8* %173, align 1 br label %bb72 bb72: ; preds = %bb65, %bb62 %trank.1 = phi i32 [ %176, %bb65 ], [ -1, %bb62 ] %177 = call i32 @strlen(i8* %movetext11) nounwind readonly align 1 Note that on the bb62->bb72 path, that the %177 strlen call is partially redundant with the %171 call. At worst, we could shove the %177 strlen call up into the bb65 block moving it out of the bb62->bb72 path. However, note that bb65 stores to the string, zeroing out the last byte. This means that on that path the value of %177 is actually just %171-1. A sub is cheaper than a strlen! This pattern repeats several times, basically doing: A = strlen(P); P[A-1] = 0; B = strlen(P); where it is "obvious" that B = A-1. //===---------------------------------------------------------------------===// 186.crafty has this interesting pattern with the "out.4543" variable: call void @llvm.memcpy.i32( i8* getelementptr ([10 x i8]* @out.4543, i32 0, i32 0), i8* getelementptr ([7 x i8]* @"\01LC28700", i32 0, i32 0), i32 7, i32 1) %101 = call@printf(i8* ... 
@out.4543, i32 0, i32 0)) nounwind It is basically doing: memcpy(globalarray, "string"); printf(..., globalarray); Anyway, by knowing that printf just reads the memory and forward substituting the string directly into the printf, this eliminates reads from globalarray. Since this pattern occurs frequently in crafty (due to the "DisplayTime" and other similar functions) there are many stores to "out". Once all the printfs stop using "out", all that is left is the memcpy's into it. This should allow globalopt to remove the "stored only" global. //===---------------------------------------------------------------------===// This code: define inreg i32 @foo(i8* inreg %p) nounwind { %tmp0 = load i8* %p %tmp1 = ashr i8 %tmp0, 5 %tmp2 = sext i8 %tmp1 to i32 ret i32 %tmp2 } could be dagcombine'd to a sign-extending load with a shift. For example, on x86 this currently gets this: movb (%eax), %al sarb $5, %al movsbl %al, %eax while it could get this: movsbl (%eax), %eax sarl $5, %eax //===---------------------------------------------------------------------===// GCC PR31029: int test(int x) { return 1-x == x; } // --> return false int test2(int x) { return 2-x == x; } // --> return x == 1 ? Always foldable for odd constants, what is the rule for even? //===---------------------------------------------------------------------===// PR 3381: GEP to field of size 0 inside a struct could be turned into GEP for next field in struct (which is at same address). For example: store of float into { {{}}, float } could be turned into a store to the float directly. //===---------------------------------------------------------------------===// The arg promotion pass should make use of nocapture to make its alias analysis stuff much more precise. //===---------------------------------------------------------------------===// The following functions should be optimized to use a select instead of a branch (from gcc PR40072): char char_int(int m) {if(m>7) return 0; return m;} int int_char(char m) {if(m>7) return 0; return m;} //===---------------------------------------------------------------------===// int func(int a, int b) { if (a & 0x80) b |= 0x80; else b &= ~0x80; return b; } Generates this: define i32 @func(i32 %a, i32 %b) nounwind readnone ssp { entry: %0 = and i32 %a, 128 ; <i32> [#uses=1] %1 = icmp eq i32 %0, 0 ; <i1> [#uses=1] %2 = or i32 %b, 128 ; <i32> [#uses=1] %3 = and i32 %b, -129 ; <i32> [#uses=1] %b_addr.0 = select i1 %1, i32 %3, i32 %2 ; <i32> [#uses=1] ret i32 %b_addr.0 } However, it's functionally equivalent to: b = (b & ~0x80) | (a & 0x80); Which generates this: define i32 @func(i32 %a, i32 %b) nounwind readnone ssp { entry: %0 = and i32 %b, -129 ; <i32> [#uses=1] %1 = and i32 %a, 128 ; <i32> [#uses=1] %2 = or i32 %0, %1 ; <i32> [#uses=1] ret i32 %2 } This can be generalized for other forms: b = (b & ~0x80) | (a & 0x40) << 1; //===---------------------------------------------------------------------===// These two functions produce different code. 
They shouldn't: #include <stdint.h> uint8_t p1(uint8_t b, uint8_t a) { b = (b & ~0xc0) | (a & 0xc0); return (b); } uint8_t p2(uint8_t b, uint8_t a) { b = (b & ~0x40) | (a & 0x40); b = (b & ~0x80) | (a & 0x80); return (b); } define zeroext i8 @p1(i8 zeroext %b, i8 zeroext %a) nounwind readnone ssp { entry: %0 = and i8 %b, 63 ; <i8> [#uses=1] %1 = and i8 %a, -64 ; <i8> [#uses=1] %2 = or i8 %1, %0 ; <i8> [#uses=1] ret i8 %2 } define zeroext i8 @p2(i8 zeroext %b, i8 zeroext %a) nounwind readnone ssp { entry: %0 = and i8 %b, 63 ; <i8> [#uses=1] %.masked = and i8 %a, 64 ; <i8> [#uses=1] %1 = and i8 %a, -128 ; <i8> [#uses=1] %2 = or i8 %1, %0 ; <i8> [#uses=1] %3 = or i8 %2, %.masked ; <i8> [#uses=1] ret i8 %3 } //===---------------------------------------------------------------------===// IPSCCP does not currently propagate argument dependent constants through functions where it does not not all of the callers. This includes functions with normal external linkage as well as templates, C99 inline functions etc. Specifically, it does nothing to: define i32 @test(i32 %x, i32 %y, i32 %z) nounwind { entry: %0 = add nsw i32 %y, %z %1 = mul i32 %0, %x %2 = mul i32 %y, %z %3 = add nsw i32 %1, %2 ret i32 %3 } define i32 @test2() nounwind { entry: %0 = call i32 @test(i32 1, i32 2, i32 4) nounwind ret i32 %0 } It would be interesting extend IPSCCP to be able to handle simple cases like this, where all of the arguments to a call are constant. Because IPSCCP runs before inlining, trivial templates and inline functions are not yet inlined. The results for a function + set of constant arguments should be memoized in a map. //===---------------------------------------------------------------------===// The libcall constant folding stuff should be moved out of SimplifyLibcalls into libanalysis' constantfolding logic. This would allow IPSCCP to be able to handle simple things like this: static int foo(const char *X) { return strlen(X); } int bar() { return foo("abcd"); } //===---------------------------------------------------------------------===// functionattrs doesn't know much about memcpy/memset. This function should be marked readnone rather than readonly, since it only twiddles local memory, but functionattrs doesn't handle memset/memcpy/memmove aggressively: struct X { int *p; int *q; }; int foo() { int i = 0, j = 1; struct X x, y; int **p; y.p = &i; x.q = &j; p = __builtin_memcpy (&x, &y, sizeof (int *)); return **p; } This can be seen at: $ clang t.c -S -o - -mkernel -O0 -emit-llvm | opt -functionattrs -S //===---------------------------------------------------------------------===// Missed instcombine transformation: define i1 @a(i32 %x) nounwind readnone { entry: %cmp = icmp eq i32 %x, 30 %sub = add i32 %x, -30 %cmp2 = icmp ugt i32 %sub, 9 %or = or i1 %cmp, %cmp2 ret i1 %or } This should be optimized to a single compare. Testcase derived from gcc. //===---------------------------------------------------------------------===// Missed instcombine or reassociate transformation: int a(int a, int b) { return (a==12)&(b>47)&(b<58); } The sgt and slt should be combined into a single comparison. Testcase derived from gcc. //===---------------------------------------------------------------------===// Missed instcombine transformation: %382 = srem i32 %tmp14.i, 64 ; [#uses=1] %383 = zext i32 %382 to i64 ; [#uses=1] %384 = shl i64 %381, %383 ; [#uses=1] %385 = icmp slt i32 %tmp14.i, 64 ; [#uses=1] The srem can be transformed to an and because if %tmp14.i is negative, the shift is undefined. 
Testcase derived from 403.gcc. //===---------------------------------------------------------------------===// This is a range comparison on a divided result (from 403.gcc): %1337 = sdiv i32 %1336, 8 ; [#uses=1] %.off.i208 = add i32 %1336, 7 ; [#uses=1] %1338 = icmp ult i32 %.off.i208, 15 ; [#uses=1] We already catch this (removing the sdiv) if there isn't an add, we should handle the 'add' as well. This is a common idiom with it's builtin_alloca code. C testcase: int a(int x) { return (unsigned)(x/16+7) < 15; } Another similar case involves truncations on 64-bit targets: %361 = sdiv i64 %.046, 8 ; [#uses=1] %362 = trunc i64 %361 to i32 ; [#uses=2] ... %367 = icmp eq i32 %362, 0 ; [#uses=1] //===---------------------------------------------------------------------===// Missed instcombine/dagcombine transformation: define void @lshift_lt(i8 zeroext %a) nounwind { entry: %conv = zext i8 %a to i32 %shl = shl i32 %conv, 3 %cmp = icmp ult i32 %shl, 33 br i1 %cmp, label %if.then, label %if.end if.then: tail call void @bar() nounwind ret void if.end: ret void } declare void @bar() nounwind The shift should be eliminated. Testcase derived from gcc. //===---------------------------------------------------------------------===// These compile into different code, one gets recognized as a switch and the other doesn't due to phase ordering issues (PR6212): int test1(int mainType, int subType) { if (mainType == 7) subType = 4; else if (mainType == 9) subType = 6; else if (mainType == 11) subType = 9; return subType; } int test2(int mainType, int subType) { if (mainType == 7) subType = 4; if (mainType == 9) subType = 6; if (mainType == 11) subType = 9; return subType; } //===---------------------------------------------------------------------===// The following test case (from PR6576): define i32 @mul(i32 %a, i32 %b) nounwind readnone { entry: %cond1 = icmp eq i32 %b, 0 ; <i1> [#uses=1] br i1 %cond1, label %exit, label %bb.nph bb.nph: ; preds = %entry %tmp = mul i32 %b, %a ; <i32> [#uses=1] ret i32 %tmp exit: ; preds = %entry ret i32 0 } could be reduced to: define i32 @mul(i32 %a, i32 %b) nounwind readnone { entry: %tmp = mul i32 %b, %a ret i32 %tmp } //===---------------------------------------------------------------------===// We should use DSE + llvm.lifetime.end to delete dead vtable pointer updates. See GCC PR34949 Another interesting case is that something related could be used for variables that go const after their ctor has finished. In these cases, globalopt (which can statically run the constructor) could mark the global const (so it gets put in the readonly section). A testcase would be: #include <complex> using namespace std; const complex<char> should_be_in_rodata (42,-42); complex<char> should_be_in_data (42,-42); complex<char> should_be_in_bss; Where we currently evaluate the ctors but the globals don't become const because the optimizer doesn't know they "become const" after the ctor is done. See GCC PR4131 for more examples. //===---------------------------------------------------------------------===// In this code: long foo(long x) { return x > 1 ? x : 1; } LLVM emits a comparison with 1 instead of 0. 0 would be equivalent and cheaper on most targets. LLVM prefers comparisons with zero over non-zero in general, but in this case it choses instead to keep the max operation obvious. 
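A quick spot check, not part of the original notes, that the comparison with zero is interchangeable with the comparison against 1 for this max-with-1 idiom:

#include <cassert>

long max1_cmp1(long x) { return x > 1 ? x : 1; }  // the form LLVM emits
long max1_cmp0(long x) { return x > 0 ? x : 1; }  // equivalent, cheaper compare

int main() {
  for (long x = -1000; x <= 1000; ++x)
    assert(max1_cmp1(x) == max1_cmp0(x));
  return 0;
}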
//===---------------------------------------------------------------------===// define void @a(i32 %x) nounwind { entry: switch i32 %x, label %if.end [ i32 0, label %if.then i32 1, label %if.then i32 2, label %if.then i32 3, label %if.then i32 5, label %if.then ] if.then: tail call void @foo() nounwind ret void if.end: ret void } declare void @foo() Generated code on x86-64 (other platforms give similar results): a: cmpl $5, %edi ja LBB2_2 cmpl $4, %edi jne LBB2_3 .LBB0_2: ret .LBB0_3: jmp foo # TAILCALL If we wanted to be really clever, we could simplify the whole thing to something like the following, which eliminates a branch: xorl $1, %edi cmpl $4, %edi ja .LBB0_2 ret .LBB0_2: jmp foo # TAILCALL //===---------------------------------------------------------------------===// We compile this: int foo(int a) { return (a & (~15)) / 16; } Into: define i32 @foo(i32 %a) nounwind readnone ssp { entry: %and = and i32 %a, -16 %div = sdiv i32 %and, 16 ret i32 %div } but this code (X & -A)/A is X >> log2(A) when A is a power of 2, so this case should be instcombined into just "a >> 4". We do get this at the codegen level, so something knows about it, but instcombine should catch it earlier: _foo: ## @foo ## BB#0: ## %entry movl %edi, %eax sarl $4, %eax ret //===---------------------------------------------------------------------===// This code (from GCC PR28685): int test(int a, int b) { int lt = a < b; int eq = a == b; if (lt) return 1; return eq; } Is compiled to: define i32 @test(i32 %a, i32 %b) nounwind readnone ssp { entry: %cmp = icmp slt i32 %a, %b br i1 %cmp, label %return, label %if.end if.end: ; preds = %entry %cmp5 = icmp eq i32 %a, %b %conv6 = zext i1 %cmp5 to i32 ret i32 %conv6 return: ; preds = %entry ret i32 1 } it could be: define i32 @test__(i32 %a, i32 %b) nounwind readnone ssp { entry: %0 = icmp sle i32 %a, %b %retval = zext i1 %0 to i32 ret i32 %retval } //===---------------------------------------------------------------------===// This code can be seen in viterbi: %64 = call noalias i8* @malloc(i64 %62) nounwind ... %67 = call i64 @llvm.objectsize.i64(i8* %64, i1 false) nounwind %68 = call i8* @__memset_chk(i8* %64, i32 0, i64 %62, i64 %67) nounwind llvm.objectsize.i64 should be taught about malloc/calloc, allowing it to fold to %62. This is a security win (overflows of malloc will get caught) and also a performance win by exposing more memsets to the optimizer. This occurs several times in viterbi. Note that this would change the semantics of @llvm.objectsize which by its current definition always folds to a constant. We also should make sure that we remove checking in code like char *p = malloc(strlen(s)+1); __strcpy_chk(p, s, __builtin_objectsize(p, 0)); //===---------------------------------------------------------------------===// clang -O3 currently compiles this code int g(unsigned int a) { unsigned int c[100]; c[10] = a; c[11] = a; unsigned int b = c[10] + c[11]; if(b > a*2) a = 4; else a = 8; return a + 7; } into define i32 @g(i32 a) nounwind readnone { %add = shl i32 %a, 1 %mul = shl i32 %a, 1 %cmp = icmp ugt i32 %add, %mul %a.addr.0 = select i1 %cmp, i32 11, i32 15 ret i32 %a.addr.0 } The icmp should fold to false. This CSE opportunity is only available after GVN and InstCombine have run. 
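To make the missed fold concrete: b and a*2 are the same value modulo 2^32, so the branch is never taken and g() always returns 8 + 7 = 15. A small standalone check, not part of the original notes:

#include <cassert>

int g_ref(unsigned a) {              // mirrors g() from the entry above
  unsigned c[100];
  c[10] = a;
  c[11] = a;
  unsigned b = c[10] + c[11];        // equals a * 2 (mod 2^32)
  return (b > a * 2 ? 4 : 8) + 7;
}

int main() {
  const unsigned vals[] = {0u, 1u, 7u, 0x7fffffffu, 0x80000000u, 0xffffffffu};
  for (unsigned a : vals)
    assert(g_ref(a) == 15);
  return 0;
}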
//===---------------------------------------------------------------------===// memcpyopt should turn this: define i8* @test10(i32 %x) { %alloc = call noalias i8* @malloc(i32 %x) nounwind call void @llvm.memset.p0i8.i32(i8* %alloc, i8 0, i32 %x, i32 1, i1 false) ret i8* %alloc } into a call to calloc. We should make sure that we analyze calloc as aggressively as malloc though. //===---------------------------------------------------------------------===// clang -O3 doesn't optimize this: void f1(int* begin, int* end) { std::fill(begin, end, 0); } into a memset. This is PR8942. //===---------------------------------------------------------------------===// clang -O3 -fno-exceptions currently compiles this code: void f(int N) { std::vector<int> v(N); extern void sink(void*); sink(&v); } into define void @_Z1fi(i32 %N) nounwind { entry: %v2 = alloca [3 x i32*], align 8 %v2.sub = getelementptr inbounds [3 x i32*]* %v2, i64 0, i64 0 %tmpcast = bitcast [3 x i32*]* %v2 to %"class.std::vector"* %conv = sext i32 %N to i64 store i32* null, i32** %v2.sub, align 8, !tbaa !0 %tmp3.i.i.i.i.i = getelementptr inbounds [3 x i32*]* %v2, i64 0, i64 1 store i32* null, i32** %tmp3.i.i.i.i.i, align 8, !tbaa !0 %tmp4.i.i.i.i.i = getelementptr inbounds [3 x i32*]* %v2, i64 0, i64 2 store i32* null, i32** %tmp4.i.i.i.i.i, align 8, !tbaa !0 %cmp.i.i.i.i = icmp eq i32 %N, 0 br i1 %cmp.i.i.i.i, label %_ZNSt12_Vector_baseIiSaIiEEC2EmRKS0_.exit.thread.i.i, label %cond.true.i.i.i.i _ZNSt12_Vector_baseIiSaIiEEC2EmRKS0_.exit.thread.i.i: ; preds = %entry store i32* null, i32** %v2.sub, align 8, !tbaa !0 store i32* null, i32** %tmp3.i.i.i.i.i, align 8, !tbaa !0 %add.ptr.i5.i.i = getelementptr inbounds i32* null, i64 %conv store i32* %add.ptr.i5.i.i, i32** %tmp4.i.i.i.i.i, align 8, !tbaa !0 br label %_ZNSt6vectorIiSaIiEEC1EmRKiRKS0_.exit cond.true.i.i.i.i: ; preds = %entry %cmp.i.i.i.i.i = icmp slt i32 %N, 0 br i1 %cmp.i.i.i.i.i, label %if.then.i.i.i.i.i, label %_ZNSt12_Vector_baseIiSaIiEEC2EmRKS0_.exit.i.i if.then.i.i.i.i.i: ; preds = %cond.true.i.i.i.i call void @_ZSt17__throw_bad_allocv() noreturn nounwind unreachable _ZNSt12_Vector_baseIiSaIiEEC2EmRKS0_.exit.i.i: ; preds = %cond.true.i.i.i.i %mul.i.i.i.i.i = shl i64 %conv, 2 %call3.i.i.i.i.i = call noalias i8* @_Znwm(i64 %mul.i.i.i.i.i) nounwind %0 = bitcast i8* %call3.i.i.i.i.i to i32* store i32* %0, i32** %v2.sub, align 8, !tbaa !0 store i32* %0, i32** %tmp3.i.i.i.i.i, align 8, !tbaa !0 %add.ptr.i.i.i = getelementptr inbounds i32* %0, i64 %conv store i32* %add.ptr.i.i.i, i32** %tmp4.i.i.i.i.i, align 8, !tbaa !0 call void @llvm.memset.p0i8.i64(i8* %call3.i.i.i.i.i, i8 0, i64 %mul.i.i.i.i.i, i32 4, i1 false) br label %_ZNSt6vectorIiSaIiEEC1EmRKiRKS0_.exit This is just the handling the construction of the vector. Most surprising here is the fact that all three null stores in %entry are dead (because we do no cross-block DSE). Also surprising is that %conv isn't simplified to 0 in %....exit.thread.i.i. This is a because the client of LazyValueInfo doesn't simplify all instruction operands, just selected ones. 
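At the source level, the memcpyopt transformation described at the top of this group amounts to the rewrite below. A sketch for illustration only (assuming the whole allocation is zeroed), not the pass's actual implementation:

#include <cstdlib>
#include <cstring>

void *alloc_zeroed_before(size_t n) {  // malloc followed by a full memset
  void *p = std::malloc(n);
  if (p)
    std::memset(p, 0, n);
  return p;
}

void *alloc_zeroed_after(size_t n) {   // single calloc, same observable result
  return std::calloc(1, n);
}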
//===---------------------------------------------------------------------===// clang -O3 -fno-exceptions currently compiles this code: void f(char* a, int n) { __builtin_memset(a, 0, n); for (int i = 0; i < n; ++i) a[i] = 0; } into: define void @_Z1fPci(i8* nocapture %a, i32 %n) nounwind { entry: %conv = sext i32 %n to i64 tail call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 %conv, i32 1, i1 false) %cmp8 = icmp sgt i32 %n, 0 br i1 %cmp8, label %for.body.lr.ph, label %for.end for.body.lr.ph: ; preds = %entry %tmp10 = add i32 %n, -1 %tmp11 = zext i32 %tmp10 to i64 %tmp12 = add i64 %tmp11, 1 call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 %tmp12, i32 1, i1 false) ret void for.end: ; preds = %entry ret void } This shouldn't need the ((zext (%n - 1)) + 1) game, and it should ideally fold the two memset's together. The issue with the addition only occurs in 64-bit mode, and appears to be at least partially caused by Scalar Evolution not keeping its cache updated: it returns the "wrong" result immediately after indvars runs, but figures out the expected result if it is run from scratch on IR resulting from running indvars. //===---------------------------------------------------------------------===// clang -O3 -fno-exceptions currently compiles this code: struct S { unsigned short m1, m2; unsigned char m3, m4; }; void f(int N) { std::vector<S> v(N); extern void sink(void*); sink(&v); } into poor code for zero-initializing 'v' when N is >0. The problem is that S is only 6 bytes, but each element is 8 byte-aligned. We generate a loop and 4 stores on each iteration. If the struct were 8 bytes, this gets turned into a memset. In order to handle this we have to: A) Teach clang to generate metadata for memsets of structs that have holes in them. B) Teach clang to use such a memset for zero init of this struct (since it has a hole), instead of doing elementwise zeroing. //===---------------------------------------------------------------------===// clang -O3 currently compiles this code: extern const int magic; double f() { return 0.0 * magic; } into @magic = external constant i32 define double @_Z1fv() nounwind readnone { entry: %tmp = load i32* @magic, align 4, !tbaa !0 %conv = sitofp i32 %tmp to double %mul = fmul double %conv, 0.000000e+00 ret double %mul } We should be able to fold away this fmul to 0.0. More generally, fmul(x,0.0) can be folded to 0.0 if we can prove that the LHS is not -0.0, not a NaN, and not an INF. The CannotBeNegativeZero predicate in value tracking should be extended to support general "fpclassify" operations that can return yes/no/unknown for each of these predicates. In this predicate, we know that uitofp is trivially never NaN or -0.0, and we know that it isn't +/-Inf if the floating point type has enough exponent bits to represent the largest integer value as < inf. //===---------------------------------------------------------------------===// When optimizing a transformation that can change the sign of 0.0 (such as the 0.0*val -> 0.0 transformation above), it might be provable that the sign of the expression doesn't matter. For example, by the above rules, we can't transform fmul(sitofp(x), 0.0) into 0.0, because x might be -1 and the result of the expression is defined to be -0.0. If we look at the uses of the fmul for example, we might be able to prove that all uses don't care about the sign of zero. 
For example, if we have: fadd(fmul(sitofp(x), 0.0), 2.0) Since we know that x+2.0 doesn't care about the sign of any zeros in X, we can transform the fmul to 0.0, and then the fadd to 2.0. //===---------------------------------------------------------------------===// We should enhance memcpy/memcpy/memset to allow a metadata node on them indicating that some bytes of the transfer are undefined. This is useful for frontends like clang when lowering struct copies, when some elements of the struct are undefined. Consider something like this: struct x { char a; int b[4]; }; void foo(struct x*P); struct x testfunc() { struct x V1, V2; foo(&V1); V2 = V1; return V2; } We currently compile this to: $ clang t.c -S -o - -O0 -emit-llvm | opt -scalarrepl -S %struct.x = type { i8, [4 x i32] } define void @testfunc(%struct.x* sret %agg.result) nounwind ssp { entry: %V1 = alloca %struct.x, align 4 call void @foo(%struct.x* %V1) %tmp1 = bitcast %struct.x* %V1 to i8* %0 = bitcast %struct.x* %V1 to i160* %srcval1 = load i160* %0, align 4 %tmp2 = bitcast %struct.x* %agg.result to i8* %1 = bitcast %struct.x* %agg.result to i160* store i160 %srcval1, i160* %1, align 4 ret void } This happens because SRoA sees that the temp alloca has is being memcpy'd into and out of and it has holes and it has to be conservative. If we knew about the holes, then this could be much much better. Having information about these holes would also improve memcpy (etc) lowering at llc time when it gets inlined, because we can use smaller transfers. This also avoids partial register stalls in some important cases. //===---------------------------------------------------------------------===// We don't fold (icmp (add) (add)) unless the two adds only have a single use. There are a lot of cases that we're refusing to fold in (e.g.) 256.bzip2, for example: %indvar.next90 = add i64 %indvar89, 1 ;; Has 2 uses %tmp96 = add i64 %tmp95, 1 ;; Has 1 use %exitcond97 = icmp eq i64 %indvar.next90, %tmp96 We don't fold this because we don't want to introduce an overlapped live range of the ivar. However if we can make this more aggressive without causing performance issues in two ways: 1. If *either* the LHS or RHS has a single use, we can definitely do the transformation. In the overlapping liverange case we're trading one register use for one fewer operation, which is a reasonable trade. Before doing this we should verify that the llc output actually shrinks for some benchmarks. 2. If both ops have multiple uses, we can still fold it if the operations are both sinkable to *after* the icmp (e.g. in a subsequent block) which doesn't increase register pressure. There are a ton of icmp's we aren't simplifying because of the reg pressure concern. Care is warranted here though because many of these are induction variables and other cases that matter a lot to performance, like the above. Here's a blob of code that you can drop into the bottom of visitICmp to see some missed cases: { Value *A, *B, *C, *D; if (match(Op0, m_Add(m_Value(A), m_Value(B))) && match(Op1, m_Add(m_Value(C), m_Value(D))) && (A == C || A == D || B == C || B == D)) { errs() << "OP0 = " << *Op0 << " U=" << Op0->getNumUses() << "\n"; errs() << "OP1 = " << *Op1 << " U=" << Op1->getNumUses() << "\n"; errs() << "CMP = " << I << "\n\n"; } } //===---------------------------------------------------------------------===// define i1 @test1(i32 %x) nounwind { %and = and i32 %x, 3 %cmp = icmp ult i32 %and, 2 ret i1 %cmp } Can be folded to (x & 2) == 0. 
define i1 @test2(i32 %x) nounwind { %and = and i32 %x, 3 %cmp = icmp ugt i32 %and, 1 ret i1 %cmp } Can be folded to (x & 2) != 0. SimplifyDemandedBits shrinks the "and" constant to 2 but instcombine misses the icmp transform. //===---------------------------------------------------------------------===// This code: typedef struct { int f1:1; int f2:1; int f3:1; int f4:29; } t1; typedef struct { int f1:1; int f2:1; int f3:30; } t2; t1 s1; t2 s2; void func1(void) { s1.f1 = s2.f1; s1.f2 = s2.f2; } Compiles into this IR (on x86-64 at least): %struct.t1 = type { i8, [3 x i8] } @s2 = global %struct.t1 zeroinitializer, align 4 @s1 = global %struct.t1 zeroinitializer, align 4 define void @func1() nounwind ssp noredzone { entry: %0 = load i32* bitcast (%struct.t1* @s2 to i32*), align 4 %bf.val.sext5 = and i32 %0, 1 %1 = load i32* bitcast (%struct.t1* @s1 to i32*), align 4 %2 = and i32 %1, -4 %3 = or i32 %2, %bf.val.sext5 %bf.val.sext26 = and i32 %0, 2 %4 = or i32 %3, %bf.val.sext26 store i32 %4, i32* bitcast (%struct.t1* @s1 to i32*), align 4 ret void } The two or/and's should be merged into one each. //===---------------------------------------------------------------------===// Machine level code hoisting can be useful in some cases. For example, PR9408 is about: typedef union { void (*f1)(int); void (*f2)(long); } funcs; void foo(funcs f, int which) { int a = 5; if (which) { f.f1(a); } else { f.f2(a); } } which we compile to: foo: # @foo # BB#0: # %entry pushq %rbp movq %rsp, %rbp testl %esi, %esi movq %rdi, %rax je .LBB0_2 # BB#1: # %if.then movl $5, %edi callq *%rax popq %rbp ret .LBB0_2: # %if.else movl $5, %edi callq *%rax popq %rbp ret Note that bb1 and bb2 are the same. This doesn't happen at the IR level because one call is passing an i32 and the other is passing an i64. //===---------------------------------------------------------------------===// I see this sort of pattern in 176.gcc in a few places (e.g. the start of store_bit_field). The rem should be replaced with a multiply and subtract: %3 = sdiv i32 %A, %B %4 = srem i32 %A, %B Similarly for udiv/urem. Note that this shouldn't be done on X86 or ARM, which can do this in a single operation (instruction or libcall). It is probably best to do this in the code generator. //===---------------------------------------------------------------------===// unsigned foo(unsigned x, unsigned y) { return (x & y) == 0 || x == 0; } should fold to (x & y) == 0. //===---------------------------------------------------------------------===// unsigned foo(unsigned x, unsigned y) { return x > y && x != 0; } should fold to x > y. //===---------------------------------------------------------------------===//
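The last two folds can be verified exhaustively over 8-bit values: if x is 0 then (x & y) is already 0, and if x > y (unsigned) then x is necessarily nonzero. A small standalone check, not part of the original notes:

#include <cassert>

int main() {
  for (unsigned x = 0; x < 256; ++x)
    for (unsigned y = 0; y < 256; ++y) {
      assert((((x & y) == 0) || x == 0) == ((x & y) == 0));
      assert((x > y && x != 0) == (x > y));
    }
  return 0;
}

//===---------------------------------------------------------------------===//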
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Target/TargetLoweringObjectFile.cpp
//===-- llvm/Target/TargetLoweringObjectFile.cpp - Object File Info -------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements classes used to handle lowerings specific to common // object file formats. // //===----------------------------------------------------------------------===// #include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Function.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/Mangler.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCContext.h" #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSymbol.h" #include "llvm/Support/Dwarf.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetOptions.h" #include "llvm/Target/TargetSubtargetInfo.h" using namespace llvm; //===----------------------------------------------------------------------===// // Generic Code //===----------------------------------------------------------------------===// /// Initialize - this method must be called before any actual lowering is /// done. This specifies the current context for codegen, and gives the /// lowering implementations a chance to set up their default sections. void TargetLoweringObjectFile::Initialize(MCContext &ctx, const TargetMachine &TM) { Ctx = &ctx; DL = TM.getDataLayout(); InitMCObjectFileInfo(TM.getTargetTriple(), TM.getRelocationModel(), TM.getCodeModel(), *Ctx); } TargetLoweringObjectFile::~TargetLoweringObjectFile() { } static bool isSuitableForBSS(const GlobalVariable *GV, bool NoZerosInBSS) { const Constant *C = GV->getInitializer(); // Must have zero initializer. if (!C->isNullValue()) return false; // Leave constant zeros in readonly constant sections, so they can be shared. if (GV->isConstant()) return false; // If the global has an explicit section specified, don't put it in BSS. if (GV->hasSection()) return false; // If -nozero-initialized-in-bss is specified, don't ever use BSS. if (NoZerosInBSS) return false; // Otherwise, put it in BSS! return true; } /// IsNullTerminatedString - Return true if the specified constant (which is /// known to have a type that is an array of 1/2/4 byte elements) ends with a /// nul value and contains no other nuls in it. Note that this is more general /// than ConstantDataSequential::isString because we allow 2 & 4 byte strings. static bool IsNullTerminatedString(const Constant *C) { // First check: is we have constant array terminated with zero if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(C)) { unsigned NumElts = CDS->getNumElements(); assert(NumElts != 0 && "Can't have an empty CDS"); if (CDS->getElementAsInteger(NumElts-1) != 0) return false; // Not null terminated. // Verify that the null doesn't occur anywhere else in the string. 
for (unsigned i = 0; i != NumElts-1; ++i) if (CDS->getElementAsInteger(i) == 0) return false; return true; } // Another possibility: [1 x i8] zeroinitializer if (isa<ConstantAggregateZero>(C)) return cast<ArrayType>(C->getType())->getNumElements() == 1; return false; } MCSymbol *TargetLoweringObjectFile::getSymbolWithGlobalValueBase( const GlobalValue *GV, StringRef Suffix, Mangler &Mang, const TargetMachine &TM) const { assert(!Suffix.empty()); SmallString<60> NameStr; NameStr += DL->getPrivateGlobalPrefix(); TM.getNameWithPrefix(NameStr, GV, Mang); NameStr.append(Suffix.begin(), Suffix.end()); return Ctx->getOrCreateSymbol(NameStr); } MCSymbol *TargetLoweringObjectFile::getCFIPersonalitySymbol( const GlobalValue *GV, Mangler &Mang, const TargetMachine &TM, MachineModuleInfo *MMI) const { return TM.getSymbol(GV, Mang); } void TargetLoweringObjectFile::emitPersonalityValue(MCStreamer &Streamer, const TargetMachine &TM, const MCSymbol *Sym) const { } /// getKindForGlobal - This is a top-level target-independent classifier for /// a global variable. Given an global variable and information from TM, it /// classifies the global in a variety of ways that make various target /// implementations simpler. The target implementation is free to ignore this /// extra info of course. SectionKind TargetLoweringObjectFile::getKindForGlobal(const GlobalValue *GV, const TargetMachine &TM){ assert(!GV->isDeclaration() && !GV->hasAvailableExternallyLinkage() && "Can only be used for global definitions"); Reloc::Model ReloModel = TM.getRelocationModel(); // Early exit - functions should be always in text sections. const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV); if (!GVar) return SectionKind::getText(); // Handle thread-local data first. if (GVar->isThreadLocal()) { if (isSuitableForBSS(GVar, TM.Options.NoZerosInBSS)) return SectionKind::getThreadBSS(); return SectionKind::getThreadData(); } // Variables with common linkage always get classified as common. if (GVar->hasCommonLinkage()) return SectionKind::getCommon(); // Variable can be easily put to BSS section. if (isSuitableForBSS(GVar, TM.Options.NoZerosInBSS)) { if (GVar->hasLocalLinkage()) return SectionKind::getBSSLocal(); else if (GVar->hasExternalLinkage()) return SectionKind::getBSSExtern(); return SectionKind::getBSS(); } const Constant *C = GVar->getInitializer(); // If the global is marked constant, we can put it into a mergable section, // a mergable string section, or general .data if it contains relocations. if (GVar->isConstant()) { // If the initializer for the global contains something that requires a // relocation, then we may have to drop this into a writable data section // even though it is marked const. switch (C->getRelocationInfo()) { case Constant::NoRelocation: // If the global is required to have a unique address, it can't be put // into a mergable section: just drop it into the general read-only // section instead. if (!GVar->hasUnnamedAddr()) return SectionKind::getReadOnly(); // If initializer is a null-terminated string, put it in a "cstring" // section of the right width. 
if (ArrayType *ATy = dyn_cast<ArrayType>(C->getType())) { if (IntegerType *ITy = dyn_cast<IntegerType>(ATy->getElementType())) { if ((ITy->getBitWidth() == 8 || ITy->getBitWidth() == 16 || ITy->getBitWidth() == 32) && IsNullTerminatedString(C)) { if (ITy->getBitWidth() == 8) return SectionKind::getMergeable1ByteCString(); if (ITy->getBitWidth() == 16) return SectionKind::getMergeable2ByteCString(); assert(ITy->getBitWidth() == 32 && "Unknown width"); return SectionKind::getMergeable4ByteCString(); } } } // Otherwise, just drop it into a mergable constant section. If we have // a section for this size, use it, otherwise use the arbitrary sized // mergable section. switch (TM.getDataLayout()->getTypeAllocSize(C->getType())) { case 4: return SectionKind::getMergeableConst4(); case 8: return SectionKind::getMergeableConst8(); case 16: return SectionKind::getMergeableConst16(); default: return SectionKind::getReadOnly(); } case Constant::LocalRelocation: // In static relocation model, the linker will resolve all addresses, so // the relocation entries will actually be constants by the time the app // starts up. However, we can't put this into a mergable section, because // the linker doesn't take relocations into consideration when it tries to // merge entries in the section. if (ReloModel == Reloc::Static) return SectionKind::getReadOnly(); // Otherwise, the dynamic linker needs to fix it up, put it in the // writable data.rel.local section. return SectionKind::getReadOnlyWithRelLocal(); case Constant::GlobalRelocations: // In static relocation model, the linker will resolve all addresses, so // the relocation entries will actually be constants by the time the app // starts up. However, we can't put this into a mergable section, because // the linker doesn't take relocations into consideration when it tries to // merge entries in the section. if (ReloModel == Reloc::Static) return SectionKind::getReadOnly(); // Otherwise, the dynamic linker needs to fix it up, put it in the // writable data.rel section. return SectionKind::getReadOnlyWithRel(); } } // Okay, this isn't a constant. If the initializer for the global is going // to require a runtime relocation by the dynamic linker, put it into a more // specific section to improve startup time of the app. This coalesces these // globals together onto fewer pages, improving the locality of the dynamic // linker. if (ReloModel == Reloc::Static) return SectionKind::getDataNoRel(); switch (C->getRelocationInfo()) { case Constant::NoRelocation: return SectionKind::getDataNoRel(); case Constant::LocalRelocation: return SectionKind::getDataRelLocal(); case Constant::GlobalRelocations: return SectionKind::getDataRel(); } llvm_unreachable("Invalid relocation"); } /// This method computes the appropriate section to emit the specified global /// variable or function definition. This should not be passed external (or /// available externally) globals. MCSection * TargetLoweringObjectFile::SectionForGlobal(const GlobalValue *GV, SectionKind Kind, Mangler &Mang, const TargetMachine &TM) const { // Select section name. 
if (GV->hasSection()) return getExplicitSectionGlobal(GV, Kind, Mang, TM); // Use default section depending on the 'type' of global return SelectSectionForGlobal(GV, Kind, Mang, TM); } MCSection *TargetLoweringObjectFile::getSectionForJumpTable( const Function &F, Mangler &Mang, const TargetMachine &TM) const { return getSectionForConstant(SectionKind::getReadOnly(), /*C=*/nullptr); } bool TargetLoweringObjectFile::shouldPutJumpTableInFunctionSection( bool UsesLabelDifference, const Function &F) const { // In PIC mode, we need to emit the jump table to the same section as the // function body itself, otherwise the label differences won't make sense. // FIXME: Need a better predicate for this: what about custom entries? if (UsesLabelDifference) return true; // We should also do if the section name is NULL or function is declared // in discardable section // FIXME: this isn't the right predicate, should be based on the MCSection // for the function. if (F.isWeakForLinker()) return true; return false; } /// Given a mergable constant with the specified size and relocation /// information, return a section that it should be placed in. MCSection * TargetLoweringObjectFile::getSectionForConstant(SectionKind Kind, const Constant *C) const { if (Kind.isReadOnly() && ReadOnlySection != nullptr) return ReadOnlySection; return DataSection; } /// getTTypeGlobalReference - Return an MCExpr to use for a /// reference to the specified global variable from exception /// handling information. const MCExpr *TargetLoweringObjectFile::getTTypeGlobalReference( const GlobalValue *GV, unsigned Encoding, Mangler &Mang, const TargetMachine &TM, MachineModuleInfo *MMI, MCStreamer &Streamer) const { const MCSymbolRefExpr *Ref = MCSymbolRefExpr::create(TM.getSymbol(GV, Mang), getContext()); return getTTypeReference(Ref, Encoding, Streamer); } const MCExpr *TargetLoweringObjectFile:: getTTypeReference(const MCSymbolRefExpr *Sym, unsigned Encoding, MCStreamer &Streamer) const { switch (Encoding & 0x70) { default: report_fatal_error("We do not support this DWARF encoding yet!"); case dwarf::DW_EH_PE_absptr: // Do nothing special return Sym; case dwarf::DW_EH_PE_pcrel: { // Emit a label to the streamer for the current position. This gives us // .-foo addressing. MCSymbol *PCSym = getContext().createTempSymbol(); Streamer.EmitLabel(PCSym); const MCExpr *PC = MCSymbolRefExpr::create(PCSym, getContext()); return MCBinaryExpr::createSub(Sym, PC, getContext()); } } } const MCExpr *TargetLoweringObjectFile::getDebugThreadLocalSymbol(const MCSymbol *Sym) const { // FIXME: It's not clear what, if any, default this should have - perhaps a // null return could mean 'no location' & we should just do that here. return MCSymbolRefExpr::create(Sym, *Ctx); } void TargetLoweringObjectFile::getNameWithPrefix( SmallVectorImpl<char> &OutName, const GlobalValue *GV, bool CannotUsePrivateLabel, Mangler &Mang, const TargetMachine &TM) const { Mang.getNameWithPrefix(OutName, GV, CannotUsePrivateLabel); }
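// Illustrative sketch, not part of the original file: the decision order
// implemented by getKindForGlobal above, restated as a standalone function.
// GlobalDesc and its fields are hypothetical stand-ins for the queries made
// on GlobalVariable and TargetMachine; the BSS local/extern and local/global
// relocation distinctions are collapsed for brevity.
enum class SimpleKind {
  Text, ThreadBSS, ThreadData, Common, BSS,
  MergeableCString, MergeableConst, ReadOnly,
  ReadOnlyWithRel, DataNoRel, DataRel
};

struct GlobalDesc {
  bool isFunction, isThreadLocal, hasCommonLinkage, suitableForBSS;
  bool isConstant, initializerHasRelocations, isNullTerminatedString;
  bool uniqueAddressRequired, staticRelocModel;
};

static SimpleKind classifyGlobal(const GlobalDesc &G) {
  if (G.isFunction) return SimpleKind::Text;              // functions: text
  if (G.isThreadLocal)
    return G.suitableForBSS ? SimpleKind::ThreadBSS : SimpleKind::ThreadData;
  if (G.hasCommonLinkage) return SimpleKind::Common;
  if (G.suitableForBSS) return SimpleKind::BSS;            // zero initializer
  if (G.isConstant) {
    if (G.initializerHasRelocations)                       // needs fixups
      return G.staticRelocModel ? SimpleKind::ReadOnly
                                : SimpleKind::ReadOnlyWithRel;
    if (G.uniqueAddressRequired) return SimpleKind::ReadOnly; // no merging
    return G.isNullTerminatedString ? SimpleKind::MergeableCString
                                    : SimpleKind::MergeableConst;
  }
  if (G.staticRelocModel || !G.initializerHasRelocations)
    return SimpleKind::DataNoRel;
  return SimpleKind::DataRel;                              // dynamic fixups
}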
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Target/TargetIntrinsicInfo.cpp
//===-- TargetIntrinsicInfo.cpp - Target Instruction Information ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetIntrinsicInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/IR/Function.h"
using namespace llvm;

TargetIntrinsicInfo::TargetIntrinsicInfo() {
}

TargetIntrinsicInfo::~TargetIntrinsicInfo() {
}

unsigned TargetIntrinsicInfo::getIntrinsicID(Function *F) const {
  const ValueName *ValName = F->getValueName();
  if (!ValName)
    return 0;
  return lookupName(ValName->getKeyData(), ValName->getKeyLength());
}
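// Usage sketch, not part of the original file: getIntrinsicID above returns 0
// for functions the target does not recognize and a nonzero target-specific
// ID otherwise.  The Module and TargetIntrinsicInfo instances are assumed to
// come from the surrounding compiler context.
#include "llvm/IR/Module.h"
#include "llvm/Target/TargetIntrinsicInfo.h"

static unsigned countTargetIntrinsics(llvm::Module &M,
                                      const llvm::TargetIntrinsicInfo &TII) {
  unsigned Count = 0;
  for (llvm::Function &F : M)
    if (TII.getIntrinsicID(&F) != 0)   // nonzero => target-specific intrinsic
      ++Count;
  return Count;
}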
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxilPdbInfo/DxilPdbInfoWriter.cpp
#include "dxc/DxilPdbInfo/DxilPdbInfoWriter.h" #include "dxc/Support/Global.h" #include "dxc/Support/WinIncludes.h" #include "dxc/DxilCompression/DxilCompressionHelpers.h" #include "dxc/DxilContainer/DxilContainer.h" #include "dxc/DxilContainer/DxilRuntimeReflection.h" using namespace hlsl; HRESULT hlsl::WritePdbInfoPart(IMalloc *pMalloc, const void *pUncompressedPdbInfoData, size_t size, std::vector<char> *outBuffer) { // Write to the output buffer. outBuffer->clear(); hlsl::DxilShaderPDBInfo header = {}; header.CompressionType = hlsl::DxilShaderPDBInfoCompressionType::Zlib; // TODO: Add option to do // uncompressed version. header.UncompressedSizeInBytes = size; header.Version = hlsl::DxilShaderPDBInfoVersion::Latest; { const size_t lastSize = outBuffer->size(); outBuffer->resize(outBuffer->size() + sizeof(header)); memcpy(outBuffer->data() + lastSize, &header, sizeof(header)); } // Then write the compressed RDAT data. hlsl::ZlibResult result = hlsl::ZlibCompressAppend( pMalloc, pUncompressedPdbInfoData, size, *outBuffer); if (result == hlsl::ZlibResult::OutOfMemory) IFTBOOL(false, E_OUTOFMEMORY); IFTBOOL(result == hlsl::ZlibResult::Success, E_FAIL); IFTBOOL(outBuffer->size() >= sizeof(header), E_FAIL); header.SizeInBytes = outBuffer->size() - sizeof(header); memcpy(outBuffer->data(), &header, sizeof(header)); return S_OK; }
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxilPdbInfo/CMakeLists.txt
add_llvm_library(LLVMDxilPdbInfo
  DxilPdbInfoWriter.cpp

  ADDITIONAL_HEADER_DIRS
)

add_dependencies(LLVMDxilPdbInfo intrinsics_gen)
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxilPdbInfo/LLVMBuild.txt
; Copyright (C) Microsoft Corporation. All rights reserved.
; This file is distributed under the University of Illinois Open Source License. See LICENSE.TXT for details.
;
; This is an LLVMBuild description file for the components in this subdirectory.
;
; For more information on the LLVMBuild system, please see:
;
;   http://llvm.org/docs/LLVMBuild.html
;
;===------------------------------------------------------------------------===;

[component_0]
type = Library
name = DxilPdbInfo
parent = Libraries
required_libraries = Core Support DxcSupport DxilContainer
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxcBindingTable/CMakeLists.txt
add_llvm_library(LLVMDxcBindingTable
  DxcBindingTable.cpp
)

add_dependencies(LLVMDxcBindingTable intrinsics_gen)
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxcBindingTable/DxcBindingTable.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxcBindingTable.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // /////////////////////////////////////////////////////////////////////////////// #include "llvm/ADT/SmallString.h" #include "llvm/ADT/Twine.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Metadata.h" #include "llvm/IR/Module.h" #include "dxc/DXIL/DxilMetadataHelper.h" #include "dxc/DXIL/DxilModule.h" #include "dxc/DXIL/DxilResourceBase.h" #include "dxc/DxcBindingTable/DxcBindingTable.h" #include <ctype.h> #include <set> using namespace llvm; using namespace hlsl; namespace { enum IntegerConversionStatus { Success, OutOfBounds, Invalid, Empty, }; static IntegerConversionStatus ToUnsigned32(StringRef str, uint32_t *outInteger) { *outInteger = 0; if (str.empty()) return IntegerConversionStatus::Empty; llvm::APInt integer; if (llvm::StringRef(str).getAsInteger(0, integer)) { return IntegerConversionStatus::Invalid; } if (integer != 0 && integer.getBitWidth() > 32) { return IntegerConversionStatus::OutOfBounds; } *outInteger = (uint32_t)integer.getLimitedValue(); return IntegerConversionStatus::Success; } } // namespace bool hlsl::ParseBindingTable(llvm::StringRef fileName, llvm::StringRef content, llvm::raw_ostream &errors, DxcBindingTable *outTable) { struct Parser { StringRef fileName; const char *curr = nullptr; const char *end = nullptr; int line = 1; int col = 1; llvm::raw_ostream &errors; bool WasEndOfLine = false; struct Location { int line = 0; int col = 0; }; inline static bool IsDelimiter(char c) { return c == ','; } inline static bool IsNewline(char c) { return c == '\r' || c == '\n'; } inline static bool IsEndOfLine(char c) { return IsNewline(c) || c == ';' || c == '\0'; } inline static bool IsWhitespace(char c) { return c == ' ' || c == '\t'; } inline Parser(StringRef fileName, StringRef content, llvm::raw_ostream &errors) : fileName(fileName), curr(content.data()), end(content.data() + content.size()), errors(errors) { EatWhiteSpaceAndNewlines(); } inline bool WasJustEndOfLine() const { return WasEndOfLine; } inline void EatWhitespace() { for (;;) { if (IsWhitespace(Peek())) Advance(); else break; } } inline void EatWhiteSpaceAndNewlines() { for (;;) { if (IsWhitespace(Peek()) || IsNewline(Peek())) Advance(); else break; } } inline Location GetLoc() const { Location loc; loc.line = line; loc.col = col; return loc; } void Advance() { if (ReachedEnd()) return; if (*curr == '\n') { line++; col = 1; } else if (*curr != '\r') { col++; } curr++; } inline bool ReachedEnd() const { return curr >= end || *curr == '\0'; } inline void Warn(Location loc, const Twine &err) { (void)Error(loc, err); } inline bool Error(Location loc, const Twine &err) { errors << (Twine(fileName) + ":" + Twine(loc.line) + ":" + Twine(loc.col) + ": " + err + "\n") .str(); return false; } inline bool Error(const Twine &err) { Error(GetLoc(), err); return false; } inline char Peek() const { if (ReachedEnd()) return '\0'; return *curr; } bool ParseCell(SmallVectorImpl<char> *str) { EatWhitespace(); if (ReachedEnd()) { return Error("Unexpected EOF when parsing cell."); } bool hasQuote = false; if (Peek() == '"') { hasQuote = true; Advance(); } while (!ReachedEnd()) { if (IsEndOfLine(Peek()) || (!hasQuote && IsDelimiter(Peek()))) { if (hasQuote && IsNewline(Peek())) return Error("Unexpected newline inside quotation."); 
// Trim the white space at the end of the string if (str) { while (str->size() && IsWhitespace(str->back())) { str->pop_back(); } } break; } // Double quotes if (Peek() == '"') { Advance(); if (!hasQuote) return Error("'\"' not allowed in non-quoted cell."); EatWhitespace(); if (!IsDelimiter(Peek()) && !IsEndOfLine(Peek())) { return Error("Unexpected character after quote."); } break; } if (str) { str->push_back(Peek()); } Advance(); } // Handle delimiter { // If this delimiter is not a newline, set our newline flag to false. if (!IsEndOfLine(Peek())) { WasEndOfLine = false; Advance(); // Eat white spaces so we can detect the next newline if this // is a trailing comma. EatWhitespace(); } if (IsEndOfLine(Peek())) { Advance(); // Skip this character, which could be ';' WasEndOfLine = true; EatWhiteSpaceAndNewlines(); } } return true; } bool ParseResourceIndex(hlsl::DXIL::ResourceClass *outClass, unsigned *outIndex) { *outClass = hlsl::DXIL::ResourceClass::Invalid; *outIndex = UINT_MAX; auto loc = GetLoc(); SmallString<32> str; if (!ParseCell(&str)) return false; if (str.empty()) { return Error(loc, "Resource binding cannot be empty."); } switch (str[0]) { case 'b': *outClass = hlsl::DXIL::ResourceClass::CBuffer; break; case 's': *outClass = hlsl::DXIL::ResourceClass::Sampler; break; case 't': *outClass = hlsl::DXIL::ResourceClass::SRV; break; case 'u': *outClass = hlsl::DXIL::ResourceClass::UAV; break; default: return Error(loc, "Invalid resource class. Needs to be one of 'b', " "'s', 't', or 'u'."); break; } StringRef integerStr; if (str.size() > 1) { integerStr = StringRef(&str[1], str.size() - 1); } if (auto result = ToUnsigned32(integerStr, outIndex)) { switch (result) { case IntegerConversionStatus::OutOfBounds: return Error(loc, Twine() + "'" + integerStr + "' is out of range of an 32-bit unsigned integer."); default: return Error(loc, Twine() + "'" + str + "' is not a valid resource binding."); } } return true; } inline bool ParseReourceSpace(unsigned *outResult) { auto loc = GetLoc(); SmallString<32> str; if (!ParseCell(&str)) return false; if (str.empty()) { return Error(loc, "Expected unsigned 32-bit integer for resource " "space, but got empty cell."); } if (auto result = ToUnsigned32(str, outResult)) { switch (result) { case IntegerConversionStatus::OutOfBounds: return Error(loc, Twine() + "'" + str + "' is out of range of an 32-bit unsigned integer."); default: return Error(loc, Twine() + "'" + str + "' is not a valid 32-bit unsigned integer."); } } return true; } }; Parser P(fileName, content, errors); enum class ColumnType { Name, Index, Space, Unknown, }; llvm::SmallVector<ColumnType, 5> columns; std::set<ColumnType> columnsSet; for (;;) { llvm::SmallString<32> column; if (!P.ParseCell(&column)) { return false; } for (char &c : column) c = tolower(c); auto loc = P.GetLoc(); if (column == "resourcename") { if (!columnsSet.insert(ColumnType::Name).second) { return P.Error(loc, "Column 'ResourceName' already specified."); } columns.push_back(ColumnType::Name); } else if (column == "binding") { if (!columnsSet.insert(ColumnType::Index).second) { return P.Error(loc, "Column 'Binding' already specified."); } columns.push_back(ColumnType::Index); } else if (column == "space") { if (!columnsSet.insert(ColumnType::Space).second) { return P.Error(loc, "Column 'Space' already specified."); } columns.push_back(ColumnType::Space); } else { P.Warn(loc, Twine() + "Unknown column '" + column + "'"); columns.push_back(ColumnType::Unknown); } if (P.WasJustEndOfLine()) break; } if 
(!columnsSet.count(ColumnType::Name) || !columnsSet.count(ColumnType::Index) || !columnsSet.count(ColumnType::Space)) { return P.Error( Twine() + "Input format is csv with headings: ResourceName, Binding, Space."); } while (!P.ReachedEnd()) { SmallString<32> name; hlsl::DXIL::ResourceClass cls = hlsl::DXIL::ResourceClass::Invalid; unsigned index = 0; unsigned space = 0; for (unsigned i = 0; i < columns.size(); i++) { ColumnType column = columns[i]; switch (column) { case ColumnType::Name: { if (!P.ParseCell(&name)) return false; } break; case ColumnType::Index: { if (!P.ParseResourceIndex(&cls, &index)) return false; } break; case ColumnType::Space: { if (!P.ParseReourceSpace(&space)) return false; } break; default: { if (!P.ParseCell(nullptr)) return false; } break; } if (P.WasJustEndOfLine() && i + 1 != columns.size()) { return P.Error("Row ended after just " + Twine(i + 1) + " columns. Expected " + Twine(columns.size()) + "."); } } DxcBindingTable::Entry entry; entry.space = space; entry.index = index; outTable->entries[DxcBindingTable::Key(name.c_str(), cls)] = entry; if (!P.WasJustEndOfLine()) { return P.Error( "Unexpected cell at the end of row. There should only be " + Twine(columns.size()) + " columns"); } } return true; } typedef std::pair<std::string, hlsl::DXIL::ResourceClass> ResourceKey; typedef std::map<ResourceKey, DxilResourceBase *> ResourceMap; template <typename T> static inline void GatherResources(const std::vector<std::unique_ptr<T>> &List, ResourceMap *Map) { for (const std::unique_ptr<T> &ptr : List) { (*Map)[ResourceKey(ptr->GetGlobalName(), ptr->GetClass())] = ptr.get(); } } void hlsl::WriteBindingTableToMetadata(llvm::Module &M, const hlsl::DxcBindingTable &table) { if (table.entries.empty()) return; llvm::NamedMDNode *bindingsMD = M.getOrInsertNamedMetadata( hlsl::DxilMDHelper::kDxilDxcBindingTableMDName); LLVMContext &LLVMCtx = M.getContext(); // Don't add operands repeatedly if (bindingsMD->getNumOperands()) { return; } for (const std::pair<const DxcBindingTable::Key, DxcBindingTable::Entry> &binding : table.entries) { auto GetInt32MD = [&LLVMCtx](uint32_t val) -> llvm::ValueAsMetadata * { return llvm::ValueAsMetadata::get( llvm::ConstantInt::get(llvm::Type::getInt32Ty(LLVMCtx), val)); }; llvm::Metadata *operands[4] = {}; operands[hlsl::DxilMDHelper::kDxilDxcBindingTableResourceName] = llvm::MDString::get(LLVMCtx, binding.first.first); operands[hlsl::DxilMDHelper::kDxilDxcBindingTableResourceClass] = GetInt32MD((unsigned)binding.first.second); operands[hlsl::DxilMDHelper::kDxilDxcBindingTableResourceIndex] = GetInt32MD(binding.second.index); operands[hlsl::DxilMDHelper::kDxilDxcBindingTableResourceSpace] = GetInt32MD(binding.second.space); llvm::MDTuple *entry = llvm::MDNode::get(LLVMCtx, operands); bindingsMD->addOperand(entry); } } void hlsl::ApplyBindingTableFromMetadata(DxilModule &DM) { Module &M = *DM.GetModule(); NamedMDNode *bindings = M.getNamedMetadata(hlsl::DxilMDHelper::kDxilDxcBindingTableMDName); if (!bindings) return; ResourceMap resourceMap; GatherResources(DM.GetCBuffers(), &resourceMap); GatherResources(DM.GetSRVs(), &resourceMap); GatherResources(DM.GetUAVs(), &resourceMap); GatherResources(DM.GetSamplers(), &resourceMap); for (MDNode *mdEntry : bindings->operands()) { Metadata *nameMD = mdEntry->getOperand(DxilMDHelper::kDxilDxcBindingTableResourceName); Metadata *classMD = mdEntry->getOperand(DxilMDHelper::kDxilDxcBindingTableResourceClass); Metadata *indexMD = mdEntry->getOperand(DxilMDHelper::kDxilDxcBindingTableResourceIndex); Metadata 
*spaceMD = mdEntry->getOperand(DxilMDHelper::kDxilDxcBindingTableResourceSpace); StringRef name = cast<MDString>(nameMD)->getString(); hlsl::DXIL::ResourceClass cls = (hlsl::DXIL::ResourceClass)cast<ConstantInt>( cast<ValueAsMetadata>(classMD)->getValue()) ->getLimitedValue(); unsigned index = cast<ConstantInt>(cast<ValueAsMetadata>(indexMD)->getValue()) ->getLimitedValue(); unsigned space = cast<ConstantInt>(cast<ValueAsMetadata>(spaceMD)->getValue()) ->getLimitedValue(); auto it = resourceMap.find(ResourceKey(name, cls)); if (it != resourceMap.end()) { DxilResourceBase *resource = it->second; if (!resource->IsAllocated()) { resource->SetLowerBound(index); resource->SetSpaceID(space); } } } }
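Illustrative usage sketch (not part of DxcBindingTable.cpp; the CSV text, the file name "bindings.csv", and the helper name are assumptions): it feeds an in-memory binding table in the ResourceName/Binding/Space format expected by the parser above and collects diagnostics into a string.

#include "dxc/DxcBindingTable/DxcBindingTable.h"
#include "llvm/Support/raw_ostream.h"
#include <string>

static bool ParseExampleBindingTable(hlsl::DxcBindingTable &table) {
  // Header row is required; data rows may end with a newline or ';'.
  llvm::StringRef csv = "ResourceName, Binding, Space\n"
                        "g_texture,    t0,      0\n"
                        "g_sampler,    s0,      0\n"
                        "cbPerFrame,   b1,      2\n";
  std::string errorText;
  llvm::raw_string_ostream errs(errorText);
  bool ok = hlsl::ParseBindingTable("bindings.csv", csv, errs, &table);
  if (!ok)
    llvm::errs() << errs.str(); // diagnostics look like "bindings.csv:3:15: ..."
  return ok;
}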
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/DxcBindingTable/LLVMBuild.txt
; Copyright (C) Microsoft Corporation. All rights reserved. ; This file is distributed under the University of Illinois Open Source License. See LICENSE.TXT for details. ; ; This is an LLVMBuild description file for the components in this subdirectory. ; ; For more information on the LLVMBuild system, please see: ; ; http://llvm.org/docs/LLVMBuild.html ; ;===------------------------------------------------------------------------===; [component_0] type = Library name = DxcBindingTable parent = Libraries required_libraries = Core DxcSupport Support DXIL
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Object/ArchiveWriter.cpp
//===- ArchiveWriter.cpp - ar File Format implementation --------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the writeArchive function. // //===----------------------------------------------------------------------===// #include "llvm/Object/ArchiveWriter.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/StringRef.h" #include "llvm/IR/LLVMContext.h" #include "llvm/Object/Archive.h" #include "llvm/Object/ObjectFile.h" #include "llvm/Object/SymbolicFile.h" #include "llvm/Support/EndianStream.h" #include "llvm/Support/Errc.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Format.h" #include "llvm/Support/Path.h" #include "llvm/Support/ToolOutputFile.h" #include "llvm/Support/raw_ostream.h" #if !defined(_MSC_VER) && !defined(__MINGW32__) #include <unistd.h> #else #include <io.h> #endif using namespace llvm; NewArchiveIterator::NewArchiveIterator(object::Archive::child_iterator I, StringRef Name) : IsNewMember(false), Name(Name), OldI(I) {} NewArchiveIterator::NewArchiveIterator(StringRef NewFilename, StringRef Name) : IsNewMember(true), Name(Name), NewFilename(NewFilename) {} StringRef NewArchiveIterator::getName() const { return Name; } bool NewArchiveIterator::isNewMember() const { return IsNewMember; } object::Archive::child_iterator NewArchiveIterator::getOld() const { assert(!IsNewMember); return OldI; } StringRef NewArchiveIterator::getNew() const { assert(IsNewMember); return NewFilename; } llvm::ErrorOr<int> NewArchiveIterator::getFD(sys::fs::file_status &NewStatus) const { assert(IsNewMember); int NewFD; if (auto EC = sys::fs::openFileForRead(NewFilename, NewFD)) return EC; assert(NewFD != -1); if (auto EC = sys::fs::status(NewFD, NewStatus)) return EC; // Opening a directory doesn't make sense. Let it fail. // Linux cannot open directories with open(2), although // cygwin and *bsd can. if (NewStatus.type() == sys::fs::file_type::directory_file) return make_error_code(errc::is_a_directory); return NewFD; } template <typename T> static void printWithSpacePadding(raw_fd_ostream &OS, T Data, unsigned Size, bool MayTruncate = false) { uint64_t OldPos = OS.tell(); OS << Data; unsigned SizeSoFar = OS.tell() - OldPos; if (Size > SizeSoFar) { OS.indent(Size - SizeSoFar); } else if (Size < SizeSoFar) { assert(MayTruncate && "Data doesn't fit in Size"); // Some of the data this is used for (like UID) can be larger than the // space available in the archive format. Truncate in that case. 
OS.seek(OldPos + Size); } } static void print32(raw_ostream &Out, object::Archive::Kind Kind, uint32_t Val) { if (Kind == object::Archive::K_GNU) support::endian::Writer<support::big>(Out).write(Val); else support::endian::Writer<support::little>(Out).write(Val); } static void printRestOfMemberHeader(raw_fd_ostream &Out, const sys::TimeValue &ModTime, unsigned UID, unsigned GID, unsigned Perms, unsigned Size) { printWithSpacePadding(Out, ModTime.toEpochTime(), 12); printWithSpacePadding(Out, UID, 6, true); printWithSpacePadding(Out, GID, 6, true); printWithSpacePadding(Out, format("%o", Perms), 8); printWithSpacePadding(Out, Size, 10); Out << "`\n"; } static void printGNUSmallMemberHeader(raw_fd_ostream &Out, StringRef Name, const sys::TimeValue &ModTime, unsigned UID, unsigned GID, unsigned Perms, unsigned Size) { printWithSpacePadding(Out, Twine(Name) + "/", 16); printRestOfMemberHeader(Out, ModTime, UID, GID, Perms, Size); } static void printBSDMemberHeader(raw_fd_ostream &Out, StringRef Name, const sys::TimeValue &ModTime, unsigned UID, unsigned GID, unsigned Perms, unsigned Size) { uint64_t PosAfterHeader = Out.tell() + 60 + Name.size(); // Pad so that even 64 bit object files are aligned. unsigned Pad = OffsetToAlignment(PosAfterHeader, 8); unsigned NameWithPadding = Name.size() + Pad; printWithSpacePadding(Out, Twine("#1/") + Twine(NameWithPadding), 16); printRestOfMemberHeader(Out, ModTime, UID, GID, Perms, NameWithPadding + Size); Out << Name; assert(PosAfterHeader == Out.tell()); while (Pad--) Out.write(uint8_t(0)); } static void printMemberHeader(raw_fd_ostream &Out, object::Archive::Kind Kind, StringRef Name, std::vector<unsigned>::iterator &StringMapIndexIter, const sys::TimeValue &ModTime, unsigned UID, unsigned GID, unsigned Perms, unsigned Size) { if (Kind == object::Archive::K_BSD) return printBSDMemberHeader(Out, Name, ModTime, UID, GID, Perms, Size); if (Name.size() < 16) return printGNUSmallMemberHeader(Out, Name, ModTime, UID, GID, Perms, Size); Out << '/'; printWithSpacePadding(Out, *StringMapIndexIter++, 15); printRestOfMemberHeader(Out, ModTime, UID, GID, Perms, Size); } static void writeStringTable(raw_fd_ostream &Out, ArrayRef<NewArchiveIterator> Members, std::vector<unsigned> &StringMapIndexes) { unsigned StartOffset = 0; for (ArrayRef<NewArchiveIterator>::iterator I = Members.begin(), E = Members.end(); I != E; ++I) { StringRef Name = I->getName(); if (Name.size() < 16) continue; if (StartOffset == 0) { printWithSpacePadding(Out, "//", 58); Out << "`\n"; StartOffset = Out.tell(); } StringMapIndexes.push_back(Out.tell() - StartOffset); Out << Name << "/\n"; } if (StartOffset == 0) return; if (Out.tell() % 2) Out << '\n'; int Pos = Out.tell(); Out.seek(StartOffset - 12); printWithSpacePadding(Out, Pos - StartOffset, 10); Out.seek(Pos); } static sys::TimeValue now(bool Deterministic) { if (!Deterministic) return sys::TimeValue::now(); sys::TimeValue TV; TV.fromEpochTime(0); return TV; } // Returns the offset of the first reference to a member offset. 
static ErrorOr<unsigned> writeSymbolTable(raw_fd_ostream &Out, object::Archive::Kind Kind, ArrayRef<NewArchiveIterator> Members, ArrayRef<MemoryBufferRef> Buffers, std::vector<unsigned> &MemberOffsetRefs, bool Deterministic) { unsigned HeaderStartOffset = 0; unsigned BodyStartOffset = 0; SmallString<128> NameBuf; raw_svector_ostream NameOS(NameBuf); LLVMContext Context; for (unsigned MemberNum = 0, N = Members.size(); MemberNum < N; ++MemberNum) { MemoryBufferRef MemberBuffer = Buffers[MemberNum]; ErrorOr<std::unique_ptr<object::SymbolicFile>> ObjOrErr = object::SymbolicFile::createSymbolicFile( MemberBuffer, sys::fs::file_magic::unknown, &Context); if (!ObjOrErr) continue; // FIXME: check only for "not an object file" errors. object::SymbolicFile &Obj = *ObjOrErr.get(); if (!HeaderStartOffset) { HeaderStartOffset = Out.tell(); if (Kind == object::Archive::K_GNU) printGNUSmallMemberHeader(Out, "", now(Deterministic), 0, 0, 0, 0); else printBSDMemberHeader(Out, "__.SYMDEF", now(Deterministic), 0, 0, 0, 0); BodyStartOffset = Out.tell(); print32(Out, Kind, 0); // number of entries or bytes } for (const object::BasicSymbolRef &S : Obj.symbols()) { uint32_t Symflags = S.getFlags(); if (Symflags & object::SymbolRef::SF_FormatSpecific) continue; if (!(Symflags & object::SymbolRef::SF_Global)) continue; if (Symflags & object::SymbolRef::SF_Undefined) continue; unsigned NameOffset = NameOS.tell(); if (auto EC = S.printName(NameOS)) return EC; NameOS << '\0'; MemberOffsetRefs.push_back(MemberNum); if (Kind == object::Archive::K_BSD) print32(Out, Kind, NameOffset); print32(Out, Kind, 0); // member offset } } if (HeaderStartOffset == 0) return 0; StringRef StringTable = NameOS.str(); if (Kind == object::Archive::K_BSD) print32(Out, Kind, StringTable.size()); // byte count of the string table Out << StringTable; // ld64 requires the next member header to start at an offset that is // 4 bytes aligned. unsigned Pad = OffsetToAlignment(Out.tell(), 4); while (Pad--) Out.write(uint8_t(0)); // Patch up the size of the symbol table now that we know how big it is. unsigned Pos = Out.tell(); const unsigned MemberHeaderSize = 60; Out.seek(HeaderStartOffset + 48); // offset of the size field. printWithSpacePadding(Out, Pos - MemberHeaderSize - HeaderStartOffset, 10); // Patch up the number of symbols. 
Out.seek(BodyStartOffset); unsigned NumSyms = MemberOffsetRefs.size(); if (Kind == object::Archive::K_GNU) print32(Out, Kind, NumSyms); else print32(Out, Kind, NumSyms * 8); Out.seek(Pos); return BodyStartOffset + 4; } std::pair<StringRef, std::error_code> llvm::writeArchive( StringRef ArcName, std::vector<NewArchiveIterator> &NewMembers, bool WriteSymtab, object::Archive::Kind Kind, bool Deterministic) { SmallString<128> TmpArchive; int TmpArchiveFD; if (auto EC = sys::fs::createUniqueFile(ArcName + ".temp-archive-%%%%%%%.a", TmpArchiveFD, TmpArchive)) return std::make_pair(ArcName, EC); tool_output_file Output(TmpArchive, TmpArchiveFD); raw_fd_ostream &Out = Output.os(); Out << "!<arch>\n"; std::vector<unsigned> MemberOffsetRefs; std::vector<std::unique_ptr<MemoryBuffer>> Buffers; std::vector<MemoryBufferRef> Members; std::vector<sys::fs::file_status> NewMemberStatus; for (unsigned I = 0, N = NewMembers.size(); I < N; ++I) { NewArchiveIterator &Member = NewMembers[I]; MemoryBufferRef MemberRef; if (Member.isNewMember()) { StringRef Filename = Member.getNew(); NewMemberStatus.resize(NewMemberStatus.size() + 1); sys::fs::file_status &Status = NewMemberStatus.back(); ErrorOr<int> FD = Member.getFD(Status); if (auto EC = FD.getError()) return std::make_pair(Filename, EC); ErrorOr<std::unique_ptr<MemoryBuffer>> MemberBufferOrErr = MemoryBuffer::getOpenFile(FD.get(), Filename, Status.getSize(), false); if (auto EC = MemberBufferOrErr.getError()) return std::make_pair(Filename, EC); if (close(FD.get()) != 0) return std::make_pair(Filename, std::error_code(errno, std::generic_category())); Buffers.push_back(std::move(MemberBufferOrErr.get())); MemberRef = Buffers.back()->getMemBufferRef(); } else { object::Archive::child_iterator OldMember = Member.getOld(); ErrorOr<MemoryBufferRef> MemberBufferOrErr = OldMember->getMemoryBufferRef(); if (auto EC = MemberBufferOrErr.getError()) return std::make_pair("", EC); MemberRef = MemberBufferOrErr.get(); } Members.push_back(MemberRef); } unsigned MemberReferenceOffset = 0; if (WriteSymtab) { ErrorOr<unsigned> MemberReferenceOffsetOrErr = writeSymbolTable( Out, Kind, NewMembers, Members, MemberOffsetRefs, Deterministic); if (auto EC = MemberReferenceOffsetOrErr.getError()) return std::make_pair(ArcName, EC); MemberReferenceOffset = MemberReferenceOffsetOrErr.get(); } std::vector<unsigned> StringMapIndexes; if (Kind != object::Archive::K_BSD) writeStringTable(Out, NewMembers, StringMapIndexes); unsigned MemberNum = 0; unsigned NewMemberNum = 0; std::vector<unsigned>::iterator StringMapIndexIter = StringMapIndexes.begin(); std::vector<unsigned> MemberOffset; for (const NewArchiveIterator &I : NewMembers) { MemoryBufferRef File = Members[MemberNum++]; unsigned Pos = Out.tell(); MemberOffset.push_back(Pos); sys::TimeValue ModTime; unsigned UID; unsigned GID; unsigned Perms; if (Deterministic) { ModTime.fromEpochTime(0); UID = 0; GID = 0; Perms = 0644; } else if (I.isNewMember()) { const sys::fs::file_status &Status = NewMemberStatus[NewMemberNum]; ModTime = Status.getLastModificationTime(); UID = Status.getUser(); GID = Status.getGroup(); Perms = Status.permissions(); } else { object::Archive::child_iterator OldMember = I.getOld(); ModTime = OldMember->getLastModified(); UID = OldMember->getUID(); GID = OldMember->getGID(); Perms = OldMember->getAccessMode(); } if (I.isNewMember()) { StringRef FileName = I.getNew(); const sys::fs::file_status &Status = NewMemberStatus[NewMemberNum++]; printMemberHeader(Out, Kind, sys::path::filename(FileName), 
StringMapIndexIter, ModTime, UID, GID, Perms, Status.getSize()); } else { object::Archive::child_iterator OldMember = I.getOld(); printMemberHeader(Out, Kind, I.getName(), StringMapIndexIter, ModTime, UID, GID, Perms, OldMember->getSize()); } Out << File.getBuffer(); if (Out.tell() % 2) Out << '\n'; } if (MemberReferenceOffset) { Out.seek(MemberReferenceOffset); for (unsigned MemberNum : MemberOffsetRefs) { if (Kind == object::Archive::K_BSD) Out.seek(Out.tell() + 4); // skip over the string offset print32(Out, Kind, MemberOffset[MemberNum]); } } Output.keep(); Out.close(); sys::fs::rename(TmpArchive, ArcName); return std::make_pair("", std::error_code()); }
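Illustrative usage sketch (not part of ArchiveWriter.cpp; the member paths "a.o"/"b.o", the archive name, and the helper name are assumptions): it builds NewArchiveIterator entries for files on disk and calls writeArchive to emit a GNU-format archive with a symbol table.

#include "llvm/ADT/StringRef.h"
#include "llvm/Object/Archive.h"
#include "llvm/Object/ArchiveWriter.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
#include <vector>

static std::error_code writeExampleArchive() {
  std::vector<llvm::NewArchiveIterator> Members;
  // New members are referenced by path; the stored name is just the filename.
  for (llvm::StringRef Path : {"a.o", "b.o"})
    Members.emplace_back(Path, llvm::sys::path::filename(Path));
  std::pair<llvm::StringRef, std::error_code> Result =
      llvm::writeArchive("libexample.a", Members,
                         /*WriteSymtab=*/true, llvm::object::Archive::K_GNU,
                         /*Deterministic=*/true);
  if (Result.second)
    llvm::errs() << "error writing " << Result.first << ": "
                 << Result.second.message() << "\n";
  return Result.second;
}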
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Object/IRObjectFile.cpp
//===- IRObjectFile.cpp - IR object file implementation ---------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Part of the IRObjectFile class implementation. // //===----------------------------------------------------------------------===// #include "llvm/Object/IRObjectFile.h" #include "RecordStreamer.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Bitcode/ReaderWriter.h" #include "llvm/IR/GVMaterializer.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Mangler.h" #include "llvm/IR/Module.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCContext.h" #include "llvm/MC/MCInstrInfo.h" #include "llvm/MC/MCObjectFileInfo.h" #include "llvm/MC/MCParser/MCAsmParser.h" #include "llvm/MC/MCRegisterInfo.h" #include "llvm/MC/MCSubtargetInfo.h" #include "llvm/MC/MCTargetAsmParser.h" #include "llvm/Object/ObjectFile.h" #include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/SourceMgr.h" #include "llvm/Support/TargetRegistry.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; using namespace object; IRObjectFile::IRObjectFile(MemoryBufferRef Object, std::unique_ptr<Module> Mod) : SymbolicFile(Binary::ID_IR, Object), M(std::move(Mod)) { Mang.reset(new Mangler()); #if 1 // HLSL Change - remove dependency on machine-specific concepts here return; #else const std::string &InlineAsm = M->getModuleInlineAsm(); if (InlineAsm.empty()) return; Triple TT(M->getTargetTriple()); std::string Err; const Target *T = TargetRegistry::lookupTarget(TT.str(), Err); if (!T) return; std::unique_ptr<MCRegisterInfo> MRI(T->createMCRegInfo(TT.str())); if (!MRI) return; std::unique_ptr<MCAsmInfo> MAI(T->createMCAsmInfo(*MRI, TT.str())); if (!MAI) return; std::unique_ptr<MCSubtargetInfo> STI( T->createMCSubtargetInfo(TT.str(), "", "")); if (!STI) return; std::unique_ptr<MCInstrInfo> MCII(T->createMCInstrInfo()); if (!MCII) return; MCObjectFileInfo MOFI; MCContext MCCtx(MAI.get(), MRI.get(), &MOFI); MOFI.InitMCObjectFileInfo(TT, Reloc::Default, CodeModel::Default, MCCtx); std::unique_ptr<RecordStreamer> Streamer(new RecordStreamer(MCCtx)); T->createNullTargetStreamer(*Streamer); std::unique_ptr<MemoryBuffer> Buffer(MemoryBuffer::getMemBuffer(InlineAsm)); SourceMgr SrcMgr; SrcMgr.AddNewSourceBuffer(std::move(Buffer), SMLoc()); std::unique_ptr<MCAsmParser> Parser( createMCAsmParser(SrcMgr, MCCtx, *Streamer, *MAI)); MCTargetOptions MCOptions; std::unique_ptr<MCTargetAsmParser> TAP( T->createMCAsmParser(*STI, *Parser, *MCII, MCOptions)); if (!TAP) return; Parser->setTargetParser(*TAP); if (Parser->Run(false)) return; for (auto &KV : *Streamer) { StringRef Key = KV.first(); RecordStreamer::State Value = KV.second; uint32_t Res = BasicSymbolRef::SF_None; switch (Value) { case RecordStreamer::NeverSeen: llvm_unreachable("foo"); case RecordStreamer::DefinedGlobal: Res |= BasicSymbolRef::SF_Global; break; case RecordStreamer::Defined: break; case RecordStreamer::Global: case RecordStreamer::Used: Res |= BasicSymbolRef::SF_Undefined; Res |= BasicSymbolRef::SF_Global; break; } AsmSymbols.push_back( std::make_pair<std::string, uint32_t>(Key, std::move(Res))); } #endif // HLSL Change - remove dependency on machine-specific concepts here } IRObjectFile::~IRObjectFile() { } static GlobalValue *getGV(DataRefImpl &Symb) { if ((Symb.p & 3) == 3) return nullptr; return reinterpret_cast<GlobalValue*>(Symb.p & 
~uintptr_t(3)); } static uintptr_t skipEmpty(Module::const_alias_iterator I, const Module &M) { if (I == M.alias_end()) return 3; const GlobalValue *GV = &*I; return reinterpret_cast<uintptr_t>(GV) | 2; } static uintptr_t skipEmpty(Module::const_global_iterator I, const Module &M) { if (I == M.global_end()) return skipEmpty(M.alias_begin(), M); const GlobalValue *GV = &*I; return reinterpret_cast<uintptr_t>(GV) | 1; } static uintptr_t skipEmpty(Module::const_iterator I, const Module &M) { if (I == M.end()) return skipEmpty(M.global_begin(), M); const GlobalValue *GV = &*I; return reinterpret_cast<uintptr_t>(GV) | 0; } static unsigned getAsmSymIndex(DataRefImpl Symb) { assert((Symb.p & uintptr_t(3)) == 3); uintptr_t Index = Symb.p & ~uintptr_t(3); Index >>= 2; return Index; } void IRObjectFile::moveSymbolNext(DataRefImpl &Symb) const { const GlobalValue *GV = getGV(Symb); uintptr_t Res; switch (Symb.p & 3) { case 0: { Module::const_iterator Iter(static_cast<const Function*>(GV)); ++Iter; Res = skipEmpty(Iter, *M); break; } case 1: { Module::const_global_iterator Iter(static_cast<const GlobalVariable*>(GV)); ++Iter; Res = skipEmpty(Iter, *M); break; } case 2: { Module::const_alias_iterator Iter(static_cast<const GlobalAlias*>(GV)); ++Iter; Res = skipEmpty(Iter, *M); break; } case 3: { unsigned Index = getAsmSymIndex(Symb); assert(Index < AsmSymbols.size()); ++Index; Res = (Index << 2) | 3; break; } default: llvm_unreachable("unreachable case"); } Symb.p = Res; } std::error_code IRObjectFile::printSymbolName(raw_ostream &OS, DataRefImpl Symb) const { const GlobalValue *GV = getGV(Symb); if (!GV) { unsigned Index = getAsmSymIndex(Symb); assert(Index <= AsmSymbols.size()); OS << AsmSymbols[Index].first; return std::error_code(); } if (GV->hasDLLImportStorageClass()) OS << "__imp_"; if (Mang) Mang->getNameWithPrefix(OS, GV, false); else OS << GV->getName(); return std::error_code(); } uint32_t IRObjectFile::getSymbolFlags(DataRefImpl Symb) const { const GlobalValue *GV = getGV(Symb); if (!GV) { unsigned Index = getAsmSymIndex(Symb); assert(Index <= AsmSymbols.size()); return AsmSymbols[Index].second; } uint32_t Res = BasicSymbolRef::SF_None; if (GV->isDeclarationForLinker()) Res |= BasicSymbolRef::SF_Undefined; if (GV->hasPrivateLinkage()) Res |= BasicSymbolRef::SF_FormatSpecific; if (!GV->hasLocalLinkage()) Res |= BasicSymbolRef::SF_Global; if (GV->hasCommonLinkage()) Res |= BasicSymbolRef::SF_Common; if (GV->hasLinkOnceLinkage() || GV->hasWeakLinkage()) Res |= BasicSymbolRef::SF_Weak; if (GV->getName().startswith("llvm.")) Res |= BasicSymbolRef::SF_FormatSpecific; else if (auto *Var = dyn_cast<GlobalVariable>(GV)) { if (Var->getSection() == StringRef("llvm.metadata")) Res |= BasicSymbolRef::SF_FormatSpecific; } return Res; } GlobalValue *IRObjectFile::getSymbolGV(DataRefImpl Symb) { return getGV(Symb); } std::unique_ptr<Module> IRObjectFile::takeModule() { return std::move(M); } basic_symbol_iterator IRObjectFile::symbol_begin_impl() const { Module::const_iterator I = M->begin(); DataRefImpl Ret; Ret.p = skipEmpty(I, *M); return basic_symbol_iterator(BasicSymbolRef(Ret, this)); } basic_symbol_iterator IRObjectFile::symbol_end_impl() const { DataRefImpl Ret; uint64_t NumAsm = AsmSymbols.size(); NumAsm <<= 2; Ret.p = 3 | NumAsm; return basic_symbol_iterator(BasicSymbolRef(Ret, this)); } ErrorOr<MemoryBufferRef> IRObjectFile::findBitcodeInObject(const ObjectFile &Obj) { for (const SectionRef &Sec : Obj.sections()) { StringRef SecName; if (std::error_code EC = Sec.getName(SecName)) return EC; if 
(SecName == ".llvmbc") { StringRef SecContents; if (std::error_code EC = Sec.getContents(SecContents)) return EC; return MemoryBufferRef(SecContents, Obj.getFileName()); } } return object_error::bitcode_section_not_found; } ErrorOr<MemoryBufferRef> IRObjectFile::findBitcodeInMemBuffer(MemoryBufferRef Object) { sys::fs::file_magic Type = sys::fs::identify_magic(Object.getBuffer()); switch (Type) { case sys::fs::file_magic::bitcode: return Object; case sys::fs::file_magic::elf_relocatable: case sys::fs::file_magic::macho_object: case sys::fs::file_magic::coff_object: { ErrorOr<std::unique_ptr<ObjectFile>> ObjFile = ObjectFile::createObjectFile(Object, Type); if (!ObjFile) return ObjFile.getError(); return findBitcodeInObject(*ObjFile->get()); } default: return object_error::invalid_file_type; } } ErrorOr<std::unique_ptr<IRObjectFile>> llvm::object::IRObjectFile::create(MemoryBufferRef Object, LLVMContext &Context) { ErrorOr<MemoryBufferRef> BCOrErr = findBitcodeInMemBuffer(Object); if (!BCOrErr) return BCOrErr.getError(); std::unique_ptr<MemoryBuffer> Buff( MemoryBuffer::getMemBuffer(BCOrErr.get(), false)); ErrorOr<std::unique_ptr<Module>> MOrErr = getLazyBitcodeModule(std::move(Buff), Context, nullptr, /*ShouldLazyLoadMetadata*/ true); if (std::error_code EC = MOrErr.getError()) return EC; std::unique_ptr<Module> &M = MOrErr.get(); return llvm::make_unique<IRObjectFile>(Object, std::move(M)); }
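Illustrative usage sketch (not part of IRObjectFile.cpp; the path "input.bc" and the helper name are assumptions, and the symbols() range comes from the SymbolicFile base class rather than this file): it creates an IRObjectFile from a bitcode buffer and prints each basic symbol with its flags.

#include "llvm/IR/LLVMContext.h"
#include "llvm/Object/IRObjectFile.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"

static std::error_code dumpIRSymbols() {
  llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> BufOrErr =
      llvm::MemoryBuffer::getFile("input.bc");
  if (std::error_code EC = BufOrErr.getError())
    return EC;
  llvm::LLVMContext Context;
  llvm::ErrorOr<std::unique_ptr<llvm::object::IRObjectFile>> ObjOrErr =
      llvm::object::IRObjectFile::create((*BufOrErr)->getMemBufferRef(),
                                         Context);
  if (std::error_code EC = ObjOrErr.getError())
    return EC;
  for (const llvm::object::BasicSymbolRef &Sym : (*ObjOrErr)->symbols()) {
    if (std::error_code EC = Sym.printName(llvm::outs()))
      return EC;
    llvm::outs() << " flags=0x";
    llvm::outs().write_hex(Sym.getFlags()) << "\n";
  }
  return std::error_code();
}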
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Object/ELFObjectFile.cpp
//===- ELFObjectFile.cpp - ELF object file implementation -------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Part of the ELFObjectFile class implementation. // //===----------------------------------------------------------------------===// #include "llvm/Object/ELFObjectFile.h" #include "llvm/Support/MathExtras.h" namespace llvm { using namespace object; ELFObjectFileBase::ELFObjectFileBase(unsigned int Type, MemoryBufferRef Source) : ObjectFile(Type, Source) {} ErrorOr<std::unique_ptr<ObjectFile>> ObjectFile::createELFObjectFile(MemoryBufferRef Obj) { std::pair<unsigned char, unsigned char> Ident = getElfArchType(Obj.getBuffer()); std::size_t MaxAlignment = 1ULL << countTrailingZeros(uintptr_t(Obj.getBufferStart())); if (MaxAlignment < 2) return object_error::parse_failed; std::error_code EC; std::unique_ptr<ObjectFile> R; if (Ident.first == ELF::ELFCLASS32) { if (Ident.second == ELF::ELFDATA2LSB) R.reset(new ELFObjectFile<ELFType<support::little, false>>(Obj, EC)); else if (Ident.second == ELF::ELFDATA2MSB) R.reset(new ELFObjectFile<ELFType<support::big, false>>(Obj, EC)); else return object_error::parse_failed; } else if (Ident.first == ELF::ELFCLASS64) { if (Ident.second == ELF::ELFDATA2LSB) R.reset(new ELFObjectFile<ELFType<support::little, true>>(Obj, EC)); else if (Ident.second == ELF::ELFDATA2MSB) R.reset(new ELFObjectFile<ELFType<support::big, true>>(Obj, EC)); else return object_error::parse_failed; } else { return object_error::parse_failed; } if (EC) return EC; return std::move(R); } } // end namespace llvm
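Illustrative usage sketch (not part of ELFObjectFile.cpp; the path "prog.o" and the helper name are assumptions): createELFObjectFile picks the 32/64-bit, little/big-endian ELFType instantiation from the e_ident bytes, so callers only see the generic ObjectFile interface.

#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"

static std::error_code printELFSections() {
  llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> BufOrErr =
      llvm::MemoryBuffer::getFile("prog.o");
  if (std::error_code EC = BufOrErr.getError())
    return EC;
  llvm::ErrorOr<std::unique_ptr<llvm::object::ObjectFile>> ObjOrErr =
      llvm::object::ObjectFile::createELFObjectFile(
          (*BufOrErr)->getMemBufferRef());
  if (std::error_code EC = ObjOrErr.getError())
    return EC;
  for (const llvm::object::SectionRef &Sec : (*ObjOrErr)->sections()) {
    llvm::StringRef Name;
    if (!Sec.getName(Name)) // getName returns a std::error_code; false == success
      llvm::outs() << Name << "\n";
  }
  return std::error_code();
}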
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Object/Error.cpp
//===- Error.cpp - system_error extensions for Object -----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This defines a new error_category for the Object library. // //===----------------------------------------------------------------------===// #include "llvm/Object/Error.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/ManagedStatic.h" using namespace llvm; using namespace object; namespace { class _object_error_category : public std::error_category { public: const char* name() const LLVM_NOEXCEPT override; std::string message(int ev) const override; }; } const char *_object_error_category::name() const LLVM_NOEXCEPT { return "llvm.object"; } std::string _object_error_category::message(int EV) const { object_error E = static_cast<object_error>(EV); switch (E) { case object_error::arch_not_found: return "No object file for requested architecture"; case object_error::invalid_file_type: return "The file was not recognized as a valid object file"; case object_error::parse_failed: return "Invalid data was encountered while parsing the file"; case object_error::unexpected_eof: return "The end of the file was unexpectedly encountered"; case object_error::string_table_non_null_end: return "String table must end with a null terminator"; case object_error::invalid_section_index: return "Invalid section index"; case object_error::bitcode_section_not_found: return "Bitcode section not found in object file"; case object_error::macho_small_load_command: return "Mach-O load command with size < 8 bytes"; case object_error::macho_load_segment_too_many_sections: return "Mach-O segment load command contains too many sections"; case object_error::macho_load_segment_too_small: return "Mach-O segment load command size is too small"; } llvm_unreachable("An enumerator of object_error does not have a message " "defined."); } static ManagedStatic<_object_error_category> error_category; const std::error_category &object::object_category() { return *error_category; }
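Illustrative usage sketch (not part of Error.cpp; the helper name is an assumption): object_error values convert to std::error_code through this category, which is how the Object library returns them elsewhere in this repository.

#include "llvm/Object/Error.h"
#include "llvm/Support/raw_ostream.h"

static void reportParseFailure() {
  std::error_code EC = llvm::object::object_error::parse_failed;
  // Prints the category name "llvm.object" followed by the message string
  // defined in _object_error_category::message above.
  llvm::errs() << EC.category().name() << ": " << EC.message() << "\n";
}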
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Object/ELF.cpp
//===- ELF.cpp - ELF object file implementation -----------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "llvm/Object/ELF.h" namespace llvm { namespace object { #define ELF_RELOC(name, value) \ case ELF::name: \ return #name; \ StringRef getELFRelocationTypeName(uint32_t Machine, uint32_t Type) { switch (Machine) { case ELF::EM_X86_64: switch (Type) { #include "llvm/Support/ELFRelocs/x86_64.def" default: break; } break; case ELF::EM_386: switch (Type) { #include "llvm/Support/ELFRelocs/i386.def" default: break; } break; case ELF::EM_MIPS: switch (Type) { #include "llvm/Support/ELFRelocs/Mips.def" default: break; } break; case ELF::EM_AARCH64: switch (Type) { #include "llvm/Support/ELFRelocs/AArch64.def" default: break; } break; case ELF::EM_ARM: switch (Type) { #include "llvm/Support/ELFRelocs/ARM.def" default: break; } break; case ELF::EM_HEXAGON: switch (Type) { #include "llvm/Support/ELFRelocs/Hexagon.def" default: break; } break; case ELF::EM_PPC: switch (Type) { #include "llvm/Support/ELFRelocs/PowerPC.def" default: break; } break; case ELF::EM_PPC64: switch (Type) { #include "llvm/Support/ELFRelocs/PowerPC64.def" default: break; } break; case ELF::EM_S390: switch (Type) { #include "llvm/Support/ELFRelocs/SystemZ.def" default: break; } break; case ELF::EM_SPARC: case ELF::EM_SPARC32PLUS: case ELF::EM_SPARCV9: switch (Type) { #include "llvm/Support/ELFRelocs/Sparc.def" default: break; } break; default: break; } return "Unknown"; } #undef ELF_RELOC } // end namespace object } // end namespace llvm
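Illustrative usage sketch (not part of ELF.cpp; the helper name is an assumption, and the EM_* / R_* constants come from llvm/Support/ELF.h): getELFRelocationTypeName maps a (machine, relocation type) pair back to its symbolic name, falling back to "Unknown".

#include "llvm/Object/ELF.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/raw_ostream.h"

static void printRelocName() {
  llvm::StringRef Name = llvm::object::getELFRelocationTypeName(
      llvm::ELF::EM_X86_64, llvm::ELF::R_X86_64_PC32);
  llvm::outs() << Name << "\n"; // "R_X86_64_PC32"
  // An unrecognized type value falls through the switch and yields "Unknown".
  llvm::outs() << llvm::object::getELFRelocationTypeName(llvm::ELF::EM_X86_64,
                                                         0xffff)
               << "\n";
}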
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Object/Archive.cpp
//===- Archive.cpp - ar File Format implementation --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the ArchiveObjectFile class. // //===----------------------------------------------------------------------===// #include "llvm/Object/Archive.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/Twine.h" #include "llvm/Support/Endian.h" #include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/Path.h" using namespace llvm; using namespace object; using namespace llvm::support::endian; static const char *const Magic = "!<arch>\n"; static const char *const ThinMagic = "!<thin>\n"; void Archive::anchor() { } StringRef ArchiveMemberHeader::getName() const { char EndCond; if (Name[0] == '/' || Name[0] == '#') EndCond = ' '; else EndCond = '/'; llvm::StringRef::size_type end = llvm::StringRef(Name, sizeof(Name)).find(EndCond); if (end == llvm::StringRef::npos) end = sizeof(Name); assert(end <= sizeof(Name) && end > 0); // Don't include the EndCond if there is one. return llvm::StringRef(Name, end); } uint32_t ArchiveMemberHeader::getSize() const { uint32_t Ret; if (llvm::StringRef(Size, sizeof(Size)).rtrim(" ").getAsInteger(10, Ret)) llvm_unreachable("Size is not a decimal number."); return Ret; } sys::fs::perms ArchiveMemberHeader::getAccessMode() const { unsigned Ret; if (StringRef(AccessMode, sizeof(AccessMode)).rtrim(" ").getAsInteger(8, Ret)) llvm_unreachable("Access mode is not an octal number."); return static_cast<sys::fs::perms>(Ret); } sys::TimeValue ArchiveMemberHeader::getLastModified() const { unsigned Seconds; if (StringRef(LastModified, sizeof(LastModified)).rtrim(" ") .getAsInteger(10, Seconds)) llvm_unreachable("Last modified time not a decimal number."); sys::TimeValue Ret; Ret.fromEpochTime(Seconds); return Ret; } unsigned ArchiveMemberHeader::getUID() const { unsigned Ret; if (StringRef(UID, sizeof(UID)).rtrim(" ").getAsInteger(10, Ret)) llvm_unreachable("UID time not a decimal number."); return Ret; } unsigned ArchiveMemberHeader::getGID() const { unsigned Ret; if (StringRef(GID, sizeof(GID)).rtrim(" ").getAsInteger(10, Ret)) llvm_unreachable("GID time not a decimal number."); return Ret; } Archive::Child::Child(const Archive *Parent, const char *Start) : Parent(Parent) { if (!Start) return; const ArchiveMemberHeader *Header = reinterpret_cast<const ArchiveMemberHeader *>(Start); uint64_t Size = sizeof(ArchiveMemberHeader); if (!Parent->IsThin || Header->getName() == "/" || Header->getName() == "//") Size += Header->getSize(); Data = StringRef(Start, Size); // Setup StartOfFile and PaddingBytes. StartOfFile = sizeof(ArchiveMemberHeader); // Don't include attached name. 
StringRef Name = Header->getName(); if (Name.startswith("#1/")) { uint64_t NameSize; if (Name.substr(3).rtrim(" ").getAsInteger(10, NameSize)) llvm_unreachable("Long name length is not an integer"); StartOfFile += NameSize; } } uint64_t Archive::Child::getSize() const { if (Parent->IsThin) return getHeader()->getSize(); return Data.size() - StartOfFile; } uint64_t Archive::Child::getRawSize() const { return getHeader()->getSize(); } ErrorOr<StringRef> Archive::Child::getBuffer() const { if (!Parent->IsThin) return StringRef(Data.data() + StartOfFile, getSize()); ErrorOr<StringRef> Name = getName(); if (std::error_code EC = Name.getError()) return EC; SmallString<128> FullName = Parent->getMemoryBufferRef().getBufferIdentifier(); sys::path::remove_filename(FullName); sys::path::append(FullName, *Name); ErrorOr<std::unique_ptr<MemoryBuffer>> Buf = MemoryBuffer::getFile(FullName); if (std::error_code EC = Buf.getError()) return EC; Parent->ThinBuffers.push_back(std::move(*Buf)); return Parent->ThinBuffers.back()->getBuffer(); } Archive::Child Archive::Child::getNext() const { size_t SpaceToSkip = Data.size(); // If it's odd, add 1 to make it even. if (SpaceToSkip & 1) ++SpaceToSkip; const char *NextLoc = Data.data() + SpaceToSkip; // Check to see if this is past the end of the archive. if (NextLoc >= Parent->Data.getBufferEnd()) return Child(Parent, nullptr); return Child(Parent, NextLoc); } uint64_t Archive::Child::getChildOffset() const { const char *a = Parent->Data.getBuffer().data(); const char *c = Data.data(); uint64_t offset = c - a; return offset; } ErrorOr<StringRef> Archive::Child::getName() const { StringRef name = getRawName(); // Check if it's a special name. if (name[0] == '/') { if (name.size() == 1) // Linker member. return name; if (name.size() == 2 && name[1] == '/') // String table. return name; // It's a long name. // Get the offset. std::size_t offset; if (name.substr(1).rtrim(" ").getAsInteger(10, offset)) llvm_unreachable("Long name offset is not an integer"); const char *addr = Parent->StringTable->Data.begin() + sizeof(ArchiveMemberHeader) + offset; // Verify it. if (Parent->StringTable == Parent->child_end() || addr < (Parent->StringTable->Data.begin() + sizeof(ArchiveMemberHeader)) || addr > (Parent->StringTable->Data.begin() + sizeof(ArchiveMemberHeader) + Parent->StringTable->getSize())) return object_error::parse_failed; // GNU long file names end with a "/\n". if (Parent->kind() == K_GNU || Parent->kind() == K_MIPS64) { StringRef::size_type End = StringRef(addr).find('\n'); return StringRef(addr, End - 1); } return StringRef(addr); } else if (name.startswith("#1/")) { uint64_t name_size; if (name.substr(3).rtrim(" ").getAsInteger(10, name_size)) llvm_unreachable("Long name length is not an ingeter"); return Data.substr(sizeof(ArchiveMemberHeader), name_size) .rtrim(StringRef("\0", 1)); } // It's a simple name. 
if (name[name.size() - 1] == '/') return name.substr(0, name.size() - 1); return name; } ErrorOr<MemoryBufferRef> Archive::Child::getMemoryBufferRef() const { ErrorOr<StringRef> NameOrErr = getName(); if (std::error_code EC = NameOrErr.getError()) return EC; StringRef Name = NameOrErr.get(); ErrorOr<StringRef> Buf = getBuffer(); if (std::error_code EC = Buf.getError()) return EC; return MemoryBufferRef(*Buf, Name); } ErrorOr<std::unique_ptr<Binary>> Archive::Child::getAsBinary(LLVMContext *Context) const { ErrorOr<MemoryBufferRef> BuffOrErr = getMemoryBufferRef(); if (std::error_code EC = BuffOrErr.getError()) return EC; return createBinary(BuffOrErr.get(), Context); } ErrorOr<std::unique_ptr<Archive>> Archive::create(MemoryBufferRef Source) { std::error_code EC; std::unique_ptr<Archive> Ret(new Archive(Source, EC)); if (EC) return EC; return std::move(Ret); } Archive::Archive(MemoryBufferRef Source, std::error_code &ec) : Binary(Binary::ID_Archive, Source), SymbolTable(child_end()), StringTable(child_end()), FirstRegular(child_end()) { StringRef Buffer = Data.getBuffer(); // Check for sufficient magic. if (Buffer.startswith(ThinMagic)) { IsThin = true; } else if (Buffer.startswith(Magic)) { IsThin = false; } else { ec = object_error::invalid_file_type; return; } // Get the special members. child_iterator i = child_begin(false); child_iterator e = child_end(); if (i == e) { ec = std::error_code(); return; } StringRef Name = i->getRawName(); // Below is the pattern that is used to figure out the archive format // GNU archive format // First member : / (may exist, if it exists, points to the symbol table ) // Second member : // (may exist, if it exists, points to the string table) // Note : The string table is used if the filename exceeds 15 characters // BSD archive format // First member : __.SYMDEF or "__.SYMDEF SORTED" (the symbol table) // There is no string table, if the filename exceeds 15 characters or has a // embedded space, the filename has #1/<size>, The size represents the size // of the filename that needs to be read after the archive header // COFF archive format // First member : / // Second member : / (provides a directory of symbols) // Third member : // (may exist, if it exists, contains the string table) // Note: Microsoft PE/COFF Spec 8.3 says that the third member is present // even if the string table is empty. However, lib.exe does not in fact // seem to create the third member if there's no member whose filename // exceeds 15 characters. So the third member is optional. if (Name == "__.SYMDEF") { Format = K_BSD; SymbolTable = i; ++i; FirstRegular = i; ec = std::error_code(); return; } if (Name.startswith("#1/")) { Format = K_BSD; // We know this is BSD, so getName will work since there is no string table. ErrorOr<StringRef> NameOrErr = i->getName(); ec = NameOrErr.getError(); if (ec) return; Name = NameOrErr.get(); if (Name == "__.SYMDEF SORTED" || Name == "__.SYMDEF") { SymbolTable = i; ++i; } FirstRegular = i; return; } // MIPS 64-bit ELF archives use a special format of a symbol table. // This format is marked by `ar_name` field equals to "/SYM64/". // For detailed description see page 96 in the following document: // http://techpubs.sgi.com/library/manuals/4000/007-4658-001/pdf/007-4658-001.pdf bool has64SymTable = false; if (Name == "/" || Name == "/SYM64/") { SymbolTable = i; if (Name == "/SYM64/") has64SymTable = true; ++i; if (i == e) { ec = std::error_code(); return; } Name = i->getRawName(); } if (Name == "//") { Format = has64SymTable ? 
K_MIPS64 : K_GNU; StringTable = i; ++i; FirstRegular = i; ec = std::error_code(); return; } if (Name[0] != '/') { Format = has64SymTable ? K_MIPS64 : K_GNU; FirstRegular = i; ec = std::error_code(); return; } if (Name != "/") { ec = object_error::parse_failed; return; } Format = K_COFF; SymbolTable = i; ++i; if (i == e) { FirstRegular = i; ec = std::error_code(); return; } Name = i->getRawName(); if (Name == "//") { StringTable = i; ++i; } FirstRegular = i; ec = std::error_code(); } Archive::child_iterator Archive::child_begin(bool SkipInternal) const { if (Data.getBufferSize() == 8) // empty archive. return child_end(); if (SkipInternal) return FirstRegular; const char *Loc = Data.getBufferStart() + strlen(Magic); Child c(this, Loc); return c; } Archive::child_iterator Archive::child_end() const { return Child(this, nullptr); } StringRef Archive::Symbol::getName() const { return Parent->getSymbolTable().begin() + StringIndex; } ErrorOr<Archive::child_iterator> Archive::Symbol::getMember() const { const char *Buf = Parent->getSymbolTable().begin(); const char *Offsets = Buf; if (Parent->kind() == K_MIPS64) Offsets += sizeof(uint64_t); else Offsets += sizeof(uint32_t); uint32_t Offset = 0; if (Parent->kind() == K_GNU) { Offset = read32be(Offsets + SymbolIndex * 4); } else if (Parent->kind() == K_MIPS64) { Offset = read64be(Offsets + SymbolIndex * 8); } else if (Parent->kind() == K_BSD) { // The SymbolIndex is an index into the ranlib structs that start at // Offsets (the first uint32_t is the number of bytes of the ranlib // structs). The ranlib structs are a pair of uint32_t's the first // being a string table offset and the second being the offset into // the archive of the member that defines the symbol. Which is what // is needed here. Offset = read32le(Offsets + SymbolIndex * 8 + 4); } else { // Skip offsets. uint32_t MemberCount = read32le(Buf); Buf += MemberCount * 4 + 4; uint32_t SymbolCount = read32le(Buf); if (SymbolIndex >= SymbolCount) return object_error::parse_failed; // Skip SymbolCount to get to the indices table. const char *Indices = Buf + 4; // Get the index of the offset in the file member offset table for this // symbol. uint16_t OffsetIndex = read16le(Indices + SymbolIndex * 2); // Subtract 1 since OffsetIndex is 1 based. --OffsetIndex; if (OffsetIndex >= MemberCount) return object_error::parse_failed; Offset = read32le(Offsets + OffsetIndex * 4); } const char *Loc = Parent->getData().begin() + Offset; child_iterator Iter(Child(Parent, Loc)); return Iter; } Archive::Symbol Archive::Symbol::getNext() const { Symbol t(*this); if (Parent->kind() == K_BSD) { // t.StringIndex is an offset from the start of the __.SYMDEF or // "__.SYMDEF SORTED" member into the string table for the ranlib // struct indexed by t.SymbolIndex . To change t.StringIndex to the // offset in the string table for t.SymbolIndex+1 we subtract the // its offset from the start of the string table for t.SymbolIndex // and add the offset of the string table for t.SymbolIndex+1. // The __.SYMDEF or "__.SYMDEF SORTED" member starts with a uint32_t // which is the number of bytes of ranlib structs that follow. The ranlib // structs are a pair of uint32_t's the first being a string table offset // and the second being the offset into the archive of the member that // define the symbol. After that the next uint32_t is the byte count of // the string table followed by the string table. 
const char *Buf = Parent->getSymbolTable().begin(); uint32_t RanlibCount = 0; RanlibCount = read32le(Buf) / 8; // If t.SymbolIndex + 1 will be past the count of symbols (the RanlibCount) // don't change the t.StringIndex as we don't want to reference a ranlib // past RanlibCount. if (t.SymbolIndex + 1 < RanlibCount) { const char *Ranlibs = Buf + 4; uint32_t CurRanStrx = 0; uint32_t NextRanStrx = 0; CurRanStrx = read32le(Ranlibs + t.SymbolIndex * 8); NextRanStrx = read32le(Ranlibs + (t.SymbolIndex + 1) * 8); t.StringIndex -= CurRanStrx; t.StringIndex += NextRanStrx; } } else { // Go to one past next null. t.StringIndex = Parent->getSymbolTable().find('\0', t.StringIndex) + 1; } ++t.SymbolIndex; return t; } Archive::symbol_iterator Archive::symbol_begin() const { if (!hasSymbolTable()) return symbol_iterator(Symbol(this, 0, 0)); const char *buf = getSymbolTable().begin(); if (kind() == K_GNU) { uint32_t symbol_count = 0; symbol_count = read32be(buf); buf += sizeof(uint32_t) + (symbol_count * (sizeof(uint32_t))); } else if (kind() == K_MIPS64) { uint64_t symbol_count = read64be(buf); buf += sizeof(uint64_t) + (symbol_count * (sizeof(uint64_t))); } else if (kind() == K_BSD) { // The __.SYMDEF or "__.SYMDEF SORTED" member starts with a uint32_t // which is the number of bytes of ranlib structs that follow. The ranlib // structs are a pair of uint32_t's the first being a string table offset // and the second being the offset into the archive of the member that // define the symbol. After that the next uint32_t is the byte count of // the string table followed by the string table. uint32_t ranlib_count = 0; ranlib_count = read32le(buf) / 8; const char *ranlibs = buf + 4; uint32_t ran_strx = 0; ran_strx = read32le(ranlibs); buf += sizeof(uint32_t) + (ranlib_count * (2 * (sizeof(uint32_t)))); // Skip the byte count of the string table. buf += sizeof(uint32_t); buf += ran_strx; } else { uint32_t member_count = 0; uint32_t symbol_count = 0; member_count = read32le(buf); buf += 4 + (member_count * 4); // Skip offsets. symbol_count = read32le(buf); buf += 4 + (symbol_count * 2); // Skip indices. } uint32_t string_start_offset = buf - getSymbolTable().begin(); return symbol_iterator(Symbol(this, 0, string_start_offset)); } Archive::symbol_iterator Archive::symbol_end() const { if (!hasSymbolTable()) return symbol_iterator(Symbol(this, 0, 0)); return symbol_iterator(Symbol(this, getNumberOfSymbols(), 0)); } uint32_t Archive::getNumberOfSymbols() const { const char *buf = getSymbolTable().begin(); if (kind() == K_GNU) return read32be(buf); if (kind() == K_MIPS64) return read64be(buf); if (kind() == K_BSD) return read32le(buf) / 8; uint32_t member_count = 0; member_count = read32le(buf); buf += 4 + (member_count * 4); // Skip offsets. return read32le(buf); } Archive::child_iterator Archive::findSym(StringRef name) const { Archive::symbol_iterator bs = symbol_begin(); Archive::symbol_iterator es = symbol_end(); for (; bs != es; ++bs) { StringRef SymName = bs->getName(); if (SymName == name) { ErrorOr<Archive::child_iterator> ResultOrErr = bs->getMember(); // FIXME: Should we really eat the error? if (ResultOrErr.getError()) return child_end(); return ResultOrErr.get(); } } return child_end(); } bool Archive::hasSymbolTable() const { return SymbolTable != child_end(); }
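Illustrative usage sketch (not part of Archive.cpp; the path "libfoo.a", the symbol name "main", and the helper name are assumptions): it opens an archive, lists its regular members, and uses findSym against the symbol table parsed above.

#include "llvm/Object/Archive.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"

static std::error_code listArchive() {
  llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> BufOrErr =
      llvm::MemoryBuffer::getFile("libfoo.a");
  if (std::error_code EC = BufOrErr.getError())
    return EC;
  llvm::ErrorOr<std::unique_ptr<llvm::object::Archive>> ArchiveOrErr =
      llvm::object::Archive::create((*BufOrErr)->getMemBufferRef());
  if (std::error_code EC = ArchiveOrErr.getError())
    return EC;
  llvm::object::Archive &A = **ArchiveOrErr;
  // Passing true skips the internal symbol-table and string-table members.
  for (auto It = A.child_begin(true), E = A.child_end(); It != E; ++It) {
    llvm::ErrorOr<llvm::StringRef> Name = It->getName();
    if (!Name.getError())
      llvm::outs() << *Name << " (" << It->getSize() << " bytes)\n";
  }
  // findSym returns child_end() when the name is not in the symbol table.
  if (A.findSym("main") != A.child_end())
    llvm::outs() << "symbol 'main' is defined in this archive\n";
  return std::error_code();
}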
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Object/MachOObjectFile.cpp
//===- MachOObjectFile.cpp - Mach-O object file binding ---------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the MachOObjectFile class, which binds the MachOObject // class to the generic ObjectFile wrapper. // //===----------------------------------------------------------------------===// #include "llvm/Object/MachO.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/StringSwitch.h" #include "llvm/ADT/Triple.h" #include "llvm/Support/DataExtractor.h" #include "llvm/Support/Debug.h" #include "llvm/Support/Format.h" #include "llvm/Support/Host.h" #include "llvm/Support/LEB128.h" #include "llvm/Support/MachO.h" #include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/raw_ostream.h" #include <cctype> #include <cstring> #include <limits> using namespace llvm; using namespace object; namespace { struct section_base { char sectname[16]; char segname[16]; }; } // FIXME: Replace all uses of this function with getStructOrErr. template <typename T> static T getStruct(const MachOObjectFile *O, const char *P) { // Don't read before the beginning or past the end of the file if (P < O->getData().begin() || P + sizeof(T) > O->getData().end()) report_fatal_error("Malformed MachO file."); T Cmd; memcpy(&Cmd, P, sizeof(T)); if (O->isLittleEndian() != sys::IsLittleEndianHost) MachO::swapStruct(Cmd); return Cmd; } template <typename T> static ErrorOr<T> getStructOrErr(const MachOObjectFile *O, const char *P) { // Don't read before the beginning or past the end of the file if (P < O->getData().begin() || P + sizeof(T) > O->getData().end()) return object_error::parse_failed; T Cmd; memcpy(&Cmd, P, sizeof(T)); if (O->isLittleEndian() != sys::IsLittleEndianHost) MachO::swapStruct(Cmd); return Cmd; } static const char * getSectionPtr(const MachOObjectFile *O, MachOObjectFile::LoadCommandInfo L, unsigned Sec) { uintptr_t CommandAddr = reinterpret_cast<uintptr_t>(L.Ptr); bool Is64 = O->is64Bit(); unsigned SegmentLoadSize = Is64 ? sizeof(MachO::segment_command_64) : sizeof(MachO::segment_command); unsigned SectionSize = Is64 ? sizeof(MachO::section_64) : sizeof(MachO::section); uintptr_t SectionAddr = CommandAddr + SegmentLoadSize + Sec * SectionSize; return reinterpret_cast<const char*>(SectionAddr); } static const char *getPtr(const MachOObjectFile *O, size_t Offset) { return O->getData().substr(Offset, 1).data(); } static MachO::nlist_base getSymbolTableEntryBase(const MachOObjectFile *O, DataRefImpl DRI) { const char *P = reinterpret_cast<const char *>(DRI.p); return getStruct<MachO::nlist_base>(O, P); } static StringRef parseSegmentOrSectionName(const char *P) { if (P[15] == 0) // Null terminated. return P; // Not null terminated, so this is a 16 char string. return StringRef(P, 16); } // Helper to advance a section or symbol iterator multiple increments at a time. 
template<class T> static void advance(T &it, size_t Val) { while (Val--) ++it; } static unsigned getCPUType(const MachOObjectFile *O) { return O->getHeader().cputype; } static uint32_t getPlainRelocationAddress(const MachO::any_relocation_info &RE) { return RE.r_word0; } static unsigned getScatteredRelocationAddress(const MachO::any_relocation_info &RE) { return RE.r_word0 & 0xffffff; } static bool getPlainRelocationPCRel(const MachOObjectFile *O, const MachO::any_relocation_info &RE) { if (O->isLittleEndian()) return (RE.r_word1 >> 24) & 1; return (RE.r_word1 >> 7) & 1; } static bool getScatteredRelocationPCRel(const MachOObjectFile *O, const MachO::any_relocation_info &RE) { return (RE.r_word0 >> 30) & 1; } static unsigned getPlainRelocationLength(const MachOObjectFile *O, const MachO::any_relocation_info &RE) { if (O->isLittleEndian()) return (RE.r_word1 >> 25) & 3; return (RE.r_word1 >> 5) & 3; } static unsigned getScatteredRelocationLength(const MachO::any_relocation_info &RE) { return (RE.r_word0 >> 28) & 3; } static unsigned getPlainRelocationType(const MachOObjectFile *O, const MachO::any_relocation_info &RE) { if (O->isLittleEndian()) return RE.r_word1 >> 28; return RE.r_word1 & 0xf; } static uint32_t getSectionFlags(const MachOObjectFile *O, DataRefImpl Sec) { if (O->is64Bit()) { MachO::section_64 Sect = O->getSection64(Sec); return Sect.flags; } MachO::section Sect = O->getSection(Sec); return Sect.flags; } static ErrorOr<MachOObjectFile::LoadCommandInfo> getLoadCommandInfo(const MachOObjectFile *Obj, const char *Ptr) { auto CmdOrErr = getStructOrErr<MachO::load_command>(Obj, Ptr); if (!CmdOrErr) return CmdOrErr.getError(); if (CmdOrErr->cmdsize < 8) return object_error::macho_small_load_command; MachOObjectFile::LoadCommandInfo Load; Load.Ptr = Ptr; Load.C = CmdOrErr.get(); return Load; } static ErrorOr<MachOObjectFile::LoadCommandInfo> getFirstLoadCommandInfo(const MachOObjectFile *Obj) { unsigned HeaderSize = Obj->is64Bit() ? sizeof(MachO::mach_header_64) : sizeof(MachO::mach_header); return getLoadCommandInfo(Obj, getPtr(Obj, HeaderSize)); } static ErrorOr<MachOObjectFile::LoadCommandInfo> getNextLoadCommandInfo(const MachOObjectFile *Obj, const MachOObjectFile::LoadCommandInfo &L) { return getLoadCommandInfo(Obj, L.Ptr + L.C.cmdsize); } template <typename T> static void parseHeader(const MachOObjectFile *Obj, T &Header, std::error_code &EC) { auto HeaderOrErr = getStructOrErr<T>(Obj, getPtr(Obj, 0)); if (HeaderOrErr) Header = HeaderOrErr.get(); else EC = HeaderOrErr.getError(); } // Parses LC_SEGMENT or LC_SEGMENT_64 load command, adds addresses of all // sections to \param Sections, and optionally sets // \param IsPageZeroSegment to true. template <typename SegmentCmd> static std::error_code parseSegmentLoadCommand( const MachOObjectFile *Obj, const MachOObjectFile::LoadCommandInfo &Load, SmallVectorImpl<const char *> &Sections, bool &IsPageZeroSegment) { const unsigned SegmentLoadSize = sizeof(SegmentCmd); if (Load.C.cmdsize < SegmentLoadSize) return object_error::macho_load_segment_too_small; auto SegOrErr = getStructOrErr<SegmentCmd>(Obj, Load.Ptr); if (!SegOrErr) return SegOrErr.getError(); SegmentCmd S = SegOrErr.get(); const unsigned SectionSize = Obj->is64Bit() ? 
sizeof(MachO::section_64) : sizeof(MachO::section); if (S.nsects > std::numeric_limits<uint32_t>::max() / SectionSize || S.nsects * SectionSize > Load.C.cmdsize - SegmentLoadSize) return object_error::macho_load_segment_too_many_sections; for (unsigned J = 0; J < S.nsects; ++J) { const char *Sec = getSectionPtr(Obj, Load, J); Sections.push_back(Sec); } IsPageZeroSegment |= StringRef("__PAGEZERO").equals(S.segname); return std::error_code(); } MachOObjectFile::MachOObjectFile(MemoryBufferRef Object, bool IsLittleEndian, bool Is64bits, std::error_code &EC) : ObjectFile(getMachOType(IsLittleEndian, Is64bits), Object), SymtabLoadCmd(nullptr), DysymtabLoadCmd(nullptr), DataInCodeLoadCmd(nullptr), LinkOptHintsLoadCmd(nullptr), DyldInfoLoadCmd(nullptr), UuidLoadCmd(nullptr), HasPageZeroSegment(false) { if (is64Bit()) parseHeader(this, Header64, EC); else parseHeader(this, Header, EC); if (EC) return; uint32_t LoadCommandCount = getHeader().ncmds; if (LoadCommandCount == 0) return; auto LoadOrErr = getFirstLoadCommandInfo(this); if (!LoadOrErr) { EC = LoadOrErr.getError(); return; } LoadCommandInfo Load = LoadOrErr.get(); for (unsigned I = 0; I < LoadCommandCount; ++I) { LoadCommands.push_back(Load); if (Load.C.cmd == MachO::LC_SYMTAB) { // Multiple symbol tables if (SymtabLoadCmd) { EC = object_error::parse_failed; return; } SymtabLoadCmd = Load.Ptr; } else if (Load.C.cmd == MachO::LC_DYSYMTAB) { // Multiple dynamic symbol tables if (DysymtabLoadCmd) { EC = object_error::parse_failed; return; } DysymtabLoadCmd = Load.Ptr; } else if (Load.C.cmd == MachO::LC_DATA_IN_CODE) { // Multiple data in code tables if (DataInCodeLoadCmd) { EC = object_error::parse_failed; return; } DataInCodeLoadCmd = Load.Ptr; } else if (Load.C.cmd == MachO::LC_LINKER_OPTIMIZATION_HINT) { // Multiple linker optimization hint tables if (LinkOptHintsLoadCmd) { EC = object_error::parse_failed; return; } LinkOptHintsLoadCmd = Load.Ptr; } else if (Load.C.cmd == MachO::LC_DYLD_INFO || Load.C.cmd == MachO::LC_DYLD_INFO_ONLY) { // Multiple dyldinfo load commands if (DyldInfoLoadCmd) { EC = object_error::parse_failed; return; } DyldInfoLoadCmd = Load.Ptr; } else if (Load.C.cmd == MachO::LC_UUID) { // Multiple UUID load commands if (UuidLoadCmd) { EC = object_error::parse_failed; return; } UuidLoadCmd = Load.Ptr; } else if (Load.C.cmd == MachO::LC_SEGMENT_64) { if ((EC = parseSegmentLoadCommand<MachO::segment_command_64>( this, Load, Sections, HasPageZeroSegment))) return; } else if (Load.C.cmd == MachO::LC_SEGMENT) { if ((EC = parseSegmentLoadCommand<MachO::segment_command>( this, Load, Sections, HasPageZeroSegment))) return; } else if (Load.C.cmd == MachO::LC_LOAD_DYLIB || Load.C.cmd == MachO::LC_LOAD_WEAK_DYLIB || Load.C.cmd == MachO::LC_LAZY_LOAD_DYLIB || Load.C.cmd == MachO::LC_REEXPORT_DYLIB || Load.C.cmd == MachO::LC_LOAD_UPWARD_DYLIB) { Libraries.push_back(Load.Ptr); } if (I < LoadCommandCount - 1) { auto LoadOrErr = getNextLoadCommandInfo(this, Load); if (!LoadOrErr) { EC = LoadOrErr.getError(); return; } Load = LoadOrErr.get(); } } assert(LoadCommands.size() == LoadCommandCount); } void MachOObjectFile::moveSymbolNext(DataRefImpl &Symb) const { unsigned SymbolTableEntrySize = is64Bit() ? 
sizeof(MachO::nlist_64) : sizeof(MachO::nlist); Symb.p += SymbolTableEntrySize; } ErrorOr<StringRef> MachOObjectFile::getSymbolName(DataRefImpl Symb) const { StringRef StringTable = getStringTableData(); MachO::nlist_base Entry = getSymbolTableEntryBase(this, Symb); const char *Start = &StringTable.data()[Entry.n_strx]; if (Start < getData().begin() || Start >= getData().end()) report_fatal_error( "Symbol name entry points before beginning or past end of file."); return StringRef(Start); } unsigned MachOObjectFile::getSectionType(SectionRef Sec) const { DataRefImpl DRI = Sec.getRawDataRefImpl(); uint32_t Flags = getSectionFlags(this, DRI); return Flags & MachO::SECTION_TYPE; } uint64_t MachOObjectFile::getNValue(DataRefImpl Sym) const { if (is64Bit()) { MachO::nlist_64 Entry = getSymbol64TableEntry(Sym); return Entry.n_value; } MachO::nlist Entry = getSymbolTableEntry(Sym); return Entry.n_value; } // getIndirectName() returns the name of the alias'ed symbol who's string table // index is in the n_value field. std::error_code MachOObjectFile::getIndirectName(DataRefImpl Symb, StringRef &Res) const { StringRef StringTable = getStringTableData(); MachO::nlist_base Entry = getSymbolTableEntryBase(this, Symb); if ((Entry.n_type & MachO::N_TYPE) != MachO::N_INDR) return object_error::parse_failed; uint64_t NValue = getNValue(Symb); if (NValue >= StringTable.size()) return object_error::parse_failed; const char *Start = &StringTable.data()[NValue]; Res = StringRef(Start); return std::error_code(); } uint64_t MachOObjectFile::getSymbolValueImpl(DataRefImpl Sym) const { return getNValue(Sym); } ErrorOr<uint64_t> MachOObjectFile::getSymbolAddress(DataRefImpl Sym) const { return getSymbolValue(Sym); } uint32_t MachOObjectFile::getSymbolAlignment(DataRefImpl DRI) const { uint32_t flags = getSymbolFlags(DRI); if (flags & SymbolRef::SF_Common) { MachO::nlist_base Entry = getSymbolTableEntryBase(this, DRI); return 1 << MachO::GET_COMM_ALIGN(Entry.n_desc); } return 0; } uint64_t MachOObjectFile::getCommonSymbolSizeImpl(DataRefImpl DRI) const { return getNValue(DRI); } SymbolRef::Type MachOObjectFile::getSymbolType(DataRefImpl Symb) const { MachO::nlist_base Entry = getSymbolTableEntryBase(this, Symb); uint8_t n_type = Entry.n_type; // If this is a STAB debugging symbol, we can do nothing more. 
if (n_type & MachO::N_STAB) return SymbolRef::ST_Debug; switch (n_type & MachO::N_TYPE) { case MachO::N_UNDF : return SymbolRef::ST_Unknown; case MachO::N_SECT : return SymbolRef::ST_Function; } return SymbolRef::ST_Other; } uint32_t MachOObjectFile::getSymbolFlags(DataRefImpl DRI) const { MachO::nlist_base Entry = getSymbolTableEntryBase(this, DRI); uint8_t MachOType = Entry.n_type; uint16_t MachOFlags = Entry.n_desc; uint32_t Result = SymbolRef::SF_None; if ((MachOType & MachO::N_TYPE) == MachO::N_INDR) Result |= SymbolRef::SF_Indirect; if (MachOType & MachO::N_STAB) Result |= SymbolRef::SF_FormatSpecific; if (MachOType & MachO::N_EXT) { Result |= SymbolRef::SF_Global; if ((MachOType & MachO::N_TYPE) == MachO::N_UNDF) { if (getNValue(DRI)) Result |= SymbolRef::SF_Common; else Result |= SymbolRef::SF_Undefined; } if (!(MachOType & MachO::N_PEXT)) Result |= SymbolRef::SF_Exported; } if (MachOFlags & (MachO::N_WEAK_REF | MachO::N_WEAK_DEF)) Result |= SymbolRef::SF_Weak; if (MachOFlags & (MachO::N_ARM_THUMB_DEF)) Result |= SymbolRef::SF_Thumb; if ((MachOType & MachO::N_TYPE) == MachO::N_ABS) Result |= SymbolRef::SF_Absolute; return Result; } std::error_code MachOObjectFile::getSymbolSection(DataRefImpl Symb, section_iterator &Res) const { MachO::nlist_base Entry = getSymbolTableEntryBase(this, Symb); uint8_t index = Entry.n_sect; if (index == 0) { Res = section_end(); } else { DataRefImpl DRI; DRI.d.a = index - 1; if (DRI.d.a >= Sections.size()) report_fatal_error("getSymbolSection: Invalid section index."); Res = section_iterator(SectionRef(DRI, this)); } return std::error_code(); } unsigned MachOObjectFile::getSymbolSectionID(SymbolRef Sym) const { MachO::nlist_base Entry = getSymbolTableEntryBase(this, Sym.getRawDataRefImpl()); return Entry.n_sect - 1; } void MachOObjectFile::moveSectionNext(DataRefImpl &Sec) const { Sec.d.a++; } std::error_code MachOObjectFile::getSectionName(DataRefImpl Sec, StringRef &Result) const { ArrayRef<char> Raw = getSectionRawName(Sec); Result = parseSegmentOrSectionName(Raw.data()); return std::error_code(); } uint64_t MachOObjectFile::getSectionAddress(DataRefImpl Sec) const { if (is64Bit()) return getSection64(Sec).addr; return getSection(Sec).addr; } uint64_t MachOObjectFile::getSectionSize(DataRefImpl Sec) const { if (is64Bit()) return getSection64(Sec).size; return getSection(Sec).size; } std::error_code MachOObjectFile::getSectionContents(DataRefImpl Sec, StringRef &Res) const { uint32_t Offset; uint64_t Size; if (is64Bit()) { MachO::section_64 Sect = getSection64(Sec); Offset = Sect.offset; Size = Sect.size; } else { MachO::section Sect = getSection(Sec); Offset = Sect.offset; Size = Sect.size; } Res = this->getData().substr(Offset, Size); return std::error_code(); } uint64_t MachOObjectFile::getSectionAlignment(DataRefImpl Sec) const { uint32_t Align; if (is64Bit()) { MachO::section_64 Sect = getSection64(Sec); Align = Sect.align; } else { MachO::section Sect = getSection(Sec); Align = Sect.align; } return uint64_t(1) << Align; } bool MachOObjectFile::isSectionText(DataRefImpl Sec) const { uint32_t Flags = getSectionFlags(this, Sec); return Flags & MachO::S_ATTR_PURE_INSTRUCTIONS; } bool MachOObjectFile::isSectionData(DataRefImpl Sec) const { uint32_t Flags = getSectionFlags(this, Sec); unsigned SectionType = Flags & MachO::SECTION_TYPE; return !(Flags & MachO::S_ATTR_PURE_INSTRUCTIONS) && !(SectionType == MachO::S_ZEROFILL || SectionType == MachO::S_GB_ZEROFILL); } bool MachOObjectFile::isSectionBSS(DataRefImpl Sec) const { uint32_t Flags = 
getSectionFlags(this, Sec); unsigned SectionType = Flags & MachO::SECTION_TYPE; return !(Flags & MachO::S_ATTR_PURE_INSTRUCTIONS) && (SectionType == MachO::S_ZEROFILL || SectionType == MachO::S_GB_ZEROFILL); } unsigned MachOObjectFile::getSectionID(SectionRef Sec) const { return Sec.getRawDataRefImpl().d.a; } bool MachOObjectFile::isSectionVirtual(DataRefImpl Sec) const { // FIXME: Unimplemented. return false; } relocation_iterator MachOObjectFile::section_rel_begin(DataRefImpl Sec) const { DataRefImpl Ret; Ret.d.a = Sec.d.a; Ret.d.b = 0; return relocation_iterator(RelocationRef(Ret, this)); } relocation_iterator MachOObjectFile::section_rel_end(DataRefImpl Sec) const { uint32_t Num; if (is64Bit()) { MachO::section_64 Sect = getSection64(Sec); Num = Sect.nreloc; } else { MachO::section Sect = getSection(Sec); Num = Sect.nreloc; } DataRefImpl Ret; Ret.d.a = Sec.d.a; Ret.d.b = Num; return relocation_iterator(RelocationRef(Ret, this)); } void MachOObjectFile::moveRelocationNext(DataRefImpl &Rel) const { ++Rel.d.b; } uint64_t MachOObjectFile::getRelocationOffset(DataRefImpl Rel) const { assert(getHeader().filetype == MachO::MH_OBJECT && "Only implemented for MH_OBJECT"); MachO::any_relocation_info RE = getRelocation(Rel); return getAnyRelocationAddress(RE); } symbol_iterator MachOObjectFile::getRelocationSymbol(DataRefImpl Rel) const { MachO::any_relocation_info RE = getRelocation(Rel); if (isRelocationScattered(RE)) return symbol_end(); uint32_t SymbolIdx = getPlainRelocationSymbolNum(RE); bool isExtern = getPlainRelocationExternal(RE); if (!isExtern) return symbol_end(); MachO::symtab_command S = getSymtabLoadCommand(); unsigned SymbolTableEntrySize = is64Bit() ? sizeof(MachO::nlist_64) : sizeof(MachO::nlist); uint64_t Offset = S.symoff + SymbolIdx * SymbolTableEntrySize; DataRefImpl Sym; Sym.p = reinterpret_cast<uintptr_t>(getPtr(this, Offset)); return symbol_iterator(SymbolRef(Sym, this)); } section_iterator MachOObjectFile::getRelocationSection(DataRefImpl Rel) const { return section_iterator(getAnyRelocationSection(getRelocation(Rel))); } uint64_t MachOObjectFile::getRelocationType(DataRefImpl Rel) const { MachO::any_relocation_info RE = getRelocation(Rel); return getAnyRelocationType(RE); } void MachOObjectFile::getRelocationTypeName( DataRefImpl Rel, SmallVectorImpl<char> &Result) const { StringRef res; uint64_t RType = getRelocationType(Rel); unsigned Arch = this->getArch(); switch (Arch) { case Triple::x86: { static const char *const Table[] = { "GENERIC_RELOC_VANILLA", "GENERIC_RELOC_PAIR", "GENERIC_RELOC_SECTDIFF", "GENERIC_RELOC_PB_LA_PTR", "GENERIC_RELOC_LOCAL_SECTDIFF", "GENERIC_RELOC_TLV" }; if (RType > 5) res = "Unknown"; else res = Table[RType]; break; } case Triple::x86_64: { static const char *const Table[] = { "X86_64_RELOC_UNSIGNED", "X86_64_RELOC_SIGNED", "X86_64_RELOC_BRANCH", "X86_64_RELOC_GOT_LOAD", "X86_64_RELOC_GOT", "X86_64_RELOC_SUBTRACTOR", "X86_64_RELOC_SIGNED_1", "X86_64_RELOC_SIGNED_2", "X86_64_RELOC_SIGNED_4", "X86_64_RELOC_TLV" }; if (RType > 9) res = "Unknown"; else res = Table[RType]; break; } case Triple::arm: { static const char *const Table[] = { "ARM_RELOC_VANILLA", "ARM_RELOC_PAIR", "ARM_RELOC_SECTDIFF", "ARM_RELOC_LOCAL_SECTDIFF", "ARM_RELOC_PB_LA_PTR", "ARM_RELOC_BR24", "ARM_THUMB_RELOC_BR22", "ARM_THUMB_32BIT_BRANCH", "ARM_RELOC_HALF", "ARM_RELOC_HALF_SECTDIFF" }; if (RType > 9) res = "Unknown"; else res = Table[RType]; break; } case Triple::aarch64: { static const char *const Table[] = { "ARM64_RELOC_UNSIGNED", "ARM64_RELOC_SUBTRACTOR", 
"ARM64_RELOC_BRANCH26", "ARM64_RELOC_PAGE21", "ARM64_RELOC_PAGEOFF12", "ARM64_RELOC_GOT_LOAD_PAGE21", "ARM64_RELOC_GOT_LOAD_PAGEOFF12", "ARM64_RELOC_POINTER_TO_GOT", "ARM64_RELOC_TLVP_LOAD_PAGE21", "ARM64_RELOC_TLVP_LOAD_PAGEOFF12", "ARM64_RELOC_ADDEND" }; if (RType >= array_lengthof(Table)) res = "Unknown"; else res = Table[RType]; break; } case Triple::ppc: { static const char *const Table[] = { "PPC_RELOC_VANILLA", "PPC_RELOC_PAIR", "PPC_RELOC_BR14", "PPC_RELOC_BR24", "PPC_RELOC_HI16", "PPC_RELOC_LO16", "PPC_RELOC_HA16", "PPC_RELOC_LO14", "PPC_RELOC_SECTDIFF", "PPC_RELOC_PB_LA_PTR", "PPC_RELOC_HI16_SECTDIFF", "PPC_RELOC_LO16_SECTDIFF", "PPC_RELOC_HA16_SECTDIFF", "PPC_RELOC_JBSR", "PPC_RELOC_LO14_SECTDIFF", "PPC_RELOC_LOCAL_SECTDIFF" }; if (RType > 15) res = "Unknown"; else res = Table[RType]; break; } case Triple::UnknownArch: res = "Unknown"; break; } Result.append(res.begin(), res.end()); } uint8_t MachOObjectFile::getRelocationLength(DataRefImpl Rel) const { MachO::any_relocation_info RE = getRelocation(Rel); return getAnyRelocationLength(RE); } // // guessLibraryShortName() is passed a name of a dynamic library and returns a // guess on what the short name is. Then name is returned as a substring of the // StringRef Name passed in. The name of the dynamic library is recognized as // a framework if it has one of the two following forms: // Foo.framework/Versions/A/Foo // Foo.framework/Foo // Where A and Foo can be any string. And may contain a trailing suffix // starting with an underbar. If the Name is recognized as a framework then // isFramework is set to true else it is set to false. If the Name has a // suffix then Suffix is set to the substring in Name that contains the suffix // else it is set to a NULL StringRef. // // The Name of the dynamic library is recognized as a library name if it has // one of the two following forms: // libFoo.A.dylib // libFoo.dylib // The library may have a suffix trailing the name Foo of the form: // libFoo_profile.A.dylib // libFoo_profile.dylib // // The Name of the dynamic library is also recognized as a library name if it // has the following form: // Foo.qtx // // If the Name of the dynamic library is none of the forms above then a NULL // StringRef is returned. 
// StringRef MachOObjectFile::guessLibraryShortName(StringRef Name, bool &isFramework, StringRef &Suffix) { StringRef Foo, F, DotFramework, V, Dylib, Lib, Dot, Qtx; size_t a, b, c, d, Idx; isFramework = false; Suffix = StringRef(); // Pull off the last component and make Foo point to it a = Name.rfind('/'); if (a == Name.npos || a == 0) goto guess_library; Foo = Name.slice(a+1, Name.npos); // Look for a suffix starting with a '_' Idx = Foo.rfind('_'); if (Idx != Foo.npos && Foo.size() >= 2) { Suffix = Foo.slice(Idx, Foo.npos); Foo = Foo.slice(0, Idx); } // First look for the form Foo.framework/Foo b = Name.rfind('/', a); if (b == Name.npos) Idx = 0; else Idx = b+1; F = Name.slice(Idx, Idx + Foo.size()); DotFramework = Name.slice(Idx + Foo.size(), Idx + Foo.size() + sizeof(".framework/")-1); if (F == Foo && DotFramework == ".framework/") { isFramework = true; return Foo; } // Next look for the form Foo.framework/Versions/A/Foo if (b == Name.npos) goto guess_library; c = Name.rfind('/', b); if (c == Name.npos || c == 0) goto guess_library; V = Name.slice(c+1, Name.npos); if (!V.startswith("Versions/")) goto guess_library; d = Name.rfind('/', c); if (d == Name.npos) Idx = 0; else Idx = d+1; F = Name.slice(Idx, Idx + Foo.size()); DotFramework = Name.slice(Idx + Foo.size(), Idx + Foo.size() + sizeof(".framework/")-1); if (F == Foo && DotFramework == ".framework/") { isFramework = true; return Foo; } guess_library: // pull off the suffix after the "." and make a point to it a = Name.rfind('.'); if (a == Name.npos || a == 0) return StringRef(); Dylib = Name.slice(a, Name.npos); if (Dylib != ".dylib") goto guess_qtx; // First pull off the version letter for the form Foo.A.dylib if any. if (a >= 3) { Dot = Name.slice(a-2, a-1); if (Dot == ".") a = a - 2; } b = Name.rfind('/', a); if (b == Name.npos) b = 0; else b = b+1; // ignore any suffix after an underbar like Foo_profile.A.dylib Idx = Name.find('_', b); if (Idx != Name.npos && Idx != b) { Lib = Name.slice(b, Idx); Suffix = Name.slice(Idx, a); } else Lib = Name.slice(b, a); // There are incorrect library names of the form: // libATS.A_profile.dylib so check for these. if (Lib.size() >= 3) { Dot = Lib.slice(Lib.size()-2, Lib.size()-1); if (Dot == ".") Lib = Lib.slice(0, Lib.size()-2); } return Lib; guess_qtx: Qtx = Name.slice(a, Name.npos); if (Qtx != ".qtx") return StringRef(); b = Name.rfind('/', a); if (b == Name.npos) Lib = Name.slice(0, a); else Lib = Name.slice(b+1, a); // There are library names of the form: QT.A.qtx so check for these. if (Lib.size() >= 3) { Dot = Lib.slice(Lib.size()-2, Lib.size()-1); if (Dot == ".") Lib = Lib.slice(0, Lib.size()-2); } return Lib; } // getLibraryShortNameByIndex() is used to get the short name of the library // for an undefined symbol in a linked Mach-O binary that was linked with the // normal two-level namespace default (that is MH_TWOLEVEL in the header). // It is passed the index (0 - based) of the library as translated from // GET_LIBRARY_ORDINAL (1 - based). std::error_code MachOObjectFile::getLibraryShortNameByIndex(unsigned Index, StringRef &Res) const { if (Index >= Libraries.size()) return object_error::parse_failed; // If the cache of LibrariesShortNames is not built up do that first for // all the Libraries. 
if (LibrariesShortNames.size() == 0) { for (unsigned i = 0; i < Libraries.size(); i++) { MachO::dylib_command D = getStruct<MachO::dylib_command>(this, Libraries[i]); if (D.dylib.name >= D.cmdsize) return object_error::parse_failed; const char *P = (const char *)(Libraries[i]) + D.dylib.name; StringRef Name = StringRef(P); if (D.dylib.name+Name.size() >= D.cmdsize) return object_error::parse_failed; StringRef Suffix; bool isFramework; StringRef shortName = guessLibraryShortName(Name, isFramework, Suffix); if (shortName.empty()) LibrariesShortNames.push_back(Name); else LibrariesShortNames.push_back(shortName); } } Res = LibrariesShortNames[Index]; return std::error_code(); } section_iterator MachOObjectFile::getRelocationRelocatedSection(relocation_iterator Rel) const { DataRefImpl Sec; Sec.d.a = Rel->getRawDataRefImpl().d.a; return section_iterator(SectionRef(Sec, this)); } basic_symbol_iterator MachOObjectFile::symbol_begin_impl() const { return getSymbolByIndex(0); } basic_symbol_iterator MachOObjectFile::symbol_end_impl() const { DataRefImpl DRI; if (!SymtabLoadCmd) return basic_symbol_iterator(SymbolRef(DRI, this)); MachO::symtab_command Symtab = getSymtabLoadCommand(); unsigned SymbolTableEntrySize = is64Bit() ? sizeof(MachO::nlist_64) : sizeof(MachO::nlist); unsigned Offset = Symtab.symoff + Symtab.nsyms * SymbolTableEntrySize; DRI.p = reinterpret_cast<uintptr_t>(getPtr(this, Offset)); return basic_symbol_iterator(SymbolRef(DRI, this)); } basic_symbol_iterator MachOObjectFile::getSymbolByIndex(unsigned Index) const { DataRefImpl DRI; if (!SymtabLoadCmd) return basic_symbol_iterator(SymbolRef(DRI, this)); MachO::symtab_command Symtab = getSymtabLoadCommand(); if (Index >= Symtab.nsyms) report_fatal_error("Requested symbol index is out of range."); unsigned SymbolTableEntrySize = is64Bit() ? sizeof(MachO::nlist_64) : sizeof(MachO::nlist); DRI.p = reinterpret_cast<uintptr_t>(getPtr(this, Symtab.symoff)); DRI.p += Index * SymbolTableEntrySize; return basic_symbol_iterator(SymbolRef(DRI, this)); } section_iterator MachOObjectFile::section_begin() const { DataRefImpl DRI; return section_iterator(SectionRef(DRI, this)); } section_iterator MachOObjectFile::section_end() const { DataRefImpl DRI; DRI.d.a = Sections.size(); return section_iterator(SectionRef(DRI, this)); } uint8_t MachOObjectFile::getBytesInAddress() const { return is64Bit() ? 
8 : 4; } StringRef MachOObjectFile::getFileFormatName() const { unsigned CPUType = getCPUType(this); if (!is64Bit()) { switch (CPUType) { case llvm::MachO::CPU_TYPE_I386: return "Mach-O 32-bit i386"; case llvm::MachO::CPU_TYPE_ARM: return "Mach-O arm"; case llvm::MachO::CPU_TYPE_POWERPC: return "Mach-O 32-bit ppc"; default: return "Mach-O 32-bit unknown"; } } switch (CPUType) { case llvm::MachO::CPU_TYPE_X86_64: return "Mach-O 64-bit x86-64"; case llvm::MachO::CPU_TYPE_ARM64: return "Mach-O arm64"; case llvm::MachO::CPU_TYPE_POWERPC64: return "Mach-O 64-bit ppc64"; default: return "Mach-O 64-bit unknown"; } } Triple::ArchType MachOObjectFile::getArch(uint32_t CPUType) { switch (CPUType) { case llvm::MachO::CPU_TYPE_I386: return Triple::x86; case llvm::MachO::CPU_TYPE_X86_64: return Triple::x86_64; case llvm::MachO::CPU_TYPE_ARM: return Triple::arm; case llvm::MachO::CPU_TYPE_ARM64: return Triple::aarch64; case llvm::MachO::CPU_TYPE_POWERPC: return Triple::ppc; case llvm::MachO::CPU_TYPE_POWERPC64: return Triple::ppc64; default: return Triple::UnknownArch; } } Triple MachOObjectFile::getArch(uint32_t CPUType, uint32_t CPUSubType, const char **McpuDefault) { if (McpuDefault) *McpuDefault = nullptr; switch (CPUType) { case MachO::CPU_TYPE_I386: switch (CPUSubType & ~MachO::CPU_SUBTYPE_MASK) { case MachO::CPU_SUBTYPE_I386_ALL: return Triple("i386-apple-darwin"); default: return Triple(); } case MachO::CPU_TYPE_X86_64: switch (CPUSubType & ~MachO::CPU_SUBTYPE_MASK) { case MachO::CPU_SUBTYPE_X86_64_ALL: return Triple("x86_64-apple-darwin"); case MachO::CPU_SUBTYPE_X86_64_H: return Triple("x86_64h-apple-darwin"); default: return Triple(); } case MachO::CPU_TYPE_ARM: switch (CPUSubType & ~MachO::CPU_SUBTYPE_MASK) { case MachO::CPU_SUBTYPE_ARM_V4T: return Triple("armv4t-apple-darwin"); case MachO::CPU_SUBTYPE_ARM_V5TEJ: return Triple("armv5e-apple-darwin"); case MachO::CPU_SUBTYPE_ARM_XSCALE: return Triple("xscale-apple-darwin"); case MachO::CPU_SUBTYPE_ARM_V6: return Triple("armv6-apple-darwin"); case MachO::CPU_SUBTYPE_ARM_V6M: if (McpuDefault) *McpuDefault = "cortex-m0"; return Triple("armv6m-apple-darwin"); case MachO::CPU_SUBTYPE_ARM_V7: return Triple("armv7-apple-darwin"); case MachO::CPU_SUBTYPE_ARM_V7EM: if (McpuDefault) *McpuDefault = "cortex-m4"; return Triple("armv7em-apple-darwin"); case MachO::CPU_SUBTYPE_ARM_V7K: return Triple("armv7k-apple-darwin"); case MachO::CPU_SUBTYPE_ARM_V7M: if (McpuDefault) *McpuDefault = "cortex-m3"; return Triple("armv7m-apple-darwin"); case MachO::CPU_SUBTYPE_ARM_V7S: return Triple("armv7s-apple-darwin"); default: return Triple(); } case MachO::CPU_TYPE_ARM64: switch (CPUSubType & ~MachO::CPU_SUBTYPE_MASK) { case MachO::CPU_SUBTYPE_ARM64_ALL: return Triple("arm64-apple-darwin"); default: return Triple(); } case MachO::CPU_TYPE_POWERPC: switch (CPUSubType & ~MachO::CPU_SUBTYPE_MASK) { case MachO::CPU_SUBTYPE_POWERPC_ALL: return Triple("ppc-apple-darwin"); default: return Triple(); } case MachO::CPU_TYPE_POWERPC64: switch (CPUSubType & ~MachO::CPU_SUBTYPE_MASK) { case MachO::CPU_SUBTYPE_POWERPC_ALL: return Triple("ppc64-apple-darwin"); default: return Triple(); } default: return Triple(); } } Triple MachOObjectFile::getThumbArch(uint32_t CPUType, uint32_t CPUSubType, const char **McpuDefault) { if (McpuDefault) *McpuDefault = nullptr; switch (CPUType) { case MachO::CPU_TYPE_ARM: switch (CPUSubType & ~MachO::CPU_SUBTYPE_MASK) { case MachO::CPU_SUBTYPE_ARM_V4T: return Triple("thumbv4t-apple-darwin"); case MachO::CPU_SUBTYPE_ARM_V5TEJ: return 
Triple("thumbv5e-apple-darwin"); case MachO::CPU_SUBTYPE_ARM_XSCALE: return Triple("xscale-apple-darwin"); case MachO::CPU_SUBTYPE_ARM_V6: return Triple("thumbv6-apple-darwin"); case MachO::CPU_SUBTYPE_ARM_V6M: if (McpuDefault) *McpuDefault = "cortex-m0"; return Triple("thumbv6m-apple-darwin"); case MachO::CPU_SUBTYPE_ARM_V7: return Triple("thumbv7-apple-darwin"); case MachO::CPU_SUBTYPE_ARM_V7EM: if (McpuDefault) *McpuDefault = "cortex-m4"; return Triple("thumbv7em-apple-darwin"); case MachO::CPU_SUBTYPE_ARM_V7K: return Triple("thumbv7k-apple-darwin"); case MachO::CPU_SUBTYPE_ARM_V7M: if (McpuDefault) *McpuDefault = "cortex-m3"; return Triple("thumbv7m-apple-darwin"); case MachO::CPU_SUBTYPE_ARM_V7S: return Triple("thumbv7s-apple-darwin"); default: return Triple(); } default: return Triple(); } } Triple MachOObjectFile::getArch(uint32_t CPUType, uint32_t CPUSubType, const char **McpuDefault, Triple *ThumbTriple) { Triple T = MachOObjectFile::getArch(CPUType, CPUSubType, McpuDefault); *ThumbTriple = MachOObjectFile::getThumbArch(CPUType, CPUSubType, McpuDefault); return T; } Triple MachOObjectFile::getHostArch() { return Triple(sys::getDefaultTargetTriple()); } bool MachOObjectFile::isValidArch(StringRef ArchFlag) { return StringSwitch<bool>(ArchFlag) .Case("i386", true) .Case("x86_64", true) .Case("x86_64h", true) .Case("armv4t", true) .Case("arm", true) .Case("armv5e", true) .Case("armv6", true) .Case("armv6m", true) .Case("armv7", true) .Case("armv7em", true) .Case("armv7k", true) .Case("armv7m", true) .Case("armv7s", true) .Case("arm64", true) .Case("ppc", true) .Case("ppc64", true) .Default(false); } unsigned MachOObjectFile::getArch() const { return getArch(getCPUType(this)); } Triple MachOObjectFile::getArch(const char **McpuDefault, Triple *ThumbTriple) const { *ThumbTriple = getThumbArch(Header.cputype, Header.cpusubtype, McpuDefault); return getArch(Header.cputype, Header.cpusubtype, McpuDefault); } relocation_iterator MachOObjectFile::section_rel_begin(unsigned Index) const { DataRefImpl DRI; DRI.d.a = Index; return section_rel_begin(DRI); } relocation_iterator MachOObjectFile::section_rel_end(unsigned Index) const { DataRefImpl DRI; DRI.d.a = Index; return section_rel_end(DRI); } dice_iterator MachOObjectFile::begin_dices() const { DataRefImpl DRI; if (!DataInCodeLoadCmd) return dice_iterator(DiceRef(DRI, this)); MachO::linkedit_data_command DicLC = getDataInCodeLoadCommand(); DRI.p = reinterpret_cast<uintptr_t>(getPtr(this, DicLC.dataoff)); return dice_iterator(DiceRef(DRI, this)); } dice_iterator MachOObjectFile::end_dices() const { DataRefImpl DRI; if (!DataInCodeLoadCmd) return dice_iterator(DiceRef(DRI, this)); MachO::linkedit_data_command DicLC = getDataInCodeLoadCommand(); unsigned Offset = DicLC.dataoff + DicLC.datasize; DRI.p = reinterpret_cast<uintptr_t>(getPtr(this, Offset)); return dice_iterator(DiceRef(DRI, this)); } ExportEntry::ExportEntry(ArrayRef<uint8_t> T) : Trie(T), Malformed(false), Done(false) { } void ExportEntry::moveToFirst() { pushNode(0); pushDownUntilBottom(); } void ExportEntry::moveToEnd() { Stack.clear(); Done = true; } bool ExportEntry::operator==(const ExportEntry &Other) const { // Common case, one at end, other iterating from begin. if (Done || Other.Done) return (Done == Other.Done); // Not equal if different stack sizes. if (Stack.size() != Other.Stack.size()) return false; // Not equal if different cumulative strings. if (!CumulativeString.equals(Other.CumulativeString)) return false; // Equal if all nodes in both stacks match. 
for (unsigned i=0; i < Stack.size(); ++i) { if (Stack[i].Start != Other.Stack[i].Start) return false; } return true; } uint64_t ExportEntry::readULEB128(const uint8_t *&Ptr) { unsigned Count; uint64_t Result = decodeULEB128(Ptr, &Count); Ptr += Count; if (Ptr > Trie.end()) { Ptr = Trie.end(); Malformed = true; } return Result; } StringRef ExportEntry::name() const { return CumulativeString; } uint64_t ExportEntry::flags() const { return Stack.back().Flags; } uint64_t ExportEntry::address() const { return Stack.back().Address; } uint64_t ExportEntry::other() const { return Stack.back().Other; } StringRef ExportEntry::otherName() const { const char* ImportName = Stack.back().ImportName; if (ImportName) return StringRef(ImportName); return StringRef(); } uint32_t ExportEntry::nodeOffset() const { return Stack.back().Start - Trie.begin(); } ExportEntry::NodeState::NodeState(const uint8_t *Ptr) : Start(Ptr), Current(Ptr), Flags(0), Address(0), Other(0), ImportName(nullptr), ChildCount(0), NextChildIndex(0), ParentStringLength(0), IsExportNode(false) { } void ExportEntry::pushNode(uint64_t offset) { const uint8_t *Ptr = Trie.begin() + offset; NodeState State(Ptr); uint64_t ExportInfoSize = readULEB128(State.Current); State.IsExportNode = (ExportInfoSize != 0); const uint8_t* Children = State.Current + ExportInfoSize; if (State.IsExportNode) { State.Flags = readULEB128(State.Current); if (State.Flags & MachO::EXPORT_SYMBOL_FLAGS_REEXPORT) { State.Address = 0; State.Other = readULEB128(State.Current); // dylib ordinal State.ImportName = reinterpret_cast<const char*>(State.Current); } else { State.Address = readULEB128(State.Current); if (State.Flags & MachO::EXPORT_SYMBOL_FLAGS_STUB_AND_RESOLVER) State.Other = readULEB128(State.Current); } } State.ChildCount = *Children; State.Current = Children + 1; State.NextChildIndex = 0; State.ParentStringLength = CumulativeString.size(); Stack.push_back(State); } void ExportEntry::pushDownUntilBottom() { while (Stack.back().NextChildIndex < Stack.back().ChildCount) { NodeState &Top = Stack.back(); CumulativeString.resize(Top.ParentStringLength); for (;*Top.Current != 0; Top.Current++) { char C = *Top.Current; CumulativeString.push_back(C); } Top.Current += 1; uint64_t childNodeIndex = readULEB128(Top.Current); Top.NextChildIndex += 1; pushNode(childNodeIndex); } if (!Stack.back().IsExportNode) { Malformed = true; moveToEnd(); } } // We have a trie data structure and need a way to walk it that is compatible // with the C++ iterator model. The solution is a non-recursive depth first // traversal where the iterator contains a stack of parent nodes along with a // string that is the accumulation of all edge strings along the parent chain // to this point. // // There is one "export" node for each exported symbol. But because some // symbols may be a prefix of another symbol (e.g. _dup and _dup2), an export // node may have child nodes too. // // The algorithm for moveNext() is to keep moving down the leftmost unvisited // child until hitting a node with no children (which is an export node or // else the trie is malformed). On the way down, each node is pushed on the // stack ivar. If there is no more ways down, it pops up one and tries to go // down a sibling path until a childless node is reached. 
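//
// Illustrative sketch: clients normally drive this traversal through
// MachOObjectFile::exports() rather than touching the stack machinery
// directly. `Obj` is an assumed, already-parsed object and `dumpExportTrie`
// is a hypothetical helper, guarded out of the build and shown only as a
// usage example.
#if 0
static void dumpExportTrie(const llvm::object::MachOObjectFile &Obj) {
  // Each ExportEntry names one exported symbol reached by the trie walk.
  for (const llvm::object::ExportEntry &Entry : Obj.exports())
    llvm::outs() << Entry.name() << " flags=" << Entry.flags()
                 << " address=" << Entry.address() << "\n";
}
#endif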
void ExportEntry::moveNext() { if (Stack.empty() || !Stack.back().IsExportNode) { Malformed = true; moveToEnd(); return; } Stack.pop_back(); while (!Stack.empty()) { NodeState &Top = Stack.back(); if (Top.NextChildIndex < Top.ChildCount) { pushDownUntilBottom(); // Now at the next export node. return; } else { if (Top.IsExportNode) { // This node has no children but is itself an export node. CumulativeString.resize(Top.ParentStringLength); return; } Stack.pop_back(); } } Done = true; } iterator_range<export_iterator> MachOObjectFile::exports(ArrayRef<uint8_t> Trie) { ExportEntry Start(Trie); if (Trie.size() == 0) Start.moveToEnd(); else Start.moveToFirst(); ExportEntry Finish(Trie); Finish.moveToEnd(); return iterator_range<export_iterator>(export_iterator(Start), export_iterator(Finish)); } iterator_range<export_iterator> MachOObjectFile::exports() const { return exports(getDyldInfoExportsTrie()); } MachORebaseEntry::MachORebaseEntry(ArrayRef<uint8_t> Bytes, bool is64Bit) : Opcodes(Bytes), Ptr(Bytes.begin()), SegmentOffset(0), SegmentIndex(0), RemainingLoopCount(0), AdvanceAmount(0), RebaseType(0), PointerSize(is64Bit ? 8 : 4), Malformed(false), Done(false) {} void MachORebaseEntry::moveToFirst() { Ptr = Opcodes.begin(); moveNext(); } void MachORebaseEntry::moveToEnd() { Ptr = Opcodes.end(); RemainingLoopCount = 0; Done = true; } void MachORebaseEntry::moveNext() { // If in the middle of some loop, move to next rebasing in loop. SegmentOffset += AdvanceAmount; if (RemainingLoopCount) { --RemainingLoopCount; return; } if (Ptr == Opcodes.end()) { Done = true; return; } bool More = true; while (More && !Malformed) { // Parse next opcode and set up next loop. uint8_t Byte = *Ptr++; uint8_t ImmValue = Byte & MachO::REBASE_IMMEDIATE_MASK; uint8_t Opcode = Byte & MachO::REBASE_OPCODE_MASK; switch (Opcode) { case MachO::REBASE_OPCODE_DONE: More = false; Done = true; moveToEnd(); DEBUG_WITH_TYPE("mach-o-rebase", llvm::dbgs() << "REBASE_OPCODE_DONE\n"); break; case MachO::REBASE_OPCODE_SET_TYPE_IMM: RebaseType = ImmValue; DEBUG_WITH_TYPE( "mach-o-rebase", llvm::dbgs() << "REBASE_OPCODE_SET_TYPE_IMM: " << "RebaseType=" << (int) RebaseType << "\n"); break; case MachO::REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB: SegmentIndex = ImmValue; SegmentOffset = readULEB128(); DEBUG_WITH_TYPE( "mach-o-rebase", llvm::dbgs() << "REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB: " << "SegmentIndex=" << SegmentIndex << ", " << format("SegmentOffset=0x%06X", SegmentOffset) << "\n"); break; case MachO::REBASE_OPCODE_ADD_ADDR_ULEB: SegmentOffset += readULEB128(); DEBUG_WITH_TYPE("mach-o-rebase", llvm::dbgs() << "REBASE_OPCODE_ADD_ADDR_ULEB: " << format("SegmentOffset=0x%06X", SegmentOffset) << "\n"); break; case MachO::REBASE_OPCODE_ADD_ADDR_IMM_SCALED: SegmentOffset += ImmValue * PointerSize; DEBUG_WITH_TYPE("mach-o-rebase", llvm::dbgs() << "REBASE_OPCODE_ADD_ADDR_IMM_SCALED: " << format("SegmentOffset=0x%06X", SegmentOffset) << "\n"); break; case MachO::REBASE_OPCODE_DO_REBASE_IMM_TIMES: AdvanceAmount = PointerSize; RemainingLoopCount = ImmValue - 1; DEBUG_WITH_TYPE( "mach-o-rebase", llvm::dbgs() << "REBASE_OPCODE_DO_REBASE_IMM_TIMES: " << format("SegmentOffset=0x%06X", SegmentOffset) << ", AdvanceAmount=" << AdvanceAmount << ", RemainingLoopCount=" << RemainingLoopCount << "\n"); return; case MachO::REBASE_OPCODE_DO_REBASE_ULEB_TIMES: AdvanceAmount = PointerSize; RemainingLoopCount = readULEB128() - 1; DEBUG_WITH_TYPE( "mach-o-rebase", llvm::dbgs() << "REBASE_OPCODE_DO_REBASE_ULEB_TIMES: " << format("SegmentOffset=0x%06X", 
SegmentOffset) << ", AdvanceAmount=" << AdvanceAmount << ", RemainingLoopCount=" << RemainingLoopCount << "\n"); return; case MachO::REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB: AdvanceAmount = readULEB128() + PointerSize; RemainingLoopCount = 0; DEBUG_WITH_TYPE( "mach-o-rebase", llvm::dbgs() << "REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB: " << format("SegmentOffset=0x%06X", SegmentOffset) << ", AdvanceAmount=" << AdvanceAmount << ", RemainingLoopCount=" << RemainingLoopCount << "\n"); return; case MachO::REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB: RemainingLoopCount = readULEB128() - 1; AdvanceAmount = readULEB128() + PointerSize; DEBUG_WITH_TYPE( "mach-o-rebase", llvm::dbgs() << "REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB: " << format("SegmentOffset=0x%06X", SegmentOffset) << ", AdvanceAmount=" << AdvanceAmount << ", RemainingLoopCount=" << RemainingLoopCount << "\n"); return; default: Malformed = true; } } } uint64_t MachORebaseEntry::readULEB128() { unsigned Count; uint64_t Result = decodeULEB128(Ptr, &Count); Ptr += Count; if (Ptr > Opcodes.end()) { Ptr = Opcodes.end(); Malformed = true; } return Result; } uint32_t MachORebaseEntry::segmentIndex() const { return SegmentIndex; } uint64_t MachORebaseEntry::segmentOffset() const { return SegmentOffset; } StringRef MachORebaseEntry::typeName() const { switch (RebaseType) { case MachO::REBASE_TYPE_POINTER: return "pointer"; case MachO::REBASE_TYPE_TEXT_ABSOLUTE32: return "text abs32"; case MachO::REBASE_TYPE_TEXT_PCREL32: return "text rel32"; } return "unknown"; } bool MachORebaseEntry::operator==(const MachORebaseEntry &Other) const { assert(Opcodes == Other.Opcodes && "compare iterators of different files"); return (Ptr == Other.Ptr) && (RemainingLoopCount == Other.RemainingLoopCount) && (Done == Other.Done); } iterator_range<rebase_iterator> MachOObjectFile::rebaseTable(ArrayRef<uint8_t> Opcodes, bool is64) { MachORebaseEntry Start(Opcodes, is64); Start.moveToFirst(); MachORebaseEntry Finish(Opcodes, is64); Finish.moveToEnd(); return iterator_range<rebase_iterator>(rebase_iterator(Start), rebase_iterator(Finish)); } iterator_range<rebase_iterator> MachOObjectFile::rebaseTable() const { return rebaseTable(getDyldInfoRebaseOpcodes(), is64Bit()); } MachOBindEntry::MachOBindEntry(ArrayRef<uint8_t> Bytes, bool is64Bit, Kind BK) : Opcodes(Bytes), Ptr(Bytes.begin()), SegmentOffset(0), SegmentIndex(0), Ordinal(0), Flags(0), Addend(0), RemainingLoopCount(0), AdvanceAmount(0), BindType(0), PointerSize(is64Bit ? 8 : 4), TableKind(BK), Malformed(false), Done(false) {} void MachOBindEntry::moveToFirst() { Ptr = Opcodes.begin(); moveNext(); } void MachOBindEntry::moveToEnd() { Ptr = Opcodes.end(); RemainingLoopCount = 0; Done = true; } void MachOBindEntry::moveNext() { // If in the middle of some loop, move to next binding in loop. SegmentOffset += AdvanceAmount; if (RemainingLoopCount) { --RemainingLoopCount; return; } if (Ptr == Opcodes.end()) { Done = true; return; } bool More = true; while (More && !Malformed) { // Parse next opcode and set up next loop. uint8_t Byte = *Ptr++; uint8_t ImmValue = Byte & MachO::BIND_IMMEDIATE_MASK; uint8_t Opcode = Byte & MachO::BIND_OPCODE_MASK; int8_t SignExtended; const uint8_t *SymStart; switch (Opcode) { case MachO::BIND_OPCODE_DONE: if (TableKind == Kind::Lazy) { // Lazying bindings have a DONE opcode between entries. Need to ignore // it to advance to next entry. But need not if this is last entry. 
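// The loop below peeks at the remaining opcode bytes: if any non-zero byte
// follows, this DONE merely separates two lazy-binding entries and parsing
// continues; if every remaining byte is zero, it is the real terminator.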
bool NotLastEntry = false; for (const uint8_t *P = Ptr; P < Opcodes.end(); ++P) { if (*P) { NotLastEntry = true; } } if (NotLastEntry) break; } More = false; Done = true; moveToEnd(); DEBUG_WITH_TYPE("mach-o-bind", llvm::dbgs() << "BIND_OPCODE_DONE\n"); break; case MachO::BIND_OPCODE_SET_DYLIB_ORDINAL_IMM: Ordinal = ImmValue; DEBUG_WITH_TYPE( "mach-o-bind", llvm::dbgs() << "BIND_OPCODE_SET_DYLIB_ORDINAL_IMM: " << "Ordinal=" << Ordinal << "\n"); break; case MachO::BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB: Ordinal = readULEB128(); DEBUG_WITH_TYPE( "mach-o-bind", llvm::dbgs() << "BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB: " << "Ordinal=" << Ordinal << "\n"); break; case MachO::BIND_OPCODE_SET_DYLIB_SPECIAL_IMM: if (ImmValue) { SignExtended = MachO::BIND_OPCODE_MASK | ImmValue; Ordinal = SignExtended; } else Ordinal = 0; DEBUG_WITH_TYPE( "mach-o-bind", llvm::dbgs() << "BIND_OPCODE_SET_DYLIB_SPECIAL_IMM: " << "Ordinal=" << Ordinal << "\n"); break; case MachO::BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM: Flags = ImmValue; SymStart = Ptr; while (*Ptr) { ++Ptr; } SymbolName = StringRef(reinterpret_cast<const char*>(SymStart), Ptr-SymStart); ++Ptr; DEBUG_WITH_TYPE( "mach-o-bind", llvm::dbgs() << "BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM: " << "SymbolName=" << SymbolName << "\n"); if (TableKind == Kind::Weak) { if (ImmValue & MachO::BIND_SYMBOL_FLAGS_NON_WEAK_DEFINITION) return; } break; case MachO::BIND_OPCODE_SET_TYPE_IMM: BindType = ImmValue; DEBUG_WITH_TYPE( "mach-o-bind", llvm::dbgs() << "BIND_OPCODE_SET_TYPE_IMM: " << "BindType=" << (int)BindType << "\n"); break; case MachO::BIND_OPCODE_SET_ADDEND_SLEB: Addend = readSLEB128(); if (TableKind == Kind::Lazy) Malformed = true; DEBUG_WITH_TYPE( "mach-o-bind", llvm::dbgs() << "BIND_OPCODE_SET_ADDEND_SLEB: " << "Addend=" << Addend << "\n"); break; case MachO::BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB: SegmentIndex = ImmValue; SegmentOffset = readULEB128(); DEBUG_WITH_TYPE( "mach-o-bind", llvm::dbgs() << "BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB: " << "SegmentIndex=" << SegmentIndex << ", " << format("SegmentOffset=0x%06X", SegmentOffset) << "\n"); break; case MachO::BIND_OPCODE_ADD_ADDR_ULEB: SegmentOffset += readULEB128(); DEBUG_WITH_TYPE("mach-o-bind", llvm::dbgs() << "BIND_OPCODE_ADD_ADDR_ULEB: " << format("SegmentOffset=0x%06X", SegmentOffset) << "\n"); break; case MachO::BIND_OPCODE_DO_BIND: AdvanceAmount = PointerSize; RemainingLoopCount = 0; DEBUG_WITH_TYPE("mach-o-bind", llvm::dbgs() << "BIND_OPCODE_DO_BIND: " << format("SegmentOffset=0x%06X", SegmentOffset) << "\n"); return; case MachO::BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB: AdvanceAmount = readULEB128() + PointerSize; RemainingLoopCount = 0; if (TableKind == Kind::Lazy) Malformed = true; DEBUG_WITH_TYPE( "mach-o-bind", llvm::dbgs() << "BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB: " << format("SegmentOffset=0x%06X", SegmentOffset) << ", AdvanceAmount=" << AdvanceAmount << ", RemainingLoopCount=" << RemainingLoopCount << "\n"); return; case MachO::BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED: AdvanceAmount = ImmValue * PointerSize + PointerSize; RemainingLoopCount = 0; if (TableKind == Kind::Lazy) Malformed = true; DEBUG_WITH_TYPE("mach-o-bind", llvm::dbgs() << "BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED: " << format("SegmentOffset=0x%06X", SegmentOffset) << "\n"); return; case MachO::BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB: RemainingLoopCount = readULEB128() - 1; AdvanceAmount = readULEB128() + PointerSize; if (TableKind == Kind::Lazy) Malformed = true; DEBUG_WITH_TYPE( "mach-o-bind", llvm::dbgs() << 
"BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB: " << format("SegmentOffset=0x%06X", SegmentOffset) << ", AdvanceAmount=" << AdvanceAmount << ", RemainingLoopCount=" << RemainingLoopCount << "\n"); return; default: Malformed = true; } } } uint64_t MachOBindEntry::readULEB128() { unsigned Count; uint64_t Result = decodeULEB128(Ptr, &Count); Ptr += Count; if (Ptr > Opcodes.end()) { Ptr = Opcodes.end(); Malformed = true; } return Result; } int64_t MachOBindEntry::readSLEB128() { unsigned Count; int64_t Result = decodeSLEB128(Ptr, &Count); Ptr += Count; if (Ptr > Opcodes.end()) { Ptr = Opcodes.end(); Malformed = true; } return Result; } uint32_t MachOBindEntry::segmentIndex() const { return SegmentIndex; } uint64_t MachOBindEntry::segmentOffset() const { return SegmentOffset; } StringRef MachOBindEntry::typeName() const { switch (BindType) { case MachO::BIND_TYPE_POINTER: return "pointer"; case MachO::BIND_TYPE_TEXT_ABSOLUTE32: return "text abs32"; case MachO::BIND_TYPE_TEXT_PCREL32: return "text rel32"; } return "unknown"; } StringRef MachOBindEntry::symbolName() const { return SymbolName; } int64_t MachOBindEntry::addend() const { return Addend; } uint32_t MachOBindEntry::flags() const { return Flags; } int MachOBindEntry::ordinal() const { return Ordinal; } bool MachOBindEntry::operator==(const MachOBindEntry &Other) const { assert(Opcodes == Other.Opcodes && "compare iterators of different files"); return (Ptr == Other.Ptr) && (RemainingLoopCount == Other.RemainingLoopCount) && (Done == Other.Done); } iterator_range<bind_iterator> MachOObjectFile::bindTable(ArrayRef<uint8_t> Opcodes, bool is64, MachOBindEntry::Kind BKind) { MachOBindEntry Start(Opcodes, is64, BKind); Start.moveToFirst(); MachOBindEntry Finish(Opcodes, is64, BKind); Finish.moveToEnd(); return iterator_range<bind_iterator>(bind_iterator(Start), bind_iterator(Finish)); } iterator_range<bind_iterator> MachOObjectFile::bindTable() const { return bindTable(getDyldInfoBindOpcodes(), is64Bit(), MachOBindEntry::Kind::Regular); } iterator_range<bind_iterator> MachOObjectFile::lazyBindTable() const { return bindTable(getDyldInfoLazyBindOpcodes(), is64Bit(), MachOBindEntry::Kind::Lazy); } iterator_range<bind_iterator> MachOObjectFile::weakBindTable() const { return bindTable(getDyldInfoWeakBindOpcodes(), is64Bit(), MachOBindEntry::Kind::Weak); } MachOObjectFile::load_command_iterator MachOObjectFile::begin_load_commands() const { return LoadCommands.begin(); } MachOObjectFile::load_command_iterator MachOObjectFile::end_load_commands() const { return LoadCommands.end(); } iterator_range<MachOObjectFile::load_command_iterator> MachOObjectFile::load_commands() const { return iterator_range<load_command_iterator>(begin_load_commands(), end_load_commands()); } StringRef MachOObjectFile::getSectionFinalSegmentName(DataRefImpl Sec) const { ArrayRef<char> Raw = getSectionRawFinalSegmentName(Sec); return parseSegmentOrSectionName(Raw.data()); } ArrayRef<char> MachOObjectFile::getSectionRawName(DataRefImpl Sec) const { assert(Sec.d.a < Sections.size() && "Should have detected this earlier"); const section_base *Base = reinterpret_cast<const section_base *>(Sections[Sec.d.a]); return makeArrayRef(Base->sectname); } ArrayRef<char> MachOObjectFile::getSectionRawFinalSegmentName(DataRefImpl Sec) const { assert(Sec.d.a < Sections.size() && "Should have detected this earlier"); const section_base *Base = reinterpret_cast<const section_base *>(Sections[Sec.d.a]); return makeArrayRef(Base->segname); } bool MachOObjectFile::isRelocationScattered(const 
MachO::any_relocation_info &RE) const { if (getCPUType(this) == MachO::CPU_TYPE_X86_64) return false; return getPlainRelocationAddress(RE) & MachO::R_SCATTERED; } unsigned MachOObjectFile::getPlainRelocationSymbolNum( const MachO::any_relocation_info &RE) const { if (isLittleEndian()) return RE.r_word1 & 0xffffff; return RE.r_word1 >> 8; } bool MachOObjectFile::getPlainRelocationExternal( const MachO::any_relocation_info &RE) const { if (isLittleEndian()) return (RE.r_word1 >> 27) & 1; return (RE.r_word1 >> 4) & 1; } bool MachOObjectFile::getScatteredRelocationScattered( const MachO::any_relocation_info &RE) const { return RE.r_word0 >> 31; } uint32_t MachOObjectFile::getScatteredRelocationValue( const MachO::any_relocation_info &RE) const { return RE.r_word1; } uint32_t MachOObjectFile::getScatteredRelocationType( const MachO::any_relocation_info &RE) const { return (RE.r_word0 >> 24) & 0xf; } unsigned MachOObjectFile::getAnyRelocationAddress( const MachO::any_relocation_info &RE) const { if (isRelocationScattered(RE)) return getScatteredRelocationAddress(RE); return getPlainRelocationAddress(RE); } unsigned MachOObjectFile::getAnyRelocationPCRel( const MachO::any_relocation_info &RE) const { if (isRelocationScattered(RE)) return getScatteredRelocationPCRel(this, RE); return getPlainRelocationPCRel(this, RE); } unsigned MachOObjectFile::getAnyRelocationLength( const MachO::any_relocation_info &RE) const { if (isRelocationScattered(RE)) return getScatteredRelocationLength(RE); return getPlainRelocationLength(this, RE); } unsigned MachOObjectFile::getAnyRelocationType( const MachO::any_relocation_info &RE) const { if (isRelocationScattered(RE)) return getScatteredRelocationType(RE); return getPlainRelocationType(this, RE); } SectionRef MachOObjectFile::getAnyRelocationSection( const MachO::any_relocation_info &RE) const { if (isRelocationScattered(RE) || getPlainRelocationExternal(RE)) return *section_end(); unsigned SecNum = getPlainRelocationSymbolNum(RE); if (SecNum == MachO::R_ABS || SecNum > Sections.size()) return *section_end(); DataRefImpl DRI; DRI.d.a = SecNum - 1; return SectionRef(DRI, this); } MachO::section MachOObjectFile::getSection(DataRefImpl DRI) const { assert(DRI.d.a < Sections.size() && "Should have detected this earlier"); return getStruct<MachO::section>(this, Sections[DRI.d.a]); } MachO::section_64 MachOObjectFile::getSection64(DataRefImpl DRI) const { assert(DRI.d.a < Sections.size() && "Should have detected this earlier"); return getStruct<MachO::section_64>(this, Sections[DRI.d.a]); } MachO::section MachOObjectFile::getSection(const LoadCommandInfo &L, unsigned Index) const { const char *Sec = getSectionPtr(this, L, Index); return getStruct<MachO::section>(this, Sec); } MachO::section_64 MachOObjectFile::getSection64(const LoadCommandInfo &L, unsigned Index) const { const char *Sec = getSectionPtr(this, L, Index); return getStruct<MachO::section_64>(this, Sec); } MachO::nlist MachOObjectFile::getSymbolTableEntry(DataRefImpl DRI) const { const char *P = reinterpret_cast<const char *>(DRI.p); return getStruct<MachO::nlist>(this, P); } MachO::nlist_64 MachOObjectFile::getSymbol64TableEntry(DataRefImpl DRI) const { const char *P = reinterpret_cast<const char *>(DRI.p); return getStruct<MachO::nlist_64>(this, P); } MachO::linkedit_data_command MachOObjectFile::getLinkeditDataLoadCommand(const LoadCommandInfo &L) const { return getStruct<MachO::linkedit_data_command>(this, L.Ptr); } MachO::segment_command MachOObjectFile::getSegmentLoadCommand(const LoadCommandInfo &L) 
const { return getStruct<MachO::segment_command>(this, L.Ptr); } MachO::segment_command_64 MachOObjectFile::getSegment64LoadCommand(const LoadCommandInfo &L) const { return getStruct<MachO::segment_command_64>(this, L.Ptr); } MachO::linker_option_command MachOObjectFile::getLinkerOptionLoadCommand(const LoadCommandInfo &L) const { return getStruct<MachO::linker_option_command>(this, L.Ptr); } MachO::version_min_command MachOObjectFile::getVersionMinLoadCommand(const LoadCommandInfo &L) const { return getStruct<MachO::version_min_command>(this, L.Ptr); } MachO::dylib_command MachOObjectFile::getDylibIDLoadCommand(const LoadCommandInfo &L) const { return getStruct<MachO::dylib_command>(this, L.Ptr); } MachO::dyld_info_command MachOObjectFile::getDyldInfoLoadCommand(const LoadCommandInfo &L) const { return getStruct<MachO::dyld_info_command>(this, L.Ptr); } MachO::dylinker_command MachOObjectFile::getDylinkerCommand(const LoadCommandInfo &L) const { return getStruct<MachO::dylinker_command>(this, L.Ptr); } MachO::uuid_command MachOObjectFile::getUuidCommand(const LoadCommandInfo &L) const { return getStruct<MachO::uuid_command>(this, L.Ptr); } MachO::rpath_command MachOObjectFile::getRpathCommand(const LoadCommandInfo &L) const { return getStruct<MachO::rpath_command>(this, L.Ptr); } MachO::source_version_command MachOObjectFile::getSourceVersionCommand(const LoadCommandInfo &L) const { return getStruct<MachO::source_version_command>(this, L.Ptr); } MachO::entry_point_command MachOObjectFile::getEntryPointCommand(const LoadCommandInfo &L) const { return getStruct<MachO::entry_point_command>(this, L.Ptr); } MachO::encryption_info_command MachOObjectFile::getEncryptionInfoCommand(const LoadCommandInfo &L) const { return getStruct<MachO::encryption_info_command>(this, L.Ptr); } MachO::encryption_info_command_64 MachOObjectFile::getEncryptionInfoCommand64(const LoadCommandInfo &L) const { return getStruct<MachO::encryption_info_command_64>(this, L.Ptr); } MachO::sub_framework_command MachOObjectFile::getSubFrameworkCommand(const LoadCommandInfo &L) const { return getStruct<MachO::sub_framework_command>(this, L.Ptr); } MachO::sub_umbrella_command MachOObjectFile::getSubUmbrellaCommand(const LoadCommandInfo &L) const { return getStruct<MachO::sub_umbrella_command>(this, L.Ptr); } MachO::sub_library_command MachOObjectFile::getSubLibraryCommand(const LoadCommandInfo &L) const { return getStruct<MachO::sub_library_command>(this, L.Ptr); } MachO::sub_client_command MachOObjectFile::getSubClientCommand(const LoadCommandInfo &L) const { return getStruct<MachO::sub_client_command>(this, L.Ptr); } MachO::routines_command MachOObjectFile::getRoutinesCommand(const LoadCommandInfo &L) const { return getStruct<MachO::routines_command>(this, L.Ptr); } MachO::routines_command_64 MachOObjectFile::getRoutinesCommand64(const LoadCommandInfo &L) const { return getStruct<MachO::routines_command_64>(this, L.Ptr); } MachO::thread_command MachOObjectFile::getThreadCommand(const LoadCommandInfo &L) const { return getStruct<MachO::thread_command>(this, L.Ptr); } MachO::any_relocation_info MachOObjectFile::getRelocation(DataRefImpl Rel) const { DataRefImpl Sec; Sec.d.a = Rel.d.a; uint32_t Offset; if (is64Bit()) { MachO::section_64 Sect = getSection64(Sec); Offset = Sect.reloff; } else { MachO::section Sect = getSection(Sec); Offset = Sect.reloff; } auto P = reinterpret_cast<const MachO::any_relocation_info *>( getPtr(this, Offset)) + Rel.d.b; return getStruct<MachO::any_relocation_info>( this, reinterpret_cast<const char 
*>(P)); } MachO::data_in_code_entry MachOObjectFile::getDice(DataRefImpl Rel) const { const char *P = reinterpret_cast<const char *>(Rel.p); return getStruct<MachO::data_in_code_entry>(this, P); } const MachO::mach_header &MachOObjectFile::getHeader() const { return Header; } const MachO::mach_header_64 &MachOObjectFile::getHeader64() const { assert(is64Bit()); return Header64; } uint32_t MachOObjectFile::getIndirectSymbolTableEntry( const MachO::dysymtab_command &DLC, unsigned Index) const { uint64_t Offset = DLC.indirectsymoff + Index * sizeof(uint32_t); return getStruct<uint32_t>(this, getPtr(this, Offset)); } MachO::data_in_code_entry MachOObjectFile::getDataInCodeTableEntry(uint32_t DataOffset, unsigned Index) const { uint64_t Offset = DataOffset + Index * sizeof(MachO::data_in_code_entry); return getStruct<MachO::data_in_code_entry>(this, getPtr(this, Offset)); } MachO::symtab_command MachOObjectFile::getSymtabLoadCommand() const { if (SymtabLoadCmd) return getStruct<MachO::symtab_command>(this, SymtabLoadCmd); // If there is no SymtabLoadCmd return a load command with zero'ed fields. MachO::symtab_command Cmd; Cmd.cmd = MachO::LC_SYMTAB; Cmd.cmdsize = sizeof(MachO::symtab_command); Cmd.symoff = 0; Cmd.nsyms = 0; Cmd.stroff = 0; Cmd.strsize = 0; return Cmd; } MachO::dysymtab_command MachOObjectFile::getDysymtabLoadCommand() const { if (DysymtabLoadCmd) return getStruct<MachO::dysymtab_command>(this, DysymtabLoadCmd); // If there is no DysymtabLoadCmd return a load command with zero'ed fields. MachO::dysymtab_command Cmd; Cmd.cmd = MachO::LC_DYSYMTAB; Cmd.cmdsize = sizeof(MachO::dysymtab_command); Cmd.ilocalsym = 0; Cmd.nlocalsym = 0; Cmd.iextdefsym = 0; Cmd.nextdefsym = 0; Cmd.iundefsym = 0; Cmd.nundefsym = 0; Cmd.tocoff = 0; Cmd.ntoc = 0; Cmd.modtaboff = 0; Cmd.nmodtab = 0; Cmd.extrefsymoff = 0; Cmd.nextrefsyms = 0; Cmd.indirectsymoff = 0; Cmd.nindirectsyms = 0; Cmd.extreloff = 0; Cmd.nextrel = 0; Cmd.locreloff = 0; Cmd.nlocrel = 0; return Cmd; } MachO::linkedit_data_command MachOObjectFile::getDataInCodeLoadCommand() const { if (DataInCodeLoadCmd) return getStruct<MachO::linkedit_data_command>(this, DataInCodeLoadCmd); // If there is no DataInCodeLoadCmd return a load command with zero'ed fields. MachO::linkedit_data_command Cmd; Cmd.cmd = MachO::LC_DATA_IN_CODE; Cmd.cmdsize = sizeof(MachO::linkedit_data_command); Cmd.dataoff = 0; Cmd.datasize = 0; return Cmd; } MachO::linkedit_data_command MachOObjectFile::getLinkOptHintsLoadCommand() const { if (LinkOptHintsLoadCmd) return getStruct<MachO::linkedit_data_command>(this, LinkOptHintsLoadCmd); // If there is no LinkOptHintsLoadCmd return a load command with zero'ed // fields. 
MachO::linkedit_data_command Cmd; Cmd.cmd = MachO::LC_LINKER_OPTIMIZATION_HINT; Cmd.cmdsize = sizeof(MachO::linkedit_data_command); Cmd.dataoff = 0; Cmd.datasize = 0; return Cmd; } ArrayRef<uint8_t> MachOObjectFile::getDyldInfoRebaseOpcodes() const { if (!DyldInfoLoadCmd) return ArrayRef<uint8_t>(); MachO::dyld_info_command DyldInfo = getStruct<MachO::dyld_info_command>(this, DyldInfoLoadCmd); const uint8_t *Ptr = reinterpret_cast<const uint8_t*>( getPtr(this, DyldInfo.rebase_off)); return ArrayRef<uint8_t>(Ptr, DyldInfo.rebase_size); } ArrayRef<uint8_t> MachOObjectFile::getDyldInfoBindOpcodes() const { if (!DyldInfoLoadCmd) return ArrayRef<uint8_t>(); MachO::dyld_info_command DyldInfo = getStruct<MachO::dyld_info_command>(this, DyldInfoLoadCmd); const uint8_t *Ptr = reinterpret_cast<const uint8_t*>( getPtr(this, DyldInfo.bind_off)); return ArrayRef<uint8_t>(Ptr, DyldInfo.bind_size); } ArrayRef<uint8_t> MachOObjectFile::getDyldInfoWeakBindOpcodes() const { if (!DyldInfoLoadCmd) return ArrayRef<uint8_t>(); MachO::dyld_info_command DyldInfo = getStruct<MachO::dyld_info_command>(this, DyldInfoLoadCmd); const uint8_t *Ptr = reinterpret_cast<const uint8_t*>( getPtr(this, DyldInfo.weak_bind_off)); return ArrayRef<uint8_t>(Ptr, DyldInfo.weak_bind_size); } ArrayRef<uint8_t> MachOObjectFile::getDyldInfoLazyBindOpcodes() const { if (!DyldInfoLoadCmd) return ArrayRef<uint8_t>(); MachO::dyld_info_command DyldInfo = getStruct<MachO::dyld_info_command>(this, DyldInfoLoadCmd); const uint8_t *Ptr = reinterpret_cast<const uint8_t*>( getPtr(this, DyldInfo.lazy_bind_off)); return ArrayRef<uint8_t>(Ptr, DyldInfo.lazy_bind_size); } ArrayRef<uint8_t> MachOObjectFile::getDyldInfoExportsTrie() const { if (!DyldInfoLoadCmd) return ArrayRef<uint8_t>(); MachO::dyld_info_command DyldInfo = getStruct<MachO::dyld_info_command>(this, DyldInfoLoadCmd); const uint8_t *Ptr = reinterpret_cast<const uint8_t*>( getPtr(this, DyldInfo.export_off)); return ArrayRef<uint8_t>(Ptr, DyldInfo.export_size); } ArrayRef<uint8_t> MachOObjectFile::getUuid() const { if (!UuidLoadCmd) return ArrayRef<uint8_t>(); // Returning a pointer is fine as uuid doesn't need endian swapping. 
const char *Ptr = UuidLoadCmd + offsetof(MachO::uuid_command, uuid); return ArrayRef<uint8_t>(reinterpret_cast<const uint8_t *>(Ptr), 16); } StringRef MachOObjectFile::getStringTableData() const { MachO::symtab_command S = getSymtabLoadCommand(); return getData().substr(S.stroff, S.strsize); } bool MachOObjectFile::is64Bit() const { return getType() == getMachOType(false, true) || getType() == getMachOType(true, true); } void MachOObjectFile::ReadULEB128s(uint64_t Index, SmallVectorImpl<uint64_t> &Out) const { DataExtractor extractor(ObjectFile::getData(), true, 0); uint32_t offset = Index; uint64_t data = 0; while (uint64_t delta = extractor.getULEB128(&offset)) { data += delta; Out.push_back(data); } } bool MachOObjectFile::isRelocatableObject() const { return getHeader().filetype == MachO::MH_OBJECT; } ErrorOr<std::unique_ptr<MachOObjectFile>> ObjectFile::createMachOObjectFile(MemoryBufferRef Buffer) { StringRef Magic = Buffer.getBuffer().slice(0, 4); std::error_code EC; std::unique_ptr<MachOObjectFile> Ret; if (Magic == "\xFE\xED\xFA\xCE") Ret.reset(new MachOObjectFile(Buffer, false, false, EC)); else if (Magic == "\xCE\xFA\xED\xFE") Ret.reset(new MachOObjectFile(Buffer, true, false, EC)); else if (Magic == "\xFE\xED\xFA\xCF") Ret.reset(new MachOObjectFile(Buffer, false, true, EC)); else if (Magic == "\xCF\xFA\xED\xFE") Ret.reset(new MachOObjectFile(Buffer, true, true, EC)); else return object_error::parse_failed; if (EC) return EC; return std::move(Ret); }
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Object/Object.cpp
//===- Object.cpp - C bindings to the object file library--------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the C bindings to the file-format-independent object // library. // //===----------------------------------------------------------------------===// #include "llvm/ADT/SmallVector.h" #include "llvm-c/Object.h" #include "llvm/Object/ObjectFile.h" using namespace llvm; using namespace object; inline OwningBinary<ObjectFile> *unwrap(LLVMObjectFileRef OF) { return reinterpret_cast<OwningBinary<ObjectFile> *>(OF); } inline LLVMObjectFileRef wrap(const OwningBinary<ObjectFile> *OF) { return reinterpret_cast<LLVMObjectFileRef>( const_cast<OwningBinary<ObjectFile> *>(OF)); } inline section_iterator *unwrap(LLVMSectionIteratorRef SI) { return reinterpret_cast<section_iterator*>(SI); } inline LLVMSectionIteratorRef wrap(const section_iterator *SI) { return reinterpret_cast<LLVMSectionIteratorRef> (const_cast<section_iterator*>(SI)); } inline symbol_iterator *unwrap(LLVMSymbolIteratorRef SI) { return reinterpret_cast<symbol_iterator*>(SI); } inline LLVMSymbolIteratorRef wrap(const symbol_iterator *SI) { return reinterpret_cast<LLVMSymbolIteratorRef> (const_cast<symbol_iterator*>(SI)); } inline relocation_iterator *unwrap(LLVMRelocationIteratorRef SI) { return reinterpret_cast<relocation_iterator*>(SI); } inline LLVMRelocationIteratorRef wrap(const relocation_iterator *SI) { return reinterpret_cast<LLVMRelocationIteratorRef> (const_cast<relocation_iterator*>(SI)); } // ObjectFile creation LLVMObjectFileRef LLVMCreateObjectFile(LLVMMemoryBufferRef MemBuf) { std::unique_ptr<MemoryBuffer> Buf(unwrap(MemBuf)); ErrorOr<std::unique_ptr<ObjectFile>> ObjOrErr( ObjectFile::createObjectFile(Buf->getMemBufferRef())); std::unique_ptr<ObjectFile> Obj; if (!ObjOrErr) return nullptr; auto *Ret = new OwningBinary<ObjectFile>(std::move(ObjOrErr.get()), std::move(Buf)); return wrap(Ret); } void LLVMDisposeObjectFile(LLVMObjectFileRef ObjectFile) { delete unwrap(ObjectFile); } // ObjectFile Section iterators LLVMSectionIteratorRef LLVMGetSections(LLVMObjectFileRef OF) { OwningBinary<ObjectFile> *OB = unwrap(OF); section_iterator SI = OB->getBinary()->section_begin(); return wrap(new section_iterator(SI)); } void LLVMDisposeSectionIterator(LLVMSectionIteratorRef SI) { delete unwrap(SI); } LLVMBool LLVMIsSectionIteratorAtEnd(LLVMObjectFileRef OF, LLVMSectionIteratorRef SI) { OwningBinary<ObjectFile> *OB = unwrap(OF); return (*unwrap(SI) == OB->getBinary()->section_end()) ? 1 : 0; } void LLVMMoveToNextSection(LLVMSectionIteratorRef SI) { ++(*unwrap(SI)); } void LLVMMoveToContainingSection(LLVMSectionIteratorRef Sect, LLVMSymbolIteratorRef Sym) { if (std::error_code ec = (*unwrap(Sym))->getSection(*unwrap(Sect))) report_fatal_error(ec.message()); } // ObjectFile Symbol iterators LLVMSymbolIteratorRef LLVMGetSymbols(LLVMObjectFileRef OF) { OwningBinary<ObjectFile> *OB = unwrap(OF); symbol_iterator SI = OB->getBinary()->symbol_begin(); return wrap(new symbol_iterator(SI)); } void LLVMDisposeSymbolIterator(LLVMSymbolIteratorRef SI) { delete unwrap(SI); } LLVMBool LLVMIsSymbolIteratorAtEnd(LLVMObjectFileRef OF, LLVMSymbolIteratorRef SI) { OwningBinary<ObjectFile> *OB = unwrap(OF); return (*unwrap(SI) == OB->getBinary()->symbol_end()) ? 
1 : 0; } void LLVMMoveToNextSymbol(LLVMSymbolIteratorRef SI) { ++(*unwrap(SI)); } // SectionRef accessors const char *LLVMGetSectionName(LLVMSectionIteratorRef SI) { StringRef ret; if (std::error_code ec = (*unwrap(SI))->getName(ret)) report_fatal_error(ec.message()); return ret.data(); } uint64_t LLVMGetSectionSize(LLVMSectionIteratorRef SI) { return (*unwrap(SI))->getSize(); } const char *LLVMGetSectionContents(LLVMSectionIteratorRef SI) { StringRef ret; if (std::error_code ec = (*unwrap(SI))->getContents(ret)) report_fatal_error(ec.message()); return ret.data(); } uint64_t LLVMGetSectionAddress(LLVMSectionIteratorRef SI) { return (*unwrap(SI))->getAddress(); } LLVMBool LLVMGetSectionContainsSymbol(LLVMSectionIteratorRef SI, LLVMSymbolIteratorRef Sym) { return (*unwrap(SI))->containsSymbol(**unwrap(Sym)); } // Section Relocation iterators LLVMRelocationIteratorRef LLVMGetRelocations(LLVMSectionIteratorRef Section) { relocation_iterator SI = (*unwrap(Section))->relocation_begin(); return wrap(new relocation_iterator(SI)); } void LLVMDisposeRelocationIterator(LLVMRelocationIteratorRef SI) { delete unwrap(SI); } LLVMBool LLVMIsRelocationIteratorAtEnd(LLVMSectionIteratorRef Section, LLVMRelocationIteratorRef SI) { return (*unwrap(SI) == (*unwrap(Section))->relocation_end()) ? 1 : 0; } void LLVMMoveToNextRelocation(LLVMRelocationIteratorRef SI) { ++(*unwrap(SI)); } // SymbolRef accessors const char *LLVMGetSymbolName(LLVMSymbolIteratorRef SI) { ErrorOr<StringRef> Ret = (*unwrap(SI))->getName(); if (std::error_code EC = Ret.getError()) report_fatal_error(EC.message()); return Ret->data(); } uint64_t LLVMGetSymbolAddress(LLVMSymbolIteratorRef SI) { ErrorOr<uint64_t> Ret = (*unwrap(SI))->getAddress(); if (std::error_code EC = Ret.getError()) report_fatal_error(EC.message()); return *Ret; } uint64_t LLVMGetSymbolSize(LLVMSymbolIteratorRef SI) { return (*unwrap(SI))->getCommonSize(); } // RelocationRef accessors uint64_t LLVMGetRelocationOffset(LLVMRelocationIteratorRef RI) { return (*unwrap(RI))->getOffset(); } LLVMSymbolIteratorRef LLVMGetRelocationSymbol(LLVMRelocationIteratorRef RI) { symbol_iterator ret = (*unwrap(RI))->getSymbol(); return wrap(new symbol_iterator(ret)); } uint64_t LLVMGetRelocationType(LLVMRelocationIteratorRef RI) { return (*unwrap(RI))->getType(); } // NOTE: Caller takes ownership of returned string. const char *LLVMGetRelocationTypeName(LLVMRelocationIteratorRef RI) { SmallVector<char, 0> ret; (*unwrap(RI))->getTypeName(ret); char *str = static_cast<char*>(malloc(ret.size())); std::copy(ret.begin(), ret.end(), str); return str; } // NOTE: Caller takes ownership of returned string. const char *LLVMGetRelocationValueString(LLVMRelocationIteratorRef RI) { return strdup(""); }
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Object/MachOUniversal.cpp
//===- MachOUniversal.cpp - Mach-O universal binary -------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the MachOUniversalBinary class. // //===----------------------------------------------------------------------===// #include "llvm/Object/MachOUniversal.h" #include "llvm/Object/Archive.h" #include "llvm/Object/MachO.h" #include "llvm/Object/ObjectFile.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Host.h" #include "llvm/Support/MemoryBuffer.h" using namespace llvm; using namespace object; template<typename T> static void SwapStruct(T &Value); template<> void SwapStruct(MachO::fat_header &H) { sys::swapByteOrder(H.magic); sys::swapByteOrder(H.nfat_arch); } template<> void SwapStruct(MachO::fat_arch &H) { sys::swapByteOrder(H.cputype); sys::swapByteOrder(H.cpusubtype); sys::swapByteOrder(H.offset); sys::swapByteOrder(H.size); sys::swapByteOrder(H.align); } template<typename T> static T getUniversalBinaryStruct(const char *Ptr) { T Res; memcpy(&Res, Ptr, sizeof(T)); // Universal binary headers have big-endian byte order. if (sys::IsLittleEndianHost) SwapStruct(Res); return Res; } MachOUniversalBinary::ObjectForArch::ObjectForArch( const MachOUniversalBinary *Parent, uint32_t Index) : Parent(Parent), Index(Index) { if (!Parent || Index >= Parent->getNumberOfObjects()) { clear(); } else { // Parse object header. StringRef ParentData = Parent->getData(); const char *HeaderPos = ParentData.begin() + sizeof(MachO::fat_header) + Index * sizeof(MachO::fat_arch); Header = getUniversalBinaryStruct<MachO::fat_arch>(HeaderPos); if (ParentData.size() < Header.offset + Header.size) { clear(); } } } ErrorOr<std::unique_ptr<MachOObjectFile>> MachOUniversalBinary::ObjectForArch::getAsObjectFile() const { if (Parent) { StringRef ParentData = Parent->getData(); StringRef ObjectData = ParentData.substr(Header.offset, Header.size); StringRef ObjectName = Parent->getFileName(); MemoryBufferRef ObjBuffer(ObjectData, ObjectName); return ObjectFile::createMachOObjectFile(ObjBuffer); } return object_error::parse_failed; } ErrorOr<std::unique_ptr<Archive>> MachOUniversalBinary::ObjectForArch::getAsArchive() const { if (!Parent) return object_error::parse_failed; StringRef ParentData = Parent->getData(); StringRef ObjectData = ParentData.substr(Header.offset, Header.size); StringRef ObjectName = Parent->getFileName(); MemoryBufferRef ObjBuffer(ObjectData, ObjectName); return Archive::create(ObjBuffer); } void MachOUniversalBinary::anchor() { } ErrorOr<std::unique_ptr<MachOUniversalBinary>> MachOUniversalBinary::create(MemoryBufferRef Source) { std::error_code EC; std::unique_ptr<MachOUniversalBinary> Ret( new MachOUniversalBinary(Source, EC)); if (EC) return EC; return std::move(Ret); } MachOUniversalBinary::MachOUniversalBinary(MemoryBufferRef Source, std::error_code &ec) : Binary(Binary::ID_MachOUniversalBinary, Source), NumberOfObjects(0) { if (Data.getBufferSize() < sizeof(MachO::fat_header)) { ec = object_error::invalid_file_type; return; } // Check for magic value and sufficient header size. 
StringRef Buf = getData(); MachO::fat_header H = getUniversalBinaryStruct<MachO::fat_header>(Buf.begin()); NumberOfObjects = H.nfat_arch; uint32_t MinSize = sizeof(MachO::fat_header) + sizeof(MachO::fat_arch) * NumberOfObjects; if (H.magic != MachO::FAT_MAGIC || Buf.size() < MinSize) { ec = object_error::parse_failed; return; } ec = std::error_code(); } ErrorOr<std::unique_ptr<MachOObjectFile>> MachOUniversalBinary::getObjectForArch(StringRef ArchName) const { if (Triple(ArchName).getArch() == Triple::ArchType::UnknownArch) return object_error::arch_not_found; for (object_iterator I = begin_objects(), E = end_objects(); I != E; ++I) { if (I->getArchTypeName() == ArchName) return I->getAsObjectFile(); } return object_error::arch_not_found; }
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Object/SymbolSize.cpp
//===- SymbolSize.cpp -----------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "llvm/Object/SymbolSize.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Object/COFF.h" #include "llvm/Object/ELFObjectFile.h" #include "llvm/Object/MachO.h" using namespace llvm; using namespace object; namespace { struct SymEntry { symbol_iterator I; uint64_t Address; unsigned Number; unsigned SectionID; }; } static int __cdecl compareAddress(const SymEntry *A, const SymEntry *B) { if (A->SectionID != B->SectionID) return A->SectionID - B->SectionID; return A->Address - B->Address; } static unsigned getSectionID(const ObjectFile &O, SectionRef Sec) { if (auto *M = dyn_cast<MachOObjectFile>(&O)) return M->getSectionID(Sec); return cast<COFFObjectFile>(O).getSectionID(Sec); } static unsigned getSymbolSectionID(const ObjectFile &O, SymbolRef Sym) { if (auto *M = dyn_cast<MachOObjectFile>(&O)) return M->getSymbolSectionID(Sym); return cast<COFFObjectFile>(O).getSymbolSectionID(Sym); } std::vector<std::pair<SymbolRef, uint64_t>> llvm::object::computeSymbolSizes(const ObjectFile &O) { std::vector<std::pair<SymbolRef, uint64_t>> Ret; if (const auto *E = dyn_cast<ELFObjectFileBase>(&O)) { auto Syms = E->symbols(); if (Syms.begin() == Syms.end()) Syms = E->getDynamicSymbolIterators(); for (ELFSymbolRef Sym : Syms) Ret.push_back({Sym, Sym.getSize()}); return Ret; } // Collect sorted symbol addresses. Include dummy addresses for the end // of each section. std::vector<SymEntry> Addresses; unsigned SymNum = 0; for (symbol_iterator I = O.symbol_begin(), E = O.symbol_end(); I != E; ++I) { SymbolRef Sym = *I; uint64_t Value = Sym.getValue(); Addresses.push_back({I, Value, SymNum, getSymbolSectionID(O, Sym)}); ++SymNum; } for (SectionRef Sec : O.sections()) { uint64_t Address = Sec.getAddress(); uint64_t Size = Sec.getSize(); Addresses.push_back( {O.symbol_end(), Address + Size, 0, getSectionID(O, Sec)}); } array_pod_sort(Addresses.begin(), Addresses.end(), compareAddress); // Compute the size as the gap to the next symbol. for (unsigned I = 0, N = Addresses.size() - 1; I < N; ++I) { auto &P = Addresses[I]; if (P.I == O.symbol_end()) continue; // If multiple symbols have the same address, give them all the same size. unsigned NextI = I + 1; while (NextI < N && Addresses[NextI].Address == P.Address) ++NextI; uint64_t Size = Addresses[NextI].Address - P.Address; P.Address = Size; } // Assign the sorted symbols in the original order. Ret.resize(SymNum); for (SymEntry &P : Addresses) { if (P.I == O.symbol_end()) continue; Ret[P.Number] = {*P.I, P.Address}; } return Ret; }
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Object/RecordStreamer.cpp
//===-- RecordStreamer.cpp - Record asm defined and used symbols ----------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "RecordStreamer.h" #include "llvm/MC/MCSymbol.h" using namespace llvm; void RecordStreamer::markDefined(const MCSymbol &Symbol) { State &S = Symbols[Symbol.getName()]; switch (S) { case DefinedGlobal: case Global: S = DefinedGlobal; break; case NeverSeen: case Defined: case Used: S = Defined; break; } } void RecordStreamer::markGlobal(const MCSymbol &Symbol) { State &S = Symbols[Symbol.getName()]; switch (S) { case DefinedGlobal: case Defined: S = DefinedGlobal; break; case NeverSeen: case Global: case Used: S = Global; break; } } void RecordStreamer::markUsed(const MCSymbol &Symbol) { State &S = Symbols[Symbol.getName()]; switch (S) { case DefinedGlobal: case Defined: case Global: break; case NeverSeen: case Used: S = Used; break; } } void RecordStreamer::visitUsedSymbol(const MCSymbol &Sym) { markUsed(Sym); } RecordStreamer::const_iterator RecordStreamer::begin() { return Symbols.begin(); } RecordStreamer::const_iterator RecordStreamer::end() { return Symbols.end(); } RecordStreamer::RecordStreamer(MCContext &Context) : MCStreamer(Context) {} void RecordStreamer::EmitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI) { MCStreamer::EmitInstruction(Inst, STI); } void RecordStreamer::EmitLabel(MCSymbol *Symbol) { MCStreamer::EmitLabel(Symbol); markDefined(*Symbol); } void RecordStreamer::EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) { markDefined(*Symbol); MCStreamer::EmitAssignment(Symbol, Value); } bool RecordStreamer::EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute) { if (Attribute == MCSA_Global) markGlobal(*Symbol); return true; } void RecordStreamer::EmitZerofill(MCSection *Section, MCSymbol *Symbol, uint64_t Size, unsigned ByteAlignment) { markDefined(*Symbol); } void RecordStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size, unsigned ByteAlignment) { markDefined(*Symbol); }
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Object/CMakeLists.txt
add_llvm_library(LLVMObject Archive.cpp ArchiveWriter.cpp Binary.cpp COFFObjectFile.cpp COFFYAML.cpp ELF.cpp ELFObjectFile.cpp ELFYAML.cpp Error.cpp IRObjectFile.cpp MachOObjectFile.cpp MachOUniversal.cpp Object.cpp ObjectFile.cpp RecordStreamer.cpp SymbolicFile.cpp SymbolSize.cpp ADDITIONAL_HEADER_DIRS ${LLVM_MAIN_INCLUDE_DIR}/llvm/Object DEPENDS intrinsics_gen )
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Object/LLVMBuild.txt
;===- ./lib/Object/LLVMBuild.txt -------------------------------*- Conf -*--===; ; ; The LLVM Compiler Infrastructure ; ; This file is distributed under the University of Illinois Open Source ; License. See LICENSE.TXT for details. ; ;===------------------------------------------------------------------------===; ; ; This is an LLVMBuild description file for the components in this subdirectory. ; ; For more information on the LLVMBuild system, please see: ; ; http://llvm.org/docs/LLVMBuild.html ; ;===------------------------------------------------------------------------===; [component_0] type = Library name = Object parent = Libraries required_libraries = BitReader Core Support ; MC MCParser - HLSL Change
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Object/RecordStreamer.h
//===-- RecordStreamer.h - Record asm defined and used symbols ---*- C++ -*===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #ifndef LLVM_LIB_OBJECT_RECORDSTREAMER_H #define LLVM_LIB_OBJECT_RECORDSTREAMER_H #include "llvm/MC/MCStreamer.h" namespace llvm { class RecordStreamer : public MCStreamer { public: enum State { NeverSeen, Global, Defined, DefinedGlobal, Used }; private: StringMap<State> Symbols; void markDefined(const MCSymbol &Symbol); void markGlobal(const MCSymbol &Symbol); void markUsed(const MCSymbol &Symbol); void visitUsedSymbol(const MCSymbol &Sym) override; public: typedef StringMap<State>::const_iterator const_iterator; const_iterator begin(); const_iterator end(); RecordStreamer(MCContext &Context); void EmitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI) override; void EmitLabel(MCSymbol *Symbol) override; void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) override; bool EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute) override; void EmitZerofill(MCSection *Section, MCSymbol *Symbol, uint64_t Size, unsigned ByteAlignment) override; void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size, unsigned ByteAlignment) override; }; } #endif
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Object/Binary.cpp
//===- Binary.cpp - A generic binary file -----------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Binary class. // //===----------------------------------------------------------------------===// #include "llvm/Object/Binary.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/Path.h" // Include headers for createBinary. #include "llvm/Object/Archive.h" #include "llvm/Object/MachOUniversal.h" #include "llvm/Object/ObjectFile.h" using namespace llvm; using namespace object; Binary::~Binary() {} Binary::Binary(unsigned int Type, MemoryBufferRef Source) : TypeID(Type), Data(Source) {} StringRef Binary::getData() const { return Data.getBuffer(); } StringRef Binary::getFileName() const { return Data.getBufferIdentifier(); } MemoryBufferRef Binary::getMemoryBufferRef() const { return Data; } ErrorOr<std::unique_ptr<Binary>> object::createBinary(MemoryBufferRef Buffer, LLVMContext *Context) { sys::fs::file_magic Type = sys::fs::identify_magic(Buffer.getBuffer()); switch (Type) { case sys::fs::file_magic::archive: return Archive::create(Buffer); case sys::fs::file_magic::elf: case sys::fs::file_magic::elf_relocatable: case sys::fs::file_magic::elf_executable: case sys::fs::file_magic::elf_shared_object: case sys::fs::file_magic::elf_core: case sys::fs::file_magic::macho_object: case sys::fs::file_magic::macho_executable: case sys::fs::file_magic::macho_fixed_virtual_memory_shared_lib: case sys::fs::file_magic::macho_core: case sys::fs::file_magic::macho_preload_executable: case sys::fs::file_magic::macho_dynamically_linked_shared_lib: case sys::fs::file_magic::macho_dynamic_linker: case sys::fs::file_magic::macho_bundle: case sys::fs::file_magic::macho_dynamically_linked_shared_lib_stub: case sys::fs::file_magic::macho_dsym_companion: case sys::fs::file_magic::macho_kext_bundle: case sys::fs::file_magic::coff_object: case sys::fs::file_magic::coff_import_library: case sys::fs::file_magic::pecoff_executable: case sys::fs::file_magic::bitcode: return ObjectFile::createSymbolicFile(Buffer, Type, Context); case sys::fs::file_magic::macho_universal_binary: // return MachOUniversalBinary::create(Buffer); // HLSL Change - remove support for MachO files case sys::fs::file_magic::unknown: case sys::fs::file_magic::windows_resource: // Unrecognized object file format. return object_error::invalid_file_type; } llvm_unreachable("Unexpected Binary File Type"); } ErrorOr<OwningBinary<Binary>> object::createBinary(StringRef Path) { ErrorOr<std::unique_ptr<MemoryBuffer>> FileOrErr = MemoryBuffer::getFileOrSTDIN(Path); if (std::error_code EC = FileOrErr.getError()) return EC; std::unique_ptr<MemoryBuffer> &Buffer = FileOrErr.get(); ErrorOr<std::unique_ptr<Binary>> BinOrErr = createBinary(Buffer->getMemBufferRef()); if (std::error_code EC = BinOrErr.getError()) return EC; std::unique_ptr<Binary> &Bin = BinOrErr.get(); return OwningBinary<Binary>(std::move(Bin), std::move(Buffer)); }
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Object/ObjectFile.cpp
//===- ObjectFile.cpp - File format independent object file -----*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines a file format independent ObjectFile class. // //===----------------------------------------------------------------------===// #include "llvm/Object/COFF.h" #include "llvm/Object/MachO.h" #include "llvm/Object/ObjectFile.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/raw_ostream.h" #include <system_error> using namespace llvm; using namespace object; void ObjectFile::anchor() { } ObjectFile::ObjectFile(unsigned int Type, MemoryBufferRef Source) : SymbolicFile(Type, Source) {} bool SectionRef::containsSymbol(SymbolRef S) const { section_iterator SymSec = getObject()->section_end(); if (S.getSection(SymSec)) return false; return *this == *SymSec; } uint64_t ObjectFile::getSymbolValue(DataRefImpl Ref) const { uint32_t Flags = getSymbolFlags(Ref); if (Flags & SymbolRef::SF_Undefined) return 0; if (Flags & SymbolRef::SF_Common) return getCommonSymbolSize(Ref); return getSymbolValueImpl(Ref); } std::error_code ObjectFile::printSymbolName(raw_ostream &OS, DataRefImpl Symb) const { ErrorOr<StringRef> Name = getSymbolName(Symb); if (std::error_code EC = Name.getError()) return EC; OS << *Name; return std::error_code(); } uint32_t ObjectFile::getSymbolAlignment(DataRefImpl DRI) const { return 0; } section_iterator ObjectFile::getRelocatedSection(DataRefImpl Sec) const { return section_iterator(SectionRef(Sec, this)); } ErrorOr<std::unique_ptr<ObjectFile>> ObjectFile::createObjectFile(MemoryBufferRef Object, sys::fs::file_magic Type) { StringRef Data = Object.getBuffer(); if (Type == sys::fs::file_magic::unknown) Type = sys::fs::identify_magic(Data); switch (Type) { case sys::fs::file_magic::unknown: case sys::fs::file_magic::bitcode: case sys::fs::file_magic::archive: case sys::fs::file_magic::macho_universal_binary: case sys::fs::file_magic::windows_resource: return object_error::invalid_file_type; case sys::fs::file_magic::elf: case sys::fs::file_magic::elf_relocatable: case sys::fs::file_magic::elf_executable: case sys::fs::file_magic::elf_shared_object: case sys::fs::file_magic::elf_core: // return createELFObjectFile(Object); // HLSL Change - remove support for ELF files case sys::fs::file_magic::macho_object: case sys::fs::file_magic::macho_executable: case sys::fs::file_magic::macho_fixed_virtual_memory_shared_lib: case sys::fs::file_magic::macho_core: case sys::fs::file_magic::macho_preload_executable: case sys::fs::file_magic::macho_dynamically_linked_shared_lib: case sys::fs::file_magic::macho_dynamic_linker: case sys::fs::file_magic::macho_bundle: case sys::fs::file_magic::macho_dynamically_linked_shared_lib_stub: case sys::fs::file_magic::macho_dsym_companion: case sys::fs::file_magic::macho_kext_bundle: // return createMachOObjectFile(Object); // HLSL Change - remove support for Mach-O files return object_error::invalid_file_type; // HLSL Change case sys::fs::file_magic::coff_object: case sys::fs::file_magic::coff_import_library: case sys::fs::file_magic::pecoff_executable: return createCOFFObjectFile(Object); } llvm_unreachable("Unexpected Object File Type"); } ErrorOr<OwningBinary<ObjectFile>> ObjectFile::createObjectFile(StringRef ObjectPath) { 
ErrorOr<std::unique_ptr<MemoryBuffer>> FileOrErr = MemoryBuffer::getFile(ObjectPath); if (std::error_code EC = FileOrErr.getError()) return EC; std::unique_ptr<MemoryBuffer> Buffer = std::move(FileOrErr.get()); ErrorOr<std::unique_ptr<ObjectFile>> ObjOrErr = createObjectFile(Buffer->getMemBufferRef()); if (std::error_code EC = ObjOrErr.getError()) return EC; std::unique_ptr<ObjectFile> Obj = std::move(ObjOrErr.get()); return OwningBinary<ObjectFile>(std::move(Obj), std::move(Buffer)); }
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Object/ELFYAML.cpp
//===- ELFYAML.cpp - ELF YAMLIO implementation ----------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines classes for handling the YAML representation of ELF. // //===----------------------------------------------------------------------===// #include "llvm/Object/ELFYAML.h" #include "llvm/Support/Casting.h" #include "llvm/Support/MipsABIFlags.h" namespace llvm { ELFYAML::Section::~Section() {} namespace yaml { void ScalarEnumerationTraits<ELFYAML::ELF_ET>::enumeration(IO &IO, ELFYAML::ELF_ET &Value) { #define ECase(X) IO.enumCase(Value, #X, ELF::X); ECase(ET_NONE) ECase(ET_REL) ECase(ET_EXEC) ECase(ET_DYN) ECase(ET_CORE) #undef ECase IO.enumFallback<Hex16>(Value); } void ScalarEnumerationTraits<ELFYAML::ELF_EM>::enumeration(IO &IO, ELFYAML::ELF_EM &Value) { #define ECase(X) IO.enumCase(Value, #X, ELF::X); ECase(EM_NONE) ECase(EM_M32) ECase(EM_SPARC) ECase(EM_386) ECase(EM_68K) ECase(EM_88K) ECase(EM_IAMCU) ECase(EM_860) ECase(EM_MIPS) ECase(EM_S370) ECase(EM_MIPS_RS3_LE) ECase(EM_PARISC) ECase(EM_VPP500) ECase(EM_SPARC32PLUS) ECase(EM_960) ECase(EM_PPC) ECase(EM_PPC64) ECase(EM_S390) ECase(EM_SPU) ECase(EM_V800) ECase(EM_FR20) ECase(EM_RH32) ECase(EM_RCE) ECase(EM_ARM) ECase(EM_ALPHA) ECase(EM_SH) ECase(EM_SPARCV9) ECase(EM_TRICORE) ECase(EM_ARC) ECase(EM_H8_300) ECase(EM_H8_300H) ECase(EM_H8S) ECase(EM_H8_500) ECase(EM_IA_64) ECase(EM_MIPS_X) ECase(EM_COLDFIRE) ECase(EM_68HC12) ECase(EM_MMA) ECase(EM_PCP) ECase(EM_NCPU) ECase(EM_NDR1) ECase(EM_STARCORE) ECase(EM_ME16) ECase(EM_ST100) ECase(EM_TINYJ) ECase(EM_X86_64) ECase(EM_PDSP) ECase(EM_PDP10) ECase(EM_PDP11) ECase(EM_FX66) ECase(EM_ST9PLUS) ECase(EM_ST7) ECase(EM_68HC16) ECase(EM_68HC11) ECase(EM_68HC08) ECase(EM_68HC05) ECase(EM_SVX) ECase(EM_ST19) ECase(EM_VAX) ECase(EM_CRIS) ECase(EM_JAVELIN) ECase(EM_FIREPATH) ECase(EM_ZSP) ECase(EM_MMIX) ECase(EM_HUANY) ECase(EM_PRISM) ECase(EM_AVR) ECase(EM_FR30) ECase(EM_D10V) ECase(EM_D30V) ECase(EM_V850) ECase(EM_M32R) ECase(EM_MN10300) ECase(EM_MN10200) ECase(EM_PJ) ECase(EM_OPENRISC) ECase(EM_ARC_COMPACT) ECase(EM_XTENSA) ECase(EM_VIDEOCORE) ECase(EM_TMM_GPP) ECase(EM_NS32K) ECase(EM_TPC) ECase(EM_SNP1K) ECase(EM_ST200) ECase(EM_IP2K) ECase(EM_MAX) ECase(EM_CR) ECase(EM_F2MC16) ECase(EM_MSP430) ECase(EM_BLACKFIN) ECase(EM_SE_C33) ECase(EM_SEP) ECase(EM_ARCA) ECase(EM_UNICORE) ECase(EM_EXCESS) ECase(EM_DXP) ECase(EM_ALTERA_NIOS2) ECase(EM_CRX) ECase(EM_XGATE) ECase(EM_C166) ECase(EM_M16C) ECase(EM_DSPIC30F) ECase(EM_CE) ECase(EM_M32C) ECase(EM_TSK3000) ECase(EM_RS08) ECase(EM_SHARC) ECase(EM_ECOG2) ECase(EM_SCORE7) ECase(EM_DSP24) ECase(EM_VIDEOCORE3) ECase(EM_LATTICEMICO32) ECase(EM_SE_C17) ECase(EM_TI_C6000) ECase(EM_TI_C2000) ECase(EM_TI_C5500) ECase(EM_MMDSP_PLUS) ECase(EM_CYPRESS_M8C) ECase(EM_R32C) ECase(EM_TRIMEDIA) ECase(EM_HEXAGON) ECase(EM_8051) ECase(EM_STXP7X) ECase(EM_NDS32) ECase(EM_ECOG1) ECase(EM_ECOG1X) ECase(EM_MAXQ30) ECase(EM_XIMO16) ECase(EM_MANIK) ECase(EM_CRAYNV2) ECase(EM_RX) ECase(EM_METAG) ECase(EM_MCST_ELBRUS) ECase(EM_ECOG16) ECase(EM_CR16) ECase(EM_ETPU) ECase(EM_SLE9X) ECase(EM_L10M) ECase(EM_K10M) ECase(EM_AARCH64) ECase(EM_AVR32) ECase(EM_STM8) ECase(EM_TILE64) ECase(EM_TILEPRO) ECase(EM_CUDA) ECase(EM_TILEGX) ECase(EM_CLOUDSHIELD) ECase(EM_COREA_1ST) ECase(EM_COREA_2ND) ECase(EM_ARC_COMPACT2) ECase(EM_OPEN8) ECase(EM_RL78) 
ECase(EM_VIDEOCORE5) ECase(EM_78KOR) ECase(EM_56800EX) #undef ECase } void ScalarEnumerationTraits<ELFYAML::ELF_ELFCLASS>::enumeration( IO &IO, ELFYAML::ELF_ELFCLASS &Value) { #define ECase(X) IO.enumCase(Value, #X, ELF::X); // Since the semantics of ELFCLASSNONE is "invalid", just don't accept it // here. ECase(ELFCLASS32) ECase(ELFCLASS64) #undef ECase } void ScalarEnumerationTraits<ELFYAML::ELF_ELFDATA>::enumeration( IO &IO, ELFYAML::ELF_ELFDATA &Value) { #define ECase(X) IO.enumCase(Value, #X, ELF::X); // Since the semantics of ELFDATANONE is "invalid", just don't accept it // here. ECase(ELFDATA2LSB) ECase(ELFDATA2MSB) #undef ECase } void ScalarEnumerationTraits<ELFYAML::ELF_ELFOSABI>::enumeration( IO &IO, ELFYAML::ELF_ELFOSABI &Value) { #define ECase(X) IO.enumCase(Value, #X, ELF::X); ECase(ELFOSABI_NONE) ECase(ELFOSABI_HPUX) ECase(ELFOSABI_NETBSD) ECase(ELFOSABI_GNU) ECase(ELFOSABI_GNU) ECase(ELFOSABI_HURD) ECase(ELFOSABI_SOLARIS) ECase(ELFOSABI_AIX) ECase(ELFOSABI_IRIX) ECase(ELFOSABI_FREEBSD) ECase(ELFOSABI_TRU64) ECase(ELFOSABI_MODESTO) ECase(ELFOSABI_OPENBSD) ECase(ELFOSABI_OPENVMS) ECase(ELFOSABI_NSK) ECase(ELFOSABI_AROS) ECase(ELFOSABI_FENIXOS) ECase(ELFOSABI_CLOUDABI) ECase(ELFOSABI_C6000_ELFABI) ECase(ELFOSABI_C6000_LINUX) ECase(ELFOSABI_ARM) ECase(ELFOSABI_STANDALONE) #undef ECase } void ScalarBitSetTraits<ELFYAML::ELF_EF>::bitset(IO &IO, ELFYAML::ELF_EF &Value) { const auto *Object = static_cast<ELFYAML::Object *>(IO.getContext()); assert(Object && "The IO context is not initialized"); #define BCase(X) IO.bitSetCase(Value, #X, ELF::X); #define BCaseMask(X, M) IO.maskedBitSetCase(Value, #X, ELF::X, ELF::M); switch (Object->Header.Machine) { case ELF::EM_ARM: BCase(EF_ARM_SOFT_FLOAT) BCase(EF_ARM_VFP_FLOAT) BCaseMask(EF_ARM_EABI_UNKNOWN, EF_ARM_EABIMASK) BCaseMask(EF_ARM_EABI_VER1, EF_ARM_EABIMASK) BCaseMask(EF_ARM_EABI_VER2, EF_ARM_EABIMASK) BCaseMask(EF_ARM_EABI_VER3, EF_ARM_EABIMASK) BCaseMask(EF_ARM_EABI_VER4, EF_ARM_EABIMASK) BCaseMask(EF_ARM_EABI_VER5, EF_ARM_EABIMASK) break; case ELF::EM_MIPS: BCase(EF_MIPS_NOREORDER) BCase(EF_MIPS_PIC) BCase(EF_MIPS_CPIC) BCase(EF_MIPS_ABI2) BCase(EF_MIPS_32BITMODE) BCase(EF_MIPS_FP64) BCase(EF_MIPS_NAN2008) BCase(EF_MIPS_MICROMIPS) BCase(EF_MIPS_ARCH_ASE_M16) BCase(EF_MIPS_ARCH_ASE_MDMX) BCaseMask(EF_MIPS_ABI_O32, EF_MIPS_ABI) BCaseMask(EF_MIPS_ABI_O64, EF_MIPS_ABI) BCaseMask(EF_MIPS_ABI_EABI32, EF_MIPS_ABI) BCaseMask(EF_MIPS_ABI_EABI64, EF_MIPS_ABI) BCaseMask(EF_MIPS_MACH_3900, EF_MIPS_MACH) BCaseMask(EF_MIPS_MACH_4010, EF_MIPS_MACH) BCaseMask(EF_MIPS_MACH_4100, EF_MIPS_MACH) BCaseMask(EF_MIPS_MACH_4650, EF_MIPS_MACH) BCaseMask(EF_MIPS_MACH_4120, EF_MIPS_MACH) BCaseMask(EF_MIPS_MACH_4111, EF_MIPS_MACH) BCaseMask(EF_MIPS_MACH_SB1, EF_MIPS_MACH) BCaseMask(EF_MIPS_MACH_OCTEON, EF_MIPS_MACH) BCaseMask(EF_MIPS_MACH_XLR, EF_MIPS_MACH) BCaseMask(EF_MIPS_MACH_OCTEON2, EF_MIPS_MACH) BCaseMask(EF_MIPS_MACH_OCTEON3, EF_MIPS_MACH) BCaseMask(EF_MIPS_MACH_5400, EF_MIPS_MACH) BCaseMask(EF_MIPS_MACH_5900, EF_MIPS_MACH) BCaseMask(EF_MIPS_MACH_5500, EF_MIPS_MACH) BCaseMask(EF_MIPS_MACH_9000, EF_MIPS_MACH) BCaseMask(EF_MIPS_MACH_LS2E, EF_MIPS_MACH) BCaseMask(EF_MIPS_MACH_LS2F, EF_MIPS_MACH) BCaseMask(EF_MIPS_MACH_LS3A, EF_MIPS_MACH) BCaseMask(EF_MIPS_ARCH_1, EF_MIPS_ARCH) BCaseMask(EF_MIPS_ARCH_2, EF_MIPS_ARCH) BCaseMask(EF_MIPS_ARCH_3, EF_MIPS_ARCH) BCaseMask(EF_MIPS_ARCH_4, EF_MIPS_ARCH) BCaseMask(EF_MIPS_ARCH_5, EF_MIPS_ARCH) BCaseMask(EF_MIPS_ARCH_32, EF_MIPS_ARCH) BCaseMask(EF_MIPS_ARCH_64, EF_MIPS_ARCH) BCaseMask(EF_MIPS_ARCH_32R2, 
EF_MIPS_ARCH) BCaseMask(EF_MIPS_ARCH_64R2, EF_MIPS_ARCH) BCaseMask(EF_MIPS_ARCH_32R6, EF_MIPS_ARCH) BCaseMask(EF_MIPS_ARCH_64R6, EF_MIPS_ARCH) break; case ELF::EM_HEXAGON: BCase(EF_HEXAGON_MACH_V2) BCase(EF_HEXAGON_MACH_V3) BCase(EF_HEXAGON_MACH_V4) BCase(EF_HEXAGON_MACH_V5) BCase(EF_HEXAGON_ISA_V2) BCase(EF_HEXAGON_ISA_V3) BCase(EF_HEXAGON_ISA_V4) BCase(EF_HEXAGON_ISA_V5) break; default: llvm_unreachable("Unsupported architecture"); } #undef BCase #undef BCaseMask } void ScalarEnumerationTraits<ELFYAML::ELF_SHT>::enumeration( IO &IO, ELFYAML::ELF_SHT &Value) { const auto *Object = static_cast<ELFYAML::Object *>(IO.getContext()); assert(Object && "The IO context is not initialized"); #define ECase(X) IO.enumCase(Value, #X, ELF::X); ECase(SHT_NULL) ECase(SHT_PROGBITS) // No SHT_SYMTAB. Use the top-level `Symbols` key instead. // FIXME: Issue a diagnostic with this information. ECase(SHT_STRTAB) ECase(SHT_RELA) ECase(SHT_HASH) ECase(SHT_DYNAMIC) ECase(SHT_NOTE) ECase(SHT_NOBITS) ECase(SHT_REL) ECase(SHT_SHLIB) ECase(SHT_DYNSYM) ECase(SHT_INIT_ARRAY) ECase(SHT_FINI_ARRAY) ECase(SHT_PREINIT_ARRAY) ECase(SHT_GROUP) ECase(SHT_SYMTAB_SHNDX) ECase(SHT_LOOS) ECase(SHT_GNU_ATTRIBUTES) ECase(SHT_GNU_HASH) ECase(SHT_GNU_verdef) ECase(SHT_GNU_verneed) ECase(SHT_GNU_versym) ECase(SHT_HIOS) ECase(SHT_LOPROC) switch (Object->Header.Machine) { case ELF::EM_ARM: ECase(SHT_ARM_EXIDX) ECase(SHT_ARM_PREEMPTMAP) ECase(SHT_ARM_ATTRIBUTES) ECase(SHT_ARM_DEBUGOVERLAY) ECase(SHT_ARM_OVERLAYSECTION) break; case ELF::EM_HEXAGON: ECase(SHT_HEX_ORDERED) break; case ELF::EM_X86_64: ECase(SHT_X86_64_UNWIND) break; case ELF::EM_MIPS: ECase(SHT_MIPS_REGINFO) ECase(SHT_MIPS_OPTIONS) ECase(SHT_MIPS_ABIFLAGS) break; default: // Nothing to do. break; } #undef ECase } void ScalarBitSetTraits<ELFYAML::ELF_SHF>::bitset(IO &IO, ELFYAML::ELF_SHF &Value) { #define BCase(X) IO.bitSetCase(Value, #X, ELF::X); BCase(SHF_WRITE) BCase(SHF_ALLOC) BCase(SHF_EXCLUDE) BCase(SHF_EXECINSTR) BCase(SHF_MERGE) BCase(SHF_STRINGS) BCase(SHF_INFO_LINK) BCase(SHF_LINK_ORDER) BCase(SHF_OS_NONCONFORMING) BCase(SHF_GROUP) BCase(SHF_TLS) #undef BCase } void ScalarEnumerationTraits<ELFYAML::ELF_STT>::enumeration( IO &IO, ELFYAML::ELF_STT &Value) { #define ECase(X) IO.enumCase(Value, #X, ELF::X); ECase(STT_NOTYPE) ECase(STT_OBJECT) ECase(STT_FUNC) ECase(STT_SECTION) ECase(STT_FILE) ECase(STT_COMMON) ECase(STT_TLS) ECase(STT_GNU_IFUNC) #undef ECase } void ScalarEnumerationTraits<ELFYAML::ELF_STV>::enumeration( IO &IO, ELFYAML::ELF_STV &Value) { #define ECase(X) IO.enumCase(Value, #X, ELF::X); ECase(STV_DEFAULT) ECase(STV_INTERNAL) ECase(STV_HIDDEN) ECase(STV_PROTECTED) #undef ECase } void ScalarBitSetTraits<ELFYAML::ELF_STO>::bitset(IO &IO, ELFYAML::ELF_STO &Value) { const auto *Object = static_cast<ELFYAML::Object *>(IO.getContext()); assert(Object && "The IO context is not initialized"); #define BCase(X) IO.bitSetCase(Value, #X, ELF::X); switch (Object->Header.Machine) { case ELF::EM_MIPS: BCase(STO_MIPS_OPTIONAL) BCase(STO_MIPS_PLT) BCase(STO_MIPS_PIC) BCase(STO_MIPS_MICROMIPS) break; default: break; // Nothing to do } #undef BCase #undef BCaseMask } void ScalarEnumerationTraits<ELFYAML::ELF_RSS>::enumeration( IO &IO, ELFYAML::ELF_RSS &Value) { #define ECase(X) IO.enumCase(Value, #X, ELF::X); ECase(RSS_UNDEF) ECase(RSS_GP) ECase(RSS_GP0) ECase(RSS_LOC) #undef ECase } void ScalarEnumerationTraits<ELFYAML::ELF_REL>::enumeration( IO &IO, ELFYAML::ELF_REL &Value) { const auto *Object = static_cast<ELFYAML::Object *>(IO.getContext()); assert(Object && "The IO 
context is not initialized"); #define ELF_RELOC(X, Y) IO.enumCase(Value, #X, ELF::X); switch (Object->Header.Machine) { case ELF::EM_X86_64: #include "llvm/Support/ELFRelocs/x86_64.def" break; case ELF::EM_MIPS: #include "llvm/Support/ELFRelocs/Mips.def" break; case ELF::EM_HEXAGON: #include "llvm/Support/ELFRelocs/Hexagon.def" break; case ELF::EM_386: #include "llvm/Support/ELFRelocs/i386.def" break; case ELF::EM_AARCH64: #include "llvm/Support/ELFRelocs/AArch64.def" break; case ELF::EM_ARM: #include "llvm/Support/ELFRelocs/ARM.def" break; default: llvm_unreachable("Unsupported architecture"); } #undef ELF_RELOC } void ScalarEnumerationTraits<ELFYAML::MIPS_AFL_REG>::enumeration( IO &IO, ELFYAML::MIPS_AFL_REG &Value) { #define ECase(X) IO.enumCase(Value, #X, Mips::AFL_##X); ECase(REG_NONE) ECase(REG_32) ECase(REG_64) ECase(REG_128) #undef ECase } void ScalarEnumerationTraits<ELFYAML::MIPS_ABI_FP>::enumeration( IO &IO, ELFYAML::MIPS_ABI_FP &Value) { #define ECase(X) IO.enumCase(Value, #X, Mips::Val_GNU_MIPS_ABI_##X); ECase(FP_ANY) ECase(FP_DOUBLE) ECase(FP_SINGLE) ECase(FP_SOFT) ECase(FP_OLD_64) ECase(FP_XX) ECase(FP_64) ECase(FP_64A) #undef ECase } void ScalarEnumerationTraits<ELFYAML::MIPS_AFL_EXT>::enumeration( IO &IO, ELFYAML::MIPS_AFL_EXT &Value) { #define ECase(X) IO.enumCase(Value, #X, Mips::AFL_##X); ECase(EXT_NONE) ECase(EXT_XLR) ECase(EXT_OCTEON2) ECase(EXT_OCTEONP) ECase(EXT_LOONGSON_3A) ECase(EXT_OCTEON) ECase(EXT_5900) ECase(EXT_4650) ECase(EXT_4010) ECase(EXT_4100) ECase(EXT_3900) ECase(EXT_10000) ECase(EXT_SB1) ECase(EXT_4111) ECase(EXT_4120) ECase(EXT_5400) ECase(EXT_5500) ECase(EXT_LOONGSON_2E) ECase(EXT_LOONGSON_2F) ECase(EXT_OCTEON3) #undef ECase } void ScalarEnumerationTraits<ELFYAML::MIPS_ISA>::enumeration( IO &IO, ELFYAML::MIPS_ISA &Value) { IO.enumCase(Value, "MIPS1", 1); IO.enumCase(Value, "MIPS2", 2); IO.enumCase(Value, "MIPS3", 3); IO.enumCase(Value, "MIPS4", 4); IO.enumCase(Value, "MIPS5", 5); IO.enumCase(Value, "MIPS32", 32); IO.enumCase(Value, "MIPS64", 64); } void ScalarBitSetTraits<ELFYAML::MIPS_AFL_ASE>::bitset( IO &IO, ELFYAML::MIPS_AFL_ASE &Value) { #define BCase(X) IO.bitSetCase(Value, #X, Mips::AFL_ASE_##X); BCase(DSP) BCase(DSPR2) BCase(EVA) BCase(MCU) BCase(MDMX) BCase(MIPS3D) BCase(MT) BCase(SMARTMIPS) BCase(VIRT) BCase(MSA) BCase(MIPS16) BCase(MICROMIPS) BCase(XPA) #undef BCase } void ScalarBitSetTraits<ELFYAML::MIPS_AFL_FLAGS1>::bitset( IO &IO, ELFYAML::MIPS_AFL_FLAGS1 &Value) { #define BCase(X) IO.bitSetCase(Value, #X, Mips::AFL_FLAGS1_##X); BCase(ODDSPREG) #undef BCase } void MappingTraits<ELFYAML::FileHeader>::mapping(IO &IO, ELFYAML::FileHeader &FileHdr) { IO.mapRequired("Class", FileHdr.Class); IO.mapRequired("Data", FileHdr.Data); IO.mapOptional("OSABI", FileHdr.OSABI, ELFYAML::ELF_ELFOSABI(0)); IO.mapRequired("Type", FileHdr.Type); IO.mapRequired("Machine", FileHdr.Machine); IO.mapOptional("Flags", FileHdr.Flags, ELFYAML::ELF_EF(0)); IO.mapOptional("Entry", FileHdr.Entry, Hex64(0)); } namespace { struct NormalizedOther { NormalizedOther(IO &) : Visibility(ELFYAML::ELF_STV(0)), Other(ELFYAML::ELF_STO(0)) {} NormalizedOther(IO &, uint8_t Original) : Visibility(Original & 0x3), Other(Original & ~0x3) {} uint8_t denormalize(IO &) { return Visibility | Other; } ELFYAML::ELF_STV Visibility; ELFYAML::ELF_STO Other; }; } void MappingTraits<ELFYAML::Symbol>::mapping(IO &IO, ELFYAML::Symbol &Symbol) { IO.mapOptional("Name", Symbol.Name, StringRef()); IO.mapOptional("Type", Symbol.Type, ELFYAML::ELF_STT(0)); IO.mapOptional("Section", Symbol.Section, 
StringRef()); IO.mapOptional("Value", Symbol.Value, Hex64(0)); IO.mapOptional("Size", Symbol.Size, Hex64(0)); MappingNormalization<NormalizedOther, uint8_t> Keys(IO, Symbol.Other); IO.mapOptional("Visibility", Keys->Visibility, ELFYAML::ELF_STV(0)); IO.mapOptional("Other", Keys->Other, ELFYAML::ELF_STO(0)); } void MappingTraits<ELFYAML::LocalGlobalWeakSymbols>::mapping( IO &IO, ELFYAML::LocalGlobalWeakSymbols &Symbols) { IO.mapOptional("Local", Symbols.Local); IO.mapOptional("Global", Symbols.Global); IO.mapOptional("Weak", Symbols.Weak); } static void commonSectionMapping(IO &IO, ELFYAML::Section &Section) { IO.mapOptional("Name", Section.Name, StringRef()); IO.mapRequired("Type", Section.Type); IO.mapOptional("Flags", Section.Flags, ELFYAML::ELF_SHF(0)); IO.mapOptional("Address", Section.Address, Hex64(0)); IO.mapOptional("Link", Section.Link, StringRef()); IO.mapOptional("AddressAlign", Section.AddressAlign, Hex64(0)); IO.mapOptional("Info", Section.Info, StringRef()); } static void sectionMapping(IO &IO, ELFYAML::RawContentSection &Section) { commonSectionMapping(IO, Section); IO.mapOptional("Content", Section.Content); IO.mapOptional("Size", Section.Size, Hex64(Section.Content.binary_size())); } static void sectionMapping(IO &IO, ELFYAML::NoBitsSection &Section) { commonSectionMapping(IO, Section); IO.mapOptional("Size", Section.Size, Hex64(0)); } static void sectionMapping(IO &IO, ELFYAML::RelocationSection &Section) { commonSectionMapping(IO, Section); IO.mapOptional("Relocations", Section.Relocations); } static void groupSectionMapping(IO &IO, ELFYAML::Group &group) { commonSectionMapping(IO, group); IO.mapRequired("Members", group.Members); } void MappingTraits<ELFYAML::SectionOrType>::mapping( IO &IO, ELFYAML::SectionOrType &sectionOrType) { IO.mapRequired("SectionOrType", sectionOrType.sectionNameOrType); } static void sectionMapping(IO &IO, ELFYAML::MipsABIFlags &Section) { commonSectionMapping(IO, Section); IO.mapOptional("Version", Section.Version, Hex16(0)); IO.mapRequired("ISA", Section.ISALevel); IO.mapOptional("ISARevision", Section.ISARevision, Hex8(0)); IO.mapOptional("ISAExtension", Section.ISAExtension, ELFYAML::MIPS_AFL_EXT(Mips::AFL_EXT_NONE)); IO.mapOptional("ASEs", Section.ASEs, ELFYAML::MIPS_AFL_ASE(0)); IO.mapOptional("FpABI", Section.FpABI, ELFYAML::MIPS_ABI_FP(Mips::Val_GNU_MIPS_ABI_FP_ANY)); IO.mapOptional("GPRSize", Section.GPRSize, ELFYAML::MIPS_AFL_REG(Mips::AFL_REG_NONE)); IO.mapOptional("CPR1Size", Section.CPR1Size, ELFYAML::MIPS_AFL_REG(Mips::AFL_REG_NONE)); IO.mapOptional("CPR2Size", Section.CPR2Size, ELFYAML::MIPS_AFL_REG(Mips::AFL_REG_NONE)); IO.mapOptional("Flags1", Section.Flags1, ELFYAML::MIPS_AFL_FLAGS1(0)); IO.mapOptional("Flags2", Section.Flags2, Hex32(0)); } void MappingTraits<std::unique_ptr<ELFYAML::Section>>::mapping( IO &IO, std::unique_ptr<ELFYAML::Section> &Section) { ELFYAML::ELF_SHT sectionType; if (IO.outputting()) sectionType = Section->Type; else IO.mapRequired("Type", sectionType); switch (sectionType) { case ELF::SHT_REL: case ELF::SHT_RELA: if (!IO.outputting()) Section.reset(new ELFYAML::RelocationSection()); sectionMapping(IO, *cast<ELFYAML::RelocationSection>(Section.get())); break; case ELF::SHT_GROUP: if (!IO.outputting()) Section.reset(new ELFYAML::Group()); groupSectionMapping(IO, *cast<ELFYAML::Group>(Section.get())); break; case ELF::SHT_NOBITS: if (!IO.outputting()) Section.reset(new ELFYAML::NoBitsSection()); sectionMapping(IO, *cast<ELFYAML::NoBitsSection>(Section.get())); break; case ELF::SHT_MIPS_ABIFLAGS: if 
(!IO.outputting()) Section.reset(new ELFYAML::MipsABIFlags()); sectionMapping(IO, *cast<ELFYAML::MipsABIFlags>(Section.get())); break; default: if (!IO.outputting()) Section.reset(new ELFYAML::RawContentSection()); sectionMapping(IO, *cast<ELFYAML::RawContentSection>(Section.get())); } } StringRef MappingTraits<std::unique_ptr<ELFYAML::Section>>::validate( IO &io, std::unique_ptr<ELFYAML::Section> &Section) { const auto *RawSection = dyn_cast<ELFYAML::RawContentSection>(Section.get()); if (!RawSection || RawSection->Size >= RawSection->Content.binary_size()) return StringRef(); return "Section size must be greater or equal to the content size"; } namespace { struct NormalizedMips64RelType { NormalizedMips64RelType(IO &) : Type(ELFYAML::ELF_REL(ELF::R_MIPS_NONE)), Type2(ELFYAML::ELF_REL(ELF::R_MIPS_NONE)), Type3(ELFYAML::ELF_REL(ELF::R_MIPS_NONE)), SpecSym(ELFYAML::ELF_REL(ELF::RSS_UNDEF)) {} NormalizedMips64RelType(IO &, ELFYAML::ELF_REL Original) : Type(Original & 0xFF), Type2(Original >> 8 & 0xFF), Type3(Original >> 16 & 0xFF), SpecSym(Original >> 24 & 0xFF) {} ELFYAML::ELF_REL denormalize(IO &) { ELFYAML::ELF_REL Res = Type | Type2 << 8 | Type3 << 16 | SpecSym << 24; return Res; } ELFYAML::ELF_REL Type; ELFYAML::ELF_REL Type2; ELFYAML::ELF_REL Type3; ELFYAML::ELF_RSS SpecSym; }; } void MappingTraits<ELFYAML::Relocation>::mapping(IO &IO, ELFYAML::Relocation &Rel) { const auto *Object = static_cast<ELFYAML::Object *>(IO.getContext()); assert(Object && "The IO context is not initialized"); IO.mapRequired("Offset", Rel.Offset); IO.mapRequired("Symbol", Rel.Symbol); if (Object->Header.Machine == ELFYAML::ELF_EM(ELF::EM_MIPS) && Object->Header.Class == ELFYAML::ELF_ELFCLASS(ELF::ELFCLASS64)) { MappingNormalization<NormalizedMips64RelType, ELFYAML::ELF_REL> Key( IO, Rel.Type); IO.mapRequired("Type", Key->Type); IO.mapOptional("Type2", Key->Type2, ELFYAML::ELF_REL(ELF::R_MIPS_NONE)); IO.mapOptional("Type3", Key->Type3, ELFYAML::ELF_REL(ELF::R_MIPS_NONE)); IO.mapOptional("SpecSym", Key->SpecSym, ELFYAML::ELF_RSS(ELF::RSS_UNDEF)); } else IO.mapRequired("Type", Rel.Type); IO.mapOptional("Addend", Rel.Addend, (int64_t)0); } void MappingTraits<ELFYAML::Object>::mapping(IO &IO, ELFYAML::Object &Object) { assert(!IO.getContext() && "The IO context is initialized already"); IO.setContext(&Object); IO.mapRequired("FileHeader", Object.Header); IO.mapOptional("Sections", Object.Sections); IO.mapOptional("Symbols", Object.Symbols); IO.setContext(nullptr); } LLVM_YAML_STRONG_TYPEDEF(uint8_t, MIPS_AFL_REG) LLVM_YAML_STRONG_TYPEDEF(uint8_t, MIPS_ABI_FP) LLVM_YAML_STRONG_TYPEDEF(uint32_t, MIPS_AFL_EXT) LLVM_YAML_STRONG_TYPEDEF(uint32_t, MIPS_AFL_ASE) LLVM_YAML_STRONG_TYPEDEF(uint32_t, MIPS_AFL_FLAGS1) } // end namespace yaml } // end namespace llvm
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Object/SymbolicFile.cpp
//===- SymbolicFile.cpp - Interface that only provides symbols --*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines a file format independent SymbolicFile class. // //===----------------------------------------------------------------------===// #include "llvm/Object/IRObjectFile.h" #include "llvm/Object/ObjectFile.h" #include "llvm/Object/SymbolicFile.h" #include "llvm/Support/MemoryBuffer.h" using namespace llvm; using namespace object; SymbolicFile::SymbolicFile(unsigned int Type, MemoryBufferRef Source) : Binary(Type, Source) {} SymbolicFile::~SymbolicFile() {} ErrorOr<std::unique_ptr<SymbolicFile>> SymbolicFile::createSymbolicFile( MemoryBufferRef Object, sys::fs::file_magic Type, LLVMContext *Context) { StringRef Data = Object.getBuffer(); if (Type == sys::fs::file_magic::unknown) Type = sys::fs::identify_magic(Data); switch (Type) { case sys::fs::file_magic::bitcode: if (Context) return IRObjectFile::create(Object, *Context); // Fallthrough case sys::fs::file_magic::unknown: case sys::fs::file_magic::archive: case sys::fs::file_magic::macho_universal_binary: case sys::fs::file_magic::windows_resource: return object_error::invalid_file_type; case sys::fs::file_magic::elf: case sys::fs::file_magic::elf_executable: case sys::fs::file_magic::elf_shared_object: case sys::fs::file_magic::elf_core: case sys::fs::file_magic::macho_executable: case sys::fs::file_magic::macho_fixed_virtual_memory_shared_lib: case sys::fs::file_magic::macho_core: case sys::fs::file_magic::macho_preload_executable: case sys::fs::file_magic::macho_dynamically_linked_shared_lib: case sys::fs::file_magic::macho_dynamic_linker: case sys::fs::file_magic::macho_bundle: case sys::fs::file_magic::macho_dynamically_linked_shared_lib_stub: case sys::fs::file_magic::macho_dsym_companion: case sys::fs::file_magic::macho_kext_bundle: case sys::fs::file_magic::coff_import_library: case sys::fs::file_magic::pecoff_executable: return ObjectFile::createObjectFile(Object, Type); case sys::fs::file_magic::elf_relocatable: case sys::fs::file_magic::macho_object: case sys::fs::file_magic::coff_object: { ErrorOr<std::unique_ptr<ObjectFile>> Obj = ObjectFile::createObjectFile(Object, Type); if (!Obj || !Context) return std::move(Obj); ErrorOr<MemoryBufferRef> BCData = IRObjectFile::findBitcodeInObject(*Obj->get()); if (!BCData) return std::move(Obj); return IRObjectFile::create( MemoryBufferRef(BCData->getBuffer(), Object.getBufferIdentifier()), *Context); } } llvm_unreachable("Unexpected Binary File Type"); }
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Object/COFFYAML.cpp
//===- COFFYAML.cpp - COFF YAMLIO implementation --------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines classes for handling the YAML representation of COFF. // //===----------------------------------------------------------------------===// #include "llvm/Object/COFFYAML.h" #define ECase(X) IO.enumCase(Value, #X, COFF::X); namespace llvm { namespace COFFYAML { Section::Section() { memset(&Header, 0, sizeof(COFF::section)); } Symbol::Symbol() { memset(&Header, 0, sizeof(COFF::symbol)); } Object::Object() { memset(&Header, 0, sizeof(COFF::header)); } } namespace yaml { void ScalarEnumerationTraits<COFFYAML::COMDATType>::enumeration( IO &IO, COFFYAML::COMDATType &Value) { IO.enumCase(Value, "0", 0); ECase(IMAGE_COMDAT_SELECT_NODUPLICATES); ECase(IMAGE_COMDAT_SELECT_ANY); ECase(IMAGE_COMDAT_SELECT_SAME_SIZE); ECase(IMAGE_COMDAT_SELECT_EXACT_MATCH); ECase(IMAGE_COMDAT_SELECT_ASSOCIATIVE); ECase(IMAGE_COMDAT_SELECT_LARGEST); ECase(IMAGE_COMDAT_SELECT_NEWEST); } void ScalarEnumerationTraits<COFFYAML::WeakExternalCharacteristics>::enumeration( IO &IO, COFFYAML::WeakExternalCharacteristics &Value) { IO.enumCase(Value, "0", 0); ECase(IMAGE_WEAK_EXTERN_SEARCH_NOLIBRARY); ECase(IMAGE_WEAK_EXTERN_SEARCH_LIBRARY); ECase(IMAGE_WEAK_EXTERN_SEARCH_ALIAS); } void ScalarEnumerationTraits<COFFYAML::AuxSymbolType>::enumeration( IO &IO, COFFYAML::AuxSymbolType &Value) { ECase(IMAGE_AUX_SYMBOL_TYPE_TOKEN_DEF); } void ScalarEnumerationTraits<COFF::MachineTypes>::enumeration( IO &IO, COFF::MachineTypes &Value) { ECase(IMAGE_FILE_MACHINE_UNKNOWN); ECase(IMAGE_FILE_MACHINE_AM33); ECase(IMAGE_FILE_MACHINE_AMD64); ECase(IMAGE_FILE_MACHINE_ARM); ECase(IMAGE_FILE_MACHINE_ARMNT); ECase(IMAGE_FILE_MACHINE_EBC); ECase(IMAGE_FILE_MACHINE_I386); ECase(IMAGE_FILE_MACHINE_IA64); ECase(IMAGE_FILE_MACHINE_M32R); ECase(IMAGE_FILE_MACHINE_MIPS16); ECase(IMAGE_FILE_MACHINE_MIPSFPU); ECase(IMAGE_FILE_MACHINE_MIPSFPU16); ECase(IMAGE_FILE_MACHINE_POWERPC); ECase(IMAGE_FILE_MACHINE_POWERPCFP); ECase(IMAGE_FILE_MACHINE_R4000); ECase(IMAGE_FILE_MACHINE_SH3); ECase(IMAGE_FILE_MACHINE_SH3DSP); ECase(IMAGE_FILE_MACHINE_SH4); ECase(IMAGE_FILE_MACHINE_SH5); ECase(IMAGE_FILE_MACHINE_THUMB); ECase(IMAGE_FILE_MACHINE_WCEMIPSV2); } void ScalarEnumerationTraits<COFF::SymbolBaseType>::enumeration( IO &IO, COFF::SymbolBaseType &Value) { ECase(IMAGE_SYM_TYPE_NULL); ECase(IMAGE_SYM_TYPE_VOID); ECase(IMAGE_SYM_TYPE_CHAR); ECase(IMAGE_SYM_TYPE_SHORT); ECase(IMAGE_SYM_TYPE_INT); ECase(IMAGE_SYM_TYPE_LONG); ECase(IMAGE_SYM_TYPE_FLOAT); ECase(IMAGE_SYM_TYPE_DOUBLE); ECase(IMAGE_SYM_TYPE_STRUCT); ECase(IMAGE_SYM_TYPE_UNION); ECase(IMAGE_SYM_TYPE_ENUM); ECase(IMAGE_SYM_TYPE_MOE); ECase(IMAGE_SYM_TYPE_BYTE); ECase(IMAGE_SYM_TYPE_WORD); ECase(IMAGE_SYM_TYPE_UINT); ECase(IMAGE_SYM_TYPE_DWORD); } void ScalarEnumerationTraits<COFF::SymbolStorageClass>::enumeration( IO &IO, COFF::SymbolStorageClass &Value) { ECase(IMAGE_SYM_CLASS_END_OF_FUNCTION); ECase(IMAGE_SYM_CLASS_NULL); ECase(IMAGE_SYM_CLASS_AUTOMATIC); ECase(IMAGE_SYM_CLASS_EXTERNAL); ECase(IMAGE_SYM_CLASS_STATIC); ECase(IMAGE_SYM_CLASS_REGISTER); ECase(IMAGE_SYM_CLASS_EXTERNAL_DEF); ECase(IMAGE_SYM_CLASS_LABEL); ECase(IMAGE_SYM_CLASS_UNDEFINED_LABEL); ECase(IMAGE_SYM_CLASS_MEMBER_OF_STRUCT); ECase(IMAGE_SYM_CLASS_ARGUMENT); ECase(IMAGE_SYM_CLASS_STRUCT_TAG); 
ECase(IMAGE_SYM_CLASS_MEMBER_OF_UNION); ECase(IMAGE_SYM_CLASS_UNION_TAG); ECase(IMAGE_SYM_CLASS_TYPE_DEFINITION); ECase(IMAGE_SYM_CLASS_UNDEFINED_STATIC); ECase(IMAGE_SYM_CLASS_ENUM_TAG); ECase(IMAGE_SYM_CLASS_MEMBER_OF_ENUM); ECase(IMAGE_SYM_CLASS_REGISTER_PARAM); ECase(IMAGE_SYM_CLASS_BIT_FIELD); ECase(IMAGE_SYM_CLASS_BLOCK); ECase(IMAGE_SYM_CLASS_FUNCTION); ECase(IMAGE_SYM_CLASS_END_OF_STRUCT); ECase(IMAGE_SYM_CLASS_FILE); ECase(IMAGE_SYM_CLASS_SECTION); ECase(IMAGE_SYM_CLASS_WEAK_EXTERNAL); ECase(IMAGE_SYM_CLASS_CLR_TOKEN); } void ScalarEnumerationTraits<COFF::SymbolComplexType>::enumeration( IO &IO, COFF::SymbolComplexType &Value) { ECase(IMAGE_SYM_DTYPE_NULL); ECase(IMAGE_SYM_DTYPE_POINTER); ECase(IMAGE_SYM_DTYPE_FUNCTION); ECase(IMAGE_SYM_DTYPE_ARRAY); } void ScalarEnumerationTraits<COFF::RelocationTypeI386>::enumeration( IO &IO, COFF::RelocationTypeI386 &Value) { ECase(IMAGE_REL_I386_ABSOLUTE); ECase(IMAGE_REL_I386_DIR16); ECase(IMAGE_REL_I386_REL16); ECase(IMAGE_REL_I386_DIR32); ECase(IMAGE_REL_I386_DIR32NB); ECase(IMAGE_REL_I386_SEG12); ECase(IMAGE_REL_I386_SECTION); ECase(IMAGE_REL_I386_SECREL); ECase(IMAGE_REL_I386_TOKEN); ECase(IMAGE_REL_I386_SECREL7); ECase(IMAGE_REL_I386_REL32); } void ScalarEnumerationTraits<COFF::RelocationTypeAMD64>::enumeration( IO &IO, COFF::RelocationTypeAMD64 &Value) { ECase(IMAGE_REL_AMD64_ABSOLUTE); ECase(IMAGE_REL_AMD64_ADDR64); ECase(IMAGE_REL_AMD64_ADDR32); ECase(IMAGE_REL_AMD64_ADDR32NB); ECase(IMAGE_REL_AMD64_REL32); ECase(IMAGE_REL_AMD64_REL32_1); ECase(IMAGE_REL_AMD64_REL32_2); ECase(IMAGE_REL_AMD64_REL32_3); ECase(IMAGE_REL_AMD64_REL32_4); ECase(IMAGE_REL_AMD64_REL32_5); ECase(IMAGE_REL_AMD64_SECTION); ECase(IMAGE_REL_AMD64_SECREL); ECase(IMAGE_REL_AMD64_SECREL7); ECase(IMAGE_REL_AMD64_TOKEN); ECase(IMAGE_REL_AMD64_SREL32); ECase(IMAGE_REL_AMD64_PAIR); ECase(IMAGE_REL_AMD64_SSPAN32); } void ScalarEnumerationTraits<COFF::WindowsSubsystem>::enumeration( IO &IO, COFF::WindowsSubsystem &Value) { ECase(IMAGE_SUBSYSTEM_UNKNOWN); ECase(IMAGE_SUBSYSTEM_NATIVE); ECase(IMAGE_SUBSYSTEM_WINDOWS_GUI); ECase(IMAGE_SUBSYSTEM_WINDOWS_CUI); ECase(IMAGE_SUBSYSTEM_OS2_CUI); ECase(IMAGE_SUBSYSTEM_POSIX_CUI); ECase(IMAGE_SUBSYSTEM_NATIVE_WINDOWS); ECase(IMAGE_SUBSYSTEM_WINDOWS_CE_GUI); ECase(IMAGE_SUBSYSTEM_EFI_APPLICATION); ECase(IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER); ECase(IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER); ECase(IMAGE_SUBSYSTEM_EFI_ROM); ECase(IMAGE_SUBSYSTEM_XBOX); ECase(IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION); } #undef ECase #define BCase(X) IO.bitSetCase(Value, #X, COFF::X); void ScalarBitSetTraits<COFF::Characteristics>::bitset( IO &IO, COFF::Characteristics &Value) { BCase(IMAGE_FILE_RELOCS_STRIPPED); BCase(IMAGE_FILE_EXECUTABLE_IMAGE); BCase(IMAGE_FILE_LINE_NUMS_STRIPPED); BCase(IMAGE_FILE_LOCAL_SYMS_STRIPPED); BCase(IMAGE_FILE_AGGRESSIVE_WS_TRIM); BCase(IMAGE_FILE_LARGE_ADDRESS_AWARE); BCase(IMAGE_FILE_BYTES_REVERSED_LO); BCase(IMAGE_FILE_32BIT_MACHINE); BCase(IMAGE_FILE_DEBUG_STRIPPED); BCase(IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP); BCase(IMAGE_FILE_NET_RUN_FROM_SWAP); BCase(IMAGE_FILE_SYSTEM); BCase(IMAGE_FILE_DLL); BCase(IMAGE_FILE_UP_SYSTEM_ONLY); BCase(IMAGE_FILE_BYTES_REVERSED_HI); } void ScalarBitSetTraits<COFF::SectionCharacteristics>::bitset( IO &IO, COFF::SectionCharacteristics &Value) { BCase(IMAGE_SCN_TYPE_NO_PAD); BCase(IMAGE_SCN_CNT_CODE); BCase(IMAGE_SCN_CNT_INITIALIZED_DATA); BCase(IMAGE_SCN_CNT_UNINITIALIZED_DATA); BCase(IMAGE_SCN_LNK_OTHER); BCase(IMAGE_SCN_LNK_INFO); BCase(IMAGE_SCN_LNK_REMOVE); BCase(IMAGE_SCN_LNK_COMDAT); 
BCase(IMAGE_SCN_GPREL); BCase(IMAGE_SCN_MEM_PURGEABLE); BCase(IMAGE_SCN_MEM_16BIT); BCase(IMAGE_SCN_MEM_LOCKED); BCase(IMAGE_SCN_MEM_PRELOAD); BCase(IMAGE_SCN_LNK_NRELOC_OVFL); BCase(IMAGE_SCN_MEM_DISCARDABLE); BCase(IMAGE_SCN_MEM_NOT_CACHED); BCase(IMAGE_SCN_MEM_NOT_PAGED); BCase(IMAGE_SCN_MEM_SHARED); BCase(IMAGE_SCN_MEM_EXECUTE); BCase(IMAGE_SCN_MEM_READ); BCase(IMAGE_SCN_MEM_WRITE); } void ScalarBitSetTraits<COFF::DLLCharacteristics>::bitset( IO &IO, COFF::DLLCharacteristics &Value) { BCase(IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA); BCase(IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE); BCase(IMAGE_DLL_CHARACTERISTICS_FORCE_INTEGRITY); BCase(IMAGE_DLL_CHARACTERISTICS_NX_COMPAT); BCase(IMAGE_DLL_CHARACTERISTICS_NO_ISOLATION); BCase(IMAGE_DLL_CHARACTERISTICS_NO_SEH); BCase(IMAGE_DLL_CHARACTERISTICS_NO_BIND); BCase(IMAGE_DLL_CHARACTERISTICS_APPCONTAINER); BCase(IMAGE_DLL_CHARACTERISTICS_WDM_DRIVER); BCase(IMAGE_DLL_CHARACTERISTICS_GUARD_CF); BCase(IMAGE_DLL_CHARACTERISTICS_TERMINAL_SERVER_AWARE); } #undef BCase namespace { struct NSectionSelectionType { NSectionSelectionType(IO &) : SelectionType(COFFYAML::COMDATType(0)) {} NSectionSelectionType(IO &, uint8_t C) : SelectionType(COFFYAML::COMDATType(C)) {} uint8_t denormalize(IO &) { return SelectionType; } COFFYAML::COMDATType SelectionType; }; struct NWeakExternalCharacteristics { NWeakExternalCharacteristics(IO &) : Characteristics(COFFYAML::WeakExternalCharacteristics(0)) {} NWeakExternalCharacteristics(IO &, uint32_t C) : Characteristics(COFFYAML::WeakExternalCharacteristics(C)) {} uint32_t denormalize(IO &) { return Characteristics; } COFFYAML::WeakExternalCharacteristics Characteristics; }; struct NSectionCharacteristics { NSectionCharacteristics(IO &) : Characteristics(COFF::SectionCharacteristics(0)) {} NSectionCharacteristics(IO &, uint32_t C) : Characteristics(COFF::SectionCharacteristics(C)) {} uint32_t denormalize(IO &) { return Characteristics; } COFF::SectionCharacteristics Characteristics; }; struct NAuxTokenType { NAuxTokenType(IO &) : AuxType(COFFYAML::AuxSymbolType(0)) {} NAuxTokenType(IO &, uint8_t C) : AuxType(COFFYAML::AuxSymbolType(C)) {} uint32_t denormalize(IO &) { return AuxType; } COFFYAML::AuxSymbolType AuxType; }; struct NStorageClass { NStorageClass(IO &) : StorageClass(COFF::SymbolStorageClass(0)) {} NStorageClass(IO &, uint8_t S) : StorageClass(COFF::SymbolStorageClass(S)) {} uint8_t denormalize(IO &) { return StorageClass; } COFF::SymbolStorageClass StorageClass; }; struct NMachine { NMachine(IO &) : Machine(COFF::MachineTypes(0)) {} NMachine(IO &, uint16_t M) : Machine(COFF::MachineTypes(M)) {} uint16_t denormalize(IO &) { return Machine; } COFF::MachineTypes Machine; }; struct NHeaderCharacteristics { NHeaderCharacteristics(IO &) : Characteristics(COFF::Characteristics(0)) {} NHeaderCharacteristics(IO &, uint16_t C) : Characteristics(COFF::Characteristics(C)) {} uint16_t denormalize(IO &) { return Characteristics; } COFF::Characteristics Characteristics; }; template <typename RelocType> struct NType { NType(IO &) : Type(RelocType(0)) {} NType(IO &, uint16_t T) : Type(RelocType(T)) {} uint16_t denormalize(IO &) { return Type; } RelocType Type; }; struct NWindowsSubsystem { NWindowsSubsystem(IO &) : Subsystem(COFF::WindowsSubsystem(0)) {} NWindowsSubsystem(IO &, uint16_t C) : Subsystem(COFF::WindowsSubsystem(C)) {} uint16_t denormalize(IO &) { return Subsystem; } COFF::WindowsSubsystem Subsystem; }; struct NDLLCharacteristics { NDLLCharacteristics(IO &) : Characteristics(COFF::DLLCharacteristics(0)) {} 
NDLLCharacteristics(IO &, uint16_t C) : Characteristics(COFF::DLLCharacteristics(C)) {} uint16_t denormalize(IO &) { return Characteristics; } COFF::DLLCharacteristics Characteristics; }; } void MappingTraits<COFFYAML::Relocation>::mapping(IO &IO, COFFYAML::Relocation &Rel) { IO.mapRequired("VirtualAddress", Rel.VirtualAddress); IO.mapRequired("SymbolName", Rel.SymbolName); COFF::header &H = *static_cast<COFF::header *>(IO.getContext()); if (H.Machine == COFF::IMAGE_FILE_MACHINE_I386) { MappingNormalization<NType<COFF::RelocationTypeI386>, uint16_t> NT( IO, Rel.Type); IO.mapRequired("Type", NT->Type); } else if (H.Machine == COFF::IMAGE_FILE_MACHINE_AMD64) { MappingNormalization<NType<COFF::RelocationTypeAMD64>, uint16_t> NT( IO, Rel.Type); IO.mapRequired("Type", NT->Type); } else { IO.mapRequired("Type", Rel.Type); } } void MappingTraits<COFF::DataDirectory>::mapping(IO &IO, COFF::DataDirectory &DD) { IO.mapRequired("RelativeVirtualAddress", DD.RelativeVirtualAddress); IO.mapRequired("Size", DD.Size); } void MappingTraits<COFFYAML::PEHeader>::mapping(IO &IO, COFFYAML::PEHeader &PH) { MappingNormalization<NWindowsSubsystem, uint16_t> NWS(IO, PH.Header.Subsystem); MappingNormalization<NDLLCharacteristics, uint16_t> NDC( IO, PH.Header.DLLCharacteristics); IO.mapRequired("AddressOfEntryPoint", PH.Header.AddressOfEntryPoint); IO.mapRequired("ImageBase", PH.Header.ImageBase); IO.mapRequired("SectionAlignment", PH.Header.SectionAlignment); IO.mapRequired("FileAlignment", PH.Header.FileAlignment); IO.mapRequired("MajorOperatingSystemVersion", PH.Header.MajorOperatingSystemVersion); IO.mapRequired("MinorOperatingSystemVersion", PH.Header.MinorOperatingSystemVersion); IO.mapRequired("MajorImageVersion", PH.Header.MajorImageVersion); IO.mapRequired("MinorImageVersion", PH.Header.MinorImageVersion); IO.mapRequired("MajorSubsystemVersion", PH.Header.MajorSubsystemVersion); IO.mapRequired("MinorSubsystemVersion", PH.Header.MinorSubsystemVersion); IO.mapRequired("Subsystem", NWS->Subsystem); IO.mapRequired("DLLCharacteristics", NDC->Characteristics); IO.mapRequired("SizeOfStackReserve", PH.Header.SizeOfStackReserve); IO.mapRequired("SizeOfStackCommit", PH.Header.SizeOfStackCommit); IO.mapRequired("SizeOfHeapReserve", PH.Header.SizeOfHeapReserve); IO.mapRequired("SizeOfHeapCommit", PH.Header.SizeOfHeapCommit); IO.mapOptional("ExportTable", PH.DataDirectories[COFF::EXPORT_TABLE]); IO.mapOptional("ImportTable", PH.DataDirectories[COFF::IMPORT_TABLE]); IO.mapOptional("ResourceTable", PH.DataDirectories[COFF::RESOURCE_TABLE]); IO.mapOptional("ExceptionTable", PH.DataDirectories[COFF::EXCEPTION_TABLE]); IO.mapOptional("CertificateTable", PH.DataDirectories[COFF::CERTIFICATE_TABLE]); IO.mapOptional("BaseRelocationTable", PH.DataDirectories[COFF::BASE_RELOCATION_TABLE]); IO.mapOptional("Debug", PH.DataDirectories[COFF::DEBUG]); IO.mapOptional("Architecture", PH.DataDirectories[COFF::ARCHITECTURE]); IO.mapOptional("GlobalPtr", PH.DataDirectories[COFF::GLOBAL_PTR]); IO.mapOptional("TlsTable", PH.DataDirectories[COFF::TLS_TABLE]); IO.mapOptional("LoadConfigTable", PH.DataDirectories[COFF::LOAD_CONFIG_TABLE]); IO.mapOptional("BoundImport", PH.DataDirectories[COFF::BOUND_IMPORT]); IO.mapOptional("IAT", PH.DataDirectories[COFF::IAT]); IO.mapOptional("DelayImportDescriptor", PH.DataDirectories[COFF::DELAY_IMPORT_DESCRIPTOR]); IO.mapOptional("ClrRuntimeHeader", PH.DataDirectories[COFF::CLR_RUNTIME_HEADER]); } void MappingTraits<COFF::header>::mapping(IO &IO, COFF::header &H) { MappingNormalization<NMachine, uint16_t> 
NM(IO, H.Machine); MappingNormalization<NHeaderCharacteristics, uint16_t> NC(IO, H.Characteristics); IO.mapRequired("Machine", NM->Machine); IO.mapOptional("Characteristics", NC->Characteristics); IO.setContext(static_cast<void *>(&H)); } void MappingTraits<COFF::AuxiliaryFunctionDefinition>::mapping( IO &IO, COFF::AuxiliaryFunctionDefinition &AFD) { IO.mapRequired("TagIndex", AFD.TagIndex); IO.mapRequired("TotalSize", AFD.TotalSize); IO.mapRequired("PointerToLinenumber", AFD.PointerToLinenumber); IO.mapRequired("PointerToNextFunction", AFD.PointerToNextFunction); } void MappingTraits<COFF::AuxiliarybfAndefSymbol>::mapping( IO &IO, COFF::AuxiliarybfAndefSymbol &AAS) { IO.mapRequired("Linenumber", AAS.Linenumber); IO.mapRequired("PointerToNextFunction", AAS.PointerToNextFunction); } void MappingTraits<COFF::AuxiliaryWeakExternal>::mapping( IO &IO, COFF::AuxiliaryWeakExternal &AWE) { MappingNormalization<NWeakExternalCharacteristics, uint32_t> NWEC( IO, AWE.Characteristics); IO.mapRequired("TagIndex", AWE.TagIndex); IO.mapRequired("Characteristics", NWEC->Characteristics); } void MappingTraits<COFF::AuxiliarySectionDefinition>::mapping( IO &IO, COFF::AuxiliarySectionDefinition &ASD) { MappingNormalization<NSectionSelectionType, uint8_t> NSST( IO, ASD.Selection); IO.mapRequired("Length", ASD.Length); IO.mapRequired("NumberOfRelocations", ASD.NumberOfRelocations); IO.mapRequired("NumberOfLinenumbers", ASD.NumberOfLinenumbers); IO.mapRequired("CheckSum", ASD.CheckSum); IO.mapRequired("Number", ASD.Number); IO.mapOptional("Selection", NSST->SelectionType, COFFYAML::COMDATType(0)); } void MappingTraits<COFF::AuxiliaryCLRToken>::mapping( IO &IO, COFF::AuxiliaryCLRToken &ACT) { MappingNormalization<NAuxTokenType, uint8_t> NATT(IO, ACT.AuxType); IO.mapRequired("AuxType", NATT->AuxType); IO.mapRequired("SymbolTableIndex", ACT.SymbolTableIndex); } void MappingTraits<COFFYAML::Symbol>::mapping(IO &IO, COFFYAML::Symbol &S) { MappingNormalization<NStorageClass, uint8_t> NS(IO, S.Header.StorageClass); IO.mapRequired("Name", S.Name); IO.mapRequired("Value", S.Header.Value); IO.mapRequired("SectionNumber", S.Header.SectionNumber); IO.mapRequired("SimpleType", S.SimpleType); IO.mapRequired("ComplexType", S.ComplexType); IO.mapRequired("StorageClass", NS->StorageClass); IO.mapOptional("FunctionDefinition", S.FunctionDefinition); IO.mapOptional("bfAndefSymbol", S.bfAndefSymbol); IO.mapOptional("WeakExternal", S.WeakExternal); IO.mapOptional("File", S.File, StringRef()); IO.mapOptional("SectionDefinition", S.SectionDefinition); IO.mapOptional("CLRToken", S.CLRToken); } void MappingTraits<COFFYAML::Section>::mapping(IO &IO, COFFYAML::Section &Sec) { MappingNormalization<NSectionCharacteristics, uint32_t> NC( IO, Sec.Header.Characteristics); IO.mapRequired("Name", Sec.Name); IO.mapRequired("Characteristics", NC->Characteristics); IO.mapOptional("VirtualAddress", Sec.Header.VirtualAddress, 0U); IO.mapOptional("VirtualSize", Sec.Header.VirtualSize, 0U); IO.mapOptional("Alignment", Sec.Alignment); IO.mapRequired("SectionData", Sec.SectionData); IO.mapOptional("Relocations", Sec.Relocations); } void MappingTraits<COFFYAML::Object>::mapping(IO &IO, COFFYAML::Object &Obj) { IO.mapOptional("OptionalHeader", Obj.OptionalHeader); IO.mapRequired("header", Obj.Header); IO.mapRequired("sections", Obj.Sections); IO.mapRequired("symbols", Obj.Symbols); } } }
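Not part of the original file: a minimal sketch of how the traits above are typically consumed, in the spirit of obj2yaml/yaml2obj. The llvm::yaml::Output usage is the standard YAML I/O entry point for types with MappingTraits; the helper name and the field values chosen here are illustrative assumptions only.

#include "llvm/Object/COFFYAML.h"
#include "llvm/Support/YAMLTraits.h"
#include "llvm/Support/raw_ostream.h"

// Hypothetical helper (not in LLVM): serialize a freshly constructed
// COFFYAML::Object through the MappingTraits defined in this file.
static void emitMinimalCOFFYAML() {
  using namespace llvm;
  COFFYAML::Object Obj;  // the Object constructor zero-fills the COFF header
  Obj.Header.Machine = COFF::IMAGE_FILE_MACHINE_AMD64;         // NMachine path
  Obj.Header.Characteristics = COFF::IMAGE_FILE_EXECUTABLE_IMAGE; // bitset path

  yaml::Output YOut(outs());
  YOut << Obj;  // drives MappingTraits<COFFYAML::Object>::mapping above
}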
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/Object/COFFObjectFile.cpp
//===- COFFObjectFile.cpp - COFF object file implementation -----*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file declares the COFFObjectFile class. // //===----------------------------------------------------------------------===// #include "llvm/Object/COFF.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/StringSwitch.h" #include "llvm/ADT/Triple.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/COFF.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include <cctype> #include <limits> using namespace llvm; using namespace object; using support::ulittle16_t; using support::ulittle32_t; using support::ulittle64_t; using support::little16_t; // Returns false if size is greater than the buffer size. And sets ec. static bool checkSize(MemoryBufferRef M, std::error_code &EC, uint64_t Size) { if (M.getBufferSize() < Size) { EC = object_error::unexpected_eof; return false; } return true; } static std::error_code checkOffset(MemoryBufferRef M, uintptr_t Addr, const uint64_t Size) { if (Addr + Size < Addr || Addr + Size < Size || Addr + Size > uintptr_t(M.getBufferEnd()) || Addr < uintptr_t(M.getBufferStart())) { return object_error::unexpected_eof; } return std::error_code(); } // Sets Obj unless any bytes in [addr, addr + size) fall outsize of m. // Returns unexpected_eof if error. template <typename T> static std::error_code getObject(const T *&Obj, MemoryBufferRef M, const void *Ptr, const uint64_t Size = sizeof(T)) { uintptr_t Addr = uintptr_t(Ptr); if (std::error_code EC = checkOffset(M, Addr, Size)) return EC; Obj = reinterpret_cast<const T *>(Addr); return std::error_code(); } // Decode a string table entry in base 64 (//AAAAAA). Expects \arg Str without // prefixed slashes. static bool decodeBase64StringEntry(StringRef Str, uint32_t &Result) { assert(Str.size() <= 6 && "String too long, possible overflow."); if (Str.size() > 6) return true; uint64_t Value = 0; while (!Str.empty()) { unsigned CharVal; if (Str[0] >= 'A' && Str[0] <= 'Z') // 0..25 CharVal = Str[0] - 'A'; else if (Str[0] >= 'a' && Str[0] <= 'z') // 26..51 CharVal = Str[0] - 'a' + 26; else if (Str[0] >= '0' && Str[0] <= '9') // 52..61 CharVal = Str[0] - '0' + 52; else if (Str[0] == '+') // 62 CharVal = 62; else if (Str[0] == '/') // 63 CharVal = 63; else return true; Value = (Value * 64) + CharVal; Str = Str.substr(1); } if (Value > std::numeric_limits<uint32_t>::max()) return true; Result = static_cast<uint32_t>(Value); return false; } template <typename coff_symbol_type> const coff_symbol_type *COFFObjectFile::toSymb(DataRefImpl Ref) const { const coff_symbol_type *Addr = reinterpret_cast<const coff_symbol_type *>(Ref.p); assert(!checkOffset(Data, uintptr_t(Addr), sizeof(*Addr))); #ifndef NDEBUG // Verify that the symbol points to a valid entry in the symbol table. uintptr_t Offset = uintptr_t(Addr) - uintptr_t(base()); assert((Offset - getPointerToSymbolTable()) % sizeof(coff_symbol_type) == 0 && "Symbol did not point to the beginning of a symbol"); #endif return Addr; } const coff_section *COFFObjectFile::toSec(DataRefImpl Ref) const { const coff_section *Addr = reinterpret_cast<const coff_section*>(Ref.p); # ifndef NDEBUG // Verify that the section points to a valid entry in the section table. 
if (Addr < SectionTable || Addr >= (SectionTable + getNumberOfSections())) report_fatal_error("Section was outside of section table."); uintptr_t Offset = uintptr_t(Addr) - uintptr_t(SectionTable); assert(Offset % sizeof(coff_section) == 0 && "Section did not point to the beginning of a section"); # endif return Addr; } void COFFObjectFile::moveSymbolNext(DataRefImpl &Ref) const { auto End = reinterpret_cast<uintptr_t>(StringTable); if (SymbolTable16) { const coff_symbol16 *Symb = toSymb<coff_symbol16>(Ref); Symb += 1 + Symb->NumberOfAuxSymbols; Ref.p = std::min(reinterpret_cast<uintptr_t>(Symb), End); } else if (SymbolTable32) { const coff_symbol32 *Symb = toSymb<coff_symbol32>(Ref); Symb += 1 + Symb->NumberOfAuxSymbols; Ref.p = std::min(reinterpret_cast<uintptr_t>(Symb), End); } else { llvm_unreachable("no symbol table pointer!"); } } ErrorOr<StringRef> COFFObjectFile::getSymbolName(DataRefImpl Ref) const { COFFSymbolRef Symb = getCOFFSymbol(Ref); StringRef Result; std::error_code EC = getSymbolName(Symb, Result); if (EC) return EC; return Result; } uint64_t COFFObjectFile::getSymbolValueImpl(DataRefImpl Ref) const { return getCOFFSymbol(Ref).getValue(); } ErrorOr<uint64_t> COFFObjectFile::getSymbolAddress(DataRefImpl Ref) const { uint64_t Result = getSymbolValue(Ref); COFFSymbolRef Symb = getCOFFSymbol(Ref); int32_t SectionNumber = Symb.getSectionNumber(); if (Symb.isAnyUndefined() || Symb.isCommon() || COFF::isReservedSectionNumber(SectionNumber)) return Result; const coff_section *Section = nullptr; if (std::error_code EC = getSection(SectionNumber, Section)) return EC; Result += Section->VirtualAddress; return Result; } SymbolRef::Type COFFObjectFile::getSymbolType(DataRefImpl Ref) const { COFFSymbolRef Symb = getCOFFSymbol(Ref); int32_t SectionNumber = Symb.getSectionNumber(); if (Symb.isAnyUndefined()) return SymbolRef::ST_Unknown; if (Symb.isFunctionDefinition()) return SymbolRef::ST_Function; if (Symb.isCommon()) return SymbolRef::ST_Data; if (Symb.isFileRecord()) return SymbolRef::ST_File; // TODO: perhaps we need a new symbol type ST_Section. 
if (SectionNumber == COFF::IMAGE_SYM_DEBUG || Symb.isSectionDefinition()) return SymbolRef::ST_Debug; if (!COFF::isReservedSectionNumber(SectionNumber)) return SymbolRef::ST_Data; return SymbolRef::ST_Other; } uint32_t COFFObjectFile::getSymbolFlags(DataRefImpl Ref) const { COFFSymbolRef Symb = getCOFFSymbol(Ref); uint32_t Result = SymbolRef::SF_None; if (Symb.isExternal() || Symb.isWeakExternal()) Result |= SymbolRef::SF_Global; if (Symb.isWeakExternal()) Result |= SymbolRef::SF_Weak; if (Symb.getSectionNumber() == COFF::IMAGE_SYM_ABSOLUTE) Result |= SymbolRef::SF_Absolute; if (Symb.isFileRecord()) Result |= SymbolRef::SF_FormatSpecific; if (Symb.isSectionDefinition()) Result |= SymbolRef::SF_FormatSpecific; if (Symb.isCommon()) Result |= SymbolRef::SF_Common; if (Symb.isAnyUndefined()) Result |= SymbolRef::SF_Undefined; return Result; } uint64_t COFFObjectFile::getCommonSymbolSizeImpl(DataRefImpl Ref) const { COFFSymbolRef Symb = getCOFFSymbol(Ref); return Symb.getValue(); } std::error_code COFFObjectFile::getSymbolSection(DataRefImpl Ref, section_iterator &Result) const { COFFSymbolRef Symb = getCOFFSymbol(Ref); if (COFF::isReservedSectionNumber(Symb.getSectionNumber())) { Result = section_end(); } else { const coff_section *Sec = nullptr; if (std::error_code EC = getSection(Symb.getSectionNumber(), Sec)) return EC; DataRefImpl Ref; Ref.p = reinterpret_cast<uintptr_t>(Sec); Result = section_iterator(SectionRef(Ref, this)); } return std::error_code(); } unsigned COFFObjectFile::getSymbolSectionID(SymbolRef Sym) const { COFFSymbolRef Symb = getCOFFSymbol(Sym.getRawDataRefImpl()); return Symb.getSectionNumber(); } void COFFObjectFile::moveSectionNext(DataRefImpl &Ref) const { const coff_section *Sec = toSec(Ref); Sec += 1; Ref.p = reinterpret_cast<uintptr_t>(Sec); } std::error_code COFFObjectFile::getSectionName(DataRefImpl Ref, StringRef &Result) const { const coff_section *Sec = toSec(Ref); return getSectionName(Sec, Result); } uint64_t COFFObjectFile::getSectionAddress(DataRefImpl Ref) const { const coff_section *Sec = toSec(Ref); return Sec->VirtualAddress; } uint64_t COFFObjectFile::getSectionSize(DataRefImpl Ref) const { return getSectionSize(toSec(Ref)); } std::error_code COFFObjectFile::getSectionContents(DataRefImpl Ref, StringRef &Result) const { const coff_section *Sec = toSec(Ref); ArrayRef<uint8_t> Res; std::error_code EC = getSectionContents(Sec, Res); Result = StringRef(reinterpret_cast<const char*>(Res.data()), Res.size()); return EC; } uint64_t COFFObjectFile::getSectionAlignment(DataRefImpl Ref) const { const coff_section *Sec = toSec(Ref); return uint64_t(1) << (((Sec->Characteristics & 0x00F00000) >> 20) - 1); } bool COFFObjectFile::isSectionText(DataRefImpl Ref) const { const coff_section *Sec = toSec(Ref); return Sec->Characteristics & COFF::IMAGE_SCN_CNT_CODE; } bool COFFObjectFile::isSectionData(DataRefImpl Ref) const { const coff_section *Sec = toSec(Ref); return Sec->Characteristics & COFF::IMAGE_SCN_CNT_INITIALIZED_DATA; } bool COFFObjectFile::isSectionBSS(DataRefImpl Ref) const { const coff_section *Sec = toSec(Ref); const uint32_t BssFlags = COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA | COFF::IMAGE_SCN_MEM_READ | COFF::IMAGE_SCN_MEM_WRITE; return (Sec->Characteristics & BssFlags) == BssFlags; } unsigned COFFObjectFile::getSectionID(SectionRef Sec) const { uintptr_t Offset = uintptr_t(Sec.getRawDataRefImpl().p) - uintptr_t(SectionTable); assert((Offset % sizeof(coff_section)) == 0); return (Offset / sizeof(coff_section)) + 1; } bool 
COFFObjectFile::isSectionVirtual(DataRefImpl Ref) const { const coff_section *Sec = toSec(Ref); // In COFF, a virtual section won't have any in-file // content, so the file pointer to the content will be zero. return Sec->PointerToRawData == 0; } static uint32_t getNumberOfRelocations(const coff_section *Sec, MemoryBufferRef M, const uint8_t *base) { // The field for the number of relocations in COFF section table is only // 16-bit wide. If a section has more than 65535 relocations, 0xFFFF is set to // NumberOfRelocations field, and the actual relocation count is stored in the // VirtualAddress field in the first relocation entry. if (Sec->hasExtendedRelocations()) { const coff_relocation *FirstReloc; if (getObject(FirstReloc, M, reinterpret_cast<const coff_relocation*>( base + Sec->PointerToRelocations))) return 0; // -1 to exclude this first relocation entry. return FirstReloc->VirtualAddress - 1; } return Sec->NumberOfRelocations; } static const coff_relocation * getFirstReloc(const coff_section *Sec, MemoryBufferRef M, const uint8_t *Base) { uint64_t NumRelocs = getNumberOfRelocations(Sec, M, Base); if (!NumRelocs) return nullptr; auto begin = reinterpret_cast<const coff_relocation *>( Base + Sec->PointerToRelocations); if (Sec->hasExtendedRelocations()) { // Skip the first relocation entry repurposed to store the number of // relocations. begin++; } if (checkOffset(M, uintptr_t(begin), sizeof(coff_relocation) * NumRelocs)) return nullptr; return begin; } relocation_iterator COFFObjectFile::section_rel_begin(DataRefImpl Ref) const { const coff_section *Sec = toSec(Ref); const coff_relocation *begin = getFirstReloc(Sec, Data, base()); if (begin && Sec->VirtualAddress != 0) report_fatal_error("Sections with relocations should have an address of 0"); DataRefImpl Ret; Ret.p = reinterpret_cast<uintptr_t>(begin); return relocation_iterator(RelocationRef(Ret, this)); } relocation_iterator COFFObjectFile::section_rel_end(DataRefImpl Ref) const { const coff_section *Sec = toSec(Ref); const coff_relocation *I = getFirstReloc(Sec, Data, base()); if (I) I += getNumberOfRelocations(Sec, Data, base()); DataRefImpl Ret; Ret.p = reinterpret_cast<uintptr_t>(I); return relocation_iterator(RelocationRef(Ret, this)); } // Initialize the pointer to the symbol table. std::error_code COFFObjectFile::initSymbolTablePtr() { if (COFFHeader) if (std::error_code EC = getObject( SymbolTable16, Data, base() + getPointerToSymbolTable(), (uint64_t)getNumberOfSymbols() * getSymbolTableEntrySize())) return EC; if (COFFBigObjHeader) if (std::error_code EC = getObject( SymbolTable32, Data, base() + getPointerToSymbolTable(), (uint64_t)getNumberOfSymbols() * getSymbolTableEntrySize())) return EC; // Find string table. The first four byte of the string table contains the // total size of the string table, including the size field itself. If the // string table is empty, the value of the first four byte would be 4. uint32_t StringTableOffset = getPointerToSymbolTable() + getNumberOfSymbols() * getSymbolTableEntrySize(); const uint8_t *StringTableAddr = base() + StringTableOffset; const ulittle32_t *StringTableSizePtr; if (std::error_code EC = getObject(StringTableSizePtr, Data, StringTableAddr)) return EC; StringTableSize = *StringTableSizePtr; if (std::error_code EC = getObject(StringTable, Data, StringTableAddr, StringTableSize)) return EC; // Treat table sizes < 4 as empty because contrary to the PECOFF spec, some // tools like cvtres write a size of 0 for an empty table instead of 4. 
if (StringTableSize < 4) StringTableSize = 4; // Check that the string table is null terminated if has any in it. if (StringTableSize > 4 && StringTable[StringTableSize - 1] != 0) return object_error::parse_failed; return std::error_code(); } // Returns the file offset for the given VA. std::error_code COFFObjectFile::getVaPtr(uint64_t Addr, uintptr_t &Res) const { uint64_t ImageBase = PE32Header ? (uint64_t)PE32Header->ImageBase : (uint64_t)PE32PlusHeader->ImageBase; uint64_t Rva = Addr - ImageBase; assert(Rva <= UINT32_MAX); return getRvaPtr((uint32_t)Rva, Res); } // Returns the file offset for the given RVA. std::error_code COFFObjectFile::getRvaPtr(uint32_t Addr, uintptr_t &Res) const { for (const SectionRef &S : sections()) { const coff_section *Section = getCOFFSection(S); uint32_t SectionStart = Section->VirtualAddress; uint32_t SectionEnd = Section->VirtualAddress + Section->VirtualSize; if (SectionStart <= Addr && Addr < SectionEnd) { uint32_t Offset = Addr - SectionStart; Res = uintptr_t(base()) + Section->PointerToRawData + Offset; return std::error_code(); } } return object_error::parse_failed; } // Returns hint and name fields, assuming \p Rva is pointing to a Hint/Name // table entry. std::error_code COFFObjectFile::getHintName(uint32_t Rva, uint16_t &Hint, StringRef &Name) const { uintptr_t IntPtr = 0; if (std::error_code EC = getRvaPtr(Rva, IntPtr)) return EC; const uint8_t *Ptr = reinterpret_cast<const uint8_t *>(IntPtr); Hint = *reinterpret_cast<const ulittle16_t *>(Ptr); Name = StringRef(reinterpret_cast<const char *>(Ptr + 2)); return std::error_code(); } // Find the import table. std::error_code COFFObjectFile::initImportTablePtr() { // First, we get the RVA of the import table. If the file lacks a pointer to // the import table, do nothing. const data_directory *DataEntry; if (getDataDirectory(COFF::IMPORT_TABLE, DataEntry)) return std::error_code(); // Do nothing if the pointer to import table is NULL. if (DataEntry->RelativeVirtualAddress == 0) return std::error_code(); uint32_t ImportTableRva = DataEntry->RelativeVirtualAddress; // -1 because the last entry is the null entry. NumberOfImportDirectory = DataEntry->Size / sizeof(import_directory_table_entry) - 1; // Find the section that contains the RVA. This is needed because the RVA is // the import table's memory address which is different from its file offset. uintptr_t IntPtr = 0; if (std::error_code EC = getRvaPtr(ImportTableRva, IntPtr)) return EC; ImportDirectory = reinterpret_cast< const import_directory_table_entry *>(IntPtr); return std::error_code(); } // Initializes DelayImportDirectory and NumberOfDelayImportDirectory. std::error_code COFFObjectFile::initDelayImportTablePtr() { const data_directory *DataEntry; if (getDataDirectory(COFF::DELAY_IMPORT_DESCRIPTOR, DataEntry)) return std::error_code(); if (DataEntry->RelativeVirtualAddress == 0) return std::error_code(); uint32_t RVA = DataEntry->RelativeVirtualAddress; NumberOfDelayImportDirectory = DataEntry->Size / sizeof(delay_import_directory_table_entry) - 1; uintptr_t IntPtr = 0; if (std::error_code EC = getRvaPtr(RVA, IntPtr)) return EC; DelayImportDirectory = reinterpret_cast< const delay_import_directory_table_entry *>(IntPtr); return std::error_code(); } // Find the export table. std::error_code COFFObjectFile::initExportTablePtr() { // First, we get the RVA of the export table. If the file lacks a pointer to // the export table, do nothing. 
const data_directory *DataEntry; if (getDataDirectory(COFF::EXPORT_TABLE, DataEntry)) return std::error_code(); // Do nothing if the pointer to export table is NULL. if (DataEntry->RelativeVirtualAddress == 0) return std::error_code(); uint32_t ExportTableRva = DataEntry->RelativeVirtualAddress; uintptr_t IntPtr = 0; if (std::error_code EC = getRvaPtr(ExportTableRva, IntPtr)) return EC; ExportDirectory = reinterpret_cast<const export_directory_table_entry *>(IntPtr); return std::error_code(); } std::error_code COFFObjectFile::initBaseRelocPtr() { const data_directory *DataEntry; if (getDataDirectory(COFF::BASE_RELOCATION_TABLE, DataEntry)) return std::error_code(); if (DataEntry->RelativeVirtualAddress == 0) return std::error_code(); uintptr_t IntPtr = 0; if (std::error_code EC = getRvaPtr(DataEntry->RelativeVirtualAddress, IntPtr)) return EC; BaseRelocHeader = reinterpret_cast<const coff_base_reloc_block_header *>( IntPtr); BaseRelocEnd = reinterpret_cast<coff_base_reloc_block_header *>( IntPtr + DataEntry->Size); return std::error_code(); } COFFObjectFile::COFFObjectFile(MemoryBufferRef Object, std::error_code &EC) : ObjectFile(Binary::ID_COFF, Object), COFFHeader(nullptr), COFFBigObjHeader(nullptr), PE32Header(nullptr), PE32PlusHeader(nullptr), DataDirectory(nullptr), SectionTable(nullptr), SymbolTable16(nullptr), SymbolTable32(nullptr), StringTable(nullptr), StringTableSize(0), ImportDirectory(nullptr), NumberOfImportDirectory(0), DelayImportDirectory(nullptr), NumberOfDelayImportDirectory(0), ExportDirectory(nullptr), BaseRelocHeader(nullptr), BaseRelocEnd(nullptr) { // Check that we at least have enough room for a header. if (!checkSize(Data, EC, sizeof(coff_file_header))) return; // The current location in the file where we are looking at. uint64_t CurPtr = 0; // PE header is optional and is present only in executables. If it exists, // it is placed right after COFF header. bool HasPEHeader = false; // Check if this is a PE/COFF file. if (checkSize(Data, EC, sizeof(dos_header) + sizeof(COFF::PEMagic))) { // PE/COFF, seek through MS-DOS compatibility stub and 4-byte // PE signature to find 'normal' COFF header. const auto *DH = reinterpret_cast<const dos_header *>(base()); if (DH->Magic[0] == 'M' && DH->Magic[1] == 'Z') { CurPtr = DH->AddressOfNewExeHeader; // Check the PE magic bytes. ("PE\0\0") if (memcmp(base() + CurPtr, COFF::PEMagic, sizeof(COFF::PEMagic)) != 0) { EC = object_error::parse_failed; return; } CurPtr += sizeof(COFF::PEMagic); // Skip the PE magic bytes. HasPEHeader = true; } } if ((EC = getObject(COFFHeader, Data, base() + CurPtr))) return; // It might be a bigobj file, let's check. Note that COFF bigobj and COFF // import libraries share a common prefix but bigobj is more restrictive. if (!HasPEHeader && COFFHeader->Machine == COFF::IMAGE_FILE_MACHINE_UNKNOWN && COFFHeader->NumberOfSections == uint16_t(0xffff) && checkSize(Data, EC, sizeof(coff_bigobj_file_header))) { if ((EC = getObject(COFFBigObjHeader, Data, base() + CurPtr))) return; // Verify that we are dealing with bigobj. if (COFFBigObjHeader->Version >= COFF::BigObjHeader::MinBigObjectVersion && std::memcmp(COFFBigObjHeader->UUID, COFF::BigObjMagic, sizeof(COFF::BigObjMagic)) == 0) { COFFHeader = nullptr; CurPtr += sizeof(coff_bigobj_file_header); } else { // It's not a bigobj. COFFBigObjHeader = nullptr; } } if (COFFHeader) { // The prior checkSize call may have failed. This isn't a hard error // because we were just trying to sniff out bigobj. 
EC = std::error_code(); CurPtr += sizeof(coff_file_header); if (COFFHeader->isImportLibrary()) return; } if (HasPEHeader) { const pe32_header *Header; if ((EC = getObject(Header, Data, base() + CurPtr))) return; const uint8_t *DataDirAddr; uint64_t DataDirSize; if (Header->Magic == COFF::PE32Header::PE32) { PE32Header = Header; DataDirAddr = base() + CurPtr + sizeof(pe32_header); DataDirSize = sizeof(data_directory) * PE32Header->NumberOfRvaAndSize; } else if (Header->Magic == COFF::PE32Header::PE32_PLUS) { PE32PlusHeader = reinterpret_cast<const pe32plus_header *>(Header); DataDirAddr = base() + CurPtr + sizeof(pe32plus_header); DataDirSize = sizeof(data_directory) * PE32PlusHeader->NumberOfRvaAndSize; } else { // It's neither PE32 nor PE32+. EC = object_error::parse_failed; return; } if ((EC = getObject(DataDirectory, Data, DataDirAddr, DataDirSize))) return; CurPtr += COFFHeader->SizeOfOptionalHeader; } if ((EC = getObject(SectionTable, Data, base() + CurPtr, (uint64_t)getNumberOfSections() * sizeof(coff_section)))) return; // Initialize the pointer to the symbol table. if (getPointerToSymbolTable() != 0) { if ((EC = initSymbolTablePtr())) return; } else { // We had better not have any symbols if we don't have a symbol table. if (getNumberOfSymbols() != 0) { EC = object_error::parse_failed; return; } } // Initialize the pointer to the beginning of the import table. if ((EC = initImportTablePtr())) return; if ((EC = initDelayImportTablePtr())) return; // Initialize the pointer to the export table. if ((EC = initExportTablePtr())) return; // Initialize the pointer to the base relocation table. if ((EC = initBaseRelocPtr())) return; EC = std::error_code(); } basic_symbol_iterator COFFObjectFile::symbol_begin_impl() const { DataRefImpl Ret; Ret.p = getSymbolTable(); return basic_symbol_iterator(SymbolRef(Ret, this)); } basic_symbol_iterator COFFObjectFile::symbol_end_impl() const { // The symbol table ends where the string table begins. 
DataRefImpl Ret; Ret.p = reinterpret_cast<uintptr_t>(StringTable); return basic_symbol_iterator(SymbolRef(Ret, this)); } import_directory_iterator COFFObjectFile::import_directory_begin() const { return import_directory_iterator( ImportDirectoryEntryRef(ImportDirectory, 0, this)); } import_directory_iterator COFFObjectFile::import_directory_end() const { return import_directory_iterator( ImportDirectoryEntryRef(ImportDirectory, NumberOfImportDirectory, this)); } delay_import_directory_iterator COFFObjectFile::delay_import_directory_begin() const { return delay_import_directory_iterator( DelayImportDirectoryEntryRef(DelayImportDirectory, 0, this)); } delay_import_directory_iterator COFFObjectFile::delay_import_directory_end() const { return delay_import_directory_iterator( DelayImportDirectoryEntryRef( DelayImportDirectory, NumberOfDelayImportDirectory, this)); } export_directory_iterator COFFObjectFile::export_directory_begin() const { return export_directory_iterator( ExportDirectoryEntryRef(ExportDirectory, 0, this)); } export_directory_iterator COFFObjectFile::export_directory_end() const { if (!ExportDirectory) return export_directory_iterator(ExportDirectoryEntryRef(nullptr, 0, this)); ExportDirectoryEntryRef Ref(ExportDirectory, ExportDirectory->AddressTableEntries, this); return export_directory_iterator(Ref); } section_iterator COFFObjectFile::section_begin() const { DataRefImpl Ret; Ret.p = reinterpret_cast<uintptr_t>(SectionTable); return section_iterator(SectionRef(Ret, this)); } section_iterator COFFObjectFile::section_end() const { DataRefImpl Ret; int NumSections = COFFHeader && COFFHeader->isImportLibrary() ? 0 : getNumberOfSections(); Ret.p = reinterpret_cast<uintptr_t>(SectionTable + NumSections); return section_iterator(SectionRef(Ret, this)); } base_reloc_iterator COFFObjectFile::base_reloc_begin() const { return base_reloc_iterator(BaseRelocRef(BaseRelocHeader, this)); } base_reloc_iterator COFFObjectFile::base_reloc_end() const { return base_reloc_iterator(BaseRelocRef(BaseRelocEnd, this)); } uint8_t COFFObjectFile::getBytesInAddress() const { return getArch() == Triple::x86_64 ? 
8 : 4; } StringRef COFFObjectFile::getFileFormatName() const { switch(getMachine()) { case COFF::IMAGE_FILE_MACHINE_I386: return "COFF-i386"; case COFF::IMAGE_FILE_MACHINE_AMD64: return "COFF-x86-64"; case COFF::IMAGE_FILE_MACHINE_ARMNT: return "COFF-ARM"; default: return "COFF-<unknown arch>"; } } unsigned COFFObjectFile::getArch() const { switch (getMachine()) { case COFF::IMAGE_FILE_MACHINE_I386: return Triple::x86; case COFF::IMAGE_FILE_MACHINE_AMD64: return Triple::x86_64; case COFF::IMAGE_FILE_MACHINE_ARMNT: return Triple::thumb; default: return Triple::UnknownArch; } } iterator_range<import_directory_iterator> COFFObjectFile::import_directories() const { return make_range(import_directory_begin(), import_directory_end()); } iterator_range<delay_import_directory_iterator> COFFObjectFile::delay_import_directories() const { return make_range(delay_import_directory_begin(), delay_import_directory_end()); } iterator_range<export_directory_iterator> COFFObjectFile::export_directories() const { return make_range(export_directory_begin(), export_directory_end()); } iterator_range<base_reloc_iterator> COFFObjectFile::base_relocs() const { return make_range(base_reloc_begin(), base_reloc_end()); } std::error_code COFFObjectFile::getPE32Header(const pe32_header *&Res) const { Res = PE32Header; return std::error_code(); } std::error_code COFFObjectFile::getPE32PlusHeader(const pe32plus_header *&Res) const { Res = PE32PlusHeader; return std::error_code(); } std::error_code COFFObjectFile::getDataDirectory(uint32_t Index, const data_directory *&Res) const { // Error if if there's no data directory or the index is out of range. if (!DataDirectory) { Res = nullptr; return object_error::parse_failed; } assert(PE32Header || PE32PlusHeader); uint32_t NumEnt = PE32Header ? PE32Header->NumberOfRvaAndSize : PE32PlusHeader->NumberOfRvaAndSize; if (Index >= NumEnt) { Res = nullptr; return object_error::parse_failed; } Res = &DataDirectory[Index]; return std::error_code(); } std::error_code COFFObjectFile::getSection(int32_t Index, const coff_section *&Result) const { Result = nullptr; if (COFF::isReservedSectionNumber(Index)) return std::error_code(); if (static_cast<uint32_t>(Index) <= getNumberOfSections()) { // We already verified the section table data, so no need to check again. Result = SectionTable + (Index - 1); return std::error_code(); } return object_error::parse_failed; } std::error_code COFFObjectFile::getString(uint32_t Offset, StringRef &Result) const { if (StringTableSize <= 4) // Tried to get a string from an empty string table. return object_error::parse_failed; if (Offset >= StringTableSize) return object_error::unexpected_eof; Result = StringRef(StringTable + Offset); return std::error_code(); } std::error_code COFFObjectFile::getSymbolName(COFFSymbolRef Symbol, StringRef &Res) const { return getSymbolName(Symbol.getGeneric(), Res); } std::error_code COFFObjectFile::getSymbolName(const coff_symbol_generic *Symbol, StringRef &Res) const { // Check for string table entry. First 4 bytes are 0. if (Symbol->Name.Offset.Zeroes == 0) { if (std::error_code EC = getString(Symbol->Name.Offset.Offset, Res)) return EC; return std::error_code(); } if (Symbol->Name.ShortName[COFF::NameSize - 1] == 0) // Null terminated, let ::strlen figure out the length. Res = StringRef(Symbol->Name.ShortName); else // Not null terminated, use all 8 bytes. 
Res = StringRef(Symbol->Name.ShortName, COFF::NameSize); return std::error_code(); } ArrayRef<uint8_t> COFFObjectFile::getSymbolAuxData(COFFSymbolRef Symbol) const { const uint8_t *Aux = nullptr; size_t SymbolSize = getSymbolTableEntrySize(); if (Symbol.getNumberOfAuxSymbols() > 0) { // AUX data comes immediately after the symbol in COFF Aux = reinterpret_cast<const uint8_t *>(Symbol.getRawPtr()) + SymbolSize; # ifndef NDEBUG // Verify that the Aux symbol points to a valid entry in the symbol table. uintptr_t Offset = uintptr_t(Aux) - uintptr_t(base()); if (Offset < getPointerToSymbolTable() || Offset >= getPointerToSymbolTable() + (getNumberOfSymbols() * SymbolSize)) report_fatal_error("Aux Symbol data was outside of symbol table."); assert((Offset - getPointerToSymbolTable()) % SymbolSize == 0 && "Aux Symbol data did not point to the beginning of a symbol"); # endif } return makeArrayRef(Aux, Symbol.getNumberOfAuxSymbols() * SymbolSize); } std::error_code COFFObjectFile::getSectionName(const coff_section *Sec, StringRef &Res) const { StringRef Name; if (Sec->Name[COFF::NameSize - 1] == 0) // Null terminated, let ::strlen figure out the length. Name = Sec->Name; else // Not null terminated, use all 8 bytes. Name = StringRef(Sec->Name, COFF::NameSize); // Check for string table entry. First byte is '/'. if (Name.startswith("/")) { uint32_t Offset; if (Name.startswith("//")) { if (decodeBase64StringEntry(Name.substr(2), Offset)) return object_error::parse_failed; } else { if (Name.substr(1).getAsInteger(10, Offset)) return object_error::parse_failed; } if (std::error_code EC = getString(Offset, Name)) return EC; } Res = Name; return std::error_code(); } uint64_t COFFObjectFile::getSectionSize(const coff_section *Sec) const { // SizeOfRawData and VirtualSize change what they represent depending on // whether or not we have an executable image. // // For object files, SizeOfRawData contains the size of section's data; // VirtualSize should be zero but isn't due to buggy COFF writers. // // For executables, SizeOfRawData *must* be a multiple of FileAlignment; the // actual section size is in VirtualSize. It is possible for VirtualSize to // be greater than SizeOfRawData; the contents past that point should be // considered to be zero. if (getDOSHeader()) return std::min(Sec->VirtualSize, Sec->SizeOfRawData); return Sec->SizeOfRawData; } std::error_code COFFObjectFile::getSectionContents(const coff_section *Sec, ArrayRef<uint8_t> &Res) const { // PointerToRawData and SizeOfRawData won't make sense for BSS sections, // don't do anything interesting for them. assert((Sec->Characteristics & COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA) == 0 && "BSS sections don't have contents!"); // The only thing that we need to verify is that the contents is contained // within the file bounds. We don't need to make sure it doesn't cover other // data, as there's nothing that says that is not allowed. 
uintptr_t ConStart = uintptr_t(base()) + Sec->PointerToRawData; uint32_t SectionSize = getSectionSize(Sec); if (checkOffset(Data, ConStart, SectionSize)) return object_error::parse_failed; Res = makeArrayRef(reinterpret_cast<const uint8_t *>(ConStart), SectionSize); return std::error_code(); } const coff_relocation *COFFObjectFile::toRel(DataRefImpl Rel) const { return reinterpret_cast<const coff_relocation*>(Rel.p); } void COFFObjectFile::moveRelocationNext(DataRefImpl &Rel) const { Rel.p = reinterpret_cast<uintptr_t>( reinterpret_cast<const coff_relocation*>(Rel.p) + 1); } uint64_t COFFObjectFile::getRelocationOffset(DataRefImpl Rel) const { const coff_relocation *R = toRel(Rel); return R->VirtualAddress; } symbol_iterator COFFObjectFile::getRelocationSymbol(DataRefImpl Rel) const { const coff_relocation *R = toRel(Rel); DataRefImpl Ref; if (R->SymbolTableIndex >= getNumberOfSymbols()) return symbol_end(); if (SymbolTable16) Ref.p = reinterpret_cast<uintptr_t>(SymbolTable16 + R->SymbolTableIndex); else if (SymbolTable32) Ref.p = reinterpret_cast<uintptr_t>(SymbolTable32 + R->SymbolTableIndex); else llvm_unreachable("no symbol table pointer!"); return symbol_iterator(SymbolRef(Ref, this)); } uint64_t COFFObjectFile::getRelocationType(DataRefImpl Rel) const { const coff_relocation* R = toRel(Rel); return R->Type; } const coff_section * COFFObjectFile::getCOFFSection(const SectionRef &Section) const { return toSec(Section.getRawDataRefImpl()); } COFFSymbolRef COFFObjectFile::getCOFFSymbol(const DataRefImpl &Ref) const { if (SymbolTable16) return toSymb<coff_symbol16>(Ref); if (SymbolTable32) return toSymb<coff_symbol32>(Ref); llvm_unreachable("no symbol table pointer!"); } COFFSymbolRef COFFObjectFile::getCOFFSymbol(const SymbolRef &Symbol) const { return getCOFFSymbol(Symbol.getRawDataRefImpl()); } const coff_relocation * COFFObjectFile::getCOFFRelocation(const RelocationRef &Reloc) const { return toRel(Reloc.getRawDataRefImpl()); } iterator_range<const coff_relocation *> COFFObjectFile::getRelocations(const coff_section *Sec) const { const coff_relocation *I = getFirstReloc(Sec, Data, base()); const coff_relocation *E = I; if (I) E += getNumberOfRelocations(Sec, Data, base()); return make_range(I, E); } #define LLVM_COFF_SWITCH_RELOC_TYPE_NAME(reloc_type) \ case COFF::reloc_type: \ Res = #reloc_type; \ break; void COFFObjectFile::getRelocationTypeName( DataRefImpl Rel, SmallVectorImpl<char> &Result) const { const coff_relocation *Reloc = toRel(Rel); StringRef Res; switch (getMachine()) { case COFF::IMAGE_FILE_MACHINE_AMD64: switch (Reloc->Type) { LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_AMD64_ABSOLUTE); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_AMD64_ADDR64); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_AMD64_ADDR32); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_AMD64_ADDR32NB); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_AMD64_REL32); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_AMD64_REL32_1); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_AMD64_REL32_2); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_AMD64_REL32_3); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_AMD64_REL32_4); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_AMD64_REL32_5); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_AMD64_SECTION); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_AMD64_SECREL); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_AMD64_SECREL7); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_AMD64_TOKEN); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_AMD64_SREL32); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_AMD64_PAIR); 
LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_AMD64_SSPAN32); default: Res = "Unknown"; } break; case COFF::IMAGE_FILE_MACHINE_ARMNT: switch (Reloc->Type) { LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_ABSOLUTE); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_ADDR32); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_ADDR32NB); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_BRANCH24); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_BRANCH11); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_TOKEN); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_BLX24); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_BLX11); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_SECTION); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_SECREL); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_MOV32A); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_MOV32T); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_BRANCH20T); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_BRANCH24T); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_ARM_BLX23T); default: Res = "Unknown"; } break; case COFF::IMAGE_FILE_MACHINE_I386: switch (Reloc->Type) { LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_I386_ABSOLUTE); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_I386_DIR16); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_I386_REL16); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_I386_DIR32); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_I386_DIR32NB); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_I386_SEG12); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_I386_SECTION); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_I386_SECREL); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_I386_TOKEN); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_I386_SECREL7); LLVM_COFF_SWITCH_RELOC_TYPE_NAME(IMAGE_REL_I386_REL32); default: Res = "Unknown"; } break; default: Res = "Unknown"; } Result.append(Res.begin(), Res.end()); } #undef LLVM_COFF_SWITCH_RELOC_TYPE_NAME bool COFFObjectFile::isRelocatableObject() const { return !DataDirectory; } bool ImportDirectoryEntryRef:: operator==(const ImportDirectoryEntryRef &Other) const { return ImportTable == Other.ImportTable && Index == Other.Index; } void ImportDirectoryEntryRef::moveNext() { ++Index; } std::error_code ImportDirectoryEntryRef::getImportTableEntry( const import_directory_table_entry *&Result) const { Result = ImportTable + Index; return std::error_code(); } static imported_symbol_iterator makeImportedSymbolIterator(const COFFObjectFile *Object, uintptr_t Ptr, int Index) { if (Object->getBytesInAddress() == 4) { auto *P = reinterpret_cast<const import_lookup_table_entry32 *>(Ptr); return imported_symbol_iterator(ImportedSymbolRef(P, Index, Object)); } auto *P = reinterpret_cast<const import_lookup_table_entry64 *>(Ptr); return imported_symbol_iterator(ImportedSymbolRef(P, Index, Object)); } static imported_symbol_iterator importedSymbolBegin(uint32_t RVA, const COFFObjectFile *Object) { uintptr_t IntPtr = 0; Object->getRvaPtr(RVA, IntPtr); return makeImportedSymbolIterator(Object, IntPtr, 0); } static imported_symbol_iterator importedSymbolEnd(uint32_t RVA, const COFFObjectFile *Object) { uintptr_t IntPtr = 0; Object->getRvaPtr(RVA, IntPtr); // Forward the pointer to the last entry which is null. 
int Index = 0; if (Object->getBytesInAddress() == 4) { auto *Entry = reinterpret_cast<ulittle32_t *>(IntPtr); while (*Entry++) ++Index; } else { auto *Entry = reinterpret_cast<ulittle64_t *>(IntPtr); while (*Entry++) ++Index; } return makeImportedSymbolIterator(Object, IntPtr, Index); } imported_symbol_iterator ImportDirectoryEntryRef::imported_symbol_begin() const { return importedSymbolBegin(ImportTable[Index].ImportLookupTableRVA, OwningObject); } imported_symbol_iterator ImportDirectoryEntryRef::imported_symbol_end() const { return importedSymbolEnd(ImportTable[Index].ImportLookupTableRVA, OwningObject); } iterator_range<imported_symbol_iterator> ImportDirectoryEntryRef::imported_symbols() const { return make_range(imported_symbol_begin(), imported_symbol_end()); } std::error_code ImportDirectoryEntryRef::getName(StringRef &Result) const { uintptr_t IntPtr = 0; if (std::error_code EC = OwningObject->getRvaPtr(ImportTable[Index].NameRVA, IntPtr)) return EC; Result = StringRef(reinterpret_cast<const char *>(IntPtr)); return std::error_code(); } std::error_code ImportDirectoryEntryRef::getImportLookupTableRVA(uint32_t &Result) const { Result = ImportTable[Index].ImportLookupTableRVA; return std::error_code(); } std::error_code ImportDirectoryEntryRef::getImportAddressTableRVA(uint32_t &Result) const { Result = ImportTable[Index].ImportAddressTableRVA; return std::error_code(); } std::error_code ImportDirectoryEntryRef::getImportLookupEntry( const import_lookup_table_entry32 *&Result) const { uintptr_t IntPtr = 0; uint32_t RVA = ImportTable[Index].ImportLookupTableRVA; if (std::error_code EC = OwningObject->getRvaPtr(RVA, IntPtr)) return EC; Result = reinterpret_cast<const import_lookup_table_entry32 *>(IntPtr); return std::error_code(); } bool DelayImportDirectoryEntryRef:: operator==(const DelayImportDirectoryEntryRef &Other) const { return Table == Other.Table && Index == Other.Index; } void DelayImportDirectoryEntryRef::moveNext() { ++Index; } imported_symbol_iterator DelayImportDirectoryEntryRef::imported_symbol_begin() const { return importedSymbolBegin(Table[Index].DelayImportNameTable, OwningObject); } imported_symbol_iterator DelayImportDirectoryEntryRef::imported_symbol_end() const { return importedSymbolEnd(Table[Index].DelayImportNameTable, OwningObject); } iterator_range<imported_symbol_iterator> DelayImportDirectoryEntryRef::imported_symbols() const { return make_range(imported_symbol_begin(), imported_symbol_end()); } std::error_code DelayImportDirectoryEntryRef::getName(StringRef &Result) const { uintptr_t IntPtr = 0; if (std::error_code EC = OwningObject->getRvaPtr(Table[Index].Name, IntPtr)) return EC; Result = StringRef(reinterpret_cast<const char *>(IntPtr)); return std::error_code(); } std::error_code DelayImportDirectoryEntryRef:: getDelayImportTable(const delay_import_directory_table_entry *&Result) const { Result = Table; return std::error_code(); } std::error_code DelayImportDirectoryEntryRef:: getImportAddress(int AddrIndex, uint64_t &Result) const { uint32_t RVA = Table[Index].DelayImportAddressTable + AddrIndex * (OwningObject->is64() ? 
8 : 4); uintptr_t IntPtr = 0; if (std::error_code EC = OwningObject->getRvaPtr(RVA, IntPtr)) return EC; if (OwningObject->is64()) Result = *reinterpret_cast<const ulittle64_t *>(IntPtr); else Result = *reinterpret_cast<const ulittle32_t *>(IntPtr); return std::error_code(); } bool ExportDirectoryEntryRef:: operator==(const ExportDirectoryEntryRef &Other) const { return ExportTable == Other.ExportTable && Index == Other.Index; } void ExportDirectoryEntryRef::moveNext() { ++Index; } // Returns the name of the current export symbol. If the symbol is exported only // by ordinal, the empty string is set as a result. std::error_code ExportDirectoryEntryRef::getDllName(StringRef &Result) const { uintptr_t IntPtr = 0; if (std::error_code EC = OwningObject->getRvaPtr(ExportTable->NameRVA, IntPtr)) return EC; Result = StringRef(reinterpret_cast<const char *>(IntPtr)); return std::error_code(); } // Returns the starting ordinal number. std::error_code ExportDirectoryEntryRef::getOrdinalBase(uint32_t &Result) const { Result = ExportTable->OrdinalBase; return std::error_code(); } // Returns the export ordinal of the current export symbol. std::error_code ExportDirectoryEntryRef::getOrdinal(uint32_t &Result) const { Result = ExportTable->OrdinalBase + Index; return std::error_code(); } // Returns the address of the current export symbol. std::error_code ExportDirectoryEntryRef::getExportRVA(uint32_t &Result) const { uintptr_t IntPtr = 0; if (std::error_code EC = OwningObject->getRvaPtr(ExportTable->ExportAddressTableRVA, IntPtr)) return EC; const export_address_table_entry *entry = reinterpret_cast<const export_address_table_entry *>(IntPtr); Result = entry[Index].ExportRVA; return std::error_code(); } // Returns the name of the current export symbol. If the symbol is exported only // by ordinal, the empty string is set as a result. std::error_code ExportDirectoryEntryRef::getSymbolName(StringRef &Result) const { uintptr_t IntPtr = 0; if (std::error_code EC = OwningObject->getRvaPtr(ExportTable->OrdinalTableRVA, IntPtr)) return EC; const ulittle16_t *Start = reinterpret_cast<const ulittle16_t *>(IntPtr); uint32_t NumEntries = ExportTable->NumberOfNamePointers; int Offset = 0; for (const ulittle16_t *I = Start, *E = Start + NumEntries; I < E; ++I, ++Offset) { if (*I != Index) continue; if (std::error_code EC = OwningObject->getRvaPtr(ExportTable->NamePointerRVA, IntPtr)) return EC; const ulittle32_t *NamePtr = reinterpret_cast<const ulittle32_t *>(IntPtr); if (std::error_code EC = OwningObject->getRvaPtr(NamePtr[Offset], IntPtr)) return EC; Result = StringRef(reinterpret_cast<const char *>(IntPtr)); return std::error_code(); } Result = ""; return std::error_code(); } bool ImportedSymbolRef:: operator==(const ImportedSymbolRef &Other) const { return Entry32 == Other.Entry32 && Entry64 == Other.Entry64 && Index == Other.Index; } void ImportedSymbolRef::moveNext() { ++Index; } std::error_code ImportedSymbolRef::getSymbolName(StringRef &Result) const { uint32_t RVA; if (Entry32) { // If a symbol is imported only by ordinal, it has no name. if (Entry32[Index].isOrdinal()) return std::error_code(); RVA = Entry32[Index].getHintNameRVA(); } else { if (Entry64[Index].isOrdinal()) return std::error_code(); RVA = Entry64[Index].getHintNameRVA(); } uintptr_t IntPtr = 0; if (std::error_code EC = OwningObject->getRvaPtr(RVA, IntPtr)) return EC; // +2 because the first two bytes is hint. 
Result = StringRef(reinterpret_cast<const char *>(IntPtr + 2)); return std::error_code(); } std::error_code ImportedSymbolRef::getOrdinal(uint16_t &Result) const { uint32_t RVA; if (Entry32) { if (Entry32[Index].isOrdinal()) { Result = Entry32[Index].getOrdinal(); return std::error_code(); } RVA = Entry32[Index].getHintNameRVA(); } else { if (Entry64[Index].isOrdinal()) { Result = Entry64[Index].getOrdinal(); return std::error_code(); } RVA = Entry64[Index].getHintNameRVA(); } uintptr_t IntPtr = 0; if (std::error_code EC = OwningObject->getRvaPtr(RVA, IntPtr)) return EC; Result = *reinterpret_cast<const ulittle16_t *>(IntPtr); return std::error_code(); } ErrorOr<std::unique_ptr<COFFObjectFile>> ObjectFile::createCOFFObjectFile(MemoryBufferRef Object) { std::error_code EC; std::unique_ptr<COFFObjectFile> Ret(new COFFObjectFile(Object, EC)); if (EC) return EC; return std::move(Ret); } bool BaseRelocRef::operator==(const BaseRelocRef &Other) const { return Header == Other.Header && Index == Other.Index; } void BaseRelocRef::moveNext() { // Header->BlockSize is the size of the current block, including the // size of the header itself. uint32_t Size = sizeof(*Header) + sizeof(coff_base_reloc_block_entry) * (Index + 1); if (Size == Header->BlockSize) { // .reloc contains a list of base relocation blocks. Each block // consists of the header followed by entries. The header contains // how many entories will follow. When we reach the end of the // current block, proceed to the next block. Header = reinterpret_cast<const coff_base_reloc_block_header *>( reinterpret_cast<const uint8_t *>(Header) + Size); Index = 0; } else { ++Index; } } std::error_code BaseRelocRef::getType(uint8_t &Type) const { auto *Entry = reinterpret_cast<const coff_base_reloc_block_entry *>(Header + 1); Type = Entry[Index].getType(); return std::error_code(); } std::error_code BaseRelocRef::getRVA(uint32_t &Result) const { auto *Entry = reinterpret_cast<const coff_base_reloc_block_entry *>(Header + 1); Result = Header->PageRVA + Entry[Index].getOffset(); return std::error_code(); }
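Not part of the original file: a sketch of one way a client might drive the readers defined above. createCOFFObjectFile, getCOFFSection, getSectionName, getSectionSize, and the import-directory iterators are the accessors implemented in this file; the wrapper function name and the printed format are assumptions for illustration, and error handling is abbreviated.

#include "llvm/Object/COFF.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
using namespace llvm::object;

// Hypothetical summary printer built on the APIs defined in this file.
static std::error_code printCOFFSummary(StringRef Path) {
  ErrorOr<std::unique_ptr<MemoryBuffer>> BufOrErr = MemoryBuffer::getFile(Path);
  if (std::error_code EC = BufOrErr.getError())
    return EC;

  ErrorOr<std::unique_ptr<COFFObjectFile>> ObjOrErr =
      ObjectFile::createCOFFObjectFile((*BufOrErr)->getMemBufferRef());
  if (std::error_code EC = ObjOrErr.getError())
    return EC;
  COFFObjectFile &Obj = **ObjOrErr;

  outs() << Obj.getFileFormatName() << "\n";

  // Walk the section table using the accessors defined above.
  for (const SectionRef &S : Obj.sections()) {
    const coff_section *Sec = Obj.getCOFFSection(S);
    StringRef Name;
    if (!Obj.getSectionName(Sec, Name))
      outs() << "  section " << Name << " size " << Obj.getSectionSize(Sec)
             << "\n";
  }

  // Imported DLLs, if this is an image with an import table.
  for (const ImportDirectoryEntryRef &I : Obj.import_directories()) {
    StringRef DLL;
    if (!I.getName(DLL))
      outs() << "  imports from " << DLL << "\n";
  }
  return std::error_code();
}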
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/CodeGen/LatencyPriorityQueue.cpp
//===---- LatencyPriorityQueue.cpp - A latency-oriented priority queue ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LatencyPriorityQueue class, which is a
// SchedulingPriorityQueue that schedules using latency information to
// reduce the length of the critical path through the basic block.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "scheduler"

bool latency_sort::operator()(const SUnit *LHS, const SUnit *RHS) const {
  // The isScheduleHigh flag allows nodes with wraparound dependencies that
  // cannot easily be modeled as edges with latencies to be scheduled as
  // soon as possible in a top-down schedule.
  if (LHS->isScheduleHigh && !RHS->isScheduleHigh)
    return false;
  if (!LHS->isScheduleHigh && RHS->isScheduleHigh)
    return true;

  unsigned LHSNum = LHS->NodeNum;
  unsigned RHSNum = RHS->NodeNum;

  // The most important heuristic is scheduling the critical path.
  unsigned LHSLatency = PQ->getLatency(LHSNum);
  unsigned RHSLatency = PQ->getLatency(RHSNum);
  if (LHSLatency < RHSLatency) return true;
  if (LHSLatency > RHSLatency) return false;

  // After that, if two nodes have identical latencies, look to see if one will
  // unblock more other nodes than the other.
  unsigned LHSBlocked = PQ->getNumSolelyBlockNodes(LHSNum);
  unsigned RHSBlocked = PQ->getNumSolelyBlockNodes(RHSNum);
  if (LHSBlocked < RHSBlocked) return true;
  if (LHSBlocked > RHSBlocked) return false;

  // Finally, just to provide a stable ordering, use the node number as a
  // deciding factor.
  return RHSNum < LHSNum;
}

/// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor
/// of SU, return it, otherwise return null.
SUnit *LatencyPriorityQueue::getSingleUnscheduledPred(SUnit *SU) {
  SUnit *OnlyAvailablePred = nullptr;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    SUnit &Pred = *I->getSUnit();
    if (!Pred.isScheduled) {
      // We found an available, but not scheduled, predecessor.  If it's the
      // only one we have found, keep track of it... otherwise give up.
      if (OnlyAvailablePred && OnlyAvailablePred != &Pred)
        return nullptr;
      OnlyAvailablePred = &Pred;
    }
  }

  return OnlyAvailablePred;
}

void LatencyPriorityQueue::push(SUnit *SU) {
  // Look at all of the successors of this node.  Count the number of nodes
  // that this node is the sole unscheduled node for.
  unsigned NumNodesBlocking = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (getSingleUnscheduledPred(I->getSUnit()) == SU)
      ++NumNodesBlocking;
  }
  NumNodesSolelyBlocking[SU->NodeNum] = NumNodesBlocking;

  Queue.push_back(SU);
}

// scheduledNode - As nodes are scheduled, we look to see if there are any
// successor nodes that have a single unscheduled predecessor.  If so, that
// single predecessor has a higher priority, since scheduling it will make
// the node available.
void LatencyPriorityQueue::scheduledNode(SUnit *SU) {
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    AdjustPriorityOfUnscheduledPreds(I->getSUnit());
  }
}

/// AdjustPriorityOfUnscheduledPreds - One of the predecessors of SU was just
/// scheduled.  If SU is not itself available, then there is at least one
/// predecessor node that has not been scheduled yet.  If SU has exactly ONE
/// unscheduled predecessor, we want to increase its priority: it getting
/// scheduled will make this node available, so it is better than some other
/// node of the same priority that will not make a node available.
void LatencyPriorityQueue::AdjustPriorityOfUnscheduledPreds(SUnit *SU) {
  if (SU->isAvailable) return;  // All preds scheduled.

  SUnit *OnlyAvailablePred = getSingleUnscheduledPred(SU);
  if (!OnlyAvailablePred || !OnlyAvailablePred->isAvailable) return;

  // Okay, we found a single predecessor that is available, but not scheduled.
  // Since it is available, it must be in the priority queue.  First remove it.
  remove(OnlyAvailablePred);

  // Reinsert the node into the priority queue, which recomputes its
  // NumNodesSolelyBlocking value.
  push(OnlyAvailablePred);
}

SUnit *LatencyPriorityQueue::pop() {
  if (empty()) return nullptr;
  std::vector<SUnit *>::iterator Best = Queue.begin();
  for (std::vector<SUnit *>::iterator I = std::next(Queue.begin()),
       E = Queue.end(); I != E; ++I)
    if (Picker(*Best, *I))
      Best = I;
  SUnit *V = *Best;
  if (Best != std::prev(Queue.end()))
    std::swap(*Best, Queue.back());
  Queue.pop_back();
  return V;
}

void LatencyPriorityQueue::remove(SUnit *SU) {
  assert(!Queue.empty() && "Queue is empty!");
  std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(), SU);
  if (I != std::prev(Queue.end()))
    std::swap(*I, Queue.back());
  Queue.pop_back();
}
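The latency_sort comparator above encodes a strict priority order: schedule-high nodes come first, then the node with the larger latency to the end of the DAG (the critical path), then the node that solely blocks more successors, and finally the lower node number as a stable tie-break. What follows is a minimal standalone sketch of that same ordering, not part of the repository: Node, the sample values, and pickBest are hypothetical stand-ins for SUnit and LatencyPriorityQueue::pop.

// Illustrative sketch only -- not from DirectXShaderCompiler. Node, the sample
// data, and pickBest are hypothetical stand-ins for SUnit and
// LatencyPriorityQueue::pop; the comparator mirrors latency_sort's tie-breaks.
#include <cstdio>
#include <vector>

struct Node {
  unsigned NodeNum;
  bool     IsScheduleHigh;  // wraparound-dependency hint, scheduled ASAP
  unsigned Latency;         // latency to the end of the critical path
  unsigned SolelyBlocked;   // successors this node alone is blocking
};

// Returns true when LHS should be treated as LOWER priority than RHS,
// mirroring latency_sort::operator().
static bool lowerPriority(const Node &LHS, const Node &RHS) {
  if (LHS.IsScheduleHigh && !RHS.IsScheduleHigh) return false;
  if (!LHS.IsScheduleHigh && RHS.IsScheduleHigh) return true;
  if (LHS.Latency != RHS.Latency) return LHS.Latency < RHS.Latency;
  if (LHS.SolelyBlocked != RHS.SolelyBlocked)
    return LHS.SolelyBlocked < RHS.SolelyBlocked;
  return RHS.NodeNum < LHS.NodeNum;  // lower NodeNum wins the final tie
}

// Linear scan for the highest-priority node, like LatencyPriorityQueue::pop().
static const Node &pickBest(const std::vector<Node> &Ready) {
  const Node *Best = &Ready.front();
  for (const Node &N : Ready)
    if (lowerPriority(*Best, N))
      Best = &N;
  return *Best;
}

int main() {
  std::vector<Node> Ready = {
      {0, false, 7, 1},  // on the critical path
      {1, false, 7, 3},  // same latency, unblocks more nodes than node 0
      {2, true,  2, 0},  // schedule-high: always picked first
  };
  std::printf("picked node %u\n", pickBest(Ready).NodeNum);  // prints: picked node 2
  return 0;
}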
0
repos/DirectXShaderCompiler/lib
repos/DirectXShaderCompiler/lib/CodeGen/CriticalAntiDepBreaker.h
//=- llvm/CodeGen/CriticalAntiDepBreaker.h - Anti-Dep Support -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the CriticalAntiDepBreaker class, which
// implements register anti-dependence breaking along a block's
// critical path during the post-RA scheduler.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_CODEGEN_CRITICALANTIDEPBREAKER_H
#define LLVM_LIB_CODEGEN_CRITICALANTIDEPBREAKER_H

#include "AntiDepBreaker.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include <map>

namespace llvm {
class RegisterClassInfo;
class TargetInstrInfo;
class TargetRegisterInfo;

class LLVM_LIBRARY_VISIBILITY CriticalAntiDepBreaker : public AntiDepBreaker {
    MachineFunction& MF;
    MachineRegisterInfo &MRI;
    const TargetInstrInfo *TII;
    const TargetRegisterInfo *TRI;
    const RegisterClassInfo &RegClassInfo;

    /// The set of allocatable registers.
    /// We'll be ignoring anti-dependencies on non-allocatable registers,
    /// because they may not be safe to break.
    const BitVector AllocatableSet;

    /// For live regs that are only used in one register class in a
    /// live range, the register class.  If the register is not live, the
    /// corresponding value is null.  If the register is live but used in
    /// multiple register classes, the corresponding value is -1 casted to a
    /// pointer.
    std::vector<const TargetRegisterClass*> Classes;

    /// Map registers to all their references within a live range.
    std::multimap<unsigned, MachineOperand *> RegRefs;
    typedef std::multimap<unsigned, MachineOperand *>::const_iterator
      RegRefIter;

    /// The index of the most recent kill (proceeding bottom-up),
    /// or ~0u if the register is not live.
    std::vector<unsigned> KillIndices;

    /// The index of the most recent complete def (proceeding
    /// bottom up), or ~0u if the register is live.
    std::vector<unsigned> DefIndices;

    /// A set of registers which are live and cannot be changed to
    /// break anti-dependencies.
    BitVector KeepRegs;

  public:
    CriticalAntiDepBreaker(MachineFunction& MFi, const RegisterClassInfo&);
    ~CriticalAntiDepBreaker() override;

    /// Initialize anti-dep breaking for a new basic block.
    void StartBlock(MachineBasicBlock *BB) override;

    /// Identify anti-dependencies along the critical path
    /// of the ScheduleDAG and break them by renaming registers.
    unsigned BreakAntiDependencies(const std::vector<SUnit>& SUnits,
                                   MachineBasicBlock::iterator Begin,
                                   MachineBasicBlock::iterator End,
                                   unsigned InsertPosIndex,
                                   DbgValueVector &DbgValues) override;

    /// Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    void Observe(MachineInstr *MI, unsigned Count,
                 unsigned InsertPosIndex) override;

    /// Finish anti-dep breaking for a basic block.
    void FinishBlock() override;

  private:
    void PrescanInstruction(MachineInstr *MI);
    void ScanInstruction(MachineInstr *MI, unsigned Count);
    bool isNewRegClobberedByRefs(RegRefIter RegRefBegin,
                                 RegRefIter RegRefEnd,
                                 unsigned NewReg);
    unsigned findSuitableFreeRegister(RegRefIter RegRefBegin,
                                      RegRefIter RegRefEnd,
                                      unsigned AntiDepReg,
                                      unsigned LastNewReg,
                                      const TargetRegisterClass *RC,
                                      SmallVectorImpl<unsigned> &Forbid);
  };
}

#endif
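The interface above is used per basic block: StartBlock initializes state for the block, BreakAntiDependencies identifies anti-dependencies along the ScheduleDAG's critical path and breaks them by renaming registers, Observe updates liveness for instructions that will not be scheduled, and FinishBlock tears the state down. The core transformation, renaming a later redefinition of a register so it no longer has to wait for earlier readers (a write-after-read, or anti, dependence), is shown in the minimal standalone sketch below; Instr, the register numbering, and breakOneAntiDep are hypothetical simplifications, not the class's actual algorithm, which additionally tracks register classes, kill/def indices, and forbidden registers.

// Illustrative sketch only -- not from DirectXShaderCompiler. Instr and
// breakOneAntiDep are hypothetical simplifications of anti-dependence
// breaking by renaming; real MachineInstrs, register classes, and the
// KillIndices/DefIndices bookkeeping are omitted.
#include <cstddef>
#include <cstdio>
#include <vector>

struct Instr {
  int def = -1;           // register defined, or -1 for none
  std::vector<int> uses;  // registers read
};

// Rename the live range that starts at the redefinition of 'reg' at index
// 'redefIdx' to 'freeReg', removing the write-after-read (anti) dependence
// on earlier readers of 'reg'.
static void breakOneAntiDep(std::vector<Instr> &code, std::size_t redefIdx,
                            int reg, int freeReg) {
  code[redefIdx].def = freeReg;
  for (std::size_t i = redefIdx + 1; i < code.size(); ++i) {
    for (int &u : code[i].uses)
      if (u == reg) u = freeReg;    // readers of the new value follow the rename
    if (code[i].def == reg) break;  // a later redefinition starts a new live range
  }
}

int main() {
  // r1 is reused for two unrelated values, so I2 must wait for I1 to read r1.
  std::vector<Instr> code = {
      {/*def=*/1, {}},   // I0: r1 = ...
      {/*def=*/2, {1}},  // I1: r2 = use r1   (last read of the old value)
      {/*def=*/1, {3}},  // I2: r1 = use r3   <- anti-dependence on I1
      {/*def=*/4, {1}},  // I3: r4 = use r1   (reads the value defined at I2)
  };

  breakOneAntiDep(code, /*redefIdx=*/2, /*reg=*/1, /*freeReg=*/5);

  // After renaming, I2 defines r5 and I3 reads r5; the WAR hazard is gone.
  for (std::size_t i = 0; i < code.size(); ++i) {
    std::printf("I%zu: def r%d uses", i, code[i].def);
    for (int u : code[i].uses) std::printf(" r%d", u);
    std::printf("\n");
  }
  return 0;
}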