Dataset schema (one row per source file):

  column       type     range
  Unnamed: 0   int64    0 to 0 (leftover unnamed index column from the export)
  repo_id      string   lengths 5 to 186
  file_path    string   lengths 15 to 223
  content      string   lengths 1 to 32.8M
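As a minimal sketch of how rows with this schema could be iterated, assuming the dump were available as a CSV export with the four columns above (the filename below is hypothetical and not part of the original dump):

```python
import pandas as pd

# Hypothetical filename; the original dump does not name its export file.
df = pd.read_csv("directxshadercompiler_transforms.csv")

# Each row pairs a repository prefix and a file path with that file's full text.
for row in df.itertuples(index=False):
    print(row.repo_id, row.file_path, len(row.content))
```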
repo_id:   repos/DirectXShaderCompiler/lib/Transforms
file_path: repos/DirectXShaderCompiler/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.h
content:
//===- ObjCARCAliasAnalysis.h - ObjC ARC Optimization -*- C++ -*-----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file declares a simple ARC-aware AliasAnalysis using special knowledge
/// of Objective C to enhance other optimization passes which rely on the Alias
/// Analysis infrastructure.
///
/// WARNING: This file knows about certain library functions. It recognizes them
/// by name, and hardwires knowledge of their semantics.
///
/// WARNING: This file knows about how certain Objective-C library functions are
/// used. Naive LLVM IR transformations which would otherwise be
/// behavior-preserving may break these assumptions.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TRANSFORMS_OBJCARC_OBJCARCALIASANALYSIS_H
#define LLVM_LIB_TRANSFORMS_OBJCARC_OBJCARCALIASANALYSIS_H

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Pass.h"

namespace llvm {
namespace objcarc {

/// \brief This is a simple alias analysis implementation that uses knowledge
/// of ARC constructs to answer queries.
///
/// TODO: This class could be generalized to know about other ObjC-specific
/// tricks. Such as knowing that ivars in the non-fragile ABI are non-aliasing
/// even though their offsets are dynamic.
class ObjCARCAliasAnalysis : public ImmutablePass, public AliasAnalysis {
public:
  static char ID; // Class identification, replacement for typeinfo
  ObjCARCAliasAnalysis() : ImmutablePass(ID) {
    initializeObjCARCAliasAnalysisPass(*PassRegistry::getPassRegistry());
  }

private:
  bool doInitialization(Module &M) override;

  /// This method is used when a pass implements an analysis interface through
  /// multiple inheritance. If needed, it should override this to adjust the
  /// this pointer as needed for the specified pass info.
  void *getAdjustedAnalysisPointer(const void *PI) override {
    if (PI == &AliasAnalysis::ID)
      return static_cast<AliasAnalysis *>(this);
    return this;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;
  AliasResult alias(const MemoryLocation &LocA,
                    const MemoryLocation &LocB) override;
  bool pointsToConstantMemory(const MemoryLocation &Loc,
                              bool OrLocal) override;
  ModRefBehavior getModRefBehavior(ImmutableCallSite CS) override;
  ModRefBehavior getModRefBehavior(const Function *F) override;
  ModRefResult getModRefInfo(ImmutableCallSite CS,
                             const MemoryLocation &Loc) override;
  ModRefResult getModRefInfo(ImmutableCallSite CS1,
                             ImmutableCallSite CS2) override;
};

} // namespace objcarc
} // namespace llvm

#endif
repo_id:   repos/DirectXShaderCompiler/lib/Transforms
file_path: repos/DirectXShaderCompiler/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp
content:
//===- ObjCARCAPElim.cpp - ObjC ARC Optimization --------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// \file /// /// This file defines ObjC ARC optimizations. ARC stands for Automatic /// Reference Counting and is a system for managing reference counts for objects /// in Objective C. /// /// This specific file implements optimizations which remove extraneous /// autorelease pools. /// /// WARNING: This file knows about certain library functions. It recognizes them /// by name, and hardwires knowledge of their semantics. /// /// WARNING: This file knows about how certain Objective-C library functions are /// used. Naive LLVM IR transformations which would otherwise be /// behavior-preserving may break these assumptions. /// //===----------------------------------------------------------------------===// #include "ObjCARC.h" #include "llvm/ADT/STLExtras.h" #include "llvm/IR/Constants.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; using namespace llvm::objcarc; #define DEBUG_TYPE "objc-arc-ap-elim" namespace { /// \brief Autorelease pool elimination. class ObjCARCAPElim : public ModulePass { void getAnalysisUsage(AnalysisUsage &AU) const override; bool runOnModule(Module &M) override; static bool MayAutorelease(ImmutableCallSite CS, unsigned Depth = 0); static bool OptimizeBB(BasicBlock *BB); public: static char ID; ObjCARCAPElim() : ModulePass(ID) { initializeObjCARCAPElimPass(*PassRegistry::getPassRegistry()); } }; } char ObjCARCAPElim::ID = 0; INITIALIZE_PASS(ObjCARCAPElim, "objc-arc-apelim", "ObjC ARC autorelease pool elimination", false, false) Pass *llvm::createObjCARCAPElimPass() { return new ObjCARCAPElim(); } void ObjCARCAPElim::getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesCFG(); } /// Interprocedurally determine if calls made by the given call site can /// possibly produce autoreleases. bool ObjCARCAPElim::MayAutorelease(ImmutableCallSite CS, unsigned Depth) { if (const Function *Callee = CS.getCalledFunction()) { if (Callee->isDeclaration() || Callee->mayBeOverridden()) return true; for (Function::const_iterator I = Callee->begin(), E = Callee->end(); I != E; ++I) { const BasicBlock *BB = I; for (BasicBlock::const_iterator J = BB->begin(), F = BB->end(); J != F; ++J) if (ImmutableCallSite JCS = ImmutableCallSite(J)) // This recursion depth limit is arbitrary. It's just great // enough to cover known interesting testcases. if (Depth < 3 && !JCS.onlyReadsMemory() && MayAutorelease(JCS, Depth + 1)) return true; } return false; } return true; } bool ObjCARCAPElim::OptimizeBB(BasicBlock *BB) { bool Changed = false; Instruction *Push = nullptr; for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) { Instruction *Inst = I++; switch (GetBasicARCInstKind(Inst)) { case ARCInstKind::AutoreleasepoolPush: Push = Inst; break; case ARCInstKind::AutoreleasepoolPop: // If this pop matches a push and nothing in between can autorelease, // zap the pair. 
if (Push && cast<CallInst>(Inst)->getArgOperand(0) == Push) { Changed = true; DEBUG(dbgs() << "ObjCARCAPElim::OptimizeBB: Zapping push pop " "autorelease pair:\n" " Pop: " << *Inst << "\n" << " Push: " << *Push << "\n"); Inst->eraseFromParent(); Push->eraseFromParent(); } Push = nullptr; break; case ARCInstKind::CallOrUser: if (MayAutorelease(ImmutableCallSite(Inst))) Push = nullptr; break; default: break; } } return Changed; } bool ObjCARCAPElim::runOnModule(Module &M) { if (!EnableARCOpts) return false; // If nothing in the Module uses ARC, don't do anything. if (!ModuleHasARC(M)) return false; // Find the llvm.global_ctors variable, as the first step in // identifying the global constructors. In theory, unnecessary autorelease // pools could occur anywhere, but in practice it's pretty rare. Global // ctors are a place where autorelease pools get inserted automatically, // so it's pretty common for them to be unnecessary, and it's pretty // profitable to eliminate them. GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors"); if (!GV) return false; assert(GV->hasDefinitiveInitializer() && "llvm.global_ctors is uncooperative!"); bool Changed = false; // Dig the constructor functions out of GV's initializer. ConstantArray *Init = cast<ConstantArray>(GV->getInitializer()); for (User::op_iterator OI = Init->op_begin(), OE = Init->op_end(); OI != OE; ++OI) { Value *Op = *OI; // llvm.global_ctors is an array of three-field structs where the second // members are constructor functions. Function *F = dyn_cast<Function>(cast<ConstantStruct>(Op)->getOperand(1)); // If the user used a constructor function with the wrong signature and // it got bitcasted or whatever, look the other way. if (!F) continue; // Only look at function definitions. if (F->isDeclaration()) continue; // Only look at functions with one basic block. if (std::next(F->begin()) != F->end()) continue; // Ok, a single-block constructor function definition. Try to optimize it. Changed |= OptimizeBB(F->begin()); } return Changed; }
repo_id:   repos/DirectXShaderCompiler/lib/Transforms
file_path: repos/DirectXShaderCompiler/lib/Transforms/ObjCARC/DependencyAnalysis.cpp
content:
//===- DependencyAnalysis.cpp - ObjC ARC Optimization ---------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// \file /// /// This file defines special dependency analysis routines used in Objective C /// ARC Optimizations. /// /// WARNING: This file knows about certain library functions. It recognizes them /// by name, and hardwires knowledge of their semantics. /// /// WARNING: This file knows about how certain Objective-C library functions are /// used. Naive LLVM IR transformations which would otherwise be /// behavior-preserving may break these assumptions. /// //===----------------------------------------------------------------------===// #include "ObjCARC.h" #include "DependencyAnalysis.h" #include "ProvenanceAnalysis.h" #include "llvm/IR/CFG.h" using namespace llvm; using namespace llvm::objcarc; #define DEBUG_TYPE "objc-arc-dependency" /// Test whether the given instruction can result in a reference count /// modification (positive or negative) for the pointer's object. bool llvm::objcarc::CanAlterRefCount(const Instruction *Inst, const Value *Ptr, ProvenanceAnalysis &PA, ARCInstKind Class) { switch (Class) { case ARCInstKind::Autorelease: case ARCInstKind::AutoreleaseRV: case ARCInstKind::IntrinsicUser: case ARCInstKind::User: // These operations never directly modify a reference count. return false; default: break; } ImmutableCallSite CS(Inst); assert(CS && "Only calls can alter reference counts!"); // See if AliasAnalysis can help us with the call. AliasAnalysis::ModRefBehavior MRB = PA.getAA()->getModRefBehavior(CS); if (AliasAnalysis::onlyReadsMemory(MRB)) return false; if (AliasAnalysis::onlyAccessesArgPointees(MRB)) { const DataLayout &DL = Inst->getModule()->getDataLayout(); for (ImmutableCallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end(); I != E; ++I) { const Value *Op = *I; if (IsPotentialRetainableObjPtr(Op, *PA.getAA()) && PA.related(Ptr, Op, DL)) return true; } return false; } // Assume the worst. return true; } bool llvm::objcarc::CanDecrementRefCount(const Instruction *Inst, const Value *Ptr, ProvenanceAnalysis &PA, ARCInstKind Class) { // First perform a quick check if Class can not touch ref counts. if (!CanDecrementRefCount(Class)) return false; // Otherwise, just use CanAlterRefCount for now. return CanAlterRefCount(Inst, Ptr, PA, Class); } /// Test whether the given instruction can "use" the given pointer's object in a /// way that requires the reference count to be positive. bool llvm::objcarc::CanUse(const Instruction *Inst, const Value *Ptr, ProvenanceAnalysis &PA, ARCInstKind Class) { // ARCInstKind::Call operations (as opposed to // ARCInstKind::CallOrUser) never "use" objc pointers. if (Class == ARCInstKind::Call) return false; const DataLayout &DL = Inst->getModule()->getDataLayout(); // Consider various instructions which may have pointer arguments which are // not "uses". if (const ICmpInst *ICI = dyn_cast<ICmpInst>(Inst)) { // Comparing a pointer with null, or any other constant, isn't really a use, // because we don't care what the pointer points to, or about the values // of any other dynamic reference-counted pointers. if (!IsPotentialRetainableObjPtr(ICI->getOperand(1), *PA.getAA())) return false; } else if (auto CS = ImmutableCallSite(Inst)) { // For calls, just check the arguments (and not the callee operand). 
for (ImmutableCallSite::arg_iterator OI = CS.arg_begin(), OE = CS.arg_end(); OI != OE; ++OI) { const Value *Op = *OI; if (IsPotentialRetainableObjPtr(Op, *PA.getAA()) && PA.related(Ptr, Op, DL)) return true; } return false; } else if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) { // Special-case stores, because we don't care about the stored value, just // the store address. const Value *Op = GetUnderlyingObjCPtr(SI->getPointerOperand(), DL); // If we can't tell what the underlying object was, assume there is a // dependence. return IsPotentialRetainableObjPtr(Op, *PA.getAA()) && PA.related(Op, Ptr, DL); } // Check each operand for a match. for (User::const_op_iterator OI = Inst->op_begin(), OE = Inst->op_end(); OI != OE; ++OI) { const Value *Op = *OI; if (IsPotentialRetainableObjPtr(Op, *PA.getAA()) && PA.related(Ptr, Op, DL)) return true; } return false; } /// Test if there can be dependencies on Inst through Arg. This function only /// tests dependencies relevant for removing pairs of calls. bool llvm::objcarc::Depends(DependenceKind Flavor, Instruction *Inst, const Value *Arg, ProvenanceAnalysis &PA) { // If we've reached the definition of Arg, stop. if (Inst == Arg) return true; switch (Flavor) { case NeedsPositiveRetainCount: { ARCInstKind Class = GetARCInstKind(Inst); switch (Class) { case ARCInstKind::AutoreleasepoolPop: case ARCInstKind::AutoreleasepoolPush: case ARCInstKind::None: return false; default: return CanUse(Inst, Arg, PA, Class); } } case AutoreleasePoolBoundary: { ARCInstKind Class = GetARCInstKind(Inst); switch (Class) { case ARCInstKind::AutoreleasepoolPop: case ARCInstKind::AutoreleasepoolPush: // These mark the end and begin of an autorelease pool scope. return true; default: // Nothing else does this. return false; } } case CanChangeRetainCount: { ARCInstKind Class = GetARCInstKind(Inst); switch (Class) { case ARCInstKind::AutoreleasepoolPop: // Conservatively assume this can decrement any count. return true; case ARCInstKind::AutoreleasepoolPush: case ARCInstKind::None: return false; default: return CanAlterRefCount(Inst, Arg, PA, Class); } } case RetainAutoreleaseDep: switch (GetBasicARCInstKind(Inst)) { case ARCInstKind::AutoreleasepoolPop: case ARCInstKind::AutoreleasepoolPush: // Don't merge an objc_autorelease with an objc_retain inside a different // autoreleasepool scope. return true; case ARCInstKind::Retain: case ARCInstKind::RetainRV: // Check for a retain of the same pointer for merging. return GetArgRCIdentityRoot(Inst) == Arg; default: // Nothing else matters for objc_retainAutorelease formation. return false; } case RetainAutoreleaseRVDep: { ARCInstKind Class = GetBasicARCInstKind(Inst); switch (Class) { case ARCInstKind::Retain: case ARCInstKind::RetainRV: // Check for a retain of the same pointer for merging. return GetArgRCIdentityRoot(Inst) == Arg; default: // Anything that can autorelease interrupts // retainAutoreleaseReturnValue formation. return CanInterruptRV(Class); } } case RetainRVDep: return CanInterruptRV(GetBasicARCInstKind(Inst)); } llvm_unreachable("Invalid dependence flavor"); } /// Walk up the CFG from StartPos (which is in StartBB) and find local and /// non-local dependencies on Arg. /// /// TODO: Cache results? 
void llvm::objcarc::FindDependencies(DependenceKind Flavor, const Value *Arg, BasicBlock *StartBB, Instruction *StartInst, SmallPtrSetImpl<Instruction *> &DependingInsts, SmallPtrSetImpl<const BasicBlock *> &Visited, ProvenanceAnalysis &PA) { BasicBlock::iterator StartPos = StartInst; SmallVector<std::pair<BasicBlock *, BasicBlock::iterator>, 4> Worklist; Worklist.push_back(std::make_pair(StartBB, StartPos)); do { std::pair<BasicBlock *, BasicBlock::iterator> Pair = Worklist.pop_back_val(); BasicBlock *LocalStartBB = Pair.first; BasicBlock::iterator LocalStartPos = Pair.second; BasicBlock::iterator StartBBBegin = LocalStartBB->begin(); for (;;) { if (LocalStartPos == StartBBBegin) { pred_iterator PI(LocalStartBB), PE(LocalStartBB, false); if (PI == PE) // If we've reached the function entry, produce a null dependence. DependingInsts.insert(nullptr); else // Add the predecessors to the worklist. do { BasicBlock *PredBB = *PI; if (Visited.insert(PredBB).second) Worklist.push_back(std::make_pair(PredBB, PredBB->end())); } while (++PI != PE); break; } Instruction *Inst = --LocalStartPos; if (Depends(Flavor, Inst, Arg, PA)) { DependingInsts.insert(Inst); break; } } } while (!Worklist.empty()); // Determine whether the original StartBB post-dominates all of the blocks we // visited. If not, insert a sentinal indicating that most optimizations are // not safe. for (const BasicBlock *BB : Visited) { if (BB == StartBB) continue; const TerminatorInst *TI = cast<TerminatorInst>(&BB->back()); for (succ_const_iterator SI(TI), SE(TI, false); SI != SE; ++SI) { const BasicBlock *Succ = *SI; if (Succ != StartBB && !Visited.count(Succ)) { DependingInsts.insert(reinterpret_cast<Instruction *>(-1)); return; } } } }
repo_id:   repos/DirectXShaderCompiler/lib/Transforms
file_path: repos/DirectXShaderCompiler/lib/Transforms/ObjCARC/ProvenanceAnalysis.cpp
content:
//===- ProvenanceAnalysis.cpp - ObjC ARC Optimization ---------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// \file /// /// This file defines a special form of Alias Analysis called ``Provenance /// Analysis''. The word ``provenance'' refers to the history of the ownership /// of an object. Thus ``Provenance Analysis'' is an analysis which attempts to /// use various techniques to determine if locally /// /// WARNING: This file knows about certain library functions. It recognizes them /// by name, and hardwires knowledge of their semantics. /// /// WARNING: This file knows about how certain Objective-C library functions are /// used. Naive LLVM IR transformations which would otherwise be /// behavior-preserving may break these assumptions. /// //===----------------------------------------------------------------------===// #include "ObjCARC.h" #include "ProvenanceAnalysis.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallPtrSet.h" using namespace llvm; using namespace llvm::objcarc; bool ProvenanceAnalysis::relatedSelect(const SelectInst *A, const Value *B) { const DataLayout &DL = A->getModule()->getDataLayout(); // If the values are Selects with the same condition, we can do a more precise // check: just check for relations between the values on corresponding arms. if (const SelectInst *SB = dyn_cast<SelectInst>(B)) if (A->getCondition() == SB->getCondition()) return related(A->getTrueValue(), SB->getTrueValue(), DL) || related(A->getFalseValue(), SB->getFalseValue(), DL); // Check both arms of the Select node individually. return related(A->getTrueValue(), B, DL) || related(A->getFalseValue(), B, DL); } bool ProvenanceAnalysis::relatedPHI(const PHINode *A, const Value *B) { const DataLayout &DL = A->getModule()->getDataLayout(); // If the values are PHIs in the same block, we can do a more precise as well // as efficient check: just check for relations between the values on // corresponding edges. if (const PHINode *PNB = dyn_cast<PHINode>(B)) if (PNB->getParent() == A->getParent()) { for (unsigned i = 0, e = A->getNumIncomingValues(); i != e; ++i) if (related(A->getIncomingValue(i), PNB->getIncomingValueForBlock(A->getIncomingBlock(i)), DL)) return true; return false; } // Check each unique source of the PHI node against B. SmallPtrSet<const Value *, 4> UniqueSrc; for (Value *PV1 : A->incoming_values()) { if (UniqueSrc.insert(PV1).second && related(PV1, B, DL)) return true; } // All of the arms checked out. return false; } /// Test if the value of P, or any value covered by its provenance, is ever /// stored within the function (not counting callees). static bool IsStoredObjCPointer(const Value *P) { SmallPtrSet<const Value *, 8> Visited; SmallVector<const Value *, 8> Worklist; Worklist.push_back(P); Visited.insert(P); do { P = Worklist.pop_back_val(); for (const Use &U : P->uses()) { const User *Ur = U.getUser(); if (isa<StoreInst>(Ur)) { if (U.getOperandNo() == 0) // The pointer is stored. return true; // The pointed is stored through. continue; } if (isa<CallInst>(Ur)) // The pointer is passed as an argument, ignore this. continue; if (isa<PtrToIntInst>(P)) // Assume the worst. return true; if (Visited.insert(Ur).second) Worklist.push_back(Ur); } } while (!Worklist.empty()); // Everything checked out. 
return false; } bool ProvenanceAnalysis::relatedCheck(const Value *A, const Value *B, const DataLayout &DL) { // Skip past provenance pass-throughs. A = GetUnderlyingObjCPtr(A, DL); B = GetUnderlyingObjCPtr(B, DL); // Quick check. if (A == B) return true; // Ask regular AliasAnalysis, for a first approximation. switch (AA->alias(A, B)) { case NoAlias: return false; case MustAlias: case PartialAlias: return true; case MayAlias: break; } bool AIsIdentified = IsObjCIdentifiedObject(A); bool BIsIdentified = IsObjCIdentifiedObject(B); // An ObjC-Identified object can't alias a load if it is never locally stored. if (AIsIdentified) { // Check for an obvious escape. if (isa<LoadInst>(B)) return IsStoredObjCPointer(A); if (BIsIdentified) { // Check for an obvious escape. if (isa<LoadInst>(A)) return IsStoredObjCPointer(B); // Both pointers are identified and escapes aren't an evident problem. return false; } } else if (BIsIdentified) { // Check for an obvious escape. if (isa<LoadInst>(A)) return IsStoredObjCPointer(B); } // Special handling for PHI and Select. if (const PHINode *PN = dyn_cast<PHINode>(A)) return relatedPHI(PN, B); if (const PHINode *PN = dyn_cast<PHINode>(B)) return relatedPHI(PN, A); if (const SelectInst *S = dyn_cast<SelectInst>(A)) return relatedSelect(S, B); if (const SelectInst *S = dyn_cast<SelectInst>(B)) return relatedSelect(S, A); // Conservative. return true; } bool ProvenanceAnalysis::related(const Value *A, const Value *B, const DataLayout &DL) { // Begin by inserting a conservative value into the map. If the insertion // fails, we have the answer already. If it succeeds, leave it there until we // compute the real answer to guard against recursive queries. if (A > B) std::swap(A, B); std::pair<CachedResultsTy::iterator, bool> Pair = CachedResults.insert(std::make_pair(ValuePairTy(A, B), true)); if (!Pair.second) return Pair.first->second; bool Result = relatedCheck(A, B, DL); CachedResults[ValuePairTy(A, B)] = Result; return Result; }
repo_id:   repos/DirectXShaderCompiler/lib/Transforms
file_path: repos/DirectXShaderCompiler/lib/Transforms/ObjCARC/ARCInstKind.h
content:
//===--- ARCInstKind.h - ARC instruction equivalence classes -*- C++ -*----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TRANSFORMS_OBJCARC_ARCINSTKIND_H
#define LLVM_LIB_TRANSFORMS_OBJCARC_ARCINSTKIND_H

#include "llvm/IR/Instructions.h"
#include "llvm/IR/Function.h"

namespace llvm {
namespace objcarc {

/// \enum ARCInstKind
///
/// \brief Equivalence classes of instructions in the ARC Model.
///
/// Since we do not have "instructions" to represent ARC concepts in LLVM IR,
/// we instead operate on equivalence classes of instructions.
///
/// TODO: This should be split into two enums: a runtime entry point enum
/// (possibly united with the ARCRuntimeEntrypoint class) and an enum that deals
/// with effects of instructions in the ARC model (which would handle the notion
/// of a User or CallOrUser).
enum class ARCInstKind {
  Retain,                   ///< objc_retain
  RetainRV,                 ///< objc_retainAutoreleasedReturnValue
  RetainBlock,              ///< objc_retainBlock
  Release,                  ///< objc_release
  Autorelease,              ///< objc_autorelease
  AutoreleaseRV,            ///< objc_autoreleaseReturnValue
  AutoreleasepoolPush,      ///< objc_autoreleasePoolPush
  AutoreleasepoolPop,       ///< objc_autoreleasePoolPop
  NoopCast,                 ///< objc_retainedObject, etc.
  FusedRetainAutorelease,   ///< objc_retainAutorelease
  FusedRetainAutoreleaseRV, ///< objc_retainAutoreleaseReturnValue
  LoadWeakRetained,         ///< objc_loadWeakRetained (primitive)
  StoreWeak,                ///< objc_storeWeak (primitive)
  InitWeak,                 ///< objc_initWeak (derived)
  LoadWeak,                 ///< objc_loadWeak (derived)
  MoveWeak,                 ///< objc_moveWeak (derived)
  CopyWeak,                 ///< objc_copyWeak (derived)
  DestroyWeak,              ///< objc_destroyWeak (derived)
  StoreStrong,              ///< objc_storeStrong (derived)
  IntrinsicUser,            ///< clang.arc.use
  CallOrUser,               ///< could call objc_release and/or "use" pointers
  Call,                     ///< could call objc_release
  User,                     ///< could "use" a pointer
  None                      ///< anything that is inert from an ARC perspective.
};

raw_ostream &operator<<(raw_ostream &OS, const ARCInstKind Class);

/// \brief Test if the given class is a kind of user.
bool IsUser(ARCInstKind Class);

/// \brief Test if the given class is objc_retain or equivalent.
bool IsRetain(ARCInstKind Class);

/// \brief Test if the given class is objc_autorelease or equivalent.
bool IsAutorelease(ARCInstKind Class);

/// \brief Test if the given class represents instructions which return their
/// argument verbatim.
bool IsForwarding(ARCInstKind Class);

/// \brief Test if the given class represents instructions which do nothing if
/// passed a null pointer.
bool IsNoopOnNull(ARCInstKind Class);

/// \brief Test if the given class represents instructions which are always safe
/// to mark with the "tail" keyword.
bool IsAlwaysTail(ARCInstKind Class);

/// \brief Test if the given class represents instructions which are never safe
/// to mark with the "tail" keyword.
bool IsNeverTail(ARCInstKind Class);

/// \brief Test if the given class represents instructions which are always safe
/// to mark with the nounwind attribute.
bool IsNoThrow(ARCInstKind Class);

/// Test whether the given instruction can autorelease any pointer or cause an
/// autoreleasepool pop.
bool CanInterruptRV(ARCInstKind Class);

/// \brief Determine if F is one of the special known Functions. If it isn't,
/// return ARCInstKind::CallOrUser.
ARCInstKind GetFunctionClass(const Function *F);

/// \brief Determine which objc runtime call instruction class V belongs to.
///
/// This is similar to GetARCInstKind except that it only detects objc
/// runtime calls. This allows it to be faster.
///
static inline ARCInstKind GetBasicARCInstKind(const Value *V) {
  if (const CallInst *CI = dyn_cast<CallInst>(V)) {
    if (const Function *F = CI->getCalledFunction())
      return GetFunctionClass(F);
    // Otherwise, be conservative.
    return ARCInstKind::CallOrUser;
  }

  // Otherwise, be conservative.
  return isa<InvokeInst>(V) ? ARCInstKind::CallOrUser : ARCInstKind::User;
}

/// Map V to its ARCInstKind equivalence class.
ARCInstKind GetARCInstKind(const Value *V);

/// Returns false if conservatively we can prove that any instruction mapped to
/// this kind can not decrement ref counts. Returns true otherwise.
bool CanDecrementRefCount(ARCInstKind Kind);

} // end namespace objcarc
} // end namespace llvm

#endif
repo_id:   repos/DirectXShaderCompiler/lib/Transforms
file_path: repos/DirectXShaderCompiler/lib/Transforms/Scalar/LoopInterchange.cpp
content:
//===- LoopInterchange.cpp - Loop interchange pass------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This Pass handles loop interchange transform. // This pass interchanges loops to provide a more cache-friendly memory access // patterns. // //===----------------------------------------------------------------------===// #include "llvm/ADT/SmallVector.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/AliasSetTracker.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/BlockFrequencyInfo.h" #include "llvm/Analysis/CodeMetrics.h" #include "llvm/Analysis/DependenceAnalysis.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/LoopIterator.h" #include "llvm/Analysis/LoopPass.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/ScalarEvolutionExpander.h" #include "llvm/Analysis/ScalarEvolutionExpressions.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/InstIterator.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/LoopUtils.h" #include "llvm/Transforms/Utils/SSAUpdater.h" using namespace llvm; #define DEBUG_TYPE "loop-interchange" namespace { typedef SmallVector<Loop *, 8> LoopVector; // TODO: Check if we can use a sparse matrix here. typedef std::vector<std::vector<char>> CharMatrix; // Maximum number of dependencies that can be handled in the dependency matrix. static const unsigned MaxMemInstrCount = 100; // Maximum loop depth supported. static const unsigned MaxLoopNestDepth = 10; struct LoopInterchange; #ifdef DUMP_DEP_MATRICIES void printDepMatrix(CharMatrix &DepMatrix) { for (auto I = DepMatrix.begin(), E = DepMatrix.end(); I != E; ++I) { std::vector<char> Vec = *I; for (auto II = Vec.begin(), EE = Vec.end(); II != EE; ++II) DEBUG(dbgs() << *II << " "); DEBUG(dbgs() << "\n"); } } #endif static bool populateDependencyMatrix(CharMatrix &DepMatrix, unsigned Level, Loop *L, DependenceAnalysis *DA) { typedef SmallVector<Value *, 16> ValueVector; ValueVector MemInstr; if (Level > MaxLoopNestDepth) { DEBUG(dbgs() << "Cannot handle loops of depth greater than " << MaxLoopNestDepth << "\n"); return false; } // For each block. for (Loop::block_iterator BB = L->block_begin(), BE = L->block_end(); BB != BE; ++BB) { // Scan the BB and collect legal loads and stores. 
for (BasicBlock::iterator I = (*BB)->begin(), E = (*BB)->end(); I != E; ++I) { Instruction *Ins = dyn_cast<Instruction>(I); if (!Ins) return false; LoadInst *Ld = dyn_cast<LoadInst>(I); StoreInst *St = dyn_cast<StoreInst>(I); if (!St && !Ld) continue; if (Ld && !Ld->isSimple()) return false; if (St && !St->isSimple()) return false; MemInstr.push_back(I); } } DEBUG(dbgs() << "Found " << MemInstr.size() << " Loads and Stores to analyze\n"); ValueVector::iterator I, IE, J, JE; for (I = MemInstr.begin(), IE = MemInstr.end(); I != IE; ++I) { for (J = I, JE = MemInstr.end(); J != JE; ++J) { std::vector<char> Dep; Instruction *Src = dyn_cast<Instruction>(*I); Instruction *Des = dyn_cast<Instruction>(*J); if (Src == Des) continue; if (isa<LoadInst>(Src) && isa<LoadInst>(Des)) continue; if (auto D = DA->depends(Src, Des, true)) { DEBUG(dbgs() << "Found Dependency between Src=" << Src << " Des=" << Des << "\n"); if (D->isFlow()) { // TODO: Handle Flow dependence.Check if it is sufficient to populate // the Dependence Matrix with the direction reversed. DEBUG(dbgs() << "Flow dependence not handled"); return false; } if (D->isAnti()) { DEBUG(dbgs() << "Found Anti dependence \n"); unsigned Levels = D->getLevels(); char Direction; for (unsigned II = 1; II <= Levels; ++II) { const SCEV *Distance = D->getDistance(II); const SCEVConstant *SCEVConst = dyn_cast_or_null<SCEVConstant>(Distance); if (SCEVConst) { const ConstantInt *CI = SCEVConst->getValue(); if (CI->isNegative()) Direction = '<'; else if (CI->isZero()) Direction = '='; else Direction = '>'; Dep.push_back(Direction); } else if (D->isScalar(II)) { Direction = 'S'; Dep.push_back(Direction); } else { unsigned Dir = D->getDirection(II); if (Dir == Dependence::DVEntry::LT || Dir == Dependence::DVEntry::LE) Direction = '<'; else if (Dir == Dependence::DVEntry::GT || Dir == Dependence::DVEntry::GE) Direction = '>'; else if (Dir == Dependence::DVEntry::EQ) Direction = '='; else Direction = '*'; Dep.push_back(Direction); } } while (Dep.size() != Level) { Dep.push_back('I'); } DepMatrix.push_back(Dep); if (DepMatrix.size() > MaxMemInstrCount) { DEBUG(dbgs() << "Cannot handle more than " << MaxMemInstrCount << " dependencies inside loop\n"); return false; } } } } } // We don't have a DepMatrix to check legality return false if (DepMatrix.size() == 0) return false; return true; } // A loop is moved from index 'from' to an index 'to'. Update the Dependence // matrix by exchanging the two columns. static void interChangeDepedencies(CharMatrix &DepMatrix, unsigned FromIndx, unsigned ToIndx) { unsigned numRows = DepMatrix.size(); for (unsigned i = 0; i < numRows; ++i) { char TmpVal = DepMatrix[i][ToIndx]; DepMatrix[i][ToIndx] = DepMatrix[i][FromIndx]; DepMatrix[i][FromIndx] = TmpVal; } } // Checks if outermost non '=','S'or'I' dependence in the dependence matrix is // '>' static bool isOuterMostDepPositive(CharMatrix &DepMatrix, unsigned Row, unsigned Column) { for (unsigned i = 0; i <= Column; ++i) { if (DepMatrix[Row][i] == '<') return false; if (DepMatrix[Row][i] == '>') return true; } // All dependencies were '=','S' or 'I' return false; } // Checks if no dependence exist in the dependency matrix in Row before Column. 
static bool containsNoDependence(CharMatrix &DepMatrix, unsigned Row, unsigned Column) { for (unsigned i = 0; i < Column; ++i) { if (DepMatrix[Row][i] != '=' || DepMatrix[Row][i] != 'S' || DepMatrix[Row][i] != 'I') return false; } return true; } static bool validDepInterchange(CharMatrix &DepMatrix, unsigned Row, unsigned OuterLoopId, char InnerDep, char OuterDep) { if (isOuterMostDepPositive(DepMatrix, Row, OuterLoopId)) return false; if (InnerDep == OuterDep) return true; // It is legal to interchange if and only if after interchange no row has a // '>' direction as the leftmost non-'='. if (InnerDep == '=' || InnerDep == 'S' || InnerDep == 'I') return true; if (InnerDep == '<') return true; if (InnerDep == '>') { // If OuterLoopId represents outermost loop then interchanging will make the // 1st dependency as '>' if (OuterLoopId == 0) return false; // If all dependencies before OuterloopId are '=','S'or 'I'. Then // interchanging will result in this row having an outermost non '=' // dependency of '>' if (!containsNoDependence(DepMatrix, Row, OuterLoopId)) return true; } return false; } // Checks if it is legal to interchange 2 loops. // [Theorem] A permutation of the loops in a perfect nest is legal if and only // if // the direction matrix, after the same permutation is applied to its columns, // has no ">" direction as the leftmost non-"=" direction in any row. static bool isLegalToInterChangeLoops(CharMatrix &DepMatrix, unsigned InnerLoopId, unsigned OuterLoopId) { unsigned NumRows = DepMatrix.size(); // For each row check if it is valid to interchange. for (unsigned Row = 0; Row < NumRows; ++Row) { char InnerDep = DepMatrix[Row][InnerLoopId]; char OuterDep = DepMatrix[Row][OuterLoopId]; if (InnerDep == '*' || OuterDep == '*') return false; else if (!validDepInterchange(DepMatrix, Row, OuterLoopId, InnerDep, OuterDep)) return false; } return true; } static void populateWorklist(Loop &L, SmallVector<LoopVector, 8> &V) { DEBUG(dbgs() << "Calling populateWorklist called\n"); LoopVector LoopList; Loop *CurrentLoop = &L; const std::vector<Loop *> *Vec = &CurrentLoop->getSubLoops(); while (!Vec->empty()) { // The current loop has multiple subloops in it hence it is not tightly // nested. // Discard all loops above it added into Worklist. if (Vec->size() != 1) { LoopList.clear(); return; } LoopList.push_back(CurrentLoop); CurrentLoop = Vec->front(); Vec = &CurrentLoop->getSubLoops(); } LoopList.push_back(CurrentLoop); V.push_back(std::move(LoopList)); } static PHINode *getInductionVariable(Loop *L, ScalarEvolution *SE) { PHINode *InnerIndexVar = L->getCanonicalInductionVariable(); if (InnerIndexVar) return InnerIndexVar; if (L->getLoopLatch() == nullptr || L->getLoopPredecessor() == nullptr) return nullptr; for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) { PHINode *PhiVar = cast<PHINode>(I); Type *PhiTy = PhiVar->getType(); if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() && !PhiTy->isPointerTy()) return nullptr; const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(PhiVar)); if (!AddRec || !AddRec->isAffine()) continue; const SCEV *Step = AddRec->getStepRecurrence(*SE); const SCEVConstant *C = dyn_cast<SCEVConstant>(Step); if (!C) continue; // Found the induction variable. // FIXME: Handle loops with more than one induction variable. Note that, // currently, legality makes sure we have only one induction variable. return PhiVar; } return nullptr; } /// LoopInterchangeLegality checks if it is legal to interchange the loop. 
class LoopInterchangeLegality { public: LoopInterchangeLegality(Loop *Outer, Loop *Inner, ScalarEvolution *SE, LoopInterchange *Pass) : OuterLoop(Outer), InnerLoop(Inner), SE(SE), CurrentPass(Pass), InnerLoopHasReduction(false) {} /// Check if the loops can be interchanged. bool canInterchangeLoops(unsigned InnerLoopId, unsigned OuterLoopId, CharMatrix &DepMatrix); /// Check if the loop structure is understood. We do not handle triangular /// loops for now. bool isLoopStructureUnderstood(PHINode *InnerInductionVar); bool currentLimitations(); bool hasInnerLoopReduction() { return InnerLoopHasReduction; } private: bool tightlyNested(Loop *Outer, Loop *Inner); bool containsUnsafeInstructionsInHeader(BasicBlock *BB); bool areAllUsesReductions(Instruction *Ins, Loop *L); bool containsUnsafeInstructionsInLatch(BasicBlock *BB); bool findInductionAndReductions(Loop *L, SmallVector<PHINode *, 8> &Inductions, SmallVector<PHINode *, 8> &Reductions); Loop *OuterLoop; Loop *InnerLoop; /// Scev analysis. ScalarEvolution *SE; LoopInterchange *CurrentPass; bool InnerLoopHasReduction; }; /// LoopInterchangeProfitability checks if it is profitable to interchange the /// loop. class LoopInterchangeProfitability { public: LoopInterchangeProfitability(Loop *Outer, Loop *Inner, ScalarEvolution *SE) : OuterLoop(Outer), InnerLoop(Inner), SE(SE) {} /// Check if the loop interchange is profitable bool isProfitable(unsigned InnerLoopId, unsigned OuterLoopId, CharMatrix &DepMatrix); private: int getInstrOrderCost(); Loop *OuterLoop; Loop *InnerLoop; /// Scev analysis. ScalarEvolution *SE; }; /// LoopInterchangeTransform interchanges the loop class LoopInterchangeTransform { public: LoopInterchangeTransform(Loop *Outer, Loop *Inner, ScalarEvolution *SE, LoopInfo *LI, DominatorTree *DT, LoopInterchange *Pass, BasicBlock *LoopNestExit, bool InnerLoopContainsReductions) : OuterLoop(Outer), InnerLoop(Inner), SE(SE), LI(LI), DT(DT), LoopExit(LoopNestExit), InnerLoopHasReduction(InnerLoopContainsReductions) {} /// Interchange OuterLoop and InnerLoop. bool transform(); void restructureLoops(Loop *InnerLoop, Loop *OuterLoop); void removeChildLoop(Loop *OuterLoop, Loop *InnerLoop); private: void splitInnerLoopLatch(Instruction *); void splitOuterLoopLatch(); void splitInnerLoopHeader(); bool adjustLoopLinks(); void adjustLoopPreheaders(); void adjustOuterLoopPreheader(); void adjustInnerLoopPreheader(); bool adjustLoopBranches(); void updateIncomingBlock(BasicBlock *CurrBlock, BasicBlock *OldPred, BasicBlock *NewPred); Loop *OuterLoop; Loop *InnerLoop; /// Scev analysis. 
ScalarEvolution *SE; LoopInfo *LI; DominatorTree *DT; BasicBlock *LoopExit; bool InnerLoopHasReduction; }; // Main LoopInterchange Pass struct LoopInterchange : public FunctionPass { static char ID; ScalarEvolution *SE; LoopInfo *LI; DependenceAnalysis *DA; DominatorTree *DT; LoopInterchange() : FunctionPass(ID), SE(nullptr), LI(nullptr), DA(nullptr), DT(nullptr) { initializeLoopInterchangePass(*PassRegistry::getPassRegistry()); } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<ScalarEvolution>(); AU.addRequired<AliasAnalysis>(); AU.addRequired<DominatorTreeWrapperPass>(); AU.addRequired<LoopInfoWrapperPass>(); AU.addRequired<DependenceAnalysis>(); AU.addRequiredID(LoopSimplifyID); AU.addRequiredID(LCSSAID); } bool runOnFunction(Function &F) override { SE = &getAnalysis<ScalarEvolution>(); LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); DA = &getAnalysis<DependenceAnalysis>(); auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>(); DT = DTWP ? &DTWP->getDomTree() : nullptr; // Build up a worklist of loop pairs to analyze. SmallVector<LoopVector, 8> Worklist; for (Loop *L : *LI) populateWorklist(*L, Worklist); DEBUG(dbgs() << "Worklist size = " << Worklist.size() << "\n"); bool Changed = true; while (!Worklist.empty()) { LoopVector LoopList = Worklist.pop_back_val(); Changed = processLoopList(LoopList, F); } return Changed; } bool isComputableLoopNest(LoopVector LoopList) { for (auto I = LoopList.begin(), E = LoopList.end(); I != E; ++I) { Loop *L = *I; const SCEV *ExitCountOuter = SE->getBackedgeTakenCount(L); if (ExitCountOuter == SE->getCouldNotCompute()) { DEBUG(dbgs() << "Couldn't compute Backedge count\n"); return false; } if (L->getNumBackEdges() != 1) { DEBUG(dbgs() << "NumBackEdges is not equal to 1\n"); return false; } if (!L->getExitingBlock()) { DEBUG(dbgs() << "Loop Doesn't have unique exit block\n"); return false; } } return true; } unsigned selectLoopForInterchange(LoopVector LoopList) { // TODO: Add a better heuristic to select the loop to be interchanged based // on the dependece matrix. Currently we select the innermost loop. return LoopList.size() - 1; } bool processLoopList(LoopVector LoopList, Function &F) { bool Changed = false; CharMatrix DependencyMatrix; if (LoopList.size() < 2) { DEBUG(dbgs() << "Loop doesn't contain minimum nesting level.\n"); return false; } if (!isComputableLoopNest(LoopList)) { DEBUG(dbgs() << "Not vaild loop candidate for interchange\n"); return false; } Loop *OuterMostLoop = *(LoopList.begin()); DEBUG(dbgs() << "Processing LoopList of size = " << LoopList.size() << "\n"); if (!populateDependencyMatrix(DependencyMatrix, LoopList.size(), OuterMostLoop, DA)) { DEBUG(dbgs() << "Populating Dependency matrix failed\n"); return false; } #ifdef DUMP_DEP_MATRICIES DEBUG(dbgs() << "Dependence before inter change \n"); printDepMatrix(DependencyMatrix); #endif BasicBlock *OuterMostLoopLatch = OuterMostLoop->getLoopLatch(); BranchInst *OuterMostLoopLatchBI = dyn_cast<BranchInst>(OuterMostLoopLatch->getTerminator()); if (!OuterMostLoopLatchBI) return false; // Since we currently do not handle LCSSA PHI's any failure in loop // condition will now branch to LoopNestExit. // TODO: This should be removed once we handle LCSSA PHI nodes. // Get the Outermost loop exit. 
BasicBlock *LoopNestExit; if (OuterMostLoopLatchBI->getSuccessor(0) == OuterMostLoop->getHeader()) LoopNestExit = OuterMostLoopLatchBI->getSuccessor(1); else LoopNestExit = OuterMostLoopLatchBI->getSuccessor(0); if (isa<PHINode>(LoopNestExit->begin())) { DEBUG(dbgs() << "PHI Nodes in loop nest exit is not handled for now " "since on failure all loops branch to loop nest exit.\n"); return false; } unsigned SelecLoopId = selectLoopForInterchange(LoopList); // Move the selected loop outwards to the best posible position. for (unsigned i = SelecLoopId; i > 0; i--) { bool Interchanged = processLoop(LoopList, i, i - 1, LoopNestExit, DependencyMatrix); if (!Interchanged) return Changed; // Loops interchanged reflect the same in LoopList std::swap(LoopList[i - 1], LoopList[i]); // Update the DependencyMatrix interChangeDepedencies(DependencyMatrix, i, i - 1); DT->recalculate(F); #ifdef DUMP_DEP_MATRICIES DEBUG(dbgs() << "Dependence after inter change \n"); printDepMatrix(DependencyMatrix); #endif Changed |= Interchanged; } return Changed; } bool processLoop(LoopVector LoopList, unsigned InnerLoopId, unsigned OuterLoopId, BasicBlock *LoopNestExit, std::vector<std::vector<char>> &DependencyMatrix) { DEBUG(dbgs() << "Processing Innder Loop Id = " << InnerLoopId << " and OuterLoopId = " << OuterLoopId << "\n"); Loop *InnerLoop = LoopList[InnerLoopId]; Loop *OuterLoop = LoopList[OuterLoopId]; LoopInterchangeLegality LIL(OuterLoop, InnerLoop, SE, this); if (!LIL.canInterchangeLoops(InnerLoopId, OuterLoopId, DependencyMatrix)) { DEBUG(dbgs() << "Not interchanging Loops. Cannot prove legality\n"); return false; } DEBUG(dbgs() << "Loops are legal to interchange\n"); LoopInterchangeProfitability LIP(OuterLoop, InnerLoop, SE); if (!LIP.isProfitable(InnerLoopId, OuterLoopId, DependencyMatrix)) { DEBUG(dbgs() << "Interchanging Loops not profitable\n"); return false; } LoopInterchangeTransform LIT(OuterLoop, InnerLoop, SE, LI, DT, this, LoopNestExit, LIL.hasInnerLoopReduction()); LIT.transform(); DEBUG(dbgs() << "Loops interchanged\n"); return true; } }; } // end of namespace bool LoopInterchangeLegality::areAllUsesReductions(Instruction *Ins, Loop *L) { return !std::any_of(Ins->user_begin(), Ins->user_end(), [=](User *U) -> bool { PHINode *UserIns = dyn_cast<PHINode>(U); RecurrenceDescriptor RD; return !UserIns || !RecurrenceDescriptor::isReductionPHI(UserIns, L, RD); }); } bool LoopInterchangeLegality::containsUnsafeInstructionsInHeader( BasicBlock *BB) { for (auto I = BB->begin(), E = BB->end(); I != E; ++I) { // Load corresponding to reduction PHI's are safe while concluding if // tightly nested. if (LoadInst *L = dyn_cast<LoadInst>(I)) { if (!areAllUsesReductions(L, InnerLoop)) return true; } else if (I->mayHaveSideEffects() || I->mayReadFromMemory()) return true; } return false; } bool LoopInterchangeLegality::containsUnsafeInstructionsInLatch( BasicBlock *BB) { for (auto I = BB->begin(), E = BB->end(); I != E; ++I) { // Stores corresponding to reductions are safe while concluding if tightly // nested. 
if (StoreInst *L = dyn_cast<StoreInst>(I)) { PHINode *PHI = dyn_cast<PHINode>(L->getOperand(0)); if (!PHI) return true; } else if (I->mayHaveSideEffects() || I->mayReadFromMemory()) return true; } return false; } bool LoopInterchangeLegality::tightlyNested(Loop *OuterLoop, Loop *InnerLoop) { BasicBlock *OuterLoopHeader = OuterLoop->getHeader(); BasicBlock *InnerLoopPreHeader = InnerLoop->getLoopPreheader(); BasicBlock *OuterLoopLatch = OuterLoop->getLoopLatch(); DEBUG(dbgs() << "Checking if Loops are Tightly Nested\n"); // A perfectly nested loop will not have any branch in between the outer and // inner block i.e. outer header will branch to either inner preheader and // outerloop latch. BranchInst *outerLoopHeaderBI = dyn_cast<BranchInst>(OuterLoopHeader->getTerminator()); if (!outerLoopHeaderBI) return false; unsigned num = outerLoopHeaderBI->getNumSuccessors(); for (unsigned i = 0; i < num; i++) { if (outerLoopHeaderBI->getSuccessor(i) != InnerLoopPreHeader && outerLoopHeaderBI->getSuccessor(i) != OuterLoopLatch) return false; } DEBUG(dbgs() << "Checking instructions in Loop header and Loop latch \n"); // We do not have any basic block in between now make sure the outer header // and outer loop latch doesnt contain any unsafe instructions. if (containsUnsafeInstructionsInHeader(OuterLoopHeader) || containsUnsafeInstructionsInLatch(OuterLoopLatch)) return false; DEBUG(dbgs() << "Loops are perfectly nested \n"); // We have a perfect loop nest. return true; } bool LoopInterchangeLegality::isLoopStructureUnderstood( PHINode *InnerInduction) { unsigned Num = InnerInduction->getNumOperands(); BasicBlock *InnerLoopPreheader = InnerLoop->getLoopPreheader(); for (unsigned i = 0; i < Num; ++i) { Value *Val = InnerInduction->getOperand(i); if (isa<Constant>(Val)) continue; Instruction *I = dyn_cast<Instruction>(Val); if (!I) return false; // TODO: Handle triangular loops. // e.g. for(int i=0;i<N;i++) // for(int j=i;j<N;j++) unsigned IncomBlockIndx = PHINode::getIncomingValueNumForOperand(i); if (InnerInduction->getIncomingBlock(IncomBlockIndx) == InnerLoopPreheader && !OuterLoop->isLoopInvariant(I)) { return false; } } return true; } bool LoopInterchangeLegality::findInductionAndReductions( Loop *L, SmallVector<PHINode *, 8> &Inductions, SmallVector<PHINode *, 8> &Reductions) { if (!L->getLoopLatch() || !L->getLoopPredecessor()) return false; for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) { RecurrenceDescriptor RD; PHINode *PHI = cast<PHINode>(I); ConstantInt *StepValue = nullptr; if (isInductionPHI(PHI, SE, StepValue)) Inductions.push_back(PHI); else if (RecurrenceDescriptor::isReductionPHI(PHI, L, RD)) Reductions.push_back(PHI); else { DEBUG( dbgs() << "Failed to recognize PHI as an induction or reduction.\n"); return false; } } return true; } static bool containsSafePHI(BasicBlock *Block, bool isOuterLoopExitBlock) { for (auto I = Block->begin(); isa<PHINode>(I); ++I) { PHINode *PHI = cast<PHINode>(I); // Reduction lcssa phi will have only 1 incoming block that from loop latch. if (PHI->getNumIncomingValues() > 1) return false; Instruction *Ins = dyn_cast<Instruction>(PHI->getIncomingValue(0)); if (!Ins) return false; // Incoming value for lcssa phi's in outer loop exit can only be inner loop // exits lcssa phi else it would not be tightly nested. 
if (!isa<PHINode>(Ins) && isOuterLoopExitBlock) return false; } return true; } static BasicBlock *getLoopLatchExitBlock(BasicBlock *LatchBlock, BasicBlock *LoopHeader) { if (BranchInst *BI = dyn_cast<BranchInst>(LatchBlock->getTerminator())) { unsigned Num = BI->getNumSuccessors(); assert(Num == 2); for (unsigned i = 0; i < Num; ++i) { if (BI->getSuccessor(i) == LoopHeader) continue; return BI->getSuccessor(i); } } return nullptr; } // This function indicates the current limitations in the transform as a result // of which we do not proceed. bool LoopInterchangeLegality::currentLimitations() { BasicBlock *InnerLoopPreHeader = InnerLoop->getLoopPreheader(); BasicBlock *InnerLoopHeader = InnerLoop->getHeader(); BasicBlock *InnerLoopLatch = InnerLoop->getLoopLatch(); BasicBlock *OuterLoopLatch = OuterLoop->getLoopLatch(); BasicBlock *OuterLoopHeader = OuterLoop->getHeader(); PHINode *InnerInductionVar; SmallVector<PHINode *, 8> Inductions; SmallVector<PHINode *, 8> Reductions; if (!findInductionAndReductions(InnerLoop, Inductions, Reductions)) return true; // TODO: Currently we handle only loops with 1 induction variable. if (Inductions.size() != 1) { DEBUG(dbgs() << "We currently only support loops with 1 induction variable." << "Failed to interchange due to current limitation\n"); return true; } if (Reductions.size() > 0) InnerLoopHasReduction = true; InnerInductionVar = Inductions.pop_back_val(); Reductions.clear(); if (!findInductionAndReductions(OuterLoop, Inductions, Reductions)) return true; // Outer loop cannot have reduction because then loops will not be tightly // nested. if (!Reductions.empty()) return true; // TODO: Currently we handle only loops with 1 induction variable. if (Inductions.size() != 1) return true; // TODO: Triangular loops are not handled for now. if (!isLoopStructureUnderstood(InnerInductionVar)) { DEBUG(dbgs() << "Loop structure not understood by pass\n"); return true; } // TODO: We only handle LCSSA PHI's corresponding to reduction for now. BasicBlock *LoopExitBlock = getLoopLatchExitBlock(OuterLoopLatch, OuterLoopHeader); if (!LoopExitBlock || !containsSafePHI(LoopExitBlock, true)) return true; LoopExitBlock = getLoopLatchExitBlock(InnerLoopLatch, InnerLoopHeader); if (!LoopExitBlock || !containsSafePHI(LoopExitBlock, false)) return true; // TODO: Current limitation: Since we split the inner loop latch at the point // were induction variable is incremented (induction.next); We cannot have // more than 1 user of induction.next since it would result in broken code // after split. // e.g. // for(i=0;i<N;i++) { // for(j = 0;j<M;j++) { // A[j+1][i+2] = A[j][i]+k; // } // } bool FoundInduction = false; Instruction *InnerIndexVarInc = nullptr; if (InnerInductionVar->getIncomingBlock(0) == InnerLoopPreHeader) InnerIndexVarInc = dyn_cast<Instruction>(InnerInductionVar->getIncomingValue(1)); else InnerIndexVarInc = dyn_cast<Instruction>(InnerInductionVar->getIncomingValue(0)); if (!InnerIndexVarInc) return true; // Since we split the inner loop latch on this induction variable. Make sure // we do not have any instruction between the induction variable and branch // instruction. for (auto I = InnerLoopLatch->rbegin(), E = InnerLoopLatch->rend(); I != E && !FoundInduction; ++I) { if (isa<BranchInst>(*I) || isa<CmpInst>(*I) || isa<TruncInst>(*I)) continue; const Instruction &Ins = *I; // We found an instruction. If this is not induction variable then it is not // safe to split this loop latch. 
if (!Ins.isIdenticalTo(InnerIndexVarInc)) return true; else FoundInduction = true; } // The loop latch ended and we didnt find the induction variable return as // current limitation. if (!FoundInduction) return true; return false; } bool LoopInterchangeLegality::canInterchangeLoops(unsigned InnerLoopId, unsigned OuterLoopId, CharMatrix &DepMatrix) { if (!isLegalToInterChangeLoops(DepMatrix, InnerLoopId, OuterLoopId)) { DEBUG(dbgs() << "Failed interchange InnerLoopId = " << InnerLoopId << "and OuterLoopId = " << OuterLoopId << "due to dependence\n"); return false; } // Create unique Preheaders if we already do not have one. BasicBlock *OuterLoopPreHeader = OuterLoop->getLoopPreheader(); BasicBlock *InnerLoopPreHeader = InnerLoop->getLoopPreheader(); // Create a unique outer preheader - // 1) If OuterLoop preheader is not present. // 2) If OuterLoop Preheader is same as OuterLoop Header // 3) If OuterLoop Preheader is same as Header of the previous loop. // 4) If OuterLoop Preheader is Entry node. if (!OuterLoopPreHeader || OuterLoopPreHeader == OuterLoop->getHeader() || isa<PHINode>(OuterLoopPreHeader->begin()) || !OuterLoopPreHeader->getUniquePredecessor()) { OuterLoopPreHeader = InsertPreheaderForLoop(OuterLoop, CurrentPass); } if (!InnerLoopPreHeader || InnerLoopPreHeader == InnerLoop->getHeader() || InnerLoopPreHeader == OuterLoop->getHeader()) { InnerLoopPreHeader = InsertPreheaderForLoop(InnerLoop, CurrentPass); } // TODO: The loops could not be interchanged due to current limitations in the // transform module. if (currentLimitations()) { DEBUG(dbgs() << "Not legal because of current transform limitation\n"); return false; } // Check if the loops are tightly nested. if (!tightlyNested(OuterLoop, InnerLoop)) { DEBUG(dbgs() << "Loops not tightly nested\n"); return false; } return true; } int LoopInterchangeProfitability::getInstrOrderCost() { unsigned GoodOrder, BadOrder; BadOrder = GoodOrder = 0; for (auto BI = InnerLoop->block_begin(), BE = InnerLoop->block_end(); BI != BE; ++BI) { for (auto I = (*BI)->begin(), E = (*BI)->end(); I != E; ++I) { const Instruction &Ins = *I; if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&Ins)) { unsigned NumOp = GEP->getNumOperands(); bool FoundInnerInduction = false; bool FoundOuterInduction = false; for (unsigned i = 0; i < NumOp; ++i) { const SCEV *OperandVal = SE->getSCEV(GEP->getOperand(i)); const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(OperandVal); if (!AR) continue; // If we find the inner induction after an outer induction e.g. // for(int i=0;i<N;i++) // for(int j=0;j<N;j++) // A[i][j] = A[i-1][j-1]+k; // then it is a good order. if (AR->getLoop() == InnerLoop) { // We found an InnerLoop induction after OuterLoop induction. It is // a good order. FoundInnerInduction = true; if (FoundOuterInduction) { GoodOrder++; break; } } // If we find the outer induction after an inner induction e.g. // for(int i=0;i<N;i++) // for(int j=0;j<N;j++) // A[j][i] = A[j-1][i-1]+k; // then it is a bad order. if (AR->getLoop() == OuterLoop) { // We found an OuterLoop induction after InnerLoop induction. It is // a bad order. FoundOuterInduction = true; if (FoundInnerInduction) { BadOrder++; break; } } } } } } return GoodOrder - BadOrder; } static bool isProfitabileForVectorization(unsigned InnerLoopId, unsigned OuterLoopId, CharMatrix &DepMatrix) { // TODO: Improve this heuristic to catch more cases. // If the inner loop is loop independent or doesn't carry any dependency it is // profitable to move this to outer position. 
unsigned Row = DepMatrix.size(); for (unsigned i = 0; i < Row; ++i) { if (DepMatrix[i][InnerLoopId] != 'S' && DepMatrix[i][InnerLoopId] != 'I') return false; // TODO: We need to improve this heuristic. if (DepMatrix[i][OuterLoopId] != '=') return false; } // If outer loop has dependence and inner loop is loop independent then it is // profitable to interchange to enable parallelism. return true; } bool LoopInterchangeProfitability::isProfitable(unsigned InnerLoopId, unsigned OuterLoopId, CharMatrix &DepMatrix) { // TODO: Add Better Profitibility checks. // e.g // 1) Construct dependency matrix and move the one with no loop carried dep // inside to enable vectorization. // This is rough cost estimation algorithm. It counts the good and bad order // of induction variables in the instruction and allows reordering if number // of bad orders is more than good. int Cost = 0; Cost += getInstrOrderCost(); DEBUG(dbgs() << "Cost = " << Cost << "\n"); if (Cost < 0) return true; // It is not profitable as per current cache profitibility model. But check if // we can move this loop outside to improve parallelism. bool ImprovesPar = isProfitabileForVectorization(InnerLoopId, OuterLoopId, DepMatrix); return ImprovesPar; } void LoopInterchangeTransform::removeChildLoop(Loop *OuterLoop, Loop *InnerLoop) { for (Loop::iterator I = OuterLoop->begin(), E = OuterLoop->end(); I != E; ++I) { if (*I == InnerLoop) { OuterLoop->removeChildLoop(I); return; } } assert(false && "Couldn't find loop"); } void LoopInterchangeTransform::restructureLoops(Loop *InnerLoop, Loop *OuterLoop) { Loop *OuterLoopParent = OuterLoop->getParentLoop(); if (OuterLoopParent) { // Remove the loop from its parent loop. removeChildLoop(OuterLoopParent, OuterLoop); removeChildLoop(OuterLoop, InnerLoop); OuterLoopParent->addChildLoop(InnerLoop); } else { removeChildLoop(OuterLoop, InnerLoop); LI->changeTopLevelLoop(OuterLoop, InnerLoop); } while (!InnerLoop->empty()) OuterLoop->addChildLoop(InnerLoop->removeChildLoop(InnerLoop->begin())); InnerLoop->addChildLoop(OuterLoop); } bool LoopInterchangeTransform::transform() { DEBUG(dbgs() << "transform\n"); bool Transformed = false; Instruction *InnerIndexVar; if (InnerLoop->getSubLoops().size() == 0) { BasicBlock *InnerLoopPreHeader = InnerLoop->getLoopPreheader(); DEBUG(dbgs() << "Calling Split Inner Loop\n"); PHINode *InductionPHI = getInductionVariable(InnerLoop, SE); if (!InductionPHI) { DEBUG(dbgs() << "Failed to find the point to split loop latch \n"); return false; } if (InductionPHI->getIncomingBlock(0) == InnerLoopPreHeader) InnerIndexVar = dyn_cast<Instruction>(InductionPHI->getIncomingValue(1)); else InnerIndexVar = dyn_cast<Instruction>(InductionPHI->getIncomingValue(0)); // // Split at the place were the induction variable is // incremented/decremented. // TODO: This splitting logic may not work always. Fix this. splitInnerLoopLatch(InnerIndexVar); DEBUG(dbgs() << "splitInnerLoopLatch Done\n"); // Splits the inner loops phi nodes out into a seperate basic block. 
splitInnerLoopHeader(); DEBUG(dbgs() << "splitInnerLoopHeader Done\n"); } Transformed |= adjustLoopLinks(); if (!Transformed) { DEBUG(dbgs() << "adjustLoopLinks Failed\n"); return false; } restructureLoops(InnerLoop, OuterLoop); return true; } void LoopInterchangeTransform::splitInnerLoopLatch(Instruction *Inc) { BasicBlock *InnerLoopLatch = InnerLoop->getLoopLatch(); BasicBlock *InnerLoopLatchPred = InnerLoopLatch; InnerLoopLatch = SplitBlock(InnerLoopLatchPred, Inc, DT, LI); } void LoopInterchangeTransform::splitOuterLoopLatch() { BasicBlock *OuterLoopLatch = OuterLoop->getLoopLatch(); BasicBlock *OuterLatchLcssaPhiBlock = OuterLoopLatch; OuterLoopLatch = SplitBlock(OuterLatchLcssaPhiBlock, OuterLoopLatch->getFirstNonPHI(), DT, LI); } void LoopInterchangeTransform::splitInnerLoopHeader() { // Split the inner loop header out. Here make sure that the reduction PHI's // stay in the innerloop body. BasicBlock *InnerLoopHeader = InnerLoop->getHeader(); BasicBlock *InnerLoopPreHeader = InnerLoop->getLoopPreheader(); if (InnerLoopHasReduction) { // FIXME: Check if the induction PHI will always be the first PHI. BasicBlock *New = InnerLoopHeader->splitBasicBlock( ++(InnerLoopHeader->begin()), InnerLoopHeader->getName() + ".split"); if (LI) if (Loop *L = LI->getLoopFor(InnerLoopHeader)) L->addBasicBlockToLoop(New, *LI); // Adjust Reduction PHI's in the block. SmallVector<PHINode *, 8> PHIVec; for (auto I = New->begin(); isa<PHINode>(I); ++I) { PHINode *PHI = dyn_cast<PHINode>(I); Value *V = PHI->getIncomingValueForBlock(InnerLoopPreHeader); PHI->replaceAllUsesWith(V); PHIVec.push_back((PHI)); } for (auto I = PHIVec.begin(), E = PHIVec.end(); I != E; ++I) { PHINode *P = *I; P->eraseFromParent(); } } else { SplitBlock(InnerLoopHeader, InnerLoopHeader->getFirstNonPHI(), DT, LI); } DEBUG(dbgs() << "Output of splitInnerLoopHeader InnerLoopHeaderSucc & " "InnerLoopHeader \n"); } /// \brief Move all instructions except the terminator from FromBB right before /// InsertBefore static void moveBBContents(BasicBlock *FromBB, Instruction *InsertBefore) { auto &ToList = InsertBefore->getParent()->getInstList(); auto &FromList = FromBB->getInstList(); ToList.splice(InsertBefore, FromList, FromList.begin(), FromBB->getTerminator()); } void LoopInterchangeTransform::adjustOuterLoopPreheader() { BasicBlock *OuterLoopPreHeader = OuterLoop->getLoopPreheader(); BasicBlock *InnerPreHeader = InnerLoop->getLoopPreheader(); moveBBContents(OuterLoopPreHeader, InnerPreHeader->getTerminator()); } void LoopInterchangeTransform::adjustInnerLoopPreheader() { BasicBlock *InnerLoopPreHeader = InnerLoop->getLoopPreheader(); BasicBlock *OuterHeader = OuterLoop->getHeader(); moveBBContents(InnerLoopPreHeader, OuterHeader->getTerminator()); } void LoopInterchangeTransform::updateIncomingBlock(BasicBlock *CurrBlock, BasicBlock *OldPred, BasicBlock *NewPred) { for (auto I = CurrBlock->begin(); isa<PHINode>(I); ++I) { PHINode *PHI = cast<PHINode>(I); unsigned Num = PHI->getNumIncomingValues(); for (unsigned i = 0; i < Num; ++i) { if (PHI->getIncomingBlock(i) == OldPred) PHI->setIncomingBlock(i, NewPred); } } } bool LoopInterchangeTransform::adjustLoopBranches() { DEBUG(dbgs() << "adjustLoopBranches called\n"); // Adjust the loop preheader BasicBlock *InnerLoopHeader = InnerLoop->getHeader(); BasicBlock *OuterLoopHeader = OuterLoop->getHeader(); BasicBlock *InnerLoopLatch = InnerLoop->getLoopLatch(); BasicBlock *OuterLoopLatch = OuterLoop->getLoopLatch(); BasicBlock *OuterLoopPreHeader = OuterLoop->getLoopPreheader(); BasicBlock 
*InnerLoopPreHeader = InnerLoop->getLoopPreheader(); BasicBlock *OuterLoopPredecessor = OuterLoopPreHeader->getUniquePredecessor(); BasicBlock *InnerLoopLatchPredecessor = InnerLoopLatch->getUniquePredecessor(); BasicBlock *InnerLoopLatchSuccessor; BasicBlock *OuterLoopLatchSuccessor; BranchInst *OuterLoopLatchBI = dyn_cast<BranchInst>(OuterLoopLatch->getTerminator()); BranchInst *InnerLoopLatchBI = dyn_cast<BranchInst>(InnerLoopLatch->getTerminator()); BranchInst *OuterLoopHeaderBI = dyn_cast<BranchInst>(OuterLoopHeader->getTerminator()); BranchInst *InnerLoopHeaderBI = dyn_cast<BranchInst>(InnerLoopHeader->getTerminator()); if (!OuterLoopPredecessor || !InnerLoopLatchPredecessor || !OuterLoopLatchBI || !InnerLoopLatchBI || !OuterLoopHeaderBI || !InnerLoopHeaderBI) return false; BranchInst *InnerLoopLatchPredecessorBI = dyn_cast<BranchInst>(InnerLoopLatchPredecessor->getTerminator()); BranchInst *OuterLoopPredecessorBI = dyn_cast<BranchInst>(OuterLoopPredecessor->getTerminator()); if (!OuterLoopPredecessorBI || !InnerLoopLatchPredecessorBI) return false; BasicBlock *InnerLoopHeaderSucessor = InnerLoopHeader->getUniqueSuccessor(); if (!InnerLoopHeaderSucessor) return false; // Adjust Loop Preheader and headers unsigned NumSucc = OuterLoopPredecessorBI->getNumSuccessors(); for (unsigned i = 0; i < NumSucc; ++i) { if (OuterLoopPredecessorBI->getSuccessor(i) == OuterLoopPreHeader) OuterLoopPredecessorBI->setSuccessor(i, InnerLoopPreHeader); } NumSucc = OuterLoopHeaderBI->getNumSuccessors(); for (unsigned i = 0; i < NumSucc; ++i) { if (OuterLoopHeaderBI->getSuccessor(i) == OuterLoopLatch) OuterLoopHeaderBI->setSuccessor(i, LoopExit); else if (OuterLoopHeaderBI->getSuccessor(i) == InnerLoopPreHeader) OuterLoopHeaderBI->setSuccessor(i, InnerLoopHeaderSucessor); } // Adjust reduction PHI's now that the incoming block has changed. updateIncomingBlock(InnerLoopHeaderSucessor, InnerLoopHeader, OuterLoopHeader); BranchInst::Create(OuterLoopPreHeader, InnerLoopHeaderBI); InnerLoopHeaderBI->eraseFromParent(); // -------------Adjust loop latches----------- if (InnerLoopLatchBI->getSuccessor(0) == InnerLoopHeader) InnerLoopLatchSuccessor = InnerLoopLatchBI->getSuccessor(1); else InnerLoopLatchSuccessor = InnerLoopLatchBI->getSuccessor(0); NumSucc = InnerLoopLatchPredecessorBI->getNumSuccessors(); for (unsigned i = 0; i < NumSucc; ++i) { if (InnerLoopLatchPredecessorBI->getSuccessor(i) == InnerLoopLatch) InnerLoopLatchPredecessorBI->setSuccessor(i, InnerLoopLatchSuccessor); } // Adjust PHI nodes in InnerLoopLatchSuccessor. Update all uses of PHI with // the value and remove this PHI node from inner loop. 
SmallVector<PHINode *, 8> LcssaVec; for (auto I = InnerLoopLatchSuccessor->begin(); isa<PHINode>(I); ++I) { PHINode *LcssaPhi = cast<PHINode>(I); LcssaVec.push_back(LcssaPhi); } for (auto I = LcssaVec.begin(), E = LcssaVec.end(); I != E; ++I) { PHINode *P = *I; Value *Incoming = P->getIncomingValueForBlock(InnerLoopLatch); P->replaceAllUsesWith(Incoming); P->eraseFromParent(); } if (OuterLoopLatchBI->getSuccessor(0) == OuterLoopHeader) OuterLoopLatchSuccessor = OuterLoopLatchBI->getSuccessor(1); else OuterLoopLatchSuccessor = OuterLoopLatchBI->getSuccessor(0); if (InnerLoopLatchBI->getSuccessor(1) == InnerLoopLatchSuccessor) InnerLoopLatchBI->setSuccessor(1, OuterLoopLatchSuccessor); else InnerLoopLatchBI->setSuccessor(0, OuterLoopLatchSuccessor); updateIncomingBlock(OuterLoopLatchSuccessor, OuterLoopLatch, InnerLoopLatch); if (OuterLoopLatchBI->getSuccessor(0) == OuterLoopLatchSuccessor) { OuterLoopLatchBI->setSuccessor(0, InnerLoopLatch); } else { OuterLoopLatchBI->setSuccessor(1, InnerLoopLatch); } return true; } void LoopInterchangeTransform::adjustLoopPreheaders() { // We have interchanged the preheaders so we need to interchange the data in // the preheader as well. // This is because the content of inner preheader was previously executed // inside the outer loop. BasicBlock *OuterLoopPreHeader = OuterLoop->getLoopPreheader(); BasicBlock *InnerLoopPreHeader = InnerLoop->getLoopPreheader(); BasicBlock *OuterLoopHeader = OuterLoop->getHeader(); BranchInst *InnerTermBI = cast<BranchInst>(InnerLoopPreHeader->getTerminator()); // These instructions should now be executed inside the loop. // Move instruction into a new block after outer header. moveBBContents(InnerLoopPreHeader, OuterLoopHeader->getTerminator()); // These instructions were not executed previously in the loop so move them to // the older inner loop preheader. moveBBContents(OuterLoopPreHeader, InnerTermBI); } bool LoopInterchangeTransform::adjustLoopLinks() { // Adjust all branches in the inner and outer loop. bool Changed = adjustLoopBranches(); if (Changed) adjustLoopPreheaders(); return Changed; } char LoopInterchange::ID = 0; INITIALIZE_PASS_BEGIN(LoopInterchange, "loop-interchange", "Interchanges loops for cache reuse", false, false) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_DEPENDENCY(DependenceAnalysis) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) INITIALIZE_PASS_DEPENDENCY(LoopSimplify) INITIALIZE_PASS_DEPENDENCY(LCSSA) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_END(LoopInterchange, "loop-interchange", "Interchanges loops for cache reuse", false, false) Pass *llvm::createLoopInterchangePass() { return new LoopInterchange(); }
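// A minimal usage sketch for the pass above, assuming an LLVM 3.7-era tree in
// which createLoopInterchangePass() is declared in llvm/Transforms/Scalar.h;
// runLoopInterchange is a hypothetical helper, not part of the pass itself.
//
// Source-level shape of the rewrite the cost model above favors (row-major
// arrays, so the inner index should be the contiguous one):
//
//   // before: j innermost, strided accesses     // after: i innermost, contiguous
//   for (int i = 0; i < N; ++i)                  for (int j = 0; j < N; ++j)
//     for (int j = 0; j < N; ++j)                  for (int i = 0; i < N; ++i)
//       A[j][i] += K;                                A[j][i] += K;
//
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Scalar.h"

static void runLoopInterchange(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  // The declared dependencies (LoopSimplify, LCSSA, ScalarEvolution,
  // DependenceAnalysis, ...) are scheduled automatically by the legacy
  // pass manager.
  PM.add(llvm::createLoopInterchangePass());
  PM.run(M);
}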
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/ConstantProp.cpp
//===- ConstantProp.cpp - Code to perform Simple Constant Propagation -----===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements constant propagation and merging: // // Specifically, this: // * Converts instructions like "add int 1, 2" into 3 // // Notice that: // * This pass has a habit of making definitions be dead. It is a good idea // to run a DIE pass sometime after running this pass. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/ConstantFolding.h" #include "llvm/IR/Constant.h" #include "llvm/IR/InstIterator.h" #include "llvm/IR/Instruction.h" #include "llvm/Pass.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include <set> using namespace llvm; #define DEBUG_TYPE "constprop" STATISTIC(NumInstKilled, "Number of instructions killed"); namespace { struct ConstantPropagation : public FunctionPass { static char ID; // Pass identification, replacement for typeid ConstantPropagation() : FunctionPass(ID) { initializeConstantPropagationPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function &F) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesCFG(); AU.addRequired<TargetLibraryInfoWrapperPass>(); } }; } char ConstantPropagation::ID = 0; INITIALIZE_PASS_BEGIN(ConstantPropagation, "constprop", "Simple constant propagation", false, false) INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) INITIALIZE_PASS_END(ConstantPropagation, "constprop", "Simple constant propagation", false, false) FunctionPass *llvm::createConstantPropagationPass() { return new ConstantPropagation(); } bool ConstantPropagation::runOnFunction(Function &F) { // Initialize the worklist to all of the instructions ready to process... std::set<Instruction*> WorkList; for(inst_iterator i = inst_begin(F), e = inst_end(F); i != e; ++i) { WorkList.insert(&*i); } bool Changed = false; const DataLayout &DL = F.getParent()->getDataLayout(); TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); while (!WorkList.empty()) { Instruction *I = *WorkList.begin(); WorkList.erase(WorkList.begin()); // Get an element from the worklist... if (!I->use_empty()) // Don't muck with dead instructions... if (Constant *C = ConstantFoldInstruction(I, DL, TLI)) { // Add all of the users of this instruction to the worklist, they might // be constant propagatable now... for (User *U : I->users()) WorkList.insert(cast<Instruction>(U)); // Replace all of the uses of a variable with uses of the constant. I->replaceAllUsesWith(C); // Remove the dead instruction. WorkList.erase(I); I->eraseFromParent(); // We made a change to the function... Changed = true; ++NumInstKilled; } } return Changed; }
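// A minimal sketch of driving the pass above on a toy module; assumes an
// LLVM 3.7-era tree and keeps error handling to a bare minimum.
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include <memory>

int main() {
  llvm::LLVMContext Ctx;
  llvm::SMDiagnostic Err;
  // Both defs fold: %a becomes 3, then %b becomes 12, and the now-dead
  // instructions are erased, leaving just "ret i32 12".
  std::unique_ptr<llvm::Module> M = llvm::parseAssemblyString(
      "define i32 @f() {\n"
      "  %a = add i32 1, 2\n"
      "  %b = mul i32 %a, 4\n"
      "  ret i32 %b\n"
      "}\n",
      Err, Ctx);
  if (!M) {
    Err.print("constprop-example", llvm::errs());
    return 1;
  }
  llvm::legacy::PassManager PM;
  PM.add(llvm::createConstantPropagationPass());
  PM.run(*M);
  M->print(llvm::outs(), nullptr);
  return 0;
}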
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/LoopInstSimplify.cpp
//===- LoopInstSimplify.cpp - Loop Instruction Simplification Pass --------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass performs lightweight instruction simplification on loop bodies. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/LoopPass.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Instructions.h" #include "llvm/Support/Debug.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Transforms/Utils/Local.h" using namespace llvm; #define DEBUG_TYPE "loop-instsimplify" STATISTIC(NumSimplified, "Number of redundant instructions simplified"); namespace { class LoopInstSimplify : public LoopPass { public: static char ID; // Pass ID, replacement for typeid LoopInstSimplify() : LoopPass(ID) { initializeLoopInstSimplifyPass(*PassRegistry::getPassRegistry()); } bool runOnLoop(Loop*, LPPassManager&) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesCFG(); AU.addRequired<AssumptionCacheTracker>(); AU.addRequired<LoopInfoWrapperPass>(); AU.addRequiredID(LoopSimplifyID); AU.addPreservedID(LoopSimplifyID); AU.addPreservedID(LCSSAID); AU.addPreserved<ScalarEvolution>(); AU.addRequired<TargetLibraryInfoWrapperPass>(); } }; } char LoopInstSimplify::ID = 0; INITIALIZE_PASS_BEGIN(LoopInstSimplify, "loop-instsimplify", "Simplify instructions in loops", false, false) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(LCSSA) INITIALIZE_PASS_END(LoopInstSimplify, "loop-instsimplify", "Simplify instructions in loops", false, false) Pass *llvm::createLoopInstSimplifyPass() { return new LoopInstSimplify(); } bool LoopInstSimplify::runOnLoop(Loop *L, LPPassManager &LPM) { if (skipOptnoneFunction(L)) return false; DominatorTreeWrapperPass *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>(); DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr; LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache( *L->getHeader()->getParent()); SmallVector<BasicBlock*, 8> ExitBlocks; L->getUniqueExitBlocks(ExitBlocks); array_pod_sort(ExitBlocks.begin(), ExitBlocks.end()); SmallPtrSet<const Instruction*, 8> S1, S2, *ToSimplify = &S1, *Next = &S2; // The bit we are stealing from the pointer represents whether this basic // block is the header of a subloop, in which case we only process its phis. 
typedef PointerIntPair<BasicBlock*, 1> WorklistItem; SmallVector<WorklistItem, 16> VisitStack; SmallPtrSet<BasicBlock*, 32> Visited; bool Changed = false; bool LocalChanged; do { LocalChanged = false; VisitStack.clear(); Visited.clear(); VisitStack.push_back(WorklistItem(L->getHeader(), false)); while (!VisitStack.empty()) { WorklistItem Item = VisitStack.pop_back_val(); BasicBlock *BB = Item.getPointer(); bool IsSubloopHeader = Item.getInt(); const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); // Simplify instructions in the current basic block. for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) { Instruction *I = BI++; // The first time through the loop ToSimplify is empty and we try to // simplify all instructions. On later iterations ToSimplify is not // empty and we only bother simplifying instructions that are in it. if (!ToSimplify->empty() && !ToSimplify->count(I)) continue; // Don't bother simplifying unused instructions. if (!I->use_empty()) { Value *V = SimplifyInstruction(I, DL, TLI, DT, &AC); if (V && LI->replacementPreservesLCSSAForm(I, V)) { // Mark all uses for resimplification next time round the loop. for (User *U : I->users()) Next->insert(cast<Instruction>(U)); I->replaceAllUsesWith(V); LocalChanged = true; ++NumSimplified; } } bool res = RecursivelyDeleteTriviallyDeadInstructions(I, TLI); if (res) { // RecursivelyDeleteTriviallyDeadInstruction can remove // more than one instruction, so simply incrementing the // iterator does not work. When instructions get deleted // re-iterate instead. BI = BB->begin(); BE = BB->end(); LocalChanged |= res; } if (IsSubloopHeader && !isa<PHINode>(I)) break; } // Add all successors to the worklist, except for loop exit blocks and the // bodies of subloops. We visit the headers of loops so that we can process // their phis, but we contract the rest of the subloop body and only follow // edges leading back to the original loop. for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI) { BasicBlock *SuccBB = *SI; if (!Visited.insert(SuccBB).second) continue; const Loop *SuccLoop = LI->getLoopFor(SuccBB); if (SuccLoop && SuccLoop->getHeader() == SuccBB && L->contains(SuccLoop)) { VisitStack.push_back(WorklistItem(SuccBB, true)); SmallVector<BasicBlock*, 8> SubLoopExitBlocks; SuccLoop->getExitBlocks(SubLoopExitBlocks); for (unsigned i = 0; i < SubLoopExitBlocks.size(); ++i) { BasicBlock *ExitBB = SubLoopExitBlocks[i]; if (LI->getLoopFor(ExitBB) == L && Visited.insert(ExitBB).second) VisitStack.push_back(WorklistItem(ExitBB, false)); } continue; } bool IsExitBlock = std::binary_search(ExitBlocks.begin(), ExitBlocks.end(), SuccBB); if (IsExitBlock) continue; VisitStack.push_back(WorklistItem(SuccBB, false)); } } // Place the list of instructions to simplify on the next loop iteration // into ToSimplify. std::swap(ToSimplify, Next); Next->clear(); Changed |= LocalChanged; } while (LocalChanged); return Changed; }
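// A small illustration of what the pass above catches, plus a hypothetical
// driver helper; assumes an LLVM 3.7-era tree where
// createLoopInstSimplifyPass() is declared in llvm/Transforms/Scalar.h.
//
// Before (inside a loop body):            After:
//   %t = add i32 %n, 0                      ; %t folded away and deleted
//   %i.next = add i32 %i, %t                %i.next = add i32 %i, %n
//
// SimplifyInstruction proves %t == %n, the use is rewritten (the user is also
// queued for resimplification on the next round), and the dead add is erased.
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Scalar.h"

static void runLoopInstSimplify(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  // LoopSimplify and the analysis dependencies are scheduled automatically.
  PM.add(llvm::createLoopInstSimplifyPass());
  PM.run(M);
}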
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/Reg2MemHLSL.cpp
//===- Reg2MemHLSL.cpp - Convert registers to allocas ---------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "llvm/ADT/Statistic.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/CFG.h" #include "llvm/IR/Function.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/Local.h" #include <list> using namespace llvm; #define DEBUG_TYPE "reg2mem_hlsl" STATISTIC(NumRegsDemotedHlsl, "Number of registers demoted"); STATISTIC(NumPhisDemotedHlsl, "Number of phi-nodes demoted"); namespace { struct RegToMemHlsl : public FunctionPass { static char ID; // Pass identification, replacement for typeid RegToMemHlsl() : FunctionPass(ID) { initializeRegToMemHlslPass(*PassRegistry::getPassRegistry()); } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequiredID(BreakCriticalEdgesID); AU.addPreservedID(BreakCriticalEdgesID); } bool valueEscapes(const Instruction *Inst) const { for (const User *U : Inst->users()) { const Instruction *UI = cast<Instruction>(U); if (isa<PHINode>(UI)) return true; } return false; } bool runOnFunction(Function &F) override; }; /// DemotePHIToStack - This function takes a virtual register computed by a /// PHI node and replaces it with a slot in the stack frame allocated via /// alloca. /// The PHI node is deleted. It returns the pointer to the alloca inserted. /// The difference of HLSL version is the new Alloca will be loaded for each /// use, for case a phi inside loop be used outside the loop. AllocaInst *DemotePHIToStack_HLSL(PHINode *P, Instruction *AllocaPoint) { if (P->use_empty()) { P->eraseFromParent(); return nullptr; } IRBuilder<> AllocaBuilder(P); if (!AllocaPoint) { Function *F = P->getParent()->getParent(); AllocaPoint = F->getEntryBlock().begin(); } AllocaBuilder.SetInsertPoint(AllocaPoint); // Create a stack slot to hold the value. AllocaInst *Slot = AllocaBuilder.CreateAlloca(P->getType(), nullptr, P->getName() + ".reg2mem"); // Insert a load in place of the PHI and replace all uses. BasicBlock::iterator InsertPt = P; for (; isa<PHINode>(InsertPt) || isa<LandingPadInst>(InsertPt); ++InsertPt) /* empty */; // Don't insert before PHI nodes or landingpad instrs. std::vector<Instruction *> WorkList; for (auto U = P->user_begin(); U != P->user_end();) { Instruction *I = cast<Instruction>(*(U++)); WorkList.emplace_back(I); } for (Instruction *I : WorkList) { IRBuilder<> Builder(I); Value *Load = Builder.CreateLoad(Slot); I->replaceUsesOfWith(P, Load); } // Iterate over each operand inserting a store in each predecessor. // This should be done after load inserting because store for phi must be // after all other instructions of the incoming block. for (unsigned i = 0, e = P->getNumIncomingValues(); i < e; ++i) { if (InvokeInst *II = dyn_cast<InvokeInst>(P->getIncomingValue(i))) { assert(II->getParent() != P->getIncomingBlock(i) && "Invoke edge not supported yet"); (void)II; } Value *V = P->getIncomingValue(i); // Skip undef if (isa<UndefValue>(V)) continue; new StoreInst(P->getIncomingValue(i), Slot, P->getIncomingBlock(i)->getTerminator()); } // Delete PHI. 
P->eraseFromParent(); return Slot; } /// DemoteRegToStack - This function takes a virtual register computed by an /// Instruction and replaces it with a slot in the stack frame, allocated via /// alloca. This allows the CFG to be changed around without fear of /// invalidating the SSA information for the value. It returns the pointer to /// the alloca inserted to create a stack slot for I. /// The difference of HLSL version is for I is Alloca, only replace new Alloca /// with old alloca, and HLSL don't have InvokeInst AllocaInst *DemoteRegToStack_HLSL(Instruction &I, bool VolatileLoads, Instruction *AllocaPoint) { if (I.use_empty()) { I.eraseFromParent(); return nullptr; } IRBuilder<> AllocaBuilder(&I); if (!AllocaPoint) { Function *F = I.getParent()->getParent(); AllocaPoint = F->getEntryBlock().begin(); } AllocaBuilder.SetInsertPoint(AllocaPoint); if (AllocaInst *AI = dyn_cast<AllocaInst>(&I)) { // Create a stack slot to hold the value. AllocaInst *Slot = AllocaBuilder.CreateAlloca( AI->getAllocatedType(), nullptr, I.getName() + ".reg2mem"); I.replaceAllUsesWith(Slot); I.eraseFromParent(); return Slot; } // Create a stack slot to hold the value. AllocaInst *Slot = AllocaBuilder.CreateAlloca(I.getType(), nullptr, I.getName() + ".reg2mem"); ; // Change all of the users of the instruction to read from the stack slot. while (!I.use_empty()) { Instruction *U = cast<Instruction>(I.user_back()); if (PHINode *PN = dyn_cast<PHINode>(U)) { // If this is a PHI node, we can't insert a load of the value before the // use. Instead insert the load in the predecessor block corresponding // to the incoming value. // // Note that if there are multiple edges from a basic block to this PHI // node that we cannot have multiple loads. The problem is that the // resulting PHI node will have multiple values (from each load) coming // in // from the same block, which is illegal SSA form. For this reason, we // keep track of and reuse loads we insert. DenseMap<BasicBlock *, Value *> Loads; for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) if (PN->getIncomingValue(i) == &I) { Value *&V = Loads[PN->getIncomingBlock(i)]; if (!V) { // Insert the load into the predecessor block V = new LoadInst(Slot, I.getName() + ".reload", VolatileLoads, PN->getIncomingBlock(i)->getTerminator()); } PN->setIncomingValue(i, V); } } else { // If this is a normal instruction, just insert a load. Value *V = new LoadInst(Slot, I.getName() + ".reload", VolatileLoads, U); U->replaceUsesOfWith(&I, V); } } // Insert stores of the computed value into the stack slot. We have to be // careful if I is an invoke instruction, because we can't insert the store // AFTER the terminator instruction. BasicBlock::iterator InsertPt; if (!isa<TerminatorInst>(I)) { InsertPt = &I; ++InsertPt; for (; isa<PHINode>(InsertPt) || isa<LandingPadInst>(InsertPt); ++InsertPt) /* empty */; // Don't insert before PHI nodes or landingpad instrs. } else { InvokeInst &II = cast<InvokeInst>(I); InsertPt = II.getNormalDest()->getFirstInsertionPt(); } new StoreInst(&I, Slot, InsertPt); return Slot; } } // namespace char RegToMemHlsl::ID = 0; INITIALIZE_PASS_BEGIN(RegToMemHlsl, "reg2mem_hlsl", "Demote values with phi-node usage to stack slots", false, false) INITIALIZE_PASS_DEPENDENCY(BreakCriticalEdges) INITIALIZE_PASS_END(RegToMemHlsl, "reg2mem_hlsl", "Demote values with phi-node usage to stack slots", false, false) bool RegToMemHlsl::runOnFunction(Function &F) { if (F.isDeclaration()) return false; // Insert all new allocas into entry block. 
BasicBlock *BBEntry = &F.getEntryBlock(); assert(pred_empty(BBEntry) && "Entry block to function must not have predecessors!"); // Find first non-alloca instruction and create insertion point. This is // safe if block is well-formed: it always have terminator, otherwise // we'll get and assertion. BasicBlock::iterator I = BBEntry->begin(); while (isa<AllocaInst>(I)) ++I; CastInst *AllocaInsertionPoint = new BitCastInst( Constant::getNullValue(Type::getInt32Ty(F.getContext())), Type::getInt32Ty(F.getContext()), "reg2mem_hlsl alloca point", I); // Find the escaped instructions. But don't create stack slots for // allocas in entry block. std::list<Instruction *> WorkList; for (Function::iterator ibb = F.begin(), ibe = F.end(); ibb != ibe; ++ibb) for (BasicBlock::iterator iib = ibb->begin(), iie = ibb->end(); iib != iie; ++iib) { if (!(isa<AllocaInst>(iib) && iib->getParent() == BBEntry) && valueEscapes(iib)) { WorkList.push_front(&*iib); } } // Demote escaped instructions NumRegsDemotedHlsl += WorkList.size(); for (std::list<Instruction *>::iterator ilb = WorkList.begin(), ile = WorkList.end(); ilb != ile; ++ilb) DemoteRegToStack_HLSL(**ilb, false, AllocaInsertionPoint); WorkList.clear(); // Find all phi's for (Function::iterator ibb = F.begin(), ibe = F.end(); ibb != ibe; ++ibb) for (BasicBlock::iterator iib = ibb->begin(), iie = ibb->end(); iib != iie; ++iib) if (isa<PHINode>(iib)) WorkList.push_front(&*iib); // Demote phi nodes NumPhisDemotedHlsl += WorkList.size(); for (std::list<Instruction *>::iterator ilb = WorkList.begin(), ile = WorkList.end(); ilb != ile; ++ilb) DemotePHIToStack_HLSL(cast<PHINode>(*ilb), AllocaInsertionPoint); return true; } // createDemoteRegisterToMemoryHlsl - Provide an entry point to create this // pass. char &llvm::DemoteRegisterToMemoryHlslID = RegToMemHlsl::ID; FunctionPass *llvm::createDemoteRegisterToMemoryHlslPass() { return new RegToMemHlsl(); }
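// A sketch of the demotion performed above and a hypothetical driver helper;
// assumes this tree declares createDemoteRegisterToMemoryHlslPass() alongside
// the other scalar passes in llvm/Transforms/Scalar.h.
//
// Before:
//   merge:
//     %x = phi i32 [ %a, %bb0 ], [ %b, %bb1 ]
//     ... uses of %x ...
//
// After (roughly): a slot "%x.reg2mem" is alloca'd in the entry block, each
// predecessor stores its incoming value into the slot before its terminator,
// and every former use of %x reads a fresh load of the slot instead, which is
// what keeps values defined inside a loop usable after the loop.
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Scalar.h"

static void demotePhisToMemory(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  // BreakCriticalEdges is a declared dependency and is scheduled automatically.
  PM.add(llvm::createDemoteRegisterToMemoryHlslPass());
  PM.run(M);
}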
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
//===--- PartiallyInlineLibCalls.cpp - Partially inline libcalls ----------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass tries to partially inline the fast path of well-known library // functions, such as using square-root instructions for cases where sqrt() // does not need to set errno. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Intrinsics.h" #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" using namespace llvm; #define DEBUG_TYPE "partially-inline-libcalls" namespace { class PartiallyInlineLibCalls : public FunctionPass { public: static char ID; PartiallyInlineLibCalls() : FunctionPass(ID) { initializePartiallyInlineLibCallsPass(*PassRegistry::getPassRegistry()); } void getAnalysisUsage(AnalysisUsage &AU) const override; bool runOnFunction(Function &F) override; private: /// Optimize calls to sqrt. bool optimizeSQRT(CallInst *Call, Function *CalledFunc, BasicBlock &CurrBB, Function::iterator &BB); }; char PartiallyInlineLibCalls::ID = 0; } INITIALIZE_PASS(PartiallyInlineLibCalls, "partially-inline-libcalls", "Partially inline calls to library functions", false, false) void PartiallyInlineLibCalls::getAnalysisUsage(AnalysisUsage &AU) const { AU.addRequired<TargetLibraryInfoWrapperPass>(); AU.addRequired<TargetTransformInfoWrapperPass>(); FunctionPass::getAnalysisUsage(AU); } bool PartiallyInlineLibCalls::runOnFunction(Function &F) { bool Changed = false; Function::iterator CurrBB; TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); const TargetTransformInfo *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); for (Function::iterator BB = F.begin(), BE = F.end(); BB != BE;) { CurrBB = BB++; for (BasicBlock::iterator II = CurrBB->begin(), IE = CurrBB->end(); II != IE; ++II) { CallInst *Call = dyn_cast<CallInst>(&*II); Function *CalledFunc; if (!Call || !(CalledFunc = Call->getCalledFunction())) continue; // Skip if function either has local linkage or is not a known library // function. LibFunc::Func LibFunc; if (CalledFunc->hasLocalLinkage() || !CalledFunc->hasName() || !TLI->getLibFunc(CalledFunc->getName(), LibFunc)) continue; switch (LibFunc) { case LibFunc::sqrtf: case LibFunc::sqrt: if (TTI->haveFastSqrt(Call->getType()) && optimizeSQRT(Call, CalledFunc, *CurrBB, BB)) break; continue; default: continue; } Changed = true; break; } } return Changed; } bool PartiallyInlineLibCalls::optimizeSQRT(CallInst *Call, Function *CalledFunc, BasicBlock &CurrBB, Function::iterator &BB) { // There is no need to change the IR, since backend will emit sqrt // instruction if the call has already been marked read-only. if (Call->onlyReadsMemory()) return false; // The call must have the expected result type. if (!Call->getType()->isFloatingPointTy()) return false; // Do the following transformation: // // (before) // dst = sqrt(src) // // (after) // v0 = sqrt_noreadmem(src) # native sqrt instruction. // if (v0 is a NaN) // v1 = sqrt(src) # library call. // dst = phi(v0, v1) // // Move all instructions following Call to newly created block JoinBB. 
// Create phi and replace all uses. BasicBlock *JoinBB = llvm::SplitBlock(&CurrBB, Call->getNextNode()); IRBuilder<> Builder(JoinBB, JoinBB->begin()); PHINode *Phi = Builder.CreatePHI(Call->getType(), 2); Call->replaceAllUsesWith(Phi); // Create basic block LibCallBB and insert a call to library function sqrt. BasicBlock *LibCallBB = BasicBlock::Create(CurrBB.getContext(), "call.sqrt", CurrBB.getParent(), JoinBB); Builder.SetInsertPoint(LibCallBB); Instruction *LibCall = Call->clone(); Builder.Insert(LibCall); Builder.CreateBr(JoinBB); // Add attribute "readnone" so that backend can use a native sqrt instruction // for this call. Insert a FP compare instruction and a conditional branch // at the end of CurrBB. Call->addAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone); CurrBB.getTerminator()->eraseFromParent(); Builder.SetInsertPoint(&CurrBB); Value *FCmp = Builder.CreateFCmpOEQ(Call, Call); Builder.CreateCondBr(FCmp, JoinBB, LibCallBB); // Add phi operands. Phi->addIncoming(Call, &CurrBB); Phi->addIncoming(LibCall, LibCallBB); BB = JoinBB; return true; } FunctionPass *llvm::createPartiallyInlineLibCallsPass() { return new PartiallyInlineLibCalls(); }
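// A hypothetical driver helper for the pass above; assumes an LLVM 3.7-era
// tree where createPartiallyInlineLibCallsPass() is declared in
// llvm/Transforms/Scalar.h. After the rewrite, the fast path executes the
// readnone sqrt call (which the backend can lower to a native instruction),
// and the cloned library call only runs on the NaN path, where errno may need
// to be set.
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Scalar.h"

static void partiallyInlineLibCalls(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  // TargetLibraryInfo and TargetTransformInfo are scheduled automatically.
  PM.add(llvm::createPartiallyInlineLibCallsPass());
  PM.run(M);
}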
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/DxilEraseDeadRegion.cpp
//===- DxilEraseDeadRegion.cpp - Heuristically Remove Dead Region ---------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // Overview: // 1. Identify potentially dead regions by finding blocks with multiple // predecessors but no PHIs // 2. Find common dominant ancestor of all the predecessors // 3. Ensure original block post-dominates the ancestor // 4. Ensure no instructions in the region have side effects (not including // original block and ancestor) // 5. Remove all blocks in the region (excluding original block and ancestor) // #include "llvm/ADT/SetVector.h" #include "llvm/Analysis/CFG.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/PostDominators.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/Function.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/Local.h" #include "dxc/DXIL/DxilMetadataHelper.h" #include "dxc/DXIL/DxilOperations.h" #include "dxc/DXIL/DxilUtil.h" #include "dxc/HLSL/DxilNoops.h" #include <unordered_map> #include <unordered_set> using namespace llvm; using namespace hlsl; // TODO: Could probably move this to a common place at some point. namespace { struct MiniDCE { // Use a set vector because the same value could be added more than once, // which could lead to double free. SetVector<Instruction *> Worklist; void EraseAndProcessOperands(Instruction *TopI); }; void MiniDCE::EraseAndProcessOperands(Instruction *TopI) { Worklist.clear(); for (Value *Op : TopI->operands()) { if (Instruction *OpI = dyn_cast<Instruction>(Op)) Worklist.insert(OpI); } TopI->eraseFromParent(); TopI = nullptr; while (Worklist.size()) { Instruction *I = Worklist.pop_back_val(); if (llvm::isInstructionTriviallyDead(I)) { for (Value *Op : I->operands()) { if (Instruction *OpI = dyn_cast<Instruction>(Op)) Worklist.insert(OpI); } I->eraseFromParent(); } } } } // namespace struct DxilEraseDeadRegion : public FunctionPass { static char ID; DxilEraseDeadRegion() : FunctionPass(ID) { initializeDxilEraseDeadRegionPass(*PassRegistry::getPassRegistry()); } std::unordered_map<BasicBlock *, bool> m_SafeBlocks; MiniDCE m_DCE; // Replace all uses of every instruction in a block with undefs void UndefBasicBlock(BasicBlock *BB) { while (BB->begin() != BB->end()) { Instruction *I = &BB->back(); if (!I->user_empty()) I->replaceAllUsesWith(UndefValue::get(I->getType())); m_DCE.EraseAndProcessOperands(I); } } // Wave Ops are marked as having side effects to avoid moving them across // control flow. But they're safe to remove if unused. bool IsWaveIntrinsic(Instruction *I) { if (CallInst *CI = dyn_cast<CallInst>(I)) { if (hlsl::OP::IsDxilOpFuncCallInst(CI)) { DXIL::OpCode opcode = hlsl::OP::GetDxilOpFuncCallInst(CI); if (hlsl::OP::IsDxilOpWave(opcode)) return true; } } return false; } // This function takes in a basic block, and a *complete* region that this // block is in, and checks whether it's safe to delete this block as part of // this region (i.e. if any values defined in the block are used outside of // the region) and whether it's safe to delete the block in general (side // effects). 
bool SafeToDeleteBlock(BasicBlock *BB, const std::set<BasicBlock *> &Region) { assert(Region.count( BB)); // Region must be a complete region that contains the block. auto FindIt = m_SafeBlocks.find(BB); if (FindIt != m_SafeBlocks.end()) { return FindIt->second; } // Make sure all insts are safe to delete // (no side effects, etc.) bool ValuesReferencedOutsideOfBlock = false; bool ValuesReferencedOutsideOfRegion = false; for (Instruction &I : *BB) { for (User *U : I.users()) { if (Instruction *UI = dyn_cast<Instruction>(U)) { BasicBlock *UB = UI->getParent(); if (UB != BB) { ValuesReferencedOutsideOfBlock = true; if (!Region.count(UB)) ValuesReferencedOutsideOfRegion = true; } } } // Wave intrinsics are technically read-only and safe to delete if (IsWaveIntrinsic(&I)) continue; if (I.mayHaveSideEffects() && !hlsl::IsNop(&I)) { m_SafeBlocks[BB] = false; return false; } } if (ValuesReferencedOutsideOfRegion) return false; // If the block's defs are entirely referenced within the block itself, // it'll remain safe to delete no matter the region. if (!ValuesReferencedOutsideOfBlock) m_SafeBlocks[BB] = true; return true; } // Find a region of blocks between `Begin` and `End` that are entirely self // contained and produce no values that leave the region. bool FindDeadRegion(DominatorTree *DT, PostDominatorTree *PDT, BasicBlock *Begin, BasicBlock *End, std::set<BasicBlock *> &Region) { std::vector<BasicBlock *> WorkList; auto ProcessSuccessors = [DT, PDT, &WorkList, Begin, End, &Region](BasicBlock *BB) { for (BasicBlock *Succ : successors(BB)) { if (Succ == End) continue; if (Region.count(Succ)) continue; // Make sure it's safely inside the region. if (!DT->properlyDominates(Begin, Succ) || !PDT->properlyDominates(End, Succ)) return false; WorkList.push_back(Succ); Region.insert(Succ); } return true; }; if (!ProcessSuccessors(Begin)) return false; while (WorkList.size()) { BasicBlock *BB = WorkList.back(); WorkList.pop_back(); if (!ProcessSuccessors(BB)) return false; } if (Region.empty()) return false; for (BasicBlock *BB : Region) { // Give up if there are any edges coming from outside of the region // anywhere other than `Begin`. for (auto PredIt = llvm::pred_begin(BB); PredIt != llvm::pred_end(BB); PredIt++) { BasicBlock *PredBB = *PredIt; if (PredBB != Begin && !Region.count(PredBB)) return false; } // Check side effects etc. 
if (!this->SafeToDeleteBlock(BB, Region)) return false; } return true; } static bool IsMetadataKind(LLVMContext &Ctx, unsigned TargetID, StringRef MDKind) { unsigned ID = 0; if (Ctx.findMDKindID(MDKind, &ID)) return TargetID == ID; return false; } static bool HasUnsafeMetadata(Instruction *I) { SmallVector<std::pair<unsigned, MDNode *>, 4> MDs; I->getAllMetadata(MDs); LLVMContext &Context = I->getContext(); for (auto &p : MDs) { if (p.first == (unsigned)LLVMContext::MD_dbg) continue; if (IsMetadataKind(Context, p.first, DxilMDHelper::kDxilControlFlowHintMDName)) continue; return true; } return false; } bool TrySimplify(DominatorTree *DT, PostDominatorTree *PDT, LoopInfo *LI, BasicBlock *BB) { // Give up if BB has any Phis if (BB->begin() != BB->end() && isa<PHINode>(BB->begin())) return false; std::vector<BasicBlock *> Predecessors(pred_begin(BB), pred_end(BB)); if (Predecessors.size() < 2) return false; // Find the common ancestor of all the predecessors BasicBlock *Common = DT->findNearestCommonDominator(Predecessors[0], Predecessors[1]); if (!Common) return false; for (unsigned i = 2; i < Predecessors.size(); i++) { Common = DT->findNearestCommonDominator(Common, Predecessors[i]); if (!Common) return false; } // If there are any metadata on Common block's branch, give up. if (HasUnsafeMetadata(Common->getTerminator())) return false; if (!DT->properlyDominates(Common, BB)) return false; if (!PDT->properlyDominates(BB, Common)) return false; std::set<BasicBlock *> Region; if (!this->FindDeadRegion(DT, PDT, Common, BB, Region)) return false; // Replace Common's branch with an unconditional branch to BB m_DCE.EraseAndProcessOperands(Common->getTerminator()); BranchInst::Create(BB, Common); DeleteRegion(Region, LI); return true; } // Only call this after all the incoming branches have // been removed. void DeleteRegion(std::set<BasicBlock *> &Region, LoopInfo *LI) { for (BasicBlock *BB : Region) { UndefBasicBlock(BB); // Don't leave any dangling pointers in the LoopInfo for subsequent // iterations. But don't bother to delete the (possibly now empty) Loop // objects, just leave them empty. LI->removeBlock(BB); } // All blocks should be empty now, so walking the set is fine for (BasicBlock *BB : Region) { assert((BB->size() == 0) && "Trying to delete a non-empty basic block!"); BB->eraseFromParent(); } } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<DominatorTreeWrapperPass>(); AU.addRequired<PostDominatorTree>(); AU.addRequired<LoopInfoWrapperPass>(); } // Go through list of all the loops and delete ones that definitely don't // contribute any outputs. Delete the loop if there's no side effects in the // loop, and the loop has no exit values at all. bool TryRemoveSimpleDeadLoops(LoopInfo *LI) { bool Changed = false; SmallVector<Loop *, 4> LoopWorklist; for (Loop *L : *LI) { LoopWorklist.push_back(L); } std::set<BasicBlock *> LoopRegion; while (LoopWorklist.size()) { Loop *L = LoopWorklist.pop_back_val(); // Skip empty loops. if (L->block_begin() == L->block_end()) continue; bool LoopSafeToDelete = true; BasicBlock *Preheader = L->getLoopPreheader(); BasicBlock *ExitBB = L->getExitBlock(); // If there's not a single preheader and exit block, give up. Those cases // can probably be handled by normal region deletion heuristic anyways. if (!Preheader || !ExitBB || !ExitBB->getSinglePredecessor()) { LoopSafeToDelete = false; } // Check if any values of the loop are used outside the loop. 
if (LoopSafeToDelete) { LoopRegion.clear(); for (BasicBlock *BB : L->getBlocks()) LoopRegion.insert(BB); for (BasicBlock *BB : L->getBlocks()) { if (!this->SafeToDeleteBlock(BB, LoopRegion)) { LoopSafeToDelete = false; break; } } } if (LoopSafeToDelete) { // Modify any phi nodes in the exit block to be incoming from // the preheader instead of the exiting BB. BasicBlock *ExitingBlock = L->getExitingBlock(); for (Instruction &I : *ExitBB) { PHINode *Phi = dyn_cast<PHINode>(&I); if (!Phi) break; assert(Phi->getNumIncomingValues() == 1); for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) { if (Phi->getIncomingBlock(i) == ExitingBlock) { Phi->setIncomingBlock(i, Preheader); } } } // Re-branch anything that went to the loop's header to the loop's sole // exit. TerminatorInst *TI = Preheader->getTerminator(); TI->replaceUsesOfWith(L->getHeader(), ExitBB); DeleteRegion(LoopRegion, LI); Changed = true; } else { for (Loop *ChildLoop : *L) LoopWorklist.push_back(ChildLoop); } } return Changed; } bool runOnFunction(Function &F) override { auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); auto *PDT = &getAnalysis<PostDominatorTree>(); auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); bool Changed = false; while (1) { bool LocalChanged = false; LocalChanged |= hlsl::dxilutil::DeleteDeadAllocas(F); LocalChanged |= this->TryRemoveSimpleDeadLoops(LI); for (Function::iterator It = F.begin(), E = F.end(); It != E; It++) { BasicBlock &BB = *It; if (this->TrySimplify(DT, PDT, LI, &BB)) { LocalChanged = true; break; } } Changed |= LocalChanged; if (!LocalChanged) break; } return Changed; } }; char DxilEraseDeadRegion::ID; Pass *llvm::createDxilEraseDeadRegionPass() { return new DxilEraseDeadRegion(); } INITIALIZE_PASS_BEGIN(DxilEraseDeadRegion, "dxil-erase-dead-region", "Dxil Erase Dead Region", false, false) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(PostDominatorTree) INITIALIZE_PASS_END(DxilEraseDeadRegion, "dxil-erase-dead-region", "Dxil Erase Dead Region", false, false)
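// A hypothetical driver helper, and the shape of CFG the heuristic above
// removes; assumes this tree declares createDxilEraseDeadRegionPass() next to
// the other scalar passes in llvm/Transforms/Scalar.h.
//
//        common                 common
//        /    \                   |
//      bb1    bb2      =>         |
//        \    /                   |
//       bb (no PHIs)              bb
//
// If bb1 and bb2 have no side effects and define nothing used outside the
// region, the diamond collapses to an unconditional branch from common to bb.
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Scalar.h"

static void eraseDeadRegions(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  // DominatorTree, PostDominatorTree and LoopInfo are scheduled automatically.
  PM.add(llvm::createDxilEraseDeadRegionPass());
  PM.run(M);
}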
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
//===- CorrelatedValuePropagation.cpp - Propagate CFG-derived info --------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the Correlated Value Propagation pass. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/LazyValueInfo.h" #include "llvm/IR/CFG.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Function.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/Local.h" using namespace llvm; #define DEBUG_TYPE "correlated-value-propagation" STATISTIC(NumPhis, "Number of phis propagated"); STATISTIC(NumSelects, "Number of selects propagated"); STATISTIC(NumMemAccess, "Number of memory access targets propagated"); STATISTIC(NumCmps, "Number of comparisons propagated"); STATISTIC(NumDeadCases, "Number of switch cases removed"); namespace { class CorrelatedValuePropagation : public FunctionPass { LazyValueInfo *LVI; bool processSelect(SelectInst *SI); bool processPHI(PHINode *P); bool processMemAccess(Instruction *I); bool processCmp(CmpInst *C); bool processSwitch(SwitchInst *SI); public: static char ID; CorrelatedValuePropagation(): FunctionPass(ID) { initializeCorrelatedValuePropagationPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function &F) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<LazyValueInfo>(); } }; } char CorrelatedValuePropagation::ID = 0; INITIALIZE_PASS_BEGIN(CorrelatedValuePropagation, "correlated-propagation", "Value Propagation", false, false) INITIALIZE_PASS_DEPENDENCY(LazyValueInfo) INITIALIZE_PASS_END(CorrelatedValuePropagation, "correlated-propagation", "Value Propagation", false, false) // Public interface to the Value Propagation pass Pass *llvm::createCorrelatedValuePropagationPass() { return new CorrelatedValuePropagation(); } bool CorrelatedValuePropagation::processSelect(SelectInst *S) { if (S->getType()->isVectorTy()) return false; if (isa<Constant>(S->getOperand(0))) return false; Constant *C = LVI->getConstant(S->getOperand(0), S->getParent(), S); if (!C) return false; ConstantInt *CI = dyn_cast<ConstantInt>(C); if (!CI) return false; Value *ReplaceWith = S->getOperand(1); Value *Other = S->getOperand(2); if (!CI->isOne()) std::swap(ReplaceWith, Other); if (ReplaceWith == S) ReplaceWith = UndefValue::get(S->getType()); S->replaceAllUsesWith(ReplaceWith); S->eraseFromParent(); ++NumSelects; return true; } bool CorrelatedValuePropagation::processPHI(PHINode *P) { bool Changed = false; BasicBlock *BB = P->getParent(); for (unsigned i = 0, e = P->getNumIncomingValues(); i < e; ++i) { Value *Incoming = P->getIncomingValue(i); if (isa<Constant>(Incoming)) continue; Value *V = LVI->getConstantOnEdge(Incoming, P->getIncomingBlock(i), BB, P); // Look if the incoming value is a select with a scalar condition for which // LVI can tells us the value. In that case replace the incoming value with // the appropriate value of the select. This often allows us to remove the // select later. 
if (!V) { SelectInst *SI = dyn_cast<SelectInst>(Incoming); if (!SI) continue; Value *Condition = SI->getCondition(); if (!Condition->getType()->isVectorTy()) { if (Constant *C = LVI->getConstantOnEdge( Condition, P->getIncomingBlock(i), BB, P)) { if (C->isOneValue()) { V = SI->getTrueValue(); } else if (C->isZeroValue()) { V = SI->getFalseValue(); } // Once LVI learns to handle vector types, we could also add support // for vector type constants that are not all zeroes or all ones. } } // Look if the select has a constant but LVI tells us that the incoming // value can never be that constant. In that case replace the incoming // value with the other value of the select. This often allows us to // remove the select later. if (!V) { Constant *C = dyn_cast<Constant>(SI->getFalseValue()); if (!C) continue; if (LVI->getPredicateOnEdge(ICmpInst::ICMP_EQ, SI, C, P->getIncomingBlock(i), BB, P) != LazyValueInfo::False) continue; V = SI->getTrueValue(); } DEBUG(dbgs() << "CVP: Threading PHI over " << *SI << '\n'); } P->setIncomingValue(i, V); Changed = true; } // FIXME: Provide TLI, DT, AT to SimplifyInstruction. const DataLayout &DL = BB->getModule()->getDataLayout(); if (Value *V = SimplifyInstruction(P, DL)) { P->replaceAllUsesWith(V); P->eraseFromParent(); Changed = true; } if (Changed) ++NumPhis; return Changed; } bool CorrelatedValuePropagation::processMemAccess(Instruction *I) { Value *Pointer = nullptr; if (LoadInst *L = dyn_cast<LoadInst>(I)) Pointer = L->getPointerOperand(); else Pointer = cast<StoreInst>(I)->getPointerOperand(); if (isa<Constant>(Pointer)) return false; Constant *C = LVI->getConstant(Pointer, I->getParent(), I); if (!C) return false; ++NumMemAccess; I->replaceUsesOfWith(Pointer, C); return true; } /// processCmp - If the value of this comparison could be determined locally, /// constant propagation would already have figured it out. Instead, walk /// the predecessors and statically evaluate the comparison based on information /// available on that edge. If a given static evaluation is true on ALL /// incoming edges, then it's true universally and we can simplify the compare. bool CorrelatedValuePropagation::processCmp(CmpInst *C) { Value *Op0 = C->getOperand(0); if (isa<Instruction>(Op0) && cast<Instruction>(Op0)->getParent() == C->getParent()) return false; Constant *Op1 = dyn_cast<Constant>(C->getOperand(1)); if (!Op1) return false; pred_iterator PI = pred_begin(C->getParent()), PE = pred_end(C->getParent()); if (PI == PE) return false; LazyValueInfo::Tristate Result = LVI->getPredicateOnEdge(C->getPredicate(), C->getOperand(0), Op1, *PI, C->getParent(), C); if (Result == LazyValueInfo::Unknown) return false; ++PI; while (PI != PE) { LazyValueInfo::Tristate Res = LVI->getPredicateOnEdge(C->getPredicate(), C->getOperand(0), Op1, *PI, C->getParent(), C); if (Res != Result) return false; ++PI; } ++NumCmps; if (Result == LazyValueInfo::True) C->replaceAllUsesWith(ConstantInt::getTrue(C->getContext())); else C->replaceAllUsesWith(ConstantInt::getFalse(C->getContext())); C->eraseFromParent(); return true; } /// processSwitch - Simplify a switch instruction by removing cases which can /// never fire. If the uselessness of a case could be determined locally then /// constant propagation would already have figured it out. Instead, walk the /// predecessors and statically evaluate cases based on information available /// on that edge. Cases that cannot fire no matter what the incoming edge can /// safely be removed. 
If a case fires on every incoming edge then the entire /// switch can be removed and replaced with a branch to the case destination. bool CorrelatedValuePropagation::processSwitch(SwitchInst *SI) { Value *Cond = SI->getCondition(); BasicBlock *BB = SI->getParent(); // If the condition was defined in same block as the switch then LazyValueInfo // currently won't say anything useful about it, though in theory it could. if (isa<Instruction>(Cond) && cast<Instruction>(Cond)->getParent() == BB) return false; // If the switch is unreachable then trying to improve it is a waste of time. pred_iterator PB = pred_begin(BB), PE = pred_end(BB); if (PB == PE) return false; // Analyse each switch case in turn. This is done in reverse order so that // removing a case doesn't cause trouble for the iteration. bool Changed = false; for (SwitchInst::CaseIt CI = SI->case_end(), CE = SI->case_begin(); CI-- != CE; ) { ConstantInt *Case = CI.getCaseValue(); // Check to see if the switch condition is equal to/not equal to the case // value on every incoming edge, equal/not equal being the same each time. LazyValueInfo::Tristate State = LazyValueInfo::Unknown; for (pred_iterator PI = PB; PI != PE; ++PI) { // Is the switch condition equal to the case value? LazyValueInfo::Tristate Value = LVI->getPredicateOnEdge(CmpInst::ICMP_EQ, Cond, Case, *PI, BB, SI); // Give up on this case if nothing is known. if (Value == LazyValueInfo::Unknown) { State = LazyValueInfo::Unknown; break; } // If this was the first edge to be visited, record that all other edges // need to give the same result. if (PI == PB) { State = Value; continue; } // If this case is known to fire for some edges and known not to fire for // others then there is nothing we can do - give up. if (Value != State) { State = LazyValueInfo::Unknown; break; } } if (State == LazyValueInfo::False) { // This case never fires - remove it. CI.getCaseSuccessor()->removePredecessor(BB); SI->removeCase(CI); // Does not invalidate the iterator. // The condition can be modified by removePredecessor's PHI simplification // logic. Cond = SI->getCondition(); ++NumDeadCases; Changed = true; } else if (State == LazyValueInfo::True) { // This case always fires. Arrange for the switch to be turned into an // unconditional branch by replacing the switch condition with the case // value. SI->setCondition(Case); NumDeadCases += SI->getNumCases(); Changed = true; break; } } if (Changed) // If the switch has been simplified to the point where it can be replaced // by a branch then do so now. ConstantFoldTerminator(BB); return Changed; } bool CorrelatedValuePropagation::runOnFunction(Function &F) { if (skipOptnoneFunction(F)) return false; LVI = &getAnalysis<LazyValueInfo>(); bool FnChanged = false; for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) { bool BBChanged = false; for (BasicBlock::iterator BI = FI->begin(), BE = FI->end(); BI != BE; ) { Instruction *II = BI++; switch (II->getOpcode()) { case Instruction::Select: BBChanged |= processSelect(cast<SelectInst>(II)); break; case Instruction::PHI: BBChanged |= processPHI(cast<PHINode>(II)); break; case Instruction::ICmp: case Instruction::FCmp: BBChanged |= processCmp(cast<CmpInst>(II)); break; case Instruction::Load: case Instruction::Store: BBChanged |= processMemAccess(II); break; } } Instruction *Term = FI->getTerminator(); switch (Term->getOpcode()) { case Instruction::Switch: BBChanged |= processSwitch(cast<SwitchInst>(Term)); break; } FnChanged |= BBChanged; } return FnChanged; }
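// A small example of the edge-based reasoning above, plus a hypothetical
// driver helper; assumes an LLVM 3.7-era tree where
// createCorrelatedValuePropagationPass() is declared in
// llvm/Transforms/Scalar.h.
//
//   entry:
//     %cmp = icmp eq i32 %x, 0
//     br i1 %cmp, label %sw, label %other
//   sw:                                      ; only reachable with %x == 0
//     switch i32 %x, label %def [ i32 0, label %a
//                                 i32 1, label %b ]
//
// LazyValueInfo proves %x == 0 on the only edge into %sw, so the 0 case fires
// on every incoming edge: processSwitch replaces the condition with the
// constant 0 and ConstantFoldTerminator collapses the switch to "br label %a".
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Scalar.h"

static void runCVP(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  PM.add(llvm::createCorrelatedValuePropagationPass());
  PM.run(M);
}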
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/SCCP.cpp
//===- SCCP.cpp - Sparse Conditional Constant Propagation -----------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements sparse conditional constant propagation and merging: // // Specifically, this: // * Assumes values are constant unless proven otherwise // * Assumes BasicBlocks are dead unless proven otherwise // * Proves values to be constant, and replaces them with constants // * Proves conditional branches to be unconditional // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/ConstantFolding.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/InstVisitor.h" #include "llvm/IR/Instructions.h" #include "llvm/Pass.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/IPO.h" #include "llvm/Transforms/Utils/Local.h" #include <algorithm> using namespace llvm; #define DEBUG_TYPE "sccp" STATISTIC(NumInstRemoved, "Number of instructions removed"); STATISTIC(NumDeadBlocks , "Number of basic blocks unreachable"); STATISTIC(IPNumInstRemoved, "Number of instructions removed by IPSCCP"); STATISTIC(IPNumArgsElimed ,"Number of arguments constant propagated by IPSCCP"); STATISTIC(IPNumGlobalConst, "Number of globals found to be constant by IPSCCP"); namespace { /// LatticeVal class - This class represents the different lattice values that /// an LLVM value may occupy. It is a simple class with value semantics. /// class LatticeVal { enum LatticeValueTy { /// undefined - This LLVM Value has no known value yet. undefined, /// constant - This LLVM Value has a specific constant value. constant, /// forcedconstant - This LLVM Value was thought to be undef until /// ResolvedUndefsIn. This is treated just like 'constant', but if merged /// with another (different) constant, it goes to overdefined, instead of /// asserting. forcedconstant, /// overdefined - This instruction is not known to be constant, and we know /// it has a value. overdefined }; /// Val: This stores the current lattice value along with the Constant* for /// the constant if this is a 'constant' or 'forcedconstant' value. PointerIntPair<Constant *, 2, LatticeValueTy> Val; LatticeValueTy getLatticeValue() const { return Val.getInt(); } public: LatticeVal() : Val(nullptr, undefined) {} bool isUndefined() const { return getLatticeValue() == undefined; } bool isConstant() const { return getLatticeValue() == constant || getLatticeValue() == forcedconstant; } bool isOverdefined() const { return getLatticeValue() == overdefined; } Constant *getConstant() const { assert(isConstant() && "Cannot get the constant of a non-constant!"); return Val.getPointer(); } /// markOverdefined - Return true if this is a change in status. bool markOverdefined() { if (isOverdefined()) return false; Val.setInt(overdefined); return true; } /// markConstant - Return true if this is a change in status. 
bool markConstant(Constant *V) { if (getLatticeValue() == constant) { // Constant but not forcedconstant. assert(getConstant() == V && "Marking constant with different value"); return false; } if (isUndefined()) { Val.setInt(constant); assert(V && "Marking constant with NULL"); Val.setPointer(V); } else { assert(getLatticeValue() == forcedconstant && "Cannot move from overdefined to constant!"); // Stay at forcedconstant if the constant is the same. if (V == getConstant()) return false; // Otherwise, we go to overdefined. Assumptions made based on the // forced value are possibly wrong. Assuming this is another constant // could expose a contradiction. Val.setInt(overdefined); } return true; } /// getConstantInt - If this is a constant with a ConstantInt value, return it /// otherwise return null. ConstantInt *getConstantInt() const { if (isConstant()) return dyn_cast<ConstantInt>(getConstant()); return nullptr; } void markForcedConstant(Constant *V) { assert(isUndefined() && "Can't force a defined value!"); Val.setInt(forcedconstant); Val.setPointer(V); } }; } // end anonymous namespace. namespace { //===----------------------------------------------------------------------===// // /// SCCPSolver - This class is a general purpose solver for Sparse Conditional /// Constant Propagation. /// class SCCPSolver : public InstVisitor<SCCPSolver> { const DataLayout &DL; const TargetLibraryInfo *TLI; SmallPtrSet<BasicBlock*, 8> BBExecutable; // The BBs that are executable. DenseMap<Value*, LatticeVal> ValueState; // The state each value is in. /// StructValueState - This maintains ValueState for values that have /// StructType, for example for formal arguments, calls, insertelement, etc. /// DenseMap<std::pair<Value*, unsigned>, LatticeVal> StructValueState; /// GlobalValue - If we are tracking any values for the contents of a global /// variable, we keep a mapping from the constant accessor to the element of /// the global, to the currently known value. If the value becomes /// overdefined, it's entry is simply removed from this map. DenseMap<GlobalVariable*, LatticeVal> TrackedGlobals; /// TrackedRetVals - If we are tracking arguments into and the return /// value out of a function, it will have an entry in this map, indicating /// what the known return value for the function is. DenseMap<Function*, LatticeVal> TrackedRetVals; /// TrackedMultipleRetVals - Same as TrackedRetVals, but used for functions /// that return multiple values. DenseMap<std::pair<Function*, unsigned>, LatticeVal> TrackedMultipleRetVals; /// MRVFunctionsTracked - Each function in TrackedMultipleRetVals is /// represented here for efficient lookup. SmallPtrSet<Function*, 16> MRVFunctionsTracked; /// TrackingIncomingArguments - This is the set of functions for whose /// arguments we make optimistic assumptions about and try to prove as /// constants. SmallPtrSet<Function*, 16> TrackingIncomingArguments; /// The reason for two worklists is that overdefined is the lowest state /// on the lattice, and moving things to overdefined as fast as possible /// makes SCCP converge much faster. /// /// By having a separate worklist, we accomplish this because everything /// possibly overdefined will become overdefined at the soonest possible /// point. SmallVector<Value*, 64> OverdefinedInstWorkList; SmallVector<Value*, 64> InstWorkList; SmallVector<BasicBlock*, 64> BBWorkList; // The BasicBlock work list /// KnownFeasibleEdges - Entries in this set are edges which have already had /// PHI nodes retriggered. 
typedef std::pair<BasicBlock*, BasicBlock*> Edge; DenseSet<Edge> KnownFeasibleEdges; public: SCCPSolver(const DataLayout &DL, const TargetLibraryInfo *tli) : DL(DL), TLI(tli) {} /// MarkBlockExecutable - This method can be used by clients to mark all of /// the blocks that are known to be intrinsically live in the processed unit. /// /// This returns true if the block was not considered live before. bool MarkBlockExecutable(BasicBlock *BB) { if (!BBExecutable.insert(BB).second) return false; DEBUG(dbgs() << "Marking Block Executable: " << BB->getName() << '\n'); BBWorkList.push_back(BB); // Add the block to the work list! return true; } /// TrackValueOfGlobalVariable - Clients can use this method to /// inform the SCCPSolver that it should track loads and stores to the /// specified global variable if it can. This is only legal to call if /// performing Interprocedural SCCP. void TrackValueOfGlobalVariable(GlobalVariable *GV) { // We only track the contents of scalar globals. if (GV->getType()->getElementType()->isSingleValueType()) { LatticeVal &IV = TrackedGlobals[GV]; if (!isa<UndefValue>(GV->getInitializer())) IV.markConstant(GV->getInitializer()); } } /// AddTrackedFunction - If the SCCP solver is supposed to track calls into /// and out of the specified function (which cannot have its address taken), /// this method must be called. void AddTrackedFunction(Function *F) { // Add an entry, F -> undef. if (StructType *STy = dyn_cast<StructType>(F->getReturnType())) { MRVFunctionsTracked.insert(F); for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) TrackedMultipleRetVals.insert(std::make_pair(std::make_pair(F, i), LatticeVal())); } else TrackedRetVals.insert(std::make_pair(F, LatticeVal())); } void AddArgumentTrackedFunction(Function *F) { TrackingIncomingArguments.insert(F); } /// Solve - Solve for constants and executable blocks. /// void Solve(); /// ResolvedUndefsIn - While solving the dataflow for a function, we assume /// that branches on undef values cannot reach any of their successors. /// However, this is not a safe assumption. After we solve dataflow, this /// method should be use to handle this. If this returns true, the solver /// should be rerun. bool ResolvedUndefsIn(Function &F); bool isBlockExecutable(BasicBlock *BB) const { return BBExecutable.count(BB); } LatticeVal getLatticeValueFor(Value *V) const { DenseMap<Value*, LatticeVal>::const_iterator I = ValueState.find(V); assert(I != ValueState.end() && "V is not in valuemap!"); return I->second; } /// getTrackedRetVals - Get the inferred return value map. /// const DenseMap<Function*, LatticeVal> &getTrackedRetVals() { return TrackedRetVals; } /// getTrackedGlobals - Get and return the set of inferred initializers for /// global variables. const DenseMap<GlobalVariable*, LatticeVal> &getTrackedGlobals() { return TrackedGlobals; } void markOverdefined(Value *V) { assert(!V->getType()->isStructTy() && "Should use other method"); markOverdefined(ValueState[V], V); } /// markAnythingOverdefined - Mark the specified value overdefined. This /// works with both scalars and structs. void markAnythingOverdefined(Value *V) { if (StructType *STy = dyn_cast<StructType>(V->getType())) for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) markOverdefined(getStructValueState(V, i), V); else markOverdefined(V); } private: // markConstant - Make a value be marked as "constant". If the value // is not already a constant, add it to the instruction work list so that // the users of the instruction are updated later. 
// void markConstant(LatticeVal &IV, Value *V, Constant *C) { if (!IV.markConstant(C)) return; DEBUG(dbgs() << "markConstant: " << *C << ": " << *V << '\n'); if (IV.isOverdefined()) OverdefinedInstWorkList.push_back(V); else InstWorkList.push_back(V); } void markConstant(Value *V, Constant *C) { assert(!V->getType()->isStructTy() && "Should use other method"); markConstant(ValueState[V], V, C); } void markForcedConstant(Value *V, Constant *C) { assert(!V->getType()->isStructTy() && "Should use other method"); LatticeVal &IV = ValueState[V]; IV.markForcedConstant(C); DEBUG(dbgs() << "markForcedConstant: " << *C << ": " << *V << '\n'); if (IV.isOverdefined()) OverdefinedInstWorkList.push_back(V); else InstWorkList.push_back(V); } // markOverdefined - Make a value be marked as "overdefined". If the // value is not already overdefined, add it to the overdefined instruction // work list so that the users of the instruction are updated later. void markOverdefined(LatticeVal &IV, Value *V) { if (!IV.markOverdefined()) return; DEBUG(dbgs() << "markOverdefined: "; if (Function *F = dyn_cast<Function>(V)) dbgs() << "Function '" << F->getName() << "'\n"; else dbgs() << *V << '\n'); // Only instructions go on the work list OverdefinedInstWorkList.push_back(V); } void mergeInValue(LatticeVal &IV, Value *V, LatticeVal MergeWithV) { if (IV.isOverdefined() || MergeWithV.isUndefined()) return; // Noop. if (MergeWithV.isOverdefined()) markOverdefined(IV, V); else if (IV.isUndefined()) markConstant(IV, V, MergeWithV.getConstant()); else if (IV.getConstant() != MergeWithV.getConstant()) markOverdefined(IV, V); } void mergeInValue(Value *V, LatticeVal MergeWithV) { assert(!V->getType()->isStructTy() && "Should use other method"); mergeInValue(ValueState[V], V, MergeWithV); } /// getValueState - Return the LatticeVal object that corresponds to the /// value. This function handles the case when the value hasn't been seen yet /// by properly seeding constants etc. LatticeVal &getValueState(Value *V) { assert(!V->getType()->isStructTy() && "Should use getStructValueState"); std::pair<DenseMap<Value*, LatticeVal>::iterator, bool> I = ValueState.insert(std::make_pair(V, LatticeVal())); LatticeVal &LV = I.first->second; if (!I.second) return LV; // Common case, already in the map. if (Constant *C = dyn_cast<Constant>(V)) { // Undef values remain undefined. if (!isa<UndefValue>(V)) LV.markConstant(C); // Constants are constant } // All others are underdefined by default. return LV; } /// getStructValueState - Return the LatticeVal object that corresponds to the /// value/field pair. This function handles the case when the value hasn't /// been seen yet by properly seeding constants etc. LatticeVal &getStructValueState(Value *V, unsigned i) { assert(V->getType()->isStructTy() && "Should use getValueState"); assert(i < cast<StructType>(V->getType())->getNumElements() && "Invalid element #"); std::pair<DenseMap<std::pair<Value*, unsigned>, LatticeVal>::iterator, bool> I = StructValueState.insert( std::make_pair(std::make_pair(V, i), LatticeVal())); LatticeVal &LV = I.first->second; if (!I.second) return LV; // Common case, already in the map. if (Constant *C = dyn_cast<Constant>(V)) { Constant *Elt = C->getAggregateElement(i); if (!Elt) LV.markOverdefined(); // Unknown sort of constant. else if (isa<UndefValue>(Elt)) ; // Undef values remain undefined. else LV.markConstant(Elt); // Constants are constant. } // All others are underdefined by default. 
return LV; } /// markEdgeExecutable - Mark a basic block as executable, adding it to the BB /// work list if it is not already executable. void markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest) { if (!KnownFeasibleEdges.insert(Edge(Source, Dest)).second) return; // This edge is already known to be executable! if (!MarkBlockExecutable(Dest)) { // If the destination is already executable, we just made an *edge* // feasible that wasn't before. Revisit the PHI nodes in the block // because they have potentially new operands. DEBUG(dbgs() << "Marking Edge Executable: " << Source->getName() << " -> " << Dest->getName() << '\n'); PHINode *PN; for (BasicBlock::iterator I = Dest->begin(); (PN = dyn_cast<PHINode>(I)); ++I) visitPHINode(*PN); } } // getFeasibleSuccessors - Return a vector of booleans to indicate which // successors are reachable from a given terminator instruction. // void getFeasibleSuccessors(TerminatorInst &TI, SmallVectorImpl<bool> &Succs); // isEdgeFeasible - Return true if the control flow edge from the 'From' basic // block to the 'To' basic block is currently feasible. // bool isEdgeFeasible(BasicBlock *From, BasicBlock *To); // OperandChangedState - This method is invoked on all of the users of an // instruction that was just changed state somehow. Based on this // information, we need to update the specified user of this instruction. // void OperandChangedState(Instruction *I) { if (BBExecutable.count(I->getParent())) // Inst is executable? visit(*I); } private: friend class InstVisitor<SCCPSolver>; // visit implementations - Something changed in this instruction. Either an // operand made a transition, or the instruction is newly executable. Change // the value type of I to reflect these changes if appropriate. void visitPHINode(PHINode &I); // Terminators void visitReturnInst(ReturnInst &I); void visitTerminatorInst(TerminatorInst &TI); void visitCastInst(CastInst &I); void visitSelectInst(SelectInst &I); void visitBinaryOperator(Instruction &I); void visitCmpInst(CmpInst &I); void visitExtractElementInst(ExtractElementInst &I); void visitInsertElementInst(InsertElementInst &I); void visitShuffleVectorInst(ShuffleVectorInst &I); void visitExtractValueInst(ExtractValueInst &EVI); void visitInsertValueInst(InsertValueInst &IVI); void visitLandingPadInst(LandingPadInst &I) { markAnythingOverdefined(&I); } // Instructions that cannot be folded away. void visitStoreInst (StoreInst &I); void visitLoadInst (LoadInst &I); void visitGetElementPtrInst(GetElementPtrInst &I); void visitCallInst (CallInst &I) { visitCallSite(&I); } void visitInvokeInst (InvokeInst &II) { visitCallSite(&II); visitTerminatorInst(II); } void visitCallSite (CallSite CS); void visitResumeInst (TerminatorInst &I) { /*returns void*/ } void visitUnreachableInst(TerminatorInst &I) { /*returns void*/ } void visitFenceInst (FenceInst &I) { /*returns void*/ } void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) { markAnythingOverdefined(&I); } void visitAtomicRMWInst (AtomicRMWInst &I) { markOverdefined(&I); } void visitAllocaInst (Instruction &I) { markOverdefined(&I); } void visitVAArgInst (Instruction &I) { markAnythingOverdefined(&I); } void visitInstruction(Instruction &I) { // If a new instruction is added to LLVM that we don't handle. 
dbgs() << "SCCP: Don't know how to handle: " << I << '\n'; markAnythingOverdefined(&I); // Just in case } }; } // end anonymous namespace // getFeasibleSuccessors - Return a vector of booleans to indicate which // successors are reachable from a given terminator instruction. // void SCCPSolver::getFeasibleSuccessors(TerminatorInst &TI, SmallVectorImpl<bool> &Succs) { Succs.resize(TI.getNumSuccessors()); if (BranchInst *BI = dyn_cast<BranchInst>(&TI)) { if (BI->isUnconditional()) { Succs[0] = true; return; } LatticeVal BCValue = getValueState(BI->getCondition()); ConstantInt *CI = BCValue.getConstantInt(); if (!CI) { // Overdefined condition variables, and branches on unfoldable constant // conditions, mean the branch could go either way. if (!BCValue.isUndefined()) Succs[0] = Succs[1] = true; return; } // Constant condition variables mean the branch can only go a single way. Succs[CI->isZero()] = true; return; } if (isa<InvokeInst>(TI)) { // Invoke instructions successors are always executable. Succs[0] = Succs[1] = true; return; } if (SwitchInst *SI = dyn_cast<SwitchInst>(&TI)) { if (!SI->getNumCases()) { Succs[0] = true; return; } LatticeVal SCValue = getValueState(SI->getCondition()); ConstantInt *CI = SCValue.getConstantInt(); if (!CI) { // Overdefined or undefined condition? // All destinations are executable! if (!SCValue.isUndefined()) Succs.assign(TI.getNumSuccessors(), true); return; } Succs[SI->findCaseValue(CI).getSuccessorIndex()] = true; return; } // TODO: This could be improved if the operand is a [cast of a] BlockAddress. if (isa<IndirectBrInst>(&TI)) { // Just mark all destinations executable! Succs.assign(TI.getNumSuccessors(), true); return; } #ifndef NDEBUG dbgs() << "Unknown terminator instruction: " << TI << '\n'; #endif llvm_unreachable("SCCP: Don't know how to handle this terminator!"); } // isEdgeFeasible - Return true if the control flow edge from the 'From' basic // block to the 'To' basic block is currently feasible. // bool SCCPSolver::isEdgeFeasible(BasicBlock *From, BasicBlock *To) { assert(BBExecutable.count(To) && "Dest should always be alive!"); // Make sure the source basic block is executable!! if (!BBExecutable.count(From)) return false; // Check to make sure this edge itself is actually feasible now. TerminatorInst *TI = From->getTerminator(); if (BranchInst *BI = dyn_cast<BranchInst>(TI)) { if (BI->isUnconditional()) return true; LatticeVal BCValue = getValueState(BI->getCondition()); // Overdefined condition variables mean the branch could go either way, // undef conditions mean that neither edge is feasible yet. ConstantInt *CI = BCValue.getConstantInt(); if (!CI) return !BCValue.isUndefined(); // Constant condition variables mean the branch can only go a single way. return BI->getSuccessor(CI->isZero()) == To; } // Invoke instructions successors are always executable. if (isa<InvokeInst>(TI)) return true; if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) { if (SI->getNumCases() < 1) return true; LatticeVal SCValue = getValueState(SI->getCondition()); ConstantInt *CI = SCValue.getConstantInt(); if (!CI) return !SCValue.isUndefined(); return SI->findCaseValue(CI).getCaseSuccessor() == To; } // Just mark all destinations executable! // TODO: This could be improved if the operand is a [cast of a] BlockAddress. 
if (isa<IndirectBrInst>(TI)) return true; #ifndef NDEBUG dbgs() << "Unknown terminator instruction: " << *TI << '\n'; #endif llvm_unreachable(nullptr); } // visit Implementations - Something changed in this instruction, either an // operand made a transition, or the instruction is newly executable. Change // the value type of I to reflect these changes if appropriate. This method // makes sure to do the following actions: // // 1. If a phi node merges two constants in, and has conflicting value coming // from different branches, or if the PHI node merges in an overdefined // value, then the PHI node becomes overdefined. // 2. If a phi node merges only constants in, and they all agree on value, the // PHI node becomes a constant value equal to that. // 3. If V <- x (op) y && isConstant(x) && isConstant(y) V = Constant // 4. If V <- x (op) y && (isOverdefined(x) || isOverdefined(y)) V = Overdefined // 5. If V <- MEM or V <- CALL or V <- (unknown) then V = Overdefined // 6. If a conditional branch has a value that is constant, make the selected // destination executable // 7. If a conditional branch has a value that is overdefined, make all // successors executable. // void SCCPSolver::visitPHINode(PHINode &PN) { // If this PN returns a struct, just mark the result overdefined. // TODO: We could do a lot better than this if code actually uses this. if (PN.getType()->isStructTy()) return markAnythingOverdefined(&PN); if (getValueState(&PN).isOverdefined()) return; // Quick exit // Super-extra-high-degree PHI nodes are unlikely to ever be marked constant, // and slow us down a lot. Just mark them overdefined. if (PN.getNumIncomingValues() > 64) return markOverdefined(&PN); // Look at all of the executable operands of the PHI node. If any of them // are overdefined, the PHI becomes overdefined as well. If they are all // constant, and they agree with each other, the PHI becomes the identical // constant. If they are constant and don't agree, the PHI is overdefined. // If there are no executable operands, the PHI remains undefined. // Constant *OperandVal = nullptr; for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) { LatticeVal IV = getValueState(PN.getIncomingValue(i)); if (IV.isUndefined()) continue; // Doesn't influence PHI node. if (!isEdgeFeasible(PN.getIncomingBlock(i), PN.getParent())) continue; if (IV.isOverdefined()) // PHI node becomes overdefined! return markOverdefined(&PN); if (!OperandVal) { // Grab the first value. OperandVal = IV.getConstant(); continue; } // There is already a reachable operand. If we conflict with it, // then the PHI node becomes overdefined. If we agree with it, we // can continue on. // Check to see if there are two different constants merging, if so, the PHI // node is overdefined. if (IV.getConstant() != OperandVal) return markOverdefined(&PN); } // If we exited the loop, this means that the PHI node only has constant // arguments that agree with each other(and OperandVal is the constant) or // OperandVal is null because there are no defined incoming arguments. If // this is the case, the PHI remains undefined. // if (OperandVal) markConstant(&PN, OperandVal); // Acquire operand value } void SCCPSolver::visitReturnInst(ReturnInst &I) { if (I.getNumOperands() == 0) return; // ret void Function *F = I.getParent()->getParent(); Value *ResultOp = I.getOperand(0); // If we are tracking the return value of this function, merge it in. 
if (!TrackedRetVals.empty() && !ResultOp->getType()->isStructTy()) { DenseMap<Function*, LatticeVal>::iterator TFRVI = TrackedRetVals.find(F); if (TFRVI != TrackedRetVals.end()) { mergeInValue(TFRVI->second, F, getValueState(ResultOp)); return; } } // Handle functions that return multiple values. if (!TrackedMultipleRetVals.empty()) { if (StructType *STy = dyn_cast<StructType>(ResultOp->getType())) if (MRVFunctionsTracked.count(F)) for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) mergeInValue(TrackedMultipleRetVals[std::make_pair(F, i)], F, getStructValueState(ResultOp, i)); } } void SCCPSolver::visitTerminatorInst(TerminatorInst &TI) { SmallVector<bool, 16> SuccFeasible; getFeasibleSuccessors(TI, SuccFeasible); BasicBlock *BB = TI.getParent(); // Mark all feasible successors executable. for (unsigned i = 0, e = SuccFeasible.size(); i != e; ++i) if (SuccFeasible[i]) markEdgeExecutable(BB, TI.getSuccessor(i)); } void SCCPSolver::visitCastInst(CastInst &I) { LatticeVal OpSt = getValueState(I.getOperand(0)); if (OpSt.isOverdefined()) // Inherit overdefinedness of operand markOverdefined(&I); else if (OpSt.isConstant()) // Propagate constant value markConstant(&I, ConstantExpr::getCast(I.getOpcode(), OpSt.getConstant(), I.getType())); } void SCCPSolver::visitExtractValueInst(ExtractValueInst &EVI) { // If this returns a struct, mark all elements over defined, we don't track // structs in structs. if (EVI.getType()->isStructTy()) return markAnythingOverdefined(&EVI); // If this is extracting from more than one level of struct, we don't know. if (EVI.getNumIndices() != 1) return markOverdefined(&EVI); Value *AggVal = EVI.getAggregateOperand(); if (AggVal->getType()->isStructTy()) { unsigned i = *EVI.idx_begin(); LatticeVal EltVal = getStructValueState(AggVal, i); mergeInValue(getValueState(&EVI), &EVI, EltVal); } else { // Otherwise, must be extracting from an array. return markOverdefined(&EVI); } } void SCCPSolver::visitInsertValueInst(InsertValueInst &IVI) { StructType *STy = dyn_cast<StructType>(IVI.getType()); if (!STy) return markOverdefined(&IVI); // If this has more than one index, we can't handle it, drive all results to // undef. if (IVI.getNumIndices() != 1) return markAnythingOverdefined(&IVI); Value *Aggr = IVI.getAggregateOperand(); unsigned Idx = *IVI.idx_begin(); // Compute the result based on what we're inserting. for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { // This passes through all values that aren't the inserted element. if (i != Idx) { LatticeVal EltVal = getStructValueState(Aggr, i); mergeInValue(getStructValueState(&IVI, i), &IVI, EltVal); continue; } Value *Val = IVI.getInsertedValueOperand(); if (Val->getType()->isStructTy()) // We don't track structs in structs. markOverdefined(getStructValueState(&IVI, i), &IVI); else { LatticeVal InVal = getValueState(Val); mergeInValue(getStructValueState(&IVI, i), &IVI, InVal); } } } void SCCPSolver::visitSelectInst(SelectInst &I) { // If this select returns a struct, just mark the result overdefined. // TODO: We could do a lot better than this if code actually uses this. if (I.getType()->isStructTy()) return markAnythingOverdefined(&I); LatticeVal CondValue = getValueState(I.getCondition()); if (CondValue.isUndefined()) return; if (ConstantInt *CondCB = CondValue.getConstantInt()) { Value *OpVal = CondCB->isZero() ? I.getFalseValue() : I.getTrueValue(); mergeInValue(&I, getValueState(OpVal)); return; } // Otherwise, the condition is overdefined or a constant we can't evaluate. 
// See if we can produce something better than overdefined based on the T/F // value. LatticeVal TVal = getValueState(I.getTrueValue()); LatticeVal FVal = getValueState(I.getFalseValue()); // select ?, C, C -> C. if (TVal.isConstant() && FVal.isConstant() && TVal.getConstant() == FVal.getConstant()) return markConstant(&I, FVal.getConstant()); if (TVal.isUndefined()) // select ?, undef, X -> X. return mergeInValue(&I, FVal); if (FVal.isUndefined()) // select ?, X, undef -> X. return mergeInValue(&I, TVal); markOverdefined(&I); } // Handle Binary Operators. void SCCPSolver::visitBinaryOperator(Instruction &I) { LatticeVal V1State = getValueState(I.getOperand(0)); LatticeVal V2State = getValueState(I.getOperand(1)); LatticeVal &IV = ValueState[&I]; if (IV.isOverdefined()) return; if (V1State.isConstant() && V2State.isConstant()) return markConstant(IV, &I, ConstantExpr::get(I.getOpcode(), V1State.getConstant(), V2State.getConstant())); // If something is undef, wait for it to resolve. if (!V1State.isOverdefined() && !V2State.isOverdefined()) return; // Otherwise, one of our operands is overdefined. Try to produce something // better than overdefined with some tricks. // If this is an AND or OR with 0 or -1, it doesn't matter that the other // operand is overdefined. if (I.getOpcode() == Instruction::And || I.getOpcode() == Instruction::Or) { LatticeVal *NonOverdefVal = nullptr; if (!V1State.isOverdefined()) NonOverdefVal = &V1State; else if (!V2State.isOverdefined()) NonOverdefVal = &V2State; if (NonOverdefVal) { if (NonOverdefVal->isUndefined()) { // Could annihilate value. if (I.getOpcode() == Instruction::And) markConstant(IV, &I, Constant::getNullValue(I.getType())); else if (VectorType *PT = dyn_cast<VectorType>(I.getType())) markConstant(IV, &I, Constant::getAllOnesValue(PT)); else markConstant(IV, &I, Constant::getAllOnesValue(I.getType())); return; } if (I.getOpcode() == Instruction::And) { // X and 0 = 0 if (NonOverdefVal->getConstant()->isNullValue()) return markConstant(IV, &I, NonOverdefVal->getConstant()); } else { if (ConstantInt *CI = NonOverdefVal->getConstantInt()) if (CI->isAllOnesValue()) // X or -1 = -1 return markConstant(IV, &I, NonOverdefVal->getConstant()); } } } markOverdefined(&I); } // Handle ICmpInst instruction. void SCCPSolver::visitCmpInst(CmpInst &I) { LatticeVal V1State = getValueState(I.getOperand(0)); LatticeVal V2State = getValueState(I.getOperand(1)); LatticeVal &IV = ValueState[&I]; if (IV.isOverdefined()) return; if (V1State.isConstant() && V2State.isConstant()) return markConstant(IV, &I, ConstantExpr::getCompare(I.getPredicate(), V1State.getConstant(), V2State.getConstant())); // If operands are still undefined, wait for it to resolve. if (!V1State.isOverdefined() && !V2State.isOverdefined()) return; markOverdefined(&I); } void SCCPSolver::visitExtractElementInst(ExtractElementInst &I) { // TODO : SCCP does not handle vectors properly. return markOverdefined(&I); #if 0 LatticeVal &ValState = getValueState(I.getOperand(0)); LatticeVal &IdxState = getValueState(I.getOperand(1)); if (ValState.isOverdefined() || IdxState.isOverdefined()) markOverdefined(&I); else if(ValState.isConstant() && IdxState.isConstant()) markConstant(&I, ConstantExpr::getExtractElement(ValState.getConstant(), IdxState.getConstant())); #endif } void SCCPSolver::visitInsertElementInst(InsertElementInst &I) { // TODO : SCCP does not handle vectors properly. 
return markOverdefined(&I); #if 0 LatticeVal &ValState = getValueState(I.getOperand(0)); LatticeVal &EltState = getValueState(I.getOperand(1)); LatticeVal &IdxState = getValueState(I.getOperand(2)); if (ValState.isOverdefined() || EltState.isOverdefined() || IdxState.isOverdefined()) markOverdefined(&I); else if(ValState.isConstant() && EltState.isConstant() && IdxState.isConstant()) markConstant(&I, ConstantExpr::getInsertElement(ValState.getConstant(), EltState.getConstant(), IdxState.getConstant())); else if (ValState.isUndefined() && EltState.isConstant() && IdxState.isConstant()) markConstant(&I,ConstantExpr::getInsertElement(UndefValue::get(I.getType()), EltState.getConstant(), IdxState.getConstant())); #endif } void SCCPSolver::visitShuffleVectorInst(ShuffleVectorInst &I) { // TODO : SCCP does not handle vectors properly. return markOverdefined(&I); #if 0 LatticeVal &V1State = getValueState(I.getOperand(0)); LatticeVal &V2State = getValueState(I.getOperand(1)); LatticeVal &MaskState = getValueState(I.getOperand(2)); if (MaskState.isUndefined() || (V1State.isUndefined() && V2State.isUndefined())) return; // Undefined output if mask or both inputs undefined. if (V1State.isOverdefined() || V2State.isOverdefined() || MaskState.isOverdefined()) { markOverdefined(&I); } else { // A mix of constant/undef inputs. Constant *V1 = V1State.isConstant() ? V1State.getConstant() : UndefValue::get(I.getType()); Constant *V2 = V2State.isConstant() ? V2State.getConstant() : UndefValue::get(I.getType()); Constant *Mask = MaskState.isConstant() ? MaskState.getConstant() : UndefValue::get(I.getOperand(2)->getType()); markConstant(&I, ConstantExpr::getShuffleVector(V1, V2, Mask)); } #endif } // Handle getelementptr instructions. If all operands are constants then we // can turn this into a getelementptr ConstantExpr. // void SCCPSolver::visitGetElementPtrInst(GetElementPtrInst &I) { if (ValueState[&I].isOverdefined()) return; SmallVector<Constant*, 8> Operands; Operands.reserve(I.getNumOperands()); for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) { LatticeVal State = getValueState(I.getOperand(i)); if (State.isUndefined()) return; // Operands are not resolved yet. if (State.isOverdefined()) return markOverdefined(&I); assert(State.isConstant() && "Unknown state!"); Operands.push_back(State.getConstant()); } Constant *Ptr = Operands[0]; auto Indices = makeArrayRef(Operands.begin() + 1, Operands.end()); markConstant(&I, ConstantExpr::getGetElementPtr(I.getSourceElementType(), Ptr, Indices)); } void SCCPSolver::visitStoreInst(StoreInst &SI) { // If this store is of a struct, ignore it. if (SI.getOperand(0)->getType()->isStructTy()) return; if (TrackedGlobals.empty() || !isa<GlobalVariable>(SI.getOperand(1))) return; GlobalVariable *GV = cast<GlobalVariable>(SI.getOperand(1)); DenseMap<GlobalVariable*, LatticeVal>::iterator I = TrackedGlobals.find(GV); if (I == TrackedGlobals.end() || I->second.isOverdefined()) return; // Get the value we are storing into the global, then merge it. mergeInValue(I->second, GV, getValueState(SI.getOperand(0))); if (I->second.isOverdefined()) TrackedGlobals.erase(I); // No need to keep tracking this! } // Handle load instructions. If the operand is a constant pointer to a constant // global, we can replace the load with the loaded constant value! void SCCPSolver::visitLoadInst(LoadInst &I) { // If this load is of a struct, just mark the result overdefined. 
if (I.getType()->isStructTy()) return markAnythingOverdefined(&I); LatticeVal PtrVal = getValueState(I.getOperand(0)); if (PtrVal.isUndefined()) return; // The pointer is not resolved yet! LatticeVal &IV = ValueState[&I]; if (IV.isOverdefined()) return; if (!PtrVal.isConstant() || I.isVolatile()) return markOverdefined(IV, &I); Constant *Ptr = PtrVal.getConstant(); // load null -> null if (isa<ConstantPointerNull>(Ptr) && I.getPointerAddressSpace() == 0) return markConstant(IV, &I, UndefValue::get(I.getType())); // Transform load (constant global) into the value loaded. if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) { if (!TrackedGlobals.empty()) { // If we are tracking this global, merge in the known value for it. DenseMap<GlobalVariable*, LatticeVal>::iterator It = TrackedGlobals.find(GV); if (It != TrackedGlobals.end()) { mergeInValue(IV, &I, It->second); return; } } } // Transform load from a constant into a constant if possible. if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, DL)) return markConstant(IV, &I, C); // Otherwise we cannot say for certain what value this load will produce. // Bail out. markOverdefined(IV, &I); } void SCCPSolver::visitCallSite(CallSite CS) { Function *F = CS.getCalledFunction(); Instruction *I = CS.getInstruction(); // The common case is that we aren't tracking the callee, either because we // are not doing interprocedural analysis or the callee is indirect, or is // external. Handle these cases first. if (!F || F->isDeclaration()) { CallOverdefined: // Void return and not tracking callee, just bail. if (I->getType()->isVoidTy()) return; // Otherwise, if we have a single return value case, and if the function is // a declaration, maybe we can constant fold it. if (F && F->isDeclaration() && !I->getType()->isStructTy() && canConstantFoldCallTo(F)) { SmallVector<Constant*, 8> Operands; for (CallSite::arg_iterator AI = CS.arg_begin(), E = CS.arg_end(); AI != E; ++AI) { LatticeVal State = getValueState(*AI); if (State.isUndefined()) return; // Operands are not resolved yet. if (State.isOverdefined()) return markOverdefined(I); assert(State.isConstant() && "Unknown state!"); Operands.push_back(State.getConstant()); } if (getValueState(I).isOverdefined()) return; // If we can constant fold this, mark the result of the call as a // constant. if (Constant *C = ConstantFoldCall(F, Operands, TLI)) return markConstant(I, C); } // Otherwise, we don't know anything about this call, mark it overdefined. return markAnythingOverdefined(I); } // If this is a local function that doesn't have its address taken, mark its // entry block executable and merge in the actual arguments to the call into // the formal arguments of the function. if (!TrackingIncomingArguments.empty() && TrackingIncomingArguments.count(F)){ MarkBlockExecutable(F->begin()); // Propagate information from this call site into the callee. CallSite::arg_iterator CAI = CS.arg_begin(); for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E; ++AI, ++CAI) { // If this argument is byval, and if the function is not readonly, there // will be an implicit copy formed of the input aggregate. 
if (AI->hasByValAttr() && !F->onlyReadsMemory()) { markOverdefined(AI); continue; } if (StructType *STy = dyn_cast<StructType>(AI->getType())) { for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { LatticeVal CallArg = getStructValueState(*CAI, i); mergeInValue(getStructValueState(AI, i), AI, CallArg); } } else { mergeInValue(AI, getValueState(*CAI)); } } } // If this is a single/zero retval case, see if we're tracking the function. if (StructType *STy = dyn_cast<StructType>(F->getReturnType())) { if (!MRVFunctionsTracked.count(F)) goto CallOverdefined; // Not tracking this callee. // If we are tracking this callee, propagate the result of the function // into this call site. for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) mergeInValue(getStructValueState(I, i), I, TrackedMultipleRetVals[std::make_pair(F, i)]); } else { DenseMap<Function*, LatticeVal>::iterator TFRVI = TrackedRetVals.find(F); if (TFRVI == TrackedRetVals.end()) goto CallOverdefined; // Not tracking this callee. // If so, propagate the return value of the callee into this call result. mergeInValue(I, TFRVI->second); } } void SCCPSolver::Solve() { // Process the work lists until they are empty! while (!BBWorkList.empty() || !InstWorkList.empty() || !OverdefinedInstWorkList.empty()) { // Process the overdefined instruction's work list first, which drives other // things to overdefined more quickly. while (!OverdefinedInstWorkList.empty()) { Value *I = OverdefinedInstWorkList.pop_back_val(); DEBUG(dbgs() << "\nPopped off OI-WL: " << *I << '\n'); // "I" got into the work list because it either made the transition from // bottom to constant, or to overdefined. // // Anything on this worklist that is overdefined need not be visited // since all of its users will have already been marked as overdefined // Update all of the users of this instruction's value. // for (User *U : I->users()) if (Instruction *UI = dyn_cast<Instruction>(U)) OperandChangedState(UI); } // Process the instruction work list. while (!InstWorkList.empty()) { Value *I = InstWorkList.pop_back_val(); DEBUG(dbgs() << "\nPopped off I-WL: " << *I << '\n'); // "I" got into the work list because it made the transition from undef to // constant. // // Anything on this worklist that is overdefined need not be visited // since all of its users will have already been marked as overdefined. // Update all of the users of this instruction's value. // if (I->getType()->isStructTy() || !getValueState(I).isOverdefined()) for (User *U : I->users()) if (Instruction *UI = dyn_cast<Instruction>(U)) OperandChangedState(UI); } // Process the basic block work list. while (!BBWorkList.empty()) { BasicBlock *BB = BBWorkList.back(); BBWorkList.pop_back(); DEBUG(dbgs() << "\nPopped off BBWL: " << *BB << '\n'); // Notify all instructions in this basic block that they are newly // executable. visit(BB); } } } /// ResolvedUndefsIn - While solving the dataflow for a function, we assume /// that branches on undef values cannot reach any of their successors. /// However, this is not a safe assumption. After we solve dataflow, this /// method should be use to handle this. If this returns true, the solver /// should be rerun. /// /// This method handles this by finding an unresolved branch and marking it one /// of the edges from the block as being feasible, even though the condition /// doesn't say it would otherwise be. 
This allows SCCP to find the rest of the /// CFG and only slightly pessimizes the analysis results (by marking one, /// potentially infeasible, edge feasible). This cannot usefully modify the /// constraints on the condition of the branch, as that would impact other users /// of the value. /// /// This scan also checks for values that use undefs, whose results are actually /// defined. For example, 'zext i8 undef to i32' should produce all zeros /// conservatively, as "(zext i8 X -> i32) & 0xFF00" must always return zero, /// even if X isn't defined. bool SCCPSolver::ResolvedUndefsIn(Function &F) { for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) { if (!BBExecutable.count(BB)) continue; for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) { // Look for instructions which produce undef values. if (I->getType()->isVoidTy()) continue; if (StructType *STy = dyn_cast<StructType>(I->getType())) { // Only a few things that can be structs matter for undef. // Tracked calls must never be marked overdefined in ResolvedUndefsIn. if (CallSite CS = CallSite(I)) if (Function *F = CS.getCalledFunction()) if (MRVFunctionsTracked.count(F)) continue; // extractvalue and insertvalue don't need to be marked; they are // tracked as precisely as their operands. if (isa<ExtractValueInst>(I) || isa<InsertValueInst>(I)) continue; // Send the results of everything else to overdefined. We could be // more precise than this but it isn't worth bothering. for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { LatticeVal &LV = getStructValueState(I, i); if (LV.isUndefined()) markOverdefined(LV, I); } continue; } LatticeVal &LV = getValueState(I); if (!LV.isUndefined()) continue; // extractvalue is safe; check here because the argument is a struct. if (isa<ExtractValueInst>(I)) continue; // Compute the operand LatticeVals, for convenience below. // Anything taking a struct is conservatively assumed to require // overdefined markings. if (I->getOperand(0)->getType()->isStructTy()) { markOverdefined(I); return true; } LatticeVal Op0LV = getValueState(I->getOperand(0)); LatticeVal Op1LV; if (I->getNumOperands() == 2) { if (I->getOperand(1)->getType()->isStructTy()) { markOverdefined(I); return true; } Op1LV = getValueState(I->getOperand(1)); } // If this is an instructions whose result is defined even if the input is // not fully defined, propagate the information. Type *ITy = I->getType(); switch (I->getOpcode()) { case Instruction::Add: case Instruction::Sub: case Instruction::Trunc: case Instruction::FPTrunc: case Instruction::BitCast: break; // Any undef -> undef case Instruction::FSub: case Instruction::FAdd: case Instruction::FMul: case Instruction::FDiv: case Instruction::FRem: // Floating-point binary operation: be conservative. if (Op0LV.isUndefined() && Op1LV.isUndefined()) markForcedConstant(I, Constant::getNullValue(ITy)); else markOverdefined(I); return true; case Instruction::ZExt: case Instruction::SExt: case Instruction::FPToUI: case Instruction::FPToSI: case Instruction::FPExt: case Instruction::PtrToInt: case Instruction::IntToPtr: case Instruction::SIToFP: case Instruction::UIToFP: // undef -> 0; some outputs are impossible markForcedConstant(I, Constant::getNullValue(ITy)); return true; case Instruction::Mul: case Instruction::And: // Both operands undef -> undef if (Op0LV.isUndefined() && Op1LV.isUndefined()) break; // undef * X -> 0. X could be zero. // undef & X -> 0. X could be zero. 
markForcedConstant(I, Constant::getNullValue(ITy)); return true; case Instruction::Or: // Both operands undef -> undef if (Op0LV.isUndefined() && Op1LV.isUndefined()) break; // undef | X -> -1. X could be -1. markForcedConstant(I, Constant::getAllOnesValue(ITy)); return true; case Instruction::Xor: // undef ^ undef -> 0; strictly speaking, this is not strictly // necessary, but we try to be nice to people who expect this // behavior in simple cases if (Op0LV.isUndefined() && Op1LV.isUndefined()) { markForcedConstant(I, Constant::getNullValue(ITy)); return true; } // undef ^ X -> undef break; case Instruction::SDiv: case Instruction::UDiv: case Instruction::SRem: case Instruction::URem: // X / undef -> undef. No change. // X % undef -> undef. No change. if (Op1LV.isUndefined()) break; // undef / X -> 0. X could be maxint. // undef % X -> 0. X could be 1. markForcedConstant(I, Constant::getNullValue(ITy)); return true; case Instruction::AShr: // X >>a undef -> undef. if (Op1LV.isUndefined()) break; // undef >>a X -> all ones markForcedConstant(I, Constant::getAllOnesValue(ITy)); return true; case Instruction::LShr: case Instruction::Shl: // X << undef -> undef. // X >> undef -> undef. if (Op1LV.isUndefined()) break; // undef << X -> 0 // undef >> X -> 0 markForcedConstant(I, Constant::getNullValue(ITy)); return true; case Instruction::Select: Op1LV = getValueState(I->getOperand(1)); // undef ? X : Y -> X or Y. There could be commonality between X/Y. if (Op0LV.isUndefined()) { if (!Op1LV.isConstant()) // Pick the constant one if there is any. Op1LV = getValueState(I->getOperand(2)); } else if (Op1LV.isUndefined()) { // c ? undef : undef -> undef. No change. Op1LV = getValueState(I->getOperand(2)); if (Op1LV.isUndefined()) break; // Otherwise, c ? undef : x -> x. } else { // Leave Op1LV as Operand(1)'s LatticeValue. } if (Op1LV.isConstant()) markForcedConstant(I, Op1LV.getConstant()); else markOverdefined(I); return true; case Instruction::Load: // A load here means one of two things: a load of undef from a global, // a load from an unknown pointer. Either way, having it return undef // is okay. break; case Instruction::ICmp: // X == undef -> undef. Other comparisons get more complicated. if (cast<ICmpInst>(I)->isEquality()) break; markOverdefined(I); return true; case Instruction::Call: case Instruction::Invoke: { // There are two reasons a call can have an undef result // 1. It could be tracked. // 2. It could be constant-foldable. // Because of the way we solve return values, tracked calls must // never be marked overdefined in ResolvedUndefsIn. if (Function *F = CallSite(I).getCalledFunction()) if (TrackedRetVals.count(F)) break; // If the call is constant-foldable, we mark it overdefined because // we do not know what return values are valid. markOverdefined(I); return true; } default: // If we don't know what should happen here, conservatively mark it // overdefined. markOverdefined(I); return true; } } // Check to see if we have a branch or switch on an undefined value. If so // we force the branch to go one way or the other to make the successor // values live. It doesn't really matter which way we force it. TerminatorInst *TI = BB->getTerminator(); if (BranchInst *BI = dyn_cast<BranchInst>(TI)) { if (!BI->isConditional()) continue; if (!getValueState(BI->getCondition()).isUndefined()) continue; // If the input to SCCP is actually branch on undef, fix the undef to // false. 
if (isa<UndefValue>(BI->getCondition())) { BI->setCondition(ConstantInt::getFalse(BI->getContext())); markEdgeExecutable(BB, TI->getSuccessor(1)); return true; } // Otherwise, it is a branch on a symbolic value which is currently // considered to be undef. Handle this by forcing the input value to the // branch to false. markForcedConstant(BI->getCondition(), ConstantInt::getFalse(TI->getContext())); return true; } if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) { if (!SI->getNumCases()) continue; if (!getValueState(SI->getCondition()).isUndefined()) continue; // If the input to SCCP is actually switch on undef, fix the undef to // the first constant. if (isa<UndefValue>(SI->getCondition())) { SI->setCondition(SI->case_begin().getCaseValue()); markEdgeExecutable(BB, SI->case_begin().getCaseSuccessor()); return true; } markForcedConstant(SI->getCondition(), SI->case_begin().getCaseValue()); return true; } } return false; } namespace { //===--------------------------------------------------------------------===// // /// SCCP Class - This class uses the SCCPSolver to implement a per-function /// Sparse Conditional Constant Propagator. /// struct SCCP : public FunctionPass { void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<TargetLibraryInfoWrapperPass>(); } static char ID; // Pass identification, replacement for typeid SCCP() : FunctionPass(ID) { initializeSCCPPass(*PassRegistry::getPassRegistry()); } // runOnFunction - Run the Sparse Conditional Constant Propagation // algorithm, and return true if the function was modified. // bool runOnFunction(Function &F) override; }; } // end anonymous namespace char SCCP::ID = 0; INITIALIZE_PASS(SCCP, "sccp", "Sparse Conditional Constant Propagation", false, false) // createSCCPPass - This is the public interface to this file. FunctionPass *llvm::createSCCPPass() { return new SCCP(); } static void DeleteInstructionInBlock(BasicBlock *BB) { DEBUG(dbgs() << " BasicBlock Dead:" << *BB); ++NumDeadBlocks; // Check to see if there are non-terminating instructions to delete. if (isa<TerminatorInst>(BB->begin())) return; // Delete the instructions backwards, as it has a reduced likelihood of having // to update as many def-use and use-def chains. Instruction *EndInst = BB->getTerminator(); // Last not to be deleted. while (EndInst != BB->begin()) { // Delete the next to last instruction. BasicBlock::iterator I = EndInst; Instruction *Inst = --I; if (!Inst->use_empty()) Inst->replaceAllUsesWith(UndefValue::get(Inst->getType())); if (isa<LandingPadInst>(Inst)) { EndInst = Inst; continue; } BB->getInstList().erase(Inst); ++NumInstRemoved; } } // runOnFunction() - Run the Sparse Conditional Constant Propagation algorithm, // and return true if the function was modified. // bool SCCP::runOnFunction(Function &F) { if (skipOptnoneFunction(F)) return false; DEBUG(dbgs() << "SCCP on function '" << F.getName() << "'\n"); const DataLayout &DL = F.getParent()->getDataLayout(); const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); SCCPSolver Solver(DL, TLI); // Mark the first block of the function as being executable. Solver.MarkBlockExecutable(F.begin()); // Mark all arguments to the function as being overdefined. for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end(); AI != E;++AI) Solver.markAnythingOverdefined(AI); // Solve for constants. 
bool ResolvedUndefs = true; while (ResolvedUndefs) { Solver.Solve(); DEBUG(dbgs() << "RESOLVING UNDEFs\n"); ResolvedUndefs = Solver.ResolvedUndefsIn(F); } bool MadeChanges = false; // If we decided that there are basic blocks that are dead in this function, // delete their contents now. Note that we cannot actually delete the blocks, // as we cannot modify the CFG of the function. for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) { if (!Solver.isBlockExecutable(BB)) { DeleteInstructionInBlock(BB); MadeChanges = true; continue; } // Iterate over all of the instructions in a function, replacing them with // constants if we have found them to be of constant values. // for (BasicBlock::iterator BI = BB->begin(), E = BB->end(); BI != E; ) { Instruction *Inst = BI++; if (Inst->getType()->isVoidTy() || isa<TerminatorInst>(Inst)) continue; // TODO: Reconstruct structs from their elements. if (Inst->getType()->isStructTy()) continue; LatticeVal IV = Solver.getLatticeValueFor(Inst); if (IV.isOverdefined()) continue; Constant *Const = IV.isConstant() ? IV.getConstant() : UndefValue::get(Inst->getType()); DEBUG(dbgs() << " Constant: " << *Const << " = " << *Inst << '\n'); // Replaces all of the uses of a variable with uses of the constant. Inst->replaceAllUsesWith(Const); // Delete the instruction. Inst->eraseFromParent(); // Hey, we just changed something! MadeChanges = true; ++NumInstRemoved; } } return MadeChanges; } namespace { //===--------------------------------------------------------------------===// // /// IPSCCP Class - This class implements interprocedural Sparse Conditional /// Constant Propagation. /// struct IPSCCP : public ModulePass { void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<TargetLibraryInfoWrapperPass>(); } static char ID; IPSCCP() : ModulePass(ID) { initializeIPSCCPPass(*PassRegistry::getPassRegistry()); } bool runOnModule(Module &M) override; }; } // end anonymous namespace char IPSCCP::ID = 0; INITIALIZE_PASS_BEGIN(IPSCCP, "ipsccp", "Interprocedural Sparse Conditional Constant Propagation", false, false) INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) INITIALIZE_PASS_END(IPSCCP, "ipsccp", "Interprocedural Sparse Conditional Constant Propagation", false, false) // createIPSCCPPass - This is the public interface to this file. ModulePass *llvm::createIPSCCPPass() { return new IPSCCP(); } static bool AddressIsTaken(const GlobalValue *GV) { // Delete any dead constantexpr klingons. GV->removeDeadConstantUsers(); for (const Use &U : GV->uses()) { const User *UR = U.getUser(); if (const StoreInst *SI = dyn_cast<StoreInst>(UR)) { if (SI->getOperand(0) == GV || SI->isVolatile()) return true; // Storing addr of GV. } else if (isa<InvokeInst>(UR) || isa<CallInst>(UR)) { // Make sure we are calling the function, not passing the address. ImmutableCallSite CS(cast<Instruction>(UR)); if (!CS.isCallee(&U)) return true; } else if (const LoadInst *LI = dyn_cast<LoadInst>(UR)) { if (LI->isVolatile()) return true; } else if (isa<BlockAddress>(UR)) { // blockaddress doesn't take the address of the function, it takes addr // of label. } else { return true; } } return false; } bool IPSCCP::runOnModule(Module &M) { const DataLayout &DL = M.getDataLayout(); const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); SCCPSolver Solver(DL, TLI); // AddressTakenFunctions - This set keeps track of the address-taken functions // that are in the input. 
As IPSCCP runs through and simplifies code, // functions that were address taken can end up losing their // address-taken-ness. Because of this, we keep track of their addresses from // the first pass so we can use them for the later simplification pass. SmallPtrSet<Function*, 32> AddressTakenFunctions; // Loop over all functions, marking arguments to those with their addresses // taken or that are external as overdefined. // for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) { if (F->isDeclaration()) continue; // If this is a strong or ODR definition of this function, then we can // propagate information about its result into callsites of it. if (!F->mayBeOverridden()) Solver.AddTrackedFunction(F); // If this function only has direct calls that we can see, we can track its // arguments and return value aggressively, and can assume it is not called // unless we see evidence to the contrary. if (F->hasLocalLinkage()) { if (AddressIsTaken(F)) AddressTakenFunctions.insert(F); else { Solver.AddArgumentTrackedFunction(F); continue; } } // Assume the function is called. Solver.MarkBlockExecutable(F->begin()); // Assume nothing about the incoming arguments. for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E; ++AI) Solver.markAnythingOverdefined(AI); } // Loop over global variables. We inform the solver about any internal global // variables that do not have their 'addresses taken'. If they don't have // their addresses taken, we can propagate constants through them. for (Module::global_iterator G = M.global_begin(), E = M.global_end(); G != E; ++G) if (!G->isConstant() && G->hasLocalLinkage() && !AddressIsTaken(G)) Solver.TrackValueOfGlobalVariable(G); // Solve for constants. bool ResolvedUndefs = true; while (ResolvedUndefs) { Solver.Solve(); DEBUG(dbgs() << "RESOLVING UNDEFS\n"); ResolvedUndefs = false; for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) ResolvedUndefs |= Solver.ResolvedUndefsIn(*F); } bool MadeChanges = false; // Iterate over all of the instructions in the module, replacing them with // constants if we have found them to be of constant values. // SmallVector<BasicBlock*, 512> BlocksToErase; for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) { if (Solver.isBlockExecutable(F->begin())) { for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E; ++AI) { if (AI->use_empty() || AI->getType()->isStructTy()) continue; // TODO: Could use getStructLatticeValueFor to find out if the entire // result is a constant and replace it entirely if so. LatticeVal IV = Solver.getLatticeValueFor(AI); if (IV.isOverdefined()) continue; Constant *CST = IV.isConstant() ? IV.getConstant() : UndefValue::get(AI->getType()); DEBUG(dbgs() << "*** Arg " << *AI << " = " << *CST <<"\n"); // Replaces all of the uses of a variable with uses of the // constant. 
AI->replaceAllUsesWith(CST); ++IPNumArgsElimed; } } for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) { if (!Solver.isBlockExecutable(BB)) { DeleteInstructionInBlock(BB); MadeChanges = true; TerminatorInst *TI = BB->getTerminator(); for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) { BasicBlock *Succ = TI->getSuccessor(i); if (!Succ->empty() && isa<PHINode>(Succ->begin())) TI->getSuccessor(i)->removePredecessor(BB); } if (!TI->use_empty()) TI->replaceAllUsesWith(UndefValue::get(TI->getType())); TI->eraseFromParent(); new UnreachableInst(M.getContext(), BB); if (&*BB != &F->front()) BlocksToErase.push_back(BB); continue; } for (BasicBlock::iterator BI = BB->begin(), E = BB->end(); BI != E; ) { Instruction *Inst = BI++; if (Inst->getType()->isVoidTy() || Inst->getType()->isStructTy()) continue; // TODO: Could use getStructLatticeValueFor to find out if the entire // result is a constant and replace it entirely if so. LatticeVal IV = Solver.getLatticeValueFor(Inst); if (IV.isOverdefined()) continue; Constant *Const = IV.isConstant() ? IV.getConstant() : UndefValue::get(Inst->getType()); DEBUG(dbgs() << " Constant: " << *Const << " = " << *Inst << '\n'); // Replaces all of the uses of a variable with uses of the // constant. Inst->replaceAllUsesWith(Const); // Delete the instruction. if (!isa<CallInst>(Inst) && !isa<TerminatorInst>(Inst)) Inst->eraseFromParent(); // Hey, we just changed something! MadeChanges = true; ++IPNumInstRemoved; } } // Now that all instructions in the function are constant folded, erase dead // blocks, because we can now use ConstantFoldTerminator to get rid of // in-edges. for (unsigned i = 0, e = BlocksToErase.size(); i != e; ++i) { // If there are any PHI nodes in this successor, drop entries for BB now. BasicBlock *DeadBB = BlocksToErase[i]; for (Value::user_iterator UI = DeadBB->user_begin(), UE = DeadBB->user_end(); UI != UE;) { // Grab the user and then increment the iterator early, as the user // will be deleted. Step past all adjacent uses from the same user. Instruction *I = dyn_cast<Instruction>(*UI); do { ++UI; } while (UI != UE && *UI == I); // Ignore blockaddress users; BasicBlock's dtor will handle them. if (!I) continue; bool Folded = ConstantFoldTerminator(I->getParent()); if (!Folded) { // The constant folder may not have been able to fold the terminator // if this is a branch or switch on undef. Fold it manually as a // branch to the first successor. #ifndef NDEBUG if (BranchInst *BI = dyn_cast<BranchInst>(I)) { assert(BI->isConditional() && isa<UndefValue>(BI->getCondition()) && "Branch should be foldable!"); } else if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) { assert(isa<UndefValue>(SI->getCondition()) && "Switch should fold"); } else { llvm_unreachable("Didn't fold away reference to block!"); } #endif // Make this an uncond branch to the first successor. TerminatorInst *TI = I->getParent()->getTerminator(); BranchInst::Create(TI->getSuccessor(0), TI); // Remove entries in successor phi nodes to remove edges. for (unsigned i = 1, e = TI->getNumSuccessors(); i != e; ++i) TI->getSuccessor(i)->removePredecessor(TI->getParent()); // Remove the old terminator. TI->eraseFromParent(); } } // Finally, delete the basic block. F->getBasicBlockList().erase(DeadBB); } BlocksToErase.clear(); } // If we inferred constant or undef return values for a function, we replaced // all call uses with the inferred value. This means we don't need to bother // actually returning anything from the function. 
Replace all return // instructions with return undef. // // Do this in two stages: first identify the functions we should process, then // actually zap their returns. This is important because we can only do this // if the address of the function isn't taken. In cases where a return is the // last use of a function, the order of processing functions would affect // whether other functions are optimizable. SmallVector<ReturnInst*, 8> ReturnsToZap; // TODO: Process multiple value ret instructions also. const DenseMap<Function*, LatticeVal> &RV = Solver.getTrackedRetVals(); for (DenseMap<Function*, LatticeVal>::const_iterator I = RV.begin(), E = RV.end(); I != E; ++I) { Function *F = I->first; if (I->second.isOverdefined() || F->getReturnType()->isVoidTy()) continue; // We can only do this if we know that nothing else can call the function. if (!F->hasLocalLinkage() || AddressTakenFunctions.count(F)) continue; for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) if (!isa<UndefValue>(RI->getOperand(0))) ReturnsToZap.push_back(RI); } // Zap all returns which we've identified as zap to change. for (unsigned i = 0, e = ReturnsToZap.size(); i != e; ++i) { Function *F = ReturnsToZap[i]->getParent()->getParent(); ReturnsToZap[i]->setOperand(0, UndefValue::get(F->getReturnType())); } // If we inferred constant or undef values for globals variables, we can // delete the global and any stores that remain to it. const DenseMap<GlobalVariable*, LatticeVal> &TG = Solver.getTrackedGlobals(); for (DenseMap<GlobalVariable*, LatticeVal>::const_iterator I = TG.begin(), E = TG.end(); I != E; ++I) { GlobalVariable *GV = I->first; assert(!I->second.isOverdefined() && "Overdefined values should have been taken out of the map!"); DEBUG(dbgs() << "Found that GV '" << GV->getName() << "' is constant!\n"); while (!GV->use_empty()) { StoreInst *SI = cast<StoreInst>(GV->user_back()); SI->eraseFromParent(); } M.getGlobalList().erase(GV); ++IPNumGlobalConst; } return MadeChanges; }
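A minimal usage sketch for the interprocedural SCCP pass implemented above, assuming createIPSCCPPass() is declared in llvm/Transforms/IPO.h (as is conventional in this tree) and that M is an already-materialized llvm::Module; the helper name runIPSCCP is illustrative only, not part of the pass.

#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/IPO.h" // assumed to declare createIPSCCPPass()

// Run interprocedural sparse conditional constant propagation over a module.
// Returns true if IPSCCP changed anything (constant-folded arguments and
// returns, deleted dead blocks, removed constant-valued internal globals).
static bool runIPSCCP(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  PM.add(llvm::createIPSCCPPass());
  return PM.run(M);
}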
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
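The file that follows implements the RewriteStatepointsForGC module pass. A minimal sketch of driving it, assuming createRewriteStatepointsForGCPass() is declared in llvm/Transforms/Scalar.h and that the module's functions use a statepoint-based GC strategy (e.g. gc "statepoint-example"); the helper name rewriteStatepoints is illustrative only.

#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h" // assumed to declare createRewriteStatepointsForGCPass()

// Rewrite existing gc.statepoints so collector relocations are explicit,
// then verify the resulting IR. Only functions for which the pass's
// precondition holds (a statepoint-based GC strategy) are rewritten.
static bool rewriteStatepoints(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  PM.add(llvm::createRewriteStatepointsForGCPass());
  bool Changed = PM.run(M);
  bool Broken = llvm::verifyModule(M, &llvm::errs());
  (void)Broken;
  assert(!Broken && "rewritten IR should still verify");
  return Changed;
}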
//===- RewriteStatepointsForGC.cpp - Make GC relocations explicit ---------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Rewrite an existing set of gc.statepoints such that they make potential // relocations performed by the garbage collector explicit in the IR. // //===----------------------------------------------------------------------===// #include "llvm/Pass.h" #include "llvm/Analysis/CFG.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/ADT/SetOperations.h" #include "llvm/ADT/Statistic.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/InstIterator.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/IR/MDBuilder.h" #include "llvm/IR/Statepoint.h" #include "llvm/IR/Value.h" #include "llvm/IR/Verifier.h" #include "llvm/Support/Debug.h" #include "llvm/Support/CommandLine.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Cloning.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/PromoteMemToReg.h" #define DEBUG_TYPE "rewrite-statepoints-for-gc" using namespace llvm; // Print tracing output static cl::opt<bool> TraceLSP("trace-rewrite-statepoints", cl::Hidden, cl::init(false)); // Print the liveset found at the insert location static cl::opt<bool> PrintLiveSet("spp-print-liveset", cl::Hidden, cl::init(false)); static cl::opt<bool> PrintLiveSetSize("spp-print-liveset-size", cl::Hidden, cl::init(false)); // Print out the base pointers for debugging static cl::opt<bool> PrintBasePointers("spp-print-base-pointers", cl::Hidden, cl::init(false)); // Cost threshold measuring when it is profitable to rematerialize value instead // of relocating it static cl::opt<unsigned> RematerializationThreshold("spp-rematerialization-threshold", cl::Hidden, cl::init(6)); #ifdef XDEBUG static bool ClobberNonLive = true; #else static bool ClobberNonLive = false; #endif static cl::opt<bool, true> ClobberNonLiveOverride("rs4gc-clobber-non-live", cl::location(ClobberNonLive), cl::Hidden); namespace { struct RewriteStatepointsForGC : public ModulePass { static char ID; // Pass identification, replacement for typeid RewriteStatepointsForGC() : ModulePass(ID) { initializeRewriteStatepointsForGCPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function &F); bool runOnModule(Module &M) override { bool Changed = false; for (Function &F : M) Changed |= runOnFunction(F); if (Changed) { // stripDereferenceabilityInfo asserts that shouldRewriteStatepointsIn // returns true for at least one function in the module. Since at least // one function changed, we know that the precondition is satisfied. stripDereferenceabilityInfo(M); } return Changed; } void getAnalysisUsage(AnalysisUsage &AU) const override { // We add and rewrite a bunch of instructions, but don't really do much // else. We could in theory preserve a lot more analyses here. 
AU.addRequired<DominatorTreeWrapperPass>(); AU.addRequired<TargetTransformInfoWrapperPass>(); } /// The IR fed into RewriteStatepointsForGC may have had attributes implying /// dereferenceability that are no longer valid/correct after /// RewriteStatepointsForGC has run. This is because semantically, after /// RewriteStatepointsForGC runs, all calls to gc.statepoint "free" the entire /// heap. stripDereferenceabilityInfo (conservatively) restores correctness /// by erasing all attributes in the module that externally imply /// dereferenceability. /// void stripDereferenceabilityInfo(Module &M); // Helpers for stripDereferenceabilityInfo void stripDereferenceabilityInfoFromBody(Function &F); void stripDereferenceabilityInfoFromPrototype(Function &F); }; } // namespace char RewriteStatepointsForGC::ID = 0; ModulePass *llvm::createRewriteStatepointsForGCPass() { return new RewriteStatepointsForGC(); } INITIALIZE_PASS_BEGIN(RewriteStatepointsForGC, "rewrite-statepoints-for-gc", "Make relocations explicit at statepoints", false, false) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_END(RewriteStatepointsForGC, "rewrite-statepoints-for-gc", "Make relocations explicit at statepoints", false, false) namespace { struct GCPtrLivenessData { /// Values defined in this block. DenseMap<BasicBlock *, DenseSet<Value *>> KillSet; /// Values used in this block (and thus live); does not included values /// killed within this block. DenseMap<BasicBlock *, DenseSet<Value *>> LiveSet; /// Values live into this basic block (i.e. used by any /// instruction in this basic block or ones reachable from here) DenseMap<BasicBlock *, DenseSet<Value *>> LiveIn; /// Values live out of this basic block (i.e. live into /// any successor block) DenseMap<BasicBlock *, DenseSet<Value *>> LiveOut; }; // The type of the internal cache used inside the findBasePointers family // of functions. From the callers perspective, this is an opaque type and // should not be inspected. // // In the actual implementation this caches two relations: // - The base relation itself (i.e. this pointer is based on that one) // - The base defining value relation (i.e. before base_phi insertion) // Generally, after the execution of a full findBasePointer call, only the // base relation will remain. Internally, we add a mixture of the two // types, then update all the second type to the first type typedef DenseMap<Value *, Value *> DefiningValueMapTy; typedef DenseSet<llvm::Value *> StatepointLiveSetTy; typedef DenseMap<Instruction *, Value *> RematerializedValueMapTy; struct PartiallyConstructedSafepointRecord { /// The set of values known to be live accross this safepoint StatepointLiveSetTy liveset; /// Mapping from live pointers to a base-defining-value DenseMap<llvm::Value *, llvm::Value *> PointerToBase; /// The *new* gc.statepoint instruction itself. This produces the token /// that normal path gc.relocates and the gc.result are tied to. Instruction *StatepointToken; /// Instruction to which exceptional gc relocates are attached /// Makes it easier to iterate through them during relocationViaAlloca. Instruction *UnwindToken; /// Record live values we are rematerialized instead of relocating. /// They are not included into 'liveset' field. /// Maps rematerialized copy to it's original value. 
RematerializedValueMapTy RematerializedValues; }; } /// Compute the live-in set for every basic block in the function static void computeLiveInValues(DominatorTree &DT, Function &F, GCPtrLivenessData &Data); /// Given results from the dataflow liveness computation, find the set of live /// Values at a particular instruction. static void findLiveSetAtInst(Instruction *inst, GCPtrLivenessData &Data, StatepointLiveSetTy &out); // TODO: Once we can get to the GCStrategy, this becomes // Optional<bool> isGCManagedPointer(const Value *V) const override { static bool isGCPointerType(const Type *T) { if (const PointerType *PT = dyn_cast<PointerType>(T)) // For the sake of this example GC, we arbitrarily pick addrspace(1) as our // GC managed heap. We know that a pointer into this heap needs to be // updated and that no other pointer does. return (1 == PT->getAddressSpace()); return false; } // Return true if this type is one which a) is a gc pointer or contains a GC // pointer and b) is of a type this code expects to encounter as a live value. // (The insertion code will assert that a type which matches (a) and not (b) // is not encountered.) static bool isHandledGCPointerType(Type *T) { // We fully support gc pointers if (isGCPointerType(T)) return true; // We partially support vectors of gc pointers. The code will assert if it // can't handle something. if (auto VT = dyn_cast<VectorType>(T)) if (isGCPointerType(VT->getElementType())) return true; return false; } #ifndef NDEBUG /// Returns true if this type contains a gc pointer whether we know how to /// handle that type or not. static bool containsGCPtrType(Type *Ty) { if (isGCPointerType(Ty)) return true; if (VectorType *VT = dyn_cast<VectorType>(Ty)) return isGCPointerType(VT->getScalarType()); if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) return containsGCPtrType(AT->getElementType()); if (StructType *ST = dyn_cast<StructType>(Ty)) return std::any_of( ST->subtypes().begin(), ST->subtypes().end(), [](Type *SubType) { return containsGCPtrType(SubType); }); return false; } // Returns true if this is a type which a) is a gc pointer or contains a GC // pointer and b) is of a type which the code doesn't expect (i.e. first class // aggregates). Used to trip assertions. static bool isUnhandledGCPointerType(Type *Ty) { return containsGCPtrType(Ty) && !isHandledGCPointerType(Ty); } #endif static bool order_by_name(llvm::Value *a, llvm::Value *b) { if (a->hasName() && b->hasName()) { return -1 == a->getName().compare(b->getName()); } else if (a->hasName() && !b->hasName()) { return true; } else if (!a->hasName() && b->hasName()) { return false; } else { // Better than nothing, but not stable return a < b; } } // Conservatively identifies any definitions which might be live at the // given instruction. The analysis is performed immediately before the // given instruction. Values defined by that instruction are not considered // live. Values used by that instruction are considered live. 
static void analyzeParsePointLiveness( DominatorTree &DT, GCPtrLivenessData &OriginalLivenessData, const CallSite &CS, PartiallyConstructedSafepointRecord &result) { Instruction *inst = CS.getInstruction(); StatepointLiveSetTy liveset; findLiveSetAtInst(inst, OriginalLivenessData, liveset); if (PrintLiveSet) { // Note: This output is used by several of the test cases // The order of elemtns in a set is not stable, put them in a vec and sort // by name SmallVector<Value *, 64> temp; temp.insert(temp.end(), liveset.begin(), liveset.end()); std::sort(temp.begin(), temp.end(), order_by_name); errs() << "Live Variables:\n"; for (Value *V : temp) { errs() << " " << V->getName(); // no newline V->dump(); } } if (PrintLiveSetSize) { errs() << "Safepoint For: " << CS.getCalledValue()->getName() << "\n"; errs() << "Number live values: " << liveset.size() << "\n"; } result.liveset = liveset; } static Value *findBaseDefiningValue(Value *I); /// Return a base defining value for the 'Index' element of the given vector /// instruction 'I'. If Index is null, returns a BDV for the entire vector /// 'I'. As an optimization, this method will try to determine when the /// element is known to already be a base pointer. If this can be established, /// the second value in the returned pair will be true. Note that either a /// vector or a pointer typed value can be returned. For the former, the /// vector returned is a BDV (and possibly a base) of the entire vector 'I'. /// If the later, the return pointer is a BDV (or possibly a base) for the /// particular element in 'I'. static std::pair<Value *, bool> findBaseDefiningValueOfVector(Value *I, Value *Index = nullptr) { assert(I->getType()->isVectorTy() && cast<VectorType>(I->getType())->getElementType()->isPointerTy() && "Illegal to ask for the base pointer of a non-pointer type"); // Each case parallels findBaseDefiningValue below, see that code for // detailed motivation. if (isa<Argument>(I)) // An incoming argument to the function is a base pointer return std::make_pair(I, true); // We shouldn't see the address of a global as a vector value? assert(!isa<GlobalVariable>(I) && "unexpected global variable found in base of vector"); // inlining could possibly introduce phi node that contains // undef if callee has multiple returns if (isa<UndefValue>(I)) // utterly meaningless, but useful for dealing with partially optimized // code. return std::make_pair(I, true); // Due to inheritance, this must be _after_ the global variable and undef // checks if (Constant *Con = dyn_cast<Constant>(I)) { assert(!isa<GlobalVariable>(I) && !isa<UndefValue>(I) && "order of checks wrong!"); assert(Con->isNullValue() && "null is the only case which makes sense"); return std::make_pair(Con, true); } if (isa<LoadInst>(I)) return std::make_pair(I, true); // For an insert element, we might be able to look through it if we know // something about the indexes. if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(I)) { if (Index) { Value *InsertIndex = IEI->getOperand(2); // This index is inserting the value, look for its BDV if (InsertIndex == Index) return std::make_pair(findBaseDefiningValue(IEI->getOperand(1)), false); // Both constant, and can't be equal per above. This insert is definitely // not relevant, look back at the rest of the vector and keep trying. if (isa<ConstantInt>(Index) && isa<ConstantInt>(InsertIndex)) return findBaseDefiningValueOfVector(IEI->getOperand(0), Index); } // We don't know whether this vector contains entirely base pointers or // not. 
To be conservatively correct, we treat it as a BDV and will // duplicate code as needed to construct a parallel vector of bases. return std::make_pair(IEI, false); } if (isa<ShuffleVectorInst>(I)) // We don't know whether this vector contains entirely base pointers or // not. To be conservatively correct, we treat it as a BDV and will // duplicate code as needed to construct a parallel vector of bases. // TODO: There a number of local optimizations which could be applied here // for particular sufflevector patterns. return std::make_pair(I, false); // A PHI or Select is a base defining value. The outer findBasePointer // algorithm is responsible for constructing a base value for this BDV. assert((isa<SelectInst>(I) || isa<PHINode>(I)) && "unknown vector instruction - no base found for vector element"); return std::make_pair(I, false); } static bool isKnownBaseResult(Value *V); /// Helper function for findBasePointer - Will return a value which either a) /// defines the base pointer for the input or b) blocks the simple search /// (i.e. a PHI or Select of two derived pointers) static Value *findBaseDefiningValue(Value *I) { if (I->getType()->isVectorTy()) return findBaseDefiningValueOfVector(I).first; assert(I->getType()->isPointerTy() && "Illegal to ask for the base pointer of a non-pointer type"); // This case is a bit of a hack - it only handles extracts from vectors which // trivially contain only base pointers or cases where we can directly match // the index of the original extract element to an insertion into the vector. // See note inside the function for how to improve this. if (auto *EEI = dyn_cast<ExtractElementInst>(I)) { Value *VectorOperand = EEI->getVectorOperand(); Value *Index = EEI->getIndexOperand(); std::pair<Value *, bool> pair = findBaseDefiningValueOfVector(VectorOperand, Index); Value *VectorBase = pair.first; if (VectorBase->getType()->isPointerTy()) // We found a BDV for this specific element with the vector. This is an // optimization, but in practice it covers most of the useful cases // created via scalarization. return VectorBase; else { assert(VectorBase->getType()->isVectorTy()); if (pair.second) // If the entire vector returned is known to be entirely base pointers, // then the extractelement is valid base for this value. return EEI; else { // Otherwise, we have an instruction which potentially produces a // derived pointer and we need findBasePointers to clone code for us // such that we can create an instruction which produces the // accompanying base pointer. // Note: This code is currently rather incomplete. We don't currently // support the general form of shufflevector of insertelement. // Conceptually, these are just 'base defining values' of the same // variety as phi or select instructions. We need to update the // findBasePointers algorithm to insert new 'base-only' versions of the // original instructions. This is relative straight forward to do, but // the case which would motivate the work hasn't shown up in real // workloads yet. 
assert((isa<PHINode>(VectorBase) || isa<SelectInst>(VectorBase)) && "need to extend findBasePointers for generic vector" "instruction cases"); return VectorBase; } } } if (isa<Argument>(I)) // An incoming argument to the function is a base pointer // We should have never reached here if this argument isn't an gc value return I; if (isa<GlobalVariable>(I)) // base case return I; // inlining could possibly introduce phi node that contains // undef if callee has multiple returns if (isa<UndefValue>(I)) // utterly meaningless, but useful for dealing with // partially optimized code. return I; // Due to inheritance, this must be _after_ the global variable and undef // checks if (Constant *Con = dyn_cast<Constant>(I)) { assert(!isa<GlobalVariable>(I) && !isa<UndefValue>(I) && "order of checks wrong!"); // Note: Finding a constant base for something marked for relocation // doesn't really make sense. The most likely case is either a) some // screwed up the address space usage or b) your validating against // compiled C++ code w/o the proper separation. The only real exception // is a null pointer. You could have generic code written to index of // off a potentially null value and have proven it null. We also use // null pointers in dead paths of relocation phis (which we might later // want to find a base pointer for). assert(isa<ConstantPointerNull>(Con) && "null is the only case which makes sense"); return Con; } if (CastInst *CI = dyn_cast<CastInst>(I)) { Value *Def = CI->stripPointerCasts(); // If we find a cast instruction here, it means we've found a cast which is // not simply a pointer cast (i.e. an inttoptr). We don't know how to // handle int->ptr conversion. assert(!isa<CastInst>(Def) && "shouldn't find another cast here"); return findBaseDefiningValue(Def); } if (isa<LoadInst>(I)) return I; // The value loaded is an gc base itself if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) // The base of this GEP is the base return findBaseDefiningValue(GEP->getPointerOperand()); if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { switch (II->getIntrinsicID()) { case Intrinsic::experimental_gc_result_ptr: default: // fall through to general call handling break; case Intrinsic::experimental_gc_statepoint: case Intrinsic::experimental_gc_result_float: case Intrinsic::experimental_gc_result_int: llvm_unreachable("these don't produce pointers"); case Intrinsic::experimental_gc_relocate: { // Rerunning safepoint insertion after safepoints are already // inserted is not supported. It could probably be made to work, // but why are you doing this? There's no good reason. llvm_unreachable("repeat safepoint insertion is not supported"); } case Intrinsic::gcroot: // Currently, this mechanism hasn't been extended to work with gcroot. // There's no reason it couldn't be, but I haven't thought about the // implications much. llvm_unreachable( "interaction with the gcroot mechanism is not supported"); } } // We assume that functions in the source language only return base // pointers. This should probably be generalized via attributes to support // both source language and internal functions. if (isa<CallInst>(I) || isa<InvokeInst>(I)) return I; // I have absolutely no idea how to implement this part yet. It's not // neccessarily hard, I just haven't really looked at it yet. assert(!isa<LandingPadInst>(I) && "Landing Pad is unimplemented"); if (isa<AtomicCmpXchgInst>(I)) // A CAS is effectively a atomic store and load combined under a // predicate. 
From the perspective of base pointers, we just treat it // like a load. return I; assert(!isa<AtomicRMWInst>(I) && "Xchg handled above, all others are " "binary ops which don't apply to pointers"); // The aggregate ops. Aggregates can either be in the heap or on the // stack, but in either case, this is simply a field load. As a result, // this is a defining definition of the base just like a load is. if (isa<ExtractValueInst>(I)) return I; // We should never see an insert vector since that would require we be // tracing back a struct value not a pointer value. assert(!isa<InsertValueInst>(I) && "Base pointer for a struct is meaningless"); // The last two cases here don't return a base pointer. Instead, they // return a value which dynamically selects from amoung several base // derived pointers (each with it's own base potentially). It's the job of // the caller to resolve these. assert((isa<SelectInst>(I) || isa<PHINode>(I)) && "missing instruction case in findBaseDefiningValing"); return I; } /// Returns the base defining value for this value. static Value *findBaseDefiningValueCached(Value *I, DefiningValueMapTy &Cache) { Value *&Cached = Cache[I]; if (!Cached) { Cached = findBaseDefiningValue(I); } assert(Cache[I] != nullptr); if (TraceLSP) { dbgs() << "fBDV-cached: " << I->getName() << " -> " << Cached->getName() << "\n"; } return Cached; } /// Return a base pointer for this value if known. Otherwise, return it's /// base defining value. static Value *findBaseOrBDV(Value *I, DefiningValueMapTy &Cache) { Value *Def = findBaseDefiningValueCached(I, Cache); auto Found = Cache.find(Def); if (Found != Cache.end()) { // Either a base-of relation, or a self reference. Caller must check. return Found->second; } // Only a BDV available return Def; } /// Given the result of a call to findBaseDefiningValue, or findBaseOrBDV, /// is it known to be a base pointer? Or do we need to continue searching. static bool isKnownBaseResult(Value *V) { if (!isa<PHINode>(V) && !isa<SelectInst>(V)) { // no recursion possible return true; } if (isa<Instruction>(V) && cast<Instruction>(V)->getMetadata("is_base_value")) { // This is a previously inserted base phi or select. We know // that this is a base value. return true; } // We need to keep searching return false; } // TODO: find a better name for this namespace { class PhiState { public: enum Status { Unknown, Base, Conflict }; PhiState(Status s, Value *b = nullptr) : status(s), base(b) { assert(status != Base || b); } PhiState(Value *b) : status(Base), base(b) {} PhiState() : status(Unknown), base(nullptr) {} Status getStatus() const { return status; } Value *getBase() const { return base; } bool isBase() const { return getStatus() == Base; } bool isUnknown() const { return getStatus() == Unknown; } bool isConflict() const { return getStatus() == Conflict; } bool operator==(const PhiState &other) const { return base == other.base && status == other.status; } bool operator!=(const PhiState &other) const { return !(*this == other); } void dump() { errs() << status << " (" << base << " - " << (base ? base->getName() : "nullptr") << "): "; } private: Status status; Value *base; // non null only if status == base }; typedef DenseMap<Value *, PhiState> ConflictStateMapTy; // Values of type PhiState form a lattice, and this is a helper // class that implementes the meet operation. The meat of the meet // operation is implemented in MeetPhiStates::pureMeet class MeetPhiStates { public: // phiStates is a mapping from PHINodes and SelectInst's to PhiStates. 
explicit MeetPhiStates(const ConflictStateMapTy &phiStates) : phiStates(phiStates) {} // Destructively meet the current result with the base V. V can // either be a merge instruction (SelectInst / PHINode), in which // case its status is looked up in the phiStates map; or a regular // SSA value, in which case it is assumed to be a base. void meetWith(Value *V) { PhiState otherState = getStateForBDV(V); assert((MeetPhiStates::pureMeet(otherState, currentResult) == MeetPhiStates::pureMeet(currentResult, otherState)) && "math is wrong: meet does not commute!"); currentResult = MeetPhiStates::pureMeet(otherState, currentResult); } PhiState getResult() const { return currentResult; } private: const ConflictStateMapTy &phiStates; PhiState currentResult; /// Return a phi state for a base defining value. We'll generate a new /// base state for known bases and expect to find a cached state otherwise PhiState getStateForBDV(Value *baseValue) { if (isKnownBaseResult(baseValue)) { return PhiState(baseValue); } else { return lookupFromMap(baseValue); } } PhiState lookupFromMap(Value *V) { auto I = phiStates.find(V); assert(I != phiStates.end() && "lookup failed!"); return I->second; } static PhiState pureMeet(const PhiState &stateA, const PhiState &stateB) { switch (stateA.getStatus()) { case PhiState::Unknown: return stateB; case PhiState::Base: assert(stateA.getBase() && "can't be null"); if (stateB.isUnknown()) return stateA; if (stateB.isBase()) { if (stateA.getBase() == stateB.getBase()) { assert(stateA == stateB && "equality broken!"); return stateA; } return PhiState(PhiState::Conflict); } assert(stateB.isConflict() && "only three states!"); return PhiState(PhiState::Conflict); case PhiState::Conflict: return stateA; } llvm_unreachable("only three states!"); } }; } /// For a given value or instruction, figure out what base ptr it's derived /// from. For gc objects, this is simply itself. On success, returns a value /// which is the base pointer. (This is reliable and can be used for /// relocation.) On failure, returns nullptr. static Value *findBasePointer(Value *I, DefiningValueMapTy &cache) { Value *def = findBaseOrBDV(I, cache); if (isKnownBaseResult(def)) { return def; } // Here's the rough algorithm: // - For every SSA value, construct a mapping to either an actual base // pointer or a PHI which obscures the base pointer. // - Construct a mapping from PHI to unknown TOP state. Use an // optimistic algorithm to propagate base pointer information. Lattice // looks like: // UNKNOWN // b1 b2 b3 b4 // CONFLICT // When algorithm terminates, all PHIs will either have a single concrete // base or be in a conflict state. // - For every conflict, insert a dummy PHI node without arguments. Add // these to the base[Instruction] = BasePtr mapping. For every // non-conflict, add the actual base. // - For every conflict, add arguments for the base[a] of each input // arguments. // // Note: A simpler form of this would be to add the conflict form of all // PHIs without running the optimistic algorithm. This would be // analougous to pessimistic data flow and would likely lead to an // overall worse solution. ConflictStateMapTy states; states[def] = PhiState(); // Recursively fill in all phis & selects reachable from the initial one // for which we don't already know a definite base value for // TODO: This should be rewritten with a worklist bool done = false; while (!done) { done = true; // Since we're adding elements to 'states' as we run, we can't keep // iterators into the set. 
SmallVector<Value *, 16> Keys; Keys.reserve(states.size()); for (auto Pair : states) { Value *V = Pair.first; Keys.push_back(V); } for (Value *v : Keys) { assert(!isKnownBaseResult(v) && "why did it get added?"); if (PHINode *phi = dyn_cast<PHINode>(v)) { assert(phi->getNumIncomingValues() > 0 && "zero input phis are illegal"); for (Value *InVal : phi->incoming_values()) { Value *local = findBaseOrBDV(InVal, cache); if (!isKnownBaseResult(local) && states.find(local) == states.end()) { states[local] = PhiState(); done = false; } } } else if (SelectInst *sel = dyn_cast<SelectInst>(v)) { Value *local = findBaseOrBDV(sel->getTrueValue(), cache); if (!isKnownBaseResult(local) && states.find(local) == states.end()) { states[local] = PhiState(); done = false; } local = findBaseOrBDV(sel->getFalseValue(), cache); if (!isKnownBaseResult(local) && states.find(local) == states.end()) { states[local] = PhiState(); done = false; } } } } if (TraceLSP) { errs() << "States after initialization:\n"; for (auto Pair : states) { Instruction *v = cast<Instruction>(Pair.first); PhiState state = Pair.second; state.dump(); v->dump(); } } // TODO: come back and revisit the state transitions around inputs which // have reached conflict state. The current version seems too conservative. bool progress = true; while (progress) { #ifndef NDEBUG size_t oldSize = states.size(); #endif progress = false; // We're only changing keys in this loop, thus safe to keep iterators for (auto Pair : states) { MeetPhiStates calculateMeet(states); Value *v = Pair.first; assert(!isKnownBaseResult(v) && "why did it get added?"); if (SelectInst *select = dyn_cast<SelectInst>(v)) { calculateMeet.meetWith(findBaseOrBDV(select->getTrueValue(), cache)); calculateMeet.meetWith(findBaseOrBDV(select->getFalseValue(), cache)); } else for (Value *Val : cast<PHINode>(v)->incoming_values()) calculateMeet.meetWith(findBaseOrBDV(Val, cache)); PhiState oldState = states[v]; PhiState newState = calculateMeet.getResult(); if (oldState != newState) { progress = true; states[v] = newState; } } assert(oldSize <= states.size()); assert(oldSize == states.size() || progress); } if (TraceLSP) { errs() << "States after meet iteration:\n"; for (auto Pair : states) { Instruction *v = cast<Instruction>(Pair.first); PhiState state = Pair.second; state.dump(); v->dump(); } } // Insert Phis for all conflicts // We want to keep naming deterministic in the loop that follows, so // sort the keys before iteration. This is useful in allowing us to // write stable tests. Note that there is no invalidation issue here. 
SmallVector<Value *, 16> Keys; Keys.reserve(states.size()); for (auto Pair : states) { Value *V = Pair.first; Keys.push_back(V); } std::sort(Keys.begin(), Keys.end(), order_by_name); // TODO: adjust naming patterns to avoid this order of iteration dependency for (Value *V : Keys) { Instruction *v = cast<Instruction>(V); PhiState state = states[V]; assert(!isKnownBaseResult(v) && "why did it get added?"); assert(!state.isUnknown() && "Optimistic algorithm didn't complete!"); if (!state.isConflict()) continue; if (isa<PHINode>(v)) { int num_preds = std::distance(pred_begin(v->getParent()), pred_end(v->getParent())); assert(num_preds > 0 && "how did we reach here"); PHINode *phi = PHINode::Create(v->getType(), num_preds, "base_phi", v); // Add metadata marking this as a base value auto *const_1 = ConstantInt::get( Type::getInt32Ty( v->getParent()->getParent()->getParent()->getContext()), 1); auto MDConst = ConstantAsMetadata::get(const_1); MDNode *md = MDNode::get( v->getParent()->getParent()->getParent()->getContext(), MDConst); phi->setMetadata("is_base_value", md); states[v] = PhiState(PhiState::Conflict, phi); } else { SelectInst *sel = cast<SelectInst>(v); // The undef will be replaced later UndefValue *undef = UndefValue::get(sel->getType()); SelectInst *basesel = SelectInst::Create(sel->getCondition(), undef, undef, "base_select", sel); // Add metadata marking this as a base value auto *const_1 = ConstantInt::get( Type::getInt32Ty( v->getParent()->getParent()->getParent()->getContext()), 1); auto MDConst = ConstantAsMetadata::get(const_1); MDNode *md = MDNode::get( v->getParent()->getParent()->getParent()->getContext(), MDConst); basesel->setMetadata("is_base_value", md); states[v] = PhiState(PhiState::Conflict, basesel); } } // Fixup all the inputs of the new PHIs for (auto Pair : states) { Instruction *v = cast<Instruction>(Pair.first); PhiState state = Pair.second; assert(!isKnownBaseResult(v) && "why did it get added?"); assert(!state.isUnknown() && "Optimistic algorithm didn't complete!"); if (!state.isConflict()) continue; if (PHINode *basephi = dyn_cast<PHINode>(state.getBase())) { PHINode *phi = cast<PHINode>(v); unsigned NumPHIValues = phi->getNumIncomingValues(); for (unsigned i = 0; i < NumPHIValues; i++) { Value *InVal = phi->getIncomingValue(i); BasicBlock *InBB = phi->getIncomingBlock(i); // If we've already seen InBB, add the same incoming value // we added for it earlier. The IR verifier requires phi // nodes with multiple entries from the same basic block // to have the same incoming value for each of those // entries. If we don't do this check here and basephi // has a different type than base, we'll end up adding two // bitcasts (and hence two distinct values) as incoming // values for the same basic block. int blockIndex = basephi->getBasicBlockIndex(InBB); if (blockIndex != -1) { Value *oldBase = basephi->getIncomingValue(blockIndex); basephi->addIncoming(oldBase, InBB); #ifndef NDEBUG Value *base = findBaseOrBDV(InVal, cache); if (!isKnownBaseResult(base)) { // Either conflict or base. assert(states.count(base)); base = states[base].getBase(); assert(base != nullptr && "unknown PhiState!"); } // In essense this assert states: the only way two // values incoming from the same basic block may be // different is by being different bitcasts of the same // value. A cleanup that remains TODO is changing // findBaseOrBDV to return an llvm::Value of the correct // type (and still remain pure). This will remove the // need to add bitcasts. 
assert(base->stripPointerCasts() == oldBase->stripPointerCasts() && "sanity -- findBaseOrBDV should be pure!"); #endif continue; } // Find either the defining value for the PHI or the normal base for // a non-phi node Value *base = findBaseOrBDV(InVal, cache); if (!isKnownBaseResult(base)) { // Either conflict or base. assert(states.count(base)); base = states[base].getBase(); assert(base != nullptr && "unknown PhiState!"); } assert(base && "can't be null"); // Must use original input BB since base may not be Instruction // The cast is needed since base traversal may strip away bitcasts if (base->getType() != basephi->getType()) { base = new BitCastInst(base, basephi->getType(), "cast", InBB->getTerminator()); } basephi->addIncoming(base, InBB); } assert(basephi->getNumIncomingValues() == NumPHIValues); } else { SelectInst *basesel = cast<SelectInst>(state.getBase()); SelectInst *sel = cast<SelectInst>(v); // Operand 1 & 2 are true, false path respectively. TODO: refactor to // something more safe and less hacky. for (int i = 1; i <= 2; i++) { Value *InVal = sel->getOperand(i); // Find either the defining value for the PHI or the normal base for // a non-phi node Value *base = findBaseOrBDV(InVal, cache); if (!isKnownBaseResult(base)) { // Either conflict or base. assert(states.count(base)); base = states[base].getBase(); assert(base != nullptr && "unknown PhiState!"); } assert(base && "can't be null"); // Must use original input BB since base may not be Instruction // The cast is needed since base traversal may strip away bitcasts if (base->getType() != basesel->getType()) { base = new BitCastInst(base, basesel->getType(), "cast", basesel); } basesel->setOperand(i, base); } } } // Cache all of our results so we can cheaply reuse them // NOTE: This is actually two caches: one of the base defining value // relation and one of the base pointer relation! FIXME for (auto item : states) { Value *v = item.first; Value *base = item.second.getBase(); assert(v && base); assert(!isKnownBaseResult(v) && "why did it get added?"); if (TraceLSP) { std::string fromstr = cache.count(v) ? (cache[v]->hasName() ? cache[v]->getName() : "") : "none"; errs() << "Updating base value cache" << " for: " << (v->hasName() ? v->getName() : "") << " from: " << fromstr << " to: " << (base->hasName() ? base->getName() : "") << "\n"; } assert(isKnownBaseResult(base) && "must be something we 'know' is a base pointer"); if (cache.count(v)) { // Once we transition from the BDV relation being store in the cache to // the base relation being stored, it must be stable assert((!isKnownBaseResult(cache[v]) || cache[v] == base) && "base relation should be stable"); } cache[v] = base; } assert(cache.find(def) != cache.end()); return cache[def]; } // For a set of live pointers (base and/or derived), identify the base // pointer of the object which they are derived from. This routine will // mutate the IR graph as needed to make the 'base' pointer live at the // definition site of 'derived'. This ensures that any use of 'derived' can // also use 'base'. This may involve the insertion of a number of // additional PHI nodes. // // preconditions: live is a set of pointer type Values // // side effects: may insert PHI nodes into the existing CFG, will preserve // CFG, will not remove or mutate any existing nodes // // post condition: PointerToBase contains one (derived, base) pair for every // pointer in live. Note that derived can be equal to base if the original // pointer was a base pointer. 
static void findBasePointers(const StatepointLiveSetTy &live, DenseMap<llvm::Value *, llvm::Value *> &PointerToBase, DominatorTree *DT, DefiningValueMapTy &DVCache) { // For the naming of values inserted to be deterministic - which makes for // much cleaner and more stable tests - we need to assign an order to the // live values. DenseSets do not provide a deterministic order across runs. SmallVector<Value *, 64> Temp; Temp.insert(Temp.end(), live.begin(), live.end()); std::sort(Temp.begin(), Temp.end(), order_by_name); for (Value *ptr : Temp) { Value *base = findBasePointer(ptr, DVCache); assert(base && "failed to find base pointer"); PointerToBase[ptr] = base; assert((!isa<Instruction>(base) || !isa<Instruction>(ptr) || DT->dominates(cast<Instruction>(base)->getParent(), cast<Instruction>(ptr)->getParent())) && "The base we found better dominate the derived pointer"); // If you see this trip and like to live really dangerously, the code should // be correct, just with idioms the verifier can't handle. You can try // disabling the verifier at your own substaintial risk. assert(!isa<ConstantPointerNull>(base) && "the relocation code needs adjustment to handle the relocation of " "a null pointer constant without causing false positives in the " "safepoint ir verifier."); } } /// Find the required based pointers (and adjust the live set) for the given /// parse point. static void findBasePointers(DominatorTree &DT, DefiningValueMapTy &DVCache, const CallSite &CS, PartiallyConstructedSafepointRecord &result) { DenseMap<llvm::Value *, llvm::Value *> PointerToBase; findBasePointers(result.liveset, PointerToBase, &DT, DVCache); if (PrintBasePointers) { // Note: Need to print these in a stable order since this is checked in // some tests. errs() << "Base Pairs (w/o Relocation):\n"; SmallVector<Value *, 64> Temp; Temp.reserve(PointerToBase.size()); for (auto Pair : PointerToBase) { Temp.push_back(Pair.first); } std::sort(Temp.begin(), Temp.end(), order_by_name); for (Value *Ptr : Temp) { Value *Base = PointerToBase[Ptr]; errs() << " derived %" << Ptr->getName() << " base %" << Base->getName() << "\n"; } } result.PointerToBase = PointerToBase; } /// Given an updated version of the dataflow liveness results, update the /// liveset and base pointer maps for the call site CS. static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData, const CallSite &CS, PartiallyConstructedSafepointRecord &result); static void recomputeLiveInValues( Function &F, DominatorTree &DT, Pass *P, ArrayRef<CallSite> toUpdate, MutableArrayRef<struct PartiallyConstructedSafepointRecord> records) { // TODO-PERF: reuse the original liveness, then simply run the dataflow // again. The old values are still live and will help it stablize quickly. GCPtrLivenessData RevisedLivenessData; computeLiveInValues(DT, F, RevisedLivenessData); for (size_t i = 0; i < records.size(); i++) { struct PartiallyConstructedSafepointRecord &info = records[i]; const CallSite &CS = toUpdate[i]; recomputeLiveInValues(RevisedLivenessData, CS, info); } } // When inserting gc.relocate calls, we need to ensure there are no uses // of the original value between the gc.statepoint and the gc.relocate call. // One case which can arise is a phi node starting one of the successor blocks. // We also need to be able to insert the gc.relocates only on the path which // goes through the statepoint. We might need to split an edge to make this // possible. 
static BasicBlock * normalizeForInvokeSafepoint(BasicBlock *BB, BasicBlock *InvokeParent, DominatorTree &DT) { BasicBlock *Ret = BB; if (!BB->getUniquePredecessor()) { Ret = SplitBlockPredecessors(BB, InvokeParent, "", nullptr, &DT); } // Now that 'ret' has unique predecessor we can safely remove all phi nodes // from it FoldSingleEntryPHINodes(Ret); assert(!isa<PHINode>(Ret->begin())); // At this point, we can safely insert a gc.relocate as the first instruction // in Ret if needed. return Ret; } static int find_index(ArrayRef<Value *> livevec, Value *val) { auto itr = std::find(livevec.begin(), livevec.end(), val); assert(livevec.end() != itr); size_t index = std::distance(livevec.begin(), itr); assert(index < livevec.size()); return index; } // Create new attribute set containing only attributes which can be transfered // from original call to the safepoint. static AttributeSet legalizeCallAttributes(AttributeSet AS) { AttributeSet ret; for (unsigned Slot = 0; Slot < AS.getNumSlots(); Slot++) { unsigned index = AS.getSlotIndex(Slot); if (index == AttributeSet::ReturnIndex || index == AttributeSet::FunctionIndex) { for (auto it = AS.begin(Slot), it_end = AS.end(Slot); it != it_end; ++it) { Attribute attr = *it; // Do not allow certain attributes - just skip them // Safepoint can not be read only or read none. if (attr.hasAttribute(Attribute::ReadNone) || attr.hasAttribute(Attribute::ReadOnly)) continue; ret = ret.addAttributes( AS.getContext(), index, AttributeSet::get(AS.getContext(), index, AttrBuilder(attr))); } } // Just skip parameter attributes for now } return ret; } /// Helper function to place all gc relocates necessary for the given /// statepoint. /// Inputs: /// liveVariables - list of variables to be relocated. /// liveStart - index of the first live variable. /// basePtrs - base pointers. /// statepointToken - statepoint instruction to which relocates should be /// bound. /// Builder - Llvm IR builder to be used to construct new calls. static void CreateGCRelocates(ArrayRef<llvm::Value *> LiveVariables, const int LiveStart, ArrayRef<llvm::Value *> BasePtrs, Instruction *StatepointToken, IRBuilder<> Builder) { SmallVector<Instruction *, 64> NewDefs; NewDefs.reserve(LiveVariables.size()); Module *M = StatepointToken->getParent()->getParent()->getParent(); for (unsigned i = 0; i < LiveVariables.size(); i++) { // We generate a (potentially) unique declaration for every pointer type // combination. This results is some blow up the function declarations in // the IR, but removes the need for argument bitcasts which shrinks the IR // greatly and makes it much more readable. SmallVector<Type *, 1> Types; // one per 'any' type // All gc_relocate are set to i8 addrspace(1)* type. This could help avoid // cases where the actual value's type mangling is not supported by llvm. A // bitcast is added later to convert gc_relocate to the actual value's type. Types.push_back(Type::getInt8PtrTy(M->getContext(), 1)); Value *GCRelocateDecl = Intrinsic::getDeclaration( M, Intrinsic::experimental_gc_relocate, Types); // Generate the gc.relocate call and save the result Value *BaseIdx = ConstantInt::get(Type::getInt32Ty(M->getContext()), LiveStart + find_index(LiveVariables, BasePtrs[i])); Value *LiveIdx = ConstantInt::get( Type::getInt32Ty(M->getContext()), LiveStart + find_index(LiveVariables, LiveVariables[i])); // only specify a debug name if we can give a useful one Value *Reloc = Builder.CreateCall( GCRelocateDecl, {StatepointToken, BaseIdx, LiveIdx}, LiveVariables[i]->hasName() ? 
LiveVariables[i]->getName() + ".relocated" : ""); // Trick CodeGen into thinking there are lots of free registers at this // fake call. cast<CallInst>(Reloc)->setCallingConv(CallingConv::Cold); NewDefs.push_back(cast<Instruction>(Reloc)); } assert(NewDefs.size() == LiveVariables.size() && "missing or extra redefinition at safepoint"); } static void makeStatepointExplicitImpl(const CallSite &CS, /* to replace */ const SmallVectorImpl<llvm::Value *> &basePtrs, const SmallVectorImpl<llvm::Value *> &liveVariables, Pass *P, PartiallyConstructedSafepointRecord &result) { assert(basePtrs.size() == liveVariables.size()); assert(isStatepoint(CS) && "This method expects to be rewriting a statepoint"); BasicBlock *BB = CS.getInstruction()->getParent(); assert(BB); Function *F = BB->getParent(); assert(F && "must be set"); Module *M = F->getParent(); (void)M; assert(M && "must be set"); // We're not changing the function signature of the statepoint since the gc // arguments go into the var args section. Function *gc_statepoint_decl = CS.getCalledFunction(); // Then go ahead and use the builder do actually do the inserts. We insert // immediately before the previous instruction under the assumption that all // arguments will be available here. We can't insert afterwards since we may // be replacing a terminator. Instruction *insertBefore = CS.getInstruction(); IRBuilder<> Builder(insertBefore); // Copy all of the arguments from the original statepoint - this includes the // target, call args, and deopt args SmallVector<llvm::Value *, 64> args; args.insert(args.end(), CS.arg_begin(), CS.arg_end()); // TODO: Clear the 'needs rewrite' flag // add all the pointers to be relocated (gc arguments) // Capture the start of the live variable list for use in the gc_relocates const int live_start = args.size(); args.insert(args.end(), liveVariables.begin(), liveVariables.end()); // Create the statepoint given all the arguments Instruction *token = nullptr; AttributeSet return_attributes; if (CS.isCall()) { CallInst *toReplace = cast<CallInst>(CS.getInstruction()); CallInst *call = Builder.CreateCall(gc_statepoint_decl, args, "safepoint_token"); call->setTailCall(toReplace->isTailCall()); call->setCallingConv(toReplace->getCallingConv()); // Currently we will fail on parameter attributes and on certain // function attributes. AttributeSet new_attrs = legalizeCallAttributes(toReplace->getAttributes()); // In case if we can handle this set of sttributes - set up function attrs // directly on statepoint and return attrs later for gc_result intrinsic. call->setAttributes(new_attrs.getFnAttributes()); return_attributes = new_attrs.getRetAttributes(); token = call; // Put the following gc_result and gc_relocate calls immediately after the // the old call (which we're about to delete) BasicBlock::iterator next(toReplace); assert(BB->end() != next && "not a terminator, must have next"); next++; Instruction *IP = &*(next); Builder.SetInsertPoint(IP); Builder.SetCurrentDebugLocation(IP->getDebugLoc()); } else { InvokeInst *toReplace = cast<InvokeInst>(CS.getInstruction()); // Insert the new invoke into the old block. We'll remove the old one in a // moment at which point this will become the new terminator for the // original block. InvokeInst *invoke = InvokeInst::Create( gc_statepoint_decl, toReplace->getNormalDest(), toReplace->getUnwindDest(), args, "", toReplace->getParent()); invoke->setCallingConv(toReplace->getCallingConv()); // Currently we will fail on parameter attributes and on certain // function attributes. 
AttributeSet new_attrs = legalizeCallAttributes(toReplace->getAttributes()); // In case if we can handle this set of sttributes - set up function attrs // directly on statepoint and return attrs later for gc_result intrinsic. invoke->setAttributes(new_attrs.getFnAttributes()); return_attributes = new_attrs.getRetAttributes(); token = invoke; // Generate gc relocates in exceptional path BasicBlock *unwindBlock = toReplace->getUnwindDest(); assert(!isa<PHINode>(unwindBlock->begin()) && unwindBlock->getUniquePredecessor() && "can't safely insert in this block!"); Instruction *IP = &*(unwindBlock->getFirstInsertionPt()); Builder.SetInsertPoint(IP); Builder.SetCurrentDebugLocation(toReplace->getDebugLoc()); // Extract second element from landingpad return value. We will attach // exceptional gc relocates to it. const unsigned idx = 1; Instruction *exceptional_token = cast<Instruction>(Builder.CreateExtractValue( unwindBlock->getLandingPadInst(), idx, "relocate_token")); result.UnwindToken = exceptional_token; // Just throw away return value. We will use the one we got for normal // block. (void)CreateGCRelocates(liveVariables, live_start, basePtrs, exceptional_token, Builder); // Generate gc relocates and returns for normal block BasicBlock *normalDest = toReplace->getNormalDest(); assert(!isa<PHINode>(normalDest->begin()) && normalDest->getUniquePredecessor() && "can't safely insert in this block!"); IP = &*(normalDest->getFirstInsertionPt()); Builder.SetInsertPoint(IP); // gc relocates will be generated later as if it were regular call // statepoint } assert(token); // Take the name of the original value call if it had one. token->takeName(CS.getInstruction()); // The GCResult is already inserted, we just need to find it #ifndef NDEBUG Instruction *toReplace = CS.getInstruction(); assert((toReplace->hasNUses(0) || toReplace->hasNUses(1)) && "only valid use before rewrite is gc.result"); assert(!toReplace->hasOneUse() || isGCResult(cast<Instruction>(*toReplace->user_begin()))); #endif // Update the gc.result of the original statepoint (if any) to use the newly // inserted statepoint. This is safe to do here since the token can't be // considered a live reference. CS.getInstruction()->replaceAllUsesWith(token); result.StatepointToken = token; // Second, create a gc.relocate for every live variable CreateGCRelocates(liveVariables, live_start, basePtrs, token, Builder); } namespace { struct name_ordering { Value *base; Value *derived; bool operator()(name_ordering const &a, name_ordering const &b) { return -1 == a.derived->getName().compare(b.derived->getName()); } }; } static void stablize_order(SmallVectorImpl<Value *> &basevec, SmallVectorImpl<Value *> &livevec) { assert(basevec.size() == livevec.size()); SmallVector<name_ordering, 64> temp; for (size_t i = 0; i < basevec.size(); i++) { name_ordering v; v.base = basevec[i]; v.derived = livevec[i]; temp.push_back(v); } std::sort(temp.begin(), temp.end(), name_ordering()); for (size_t i = 0; i < basevec.size(); i++) { basevec[i] = temp[i].base; livevec[i] = temp[i].derived; } } // Replace an existing gc.statepoint with a new one and a set of gc.relocates // which make the relocations happening at this safepoint explicit. // // WARNING: Does not do any fixup to adjust users of the original live // values. That's the callers responsibility. 
static void makeStatepointExplicit(DominatorTree &DT, const CallSite &CS, Pass *P, PartiallyConstructedSafepointRecord &result) { auto liveset = result.liveset; auto PointerToBase = result.PointerToBase; // Convert to vector for efficient cross referencing. SmallVector<Value *, 64> basevec, livevec; livevec.reserve(liveset.size()); basevec.reserve(liveset.size()); for (Value *L : liveset) { livevec.push_back(L); assert(PointerToBase.find(L) != PointerToBase.end()); Value *base = PointerToBase[L]; basevec.push_back(base); } assert(livevec.size() == basevec.size()); // To make the output IR slightly more stable (for use in diffs), ensure a // fixed order of the values in the safepoint (by sorting the value name). // The order is otherwise meaningless. stablize_order(basevec, livevec); // Do the actual rewriting and delete the old statepoint makeStatepointExplicitImpl(CS, basevec, livevec, P, result); CS.getInstruction()->eraseFromParent(); } // Helper function for the relocationViaAlloca. // It receives iterator to the statepoint gc relocates and emits store to the // assigned // location (via allocaMap) for the each one of them. // Add visited values into the visitedLiveValues set we will later use them // for sanity check. static void insertRelocationStores(iterator_range<Value::user_iterator> GCRelocs, DenseMap<Value *, Value *> &AllocaMap, DenseSet<Value *> &VisitedLiveValues) { for (User *U : GCRelocs) { if (!isa<IntrinsicInst>(U)) continue; IntrinsicInst *RelocatedValue = cast<IntrinsicInst>(U); // We only care about relocates if (RelocatedValue->getIntrinsicID() != Intrinsic::experimental_gc_relocate) { continue; } GCRelocateOperands RelocateOperands(RelocatedValue); Value *OriginalValue = const_cast<Value *>(RelocateOperands.getDerivedPtr()); assert(AllocaMap.count(OriginalValue)); Value *Alloca = AllocaMap[OriginalValue]; // Emit store into the related alloca // All gc_relocate are i8 addrspace(1)* typed, and it must be bitcasted to // the correct type according to alloca. assert(RelocatedValue->getNextNode() && "Should always have one since it's not a terminator"); IRBuilder<> Builder(RelocatedValue->getNextNode()); Value *CastedRelocatedValue = Builder.CreateBitCast(RelocatedValue, cast<AllocaInst>(Alloca)->getAllocatedType(), RelocatedValue->hasName() ? RelocatedValue->getName() + ".casted" : ""); StoreInst *Store = new StoreInst(CastedRelocatedValue, Alloca); Store->insertAfter(cast<Instruction>(CastedRelocatedValue)); #ifndef NDEBUG VisitedLiveValues.insert(OriginalValue); #endif } } // Helper function for the "relocationViaAlloca". Similar to the // "insertRelocationStores" but works for rematerialized values. 
static void insertRematerializationStores( RematerializedValueMapTy RematerializedValues, DenseMap<Value *, Value *> &AllocaMap, DenseSet<Value *> &VisitedLiveValues) { for (auto RematerializedValuePair: RematerializedValues) { Instruction *RematerializedValue = RematerializedValuePair.first; Value *OriginalValue = RematerializedValuePair.second; assert(AllocaMap.count(OriginalValue) && "Can not find alloca for rematerialized value"); Value *Alloca = AllocaMap[OriginalValue]; StoreInst *Store = new StoreInst(RematerializedValue, Alloca); Store->insertAfter(RematerializedValue); #ifndef NDEBUG VisitedLiveValues.insert(OriginalValue); #endif } } /// do all the relocation update via allocas and mem2reg static void relocationViaAlloca( Function &F, DominatorTree &DT, ArrayRef<Value *> Live, ArrayRef<struct PartiallyConstructedSafepointRecord> Records) { #ifndef NDEBUG // record initial number of (static) allocas; we'll check we have the same // number when we get done. int InitialAllocaNum = 0; for (auto I = F.getEntryBlock().begin(), E = F.getEntryBlock().end(); I != E; I++) if (isa<AllocaInst>(*I)) InitialAllocaNum++; #endif // TODO-PERF: change data structures, reserve DenseMap<Value *, Value *> AllocaMap; SmallVector<AllocaInst *, 200> PromotableAllocas; // Used later to chack that we have enough allocas to store all values std::size_t NumRematerializedValues = 0; PromotableAllocas.reserve(Live.size()); // Emit alloca for "LiveValue" and record it in "allocaMap" and // "PromotableAllocas" auto emitAllocaFor = [&](Value *LiveValue) { AllocaInst *Alloca = new AllocaInst(LiveValue->getType(), "", F.getEntryBlock().getFirstNonPHI()); AllocaMap[LiveValue] = Alloca; PromotableAllocas.push_back(Alloca); }; // emit alloca for each live gc pointer for (unsigned i = 0; i < Live.size(); i++) { emitAllocaFor(Live[i]); } // emit allocas for rematerialized values for (size_t i = 0; i < Records.size(); i++) { const struct PartiallyConstructedSafepointRecord &Info = Records[i]; for (auto RematerializedValuePair : Info.RematerializedValues) { Value *OriginalValue = RematerializedValuePair.second; if (AllocaMap.count(OriginalValue) != 0) continue; emitAllocaFor(OriginalValue); ++NumRematerializedValues; } } // The next two loops are part of the same conceptual operation. We need to // insert a store to the alloca after the original def and at each // redefinition. We need to insert a load before each use. These are split // into distinct loops for performance reasons. // update gc pointer after each statepoint // either store a relocated value or null (if no relocated value found for // this gc pointer and it is not a gc_result) // this must happen before we update the statepoint with load of alloca // otherwise we lose the link between statepoint and old def for (size_t i = 0; i < Records.size(); i++) { const struct PartiallyConstructedSafepointRecord &Info = Records[i]; Value *Statepoint = Info.StatepointToken; // This will be used for consistency check DenseSet<Value *> VisitedLiveValues; // Insert stores for normal statepoint gc relocates insertRelocationStores(Statepoint->users(), AllocaMap, VisitedLiveValues); // In case if it was invoke statepoint // we will insert stores for exceptional path gc relocates. 
if (isa<InvokeInst>(Statepoint)) { insertRelocationStores(Info.UnwindToken->users(), AllocaMap, VisitedLiveValues); } // Do similar thing with rematerialized values insertRematerializationStores(Info.RematerializedValues, AllocaMap, VisitedLiveValues); if (ClobberNonLive) { // As a debuging aid, pretend that an unrelocated pointer becomes null at // the gc.statepoint. This will turn some subtle GC problems into // slightly easier to debug SEGVs. Note that on large IR files with // lots of gc.statepoints this is extremely costly both memory and time // wise. SmallVector<AllocaInst *, 64> ToClobber; for (auto Pair : AllocaMap) { Value *Def = Pair.first; AllocaInst *Alloca = cast<AllocaInst>(Pair.second); // This value was relocated if (VisitedLiveValues.count(Def)) { continue; } ToClobber.push_back(Alloca); } auto InsertClobbersAt = [&](Instruction *IP) { for (auto *AI : ToClobber) { auto AIType = cast<PointerType>(AI->getType()); auto PT = cast<PointerType>(AIType->getElementType()); Constant *CPN = ConstantPointerNull::get(PT); StoreInst *Store = new StoreInst(CPN, AI); Store->insertBefore(IP); } }; // Insert the clobbering stores. These may get intermixed with the // gc.results and gc.relocates, but that's fine. if (auto II = dyn_cast<InvokeInst>(Statepoint)) { InsertClobbersAt(II->getNormalDest()->getFirstInsertionPt()); InsertClobbersAt(II->getUnwindDest()->getFirstInsertionPt()); } else { BasicBlock::iterator Next(cast<CallInst>(Statepoint)); Next++; InsertClobbersAt(Next); } } } // update use with load allocas and add store for gc_relocated for (auto Pair : AllocaMap) { Value *Def = Pair.first; Value *Alloca = Pair.second; // we pre-record the uses of allocas so that we dont have to worry about // later update // that change the user information. SmallVector<Instruction *, 20> Uses; // PERF: trade a linear scan for repeated reallocation Uses.reserve(std::distance(Def->user_begin(), Def->user_end())); for (User *U : Def->users()) { if (!isa<ConstantExpr>(U)) { // If the def has a ConstantExpr use, then the def is either a // ConstantExpr use itself or null. In either case // (recursively in the first, directly in the second), the oop // it is ultimately dependent on is null and this particular // use does not need to be fixed up. Uses.push_back(cast<Instruction>(U)); } } std::sort(Uses.begin(), Uses.end()); auto Last = std::unique(Uses.begin(), Uses.end()); Uses.erase(Last, Uses.end()); for (Instruction *Use : Uses) { if (isa<PHINode>(Use)) { PHINode *Phi = cast<PHINode>(Use); for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) { if (Def == Phi->getIncomingValue(i)) { LoadInst *Load = new LoadInst( Alloca, "", Phi->getIncomingBlock(i)->getTerminator()); Phi->setIncomingValue(i, Load); } } } else { LoadInst *Load = new LoadInst(Alloca, "", Use); Use->replaceUsesOfWith(Def, Load); } } // emit store for the initial gc value // store must be inserted after load, otherwise store will be in alloca's // use list and an extra load will be inserted before it StoreInst *Store = new StoreInst(Def, Alloca); if (Instruction *Inst = dyn_cast<Instruction>(Def)) { if (InvokeInst *Invoke = dyn_cast<InvokeInst>(Inst)) { // InvokeInst is a TerminatorInst so the store need to be inserted // into its normal destination block. 
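        // (Clarifying note: because an invoke is a terminator, the initial
        // store of an invoke-defined value cannot be placed after it in its
        // own block; it goes at the first non-PHI position of the normal
        // destination instead, as done just below.)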
BasicBlock *NormalDest = Invoke->getNormalDest(); Store->insertBefore(NormalDest->getFirstNonPHI()); } else { assert(!Inst->isTerminator() && "The only TerminatorInst that can produce a value is " "InvokeInst which is handled above."); Store->insertAfter(Inst); } } else { assert(isa<Argument>(Def)); Store->insertAfter(cast<Instruction>(Alloca)); } } assert(PromotableAllocas.size() == Live.size() + NumRematerializedValues && "we must have the same allocas with lives"); if (!PromotableAllocas.empty()) { // apply mem2reg to promote alloca to SSA PromoteMemToReg(PromotableAllocas, DT); } #ifndef NDEBUG for (auto I = F.getEntryBlock().begin(), E = F.getEntryBlock().end(); I != E; I++) if (isa<AllocaInst>(*I)) InitialAllocaNum--; assert(InitialAllocaNum == 0 && "We must not introduce any extra allocas"); #endif } /// Implement a unique function which doesn't require we sort the input /// vector. Doing so has the effect of changing the output of a couple of /// tests in ways which make them less useful in testing fused safepoints. template <typename T> static void unique_unsorted(SmallVectorImpl<T> &Vec) { SmallSet<T, 8> Seen; Vec.erase(std::remove_if(Vec.begin(), Vec.end(), [&](const T &V) { return !Seen.insert(V).second; }), Vec.end()); } /// Insert holders so that each Value is obviously live through the entire /// lifetime of the call. static void insertUseHolderAfter(CallSite &CS, const ArrayRef<Value *> Values, SmallVectorImpl<CallInst *> &Holders) { if (Values.empty()) // No values to hold live, might as well not insert the empty holder return; Module *M = CS.getInstruction()->getParent()->getParent()->getParent(); // Use a dummy vararg function to actually hold the values live Function *Func = cast<Function>(M->getOrInsertFunction( "__tmp_use", FunctionType::get(Type::getVoidTy(M->getContext()), true))); if (CS.isCall()) { // For call safepoints insert dummy calls right after safepoint BasicBlock::iterator Next(CS.getInstruction()); Next++; Holders.push_back(CallInst::Create(Func, Values, "", Next)); return; } // For invoke safepooints insert dummy calls both in normal and // exceptional destination blocks auto *II = cast<InvokeInst>(CS.getInstruction()); Holders.push_back(CallInst::Create( Func, Values, "", II->getNormalDest()->getFirstInsertionPt())); Holders.push_back(CallInst::Create( Func, Values, "", II->getUnwindDest()->getFirstInsertionPt())); } static void findLiveReferences( Function &F, DominatorTree &DT, Pass *P, ArrayRef<CallSite> toUpdate, MutableArrayRef<struct PartiallyConstructedSafepointRecord> records) { GCPtrLivenessData OriginalLivenessData; computeLiveInValues(DT, F, OriginalLivenessData); for (size_t i = 0; i < records.size(); i++) { struct PartiallyConstructedSafepointRecord &info = records[i]; const CallSite &CS = toUpdate[i]; analyzeParsePointLiveness(DT, OriginalLivenessData, CS, info); } } /// Remove any vector of pointers from the liveset by scalarizing them over the /// statepoint instruction. Adds the scalarized pieces to the liveset. It /// would be preferrable to include the vector in the statepoint itself, but /// the lowering code currently does not handle that. Extending it would be /// slightly non-trivial since it requires a format change. Given how rare /// such cases are (for the moment?) scalarizing is an acceptable comprimise. 
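// (Editorial sketch, assuming a hypothetical live value %v of type
// <2 x i8 addrspace(1)*>: %v is spilled to a new entry-block alloca, its lanes
// are pulled out with extractelement just before the statepoint so the scalars
// can be relocated individually, and an equivalent vector is re-formed with
// insertelement at the statepoint's continuation(s) before mem2reg cleans up.)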
static void splitVectorValues(Instruction *StatepointInst, StatepointLiveSetTy &LiveSet, DenseMap<Value *, Value *>& PointerToBase, DominatorTree &DT) { SmallVector<Value *, 16> ToSplit; for (Value *V : LiveSet) if (isa<VectorType>(V->getType())) ToSplit.push_back(V); if (ToSplit.empty()) return; DenseMap<Value *, SmallVector<Value *, 16>> ElementMapping; Function &F = *(StatepointInst->getParent()->getParent()); DenseMap<Value *, AllocaInst *> AllocaMap; // First is normal return, second is exceptional return (invoke only) DenseMap<Value *, std::pair<Value *, Value *>> Replacements; for (Value *V : ToSplit) { AllocaInst *Alloca = new AllocaInst(V->getType(), "", F.getEntryBlock().getFirstNonPHI()); AllocaMap[V] = Alloca; VectorType *VT = cast<VectorType>(V->getType()); IRBuilder<> Builder(StatepointInst); SmallVector<Value *, 16> Elements; for (unsigned i = 0; i < VT->getNumElements(); i++) Elements.push_back(Builder.CreateExtractElement(V, Builder.getInt32(i))); ElementMapping[V] = Elements; auto InsertVectorReform = [&](Instruction *IP) { Builder.SetInsertPoint(IP); Builder.SetCurrentDebugLocation(IP->getDebugLoc()); Value *ResultVec = UndefValue::get(VT); for (unsigned i = 0; i < VT->getNumElements(); i++) ResultVec = Builder.CreateInsertElement(ResultVec, Elements[i], Builder.getInt32(i)); return ResultVec; }; if (isa<CallInst>(StatepointInst)) { BasicBlock::iterator Next(StatepointInst); Next++; Instruction *IP = &*(Next); Replacements[V].first = InsertVectorReform(IP); Replacements[V].second = nullptr; } else { InvokeInst *Invoke = cast<InvokeInst>(StatepointInst); // We've already normalized - check that we don't have shared destination // blocks BasicBlock *NormalDest = Invoke->getNormalDest(); assert(!isa<PHINode>(NormalDest->begin())); BasicBlock *UnwindDest = Invoke->getUnwindDest(); assert(!isa<PHINode>(UnwindDest->begin())); // Insert insert element sequences in both successors Instruction *IP = &*(NormalDest->getFirstInsertionPt()); Replacements[V].first = InsertVectorReform(IP); IP = &*(UnwindDest->getFirstInsertionPt()); Replacements[V].second = InsertVectorReform(IP); } } for (Value *V : ToSplit) { AllocaInst *Alloca = AllocaMap[V]; // Capture all users before we start mutating use lists SmallVector<Instruction *, 16> Users; for (User *U : V->users()) Users.push_back(cast<Instruction>(U)); for (Instruction *I : Users) { if (auto Phi = dyn_cast<PHINode>(I)) { for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) if (V == Phi->getIncomingValue(i)) { LoadInst *Load = new LoadInst( Alloca, "", Phi->getIncomingBlock(i)->getTerminator()); Phi->setIncomingValue(i, Load); } } else { LoadInst *Load = new LoadInst(Alloca, "", I); I->replaceUsesOfWith(V, Load); } } // Store the original value and the replacement value into the alloca StoreInst *Store = new StoreInst(V, Alloca); if (auto I = dyn_cast<Instruction>(V)) Store->insertAfter(I); else Store->insertAfter(Alloca); // Normal return for invoke, or call return Instruction *Replacement = cast<Instruction>(Replacements[V].first); (new StoreInst(Replacement, Alloca))->insertAfter(Replacement); // Unwind return for invoke only Replacement = cast_or_null<Instruction>(Replacements[V].second); if (Replacement) (new StoreInst(Replacement, Alloca))->insertAfter(Replacement); } // apply mem2reg to promote alloca to SSA SmallVector<AllocaInst *, 16> Allocas; for (Value *V : ToSplit) Allocas.push_back(AllocaMap[V]); PromoteMemToReg(Allocas, DT); // Update our tracking of live pointers and base mappings to account for the // changes we 
just made. for (Value *V : ToSplit) { auto &Elements = ElementMapping[V]; LiveSet.erase(V); LiveSet.insert(Elements.begin(), Elements.end()); // We need to update the base mapping as well. assert(PointerToBase.count(V)); Value *OldBase = PointerToBase[V]; auto &BaseElements = ElementMapping[OldBase]; PointerToBase.erase(V); assert(Elements.size() == BaseElements.size()); for (unsigned i = 0; i < Elements.size(); i++) { Value *Elem = Elements[i]; PointerToBase[Elem] = BaseElements[i]; } } } // Helper function for the "rematerializeLiveValues". It walks use chain // starting from the "CurrentValue" until it meets "BaseValue". Only "simple" // values are visited (currently it is GEP's and casts). Returns true if it // sucessfully reached "BaseValue" and false otherwise. // Fills "ChainToBase" array with all visited values. "BaseValue" is not // recorded. static bool findRematerializableChainToBasePointer( SmallVectorImpl<Instruction*> &ChainToBase, Value *CurrentValue, Value *BaseValue) { // We have found a base value if (CurrentValue == BaseValue) { return true; } if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurrentValue)) { ChainToBase.push_back(GEP); return findRematerializableChainToBasePointer(ChainToBase, GEP->getPointerOperand(), BaseValue); } if (CastInst *CI = dyn_cast<CastInst>(CurrentValue)) { Value *Def = CI->stripPointerCasts(); // This two checks are basically similar. First one is here for the // consistency with findBasePointers logic. assert(!isa<CastInst>(Def) && "not a pointer cast found"); if (!CI->isNoopCast(CI->getModule()->getDataLayout())) return false; ChainToBase.push_back(CI); return findRematerializableChainToBasePointer(ChainToBase, Def, BaseValue); } // Not supported instruction in the chain return false; } // Helper function for the "rematerializeLiveValues". Compute cost of the use // chain we are going to rematerialize. static unsigned chainToBasePointerCost(SmallVectorImpl<Instruction*> &Chain, TargetTransformInfo &TTI) { unsigned Cost = 0; for (Instruction *Instr : Chain) { if (CastInst *CI = dyn_cast<CastInst>(Instr)) { assert(CI->isNoopCast(CI->getModule()->getDataLayout()) && "non noop cast is found during rematerialization"); Type *SrcTy = CI->getOperand(0)->getType(); Cost += TTI.getCastInstrCost(CI->getOpcode(), CI->getType(), SrcTy); } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Instr)) { // Cost of the address calculation Type *ValTy = GEP->getPointerOperandType()->getPointerElementType(); Cost += TTI.getAddressComputationCost(ValTy); // And cost of the GEP itself // TODO: Use TTI->getGEPCost here (it exists, but appears to be not // allowed for the external usage) if (!GEP->hasAllConstantIndices()) Cost += 2; } else { llvm_unreachable("unsupported instruciton type during rematerialization"); } } return Cost; } // From the statepoint liveset pick values that are cheaper to recompute then to // relocate. Remove this values from the liveset, rematerialize them after // statepoint and record them in "Info" structure. Note that similar to // relocated values we don't do any user adjustments here. static void rematerializeLiveValues(CallSite CS, PartiallyConstructedSafepointRecord &Info, TargetTransformInfo &TTI) { const unsigned int ChainLengthThreshold = 10; // Record values we are going to delete from this statepoint live set. // We can not di this in following loop due to iterator invalidation. 
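  // (Worked example with made-up numbers: a live pointer defined as
  // "%d = getelementptr %base, i64 16" costs roughly one address computation
  // to re-execute; if that TTI cost, doubled for invokes, stays below
  // RematerializationThreshold, %d is recomputed after the statepoint instead
  // of being relocated, shrinking the statepoint's live set by one entry.)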
SmallVector<Value *, 32> LiveValuesToBeDeleted; for (Value *LiveValue: Info.liveset) { // For each live pointer find it's defining chain SmallVector<Instruction *, 3> ChainToBase; assert(Info.PointerToBase.find(LiveValue) != Info.PointerToBase.end()); bool FoundChain = findRematerializableChainToBasePointer(ChainToBase, LiveValue, Info.PointerToBase[LiveValue]); // Nothing to do, or chain is too long if (!FoundChain || ChainToBase.size() == 0 || ChainToBase.size() > ChainLengthThreshold) continue; // Compute cost of this chain unsigned Cost = chainToBasePointerCost(ChainToBase, TTI); // TODO: We can also account for cases when we will be able to remove some // of the rematerialized values by later optimization passes. I.e if // we rematerialized several intersecting chains. Or if original values // don't have any uses besides this statepoint. // For invokes we need to rematerialize each chain twice - for normal and // for unwind basic blocks. Model this by multiplying cost by two. if (CS.isInvoke()) { Cost *= 2; } // If it's too expensive - skip it if (Cost >= RematerializationThreshold) continue; // Remove value from the live set LiveValuesToBeDeleted.push_back(LiveValue); // Clone instructions and record them inside "Info" structure // Walk backwards to visit top-most instructions first std::reverse(ChainToBase.begin(), ChainToBase.end()); // Utility function which clones all instructions from "ChainToBase" // and inserts them before "InsertBefore". Returns rematerialized value // which should be used after statepoint. auto rematerializeChain = [&ChainToBase](Instruction *InsertBefore) { Instruction *LastClonedValue = nullptr; Instruction *LastValue = nullptr; for (Instruction *Instr: ChainToBase) { // Only GEP's and casts are suported as we need to be careful to not // introduce any new uses of pointers not in the liveset. // Note that it's fine to introduce new uses of pointers which were // otherwise not used after this statepoint. assert(isa<GetElementPtrInst>(Instr) || isa<CastInst>(Instr)); Instruction *ClonedValue = Instr->clone(); ClonedValue->insertBefore(InsertBefore); ClonedValue->setName(Instr->getName() + ".remat"); // If it is not first instruction in the chain then it uses previously // cloned value. We should update it to use cloned value. if (LastClonedValue) { assert(LastValue); ClonedValue->replaceUsesOfWith(LastValue, LastClonedValue); #ifndef NDEBUG // Assert that cloned instruction does not use any instructions from // this chain other than LastClonedValue for (auto OpValue : ClonedValue->operand_values()) { assert(std::find(ChainToBase.begin(), ChainToBase.end(), OpValue) == ChainToBase.end() && "incorrect use in rematerialization chain"); } #endif } LastClonedValue = ClonedValue; LastValue = Instr; } assert(LastClonedValue); return LastClonedValue; }; // Different cases for calls and invokes. For invokes we need to clone // instructions both on normal and unwind path. 
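    // (Clarifying note: for an invoke, both clones map back to the same
    // original LiveValue in Info.RematerializedValues, one reachable from the
    // normal destination and one from the unwind destination.)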
if (CS.isCall()) { Instruction *InsertBefore = CS.getInstruction()->getNextNode(); assert(InsertBefore); Instruction *RematerializedValue = rematerializeChain(InsertBefore); Info.RematerializedValues[RematerializedValue] = LiveValue; } else { InvokeInst *Invoke = cast<InvokeInst>(CS.getInstruction()); Instruction *NormalInsertBefore = Invoke->getNormalDest()->getFirstInsertionPt(); Instruction *UnwindInsertBefore = Invoke->getUnwindDest()->getFirstInsertionPt(); Instruction *NormalRematerializedValue = rematerializeChain(NormalInsertBefore); Instruction *UnwindRematerializedValue = rematerializeChain(UnwindInsertBefore); Info.RematerializedValues[NormalRematerializedValue] = LiveValue; Info.RematerializedValues[UnwindRematerializedValue] = LiveValue; } } // Remove rematerializaed values from the live set for (auto LiveValue: LiveValuesToBeDeleted) { Info.liveset.erase(LiveValue); } } static bool insertParsePoints(Function &F, DominatorTree &DT, Pass *P, SmallVectorImpl<CallSite> &toUpdate) { #ifndef NDEBUG // sanity check the input std::set<CallSite> uniqued; uniqued.insert(toUpdate.begin(), toUpdate.end()); assert(uniqued.size() == toUpdate.size() && "no duplicates please!"); for (size_t i = 0; i < toUpdate.size(); i++) { CallSite &CS = toUpdate[i]; assert(CS.getInstruction()->getParent()->getParent() == &F); assert(isStatepoint(CS) && "expected to already be a deopt statepoint"); } #endif // When inserting gc.relocates for invokes, we need to be able to insert at // the top of the successor blocks. See the comment on // normalForInvokeSafepoint on exactly what is needed. Note that this step // may restructure the CFG. for (CallSite CS : toUpdate) { if (!CS.isInvoke()) continue; InvokeInst *invoke = cast<InvokeInst>(CS.getInstruction()); normalizeForInvokeSafepoint(invoke->getNormalDest(), invoke->getParent(), DT); normalizeForInvokeSafepoint(invoke->getUnwindDest(), invoke->getParent(), DT); } // A list of dummy calls added to the IR to keep various values obviously // live in the IR. We'll remove all of these when done. SmallVector<CallInst *, 64> holders; // Insert a dummy call with all of the arguments to the vm_state we'll need // for the actual safepoint insertion. This ensures reference arguments in // the deopt argument list are considered live through the safepoint (and // thus makes sure they get relocated.) for (size_t i = 0; i < toUpdate.size(); i++) { CallSite &CS = toUpdate[i]; Statepoint StatepointCS(CS); SmallVector<Value *, 64> DeoptValues; for (Use &U : StatepointCS.vm_state_args()) { Value *Arg = cast<Value>(&U); assert(!isUnhandledGCPointerType(Arg->getType()) && "support for FCA unimplemented"); if (isHandledGCPointerType(Arg->getType())) DeoptValues.push_back(Arg); } insertUseHolderAfter(CS, DeoptValues, holders); } SmallVector<struct PartiallyConstructedSafepointRecord, 64> records; records.reserve(toUpdate.size()); for (size_t i = 0; i < toUpdate.size(); i++) { struct PartiallyConstructedSafepointRecord info; records.push_back(info); } assert(records.size() == toUpdate.size()); // A) Identify all gc pointers which are staticly live at the given call // site. findLiveReferences(F, DT, P, toUpdate, records); // B) Find the base pointers for each live pointer /* scope for caching */ { // Cache the 'defining value' relation used in the computation and // insertion of base phis and selects. This ensures that we don't insert // large numbers of duplicate base_phis. 
DefiningValueMapTy DVCache; for (size_t i = 0; i < records.size(); i++) { struct PartiallyConstructedSafepointRecord &info = records[i]; CallSite &CS = toUpdate[i]; findBasePointers(DT, DVCache, CS, info); } } // end of cache scope // The base phi insertion logic (for any safepoint) may have inserted new // instructions which are now live at some safepoint. The simplest such // example is: // loop: // phi a <-- will be a new base_phi here // safepoint 1 <-- that needs to be live here // gep a + 1 // safepoint 2 // br loop // We insert some dummy calls after each safepoint to definitely hold live // the base pointers which were identified for that safepoint. We'll then // ask liveness for _every_ base inserted to see what is now live. Then we // remove the dummy calls. holders.reserve(holders.size() + records.size()); for (size_t i = 0; i < records.size(); i++) { struct PartiallyConstructedSafepointRecord &info = records[i]; CallSite &CS = toUpdate[i]; SmallVector<Value *, 128> Bases; for (auto Pair : info.PointerToBase) { Bases.push_back(Pair.second); } insertUseHolderAfter(CS, Bases, holders); } // By selecting base pointers, we've effectively inserted new uses. Thus, we // need to rerun liveness. We may *also* have inserted new defs, but that's // not the key issue. recomputeLiveInValues(F, DT, P, toUpdate, records); if (PrintBasePointers) { for (size_t i = 0; i < records.size(); i++) { struct PartiallyConstructedSafepointRecord &info = records[i]; errs() << "Base Pairs: (w/Relocation)\n"; for (auto Pair : info.PointerToBase) { errs() << " derived %" << Pair.first->getName() << " base %" << Pair.second->getName() << "\n"; } } } for (size_t i = 0; i < holders.size(); i++) { holders[i]->eraseFromParent(); holders[i] = nullptr; } holders.clear(); // Do a limited scalarization of any live at safepoint vector values which // contain pointers. This enables this pass to run after vectorization at // the cost of some possible performance loss. TODO: it would be nice to // natively support vectors all the way through the backend so we don't need // to scalarize here. for (size_t i = 0; i < records.size(); i++) { struct PartiallyConstructedSafepointRecord &info = records[i]; Instruction *statepoint = toUpdate[i].getInstruction(); splitVectorValues(cast<Instruction>(statepoint), info.liveset, info.PointerToBase, DT); } // In order to reduce live set of statepoint we might choose to rematerialize // some values instead of relocating them. This is purelly an optimization and // does not influence correctness. TargetTransformInfo &TTI = P->getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); for (size_t i = 0; i < records.size(); i++) { struct PartiallyConstructedSafepointRecord &info = records[i]; CallSite &CS = toUpdate[i]; rematerializeLiveValues(CS, info, TTI); } // Now run through and replace the existing statepoints with new ones with // the live variables listed. We do not yet update uses of the values being // relocated. We have references to live variables that need to // survive to the last iteration of this loop. (By construction, the // previous statepoint can not be a live variable, thus we can and remove // the old statepoint calls as we go.) 
  for (size_t i = 0; i < records.size(); i++) {
    struct PartiallyConstructedSafepointRecord &info = records[i];
    CallSite &CS = toUpdate[i];
    makeStatepointExplicit(DT, CS, P, info);
  }
  toUpdate.clear(); // prevent accidental use of invalid CallSites

  // Do all the fixups of the original live variables to their relocated selves
  SmallVector<Value *, 128> live;
  for (size_t i = 0; i < records.size(); i++) {
    struct PartiallyConstructedSafepointRecord &info = records[i];
    // We can't simply save the live set from the original insertion.  One of
    // the live values might be the result of a call which needs a safepoint.
    // That Value* no longer exists and we need to use the new gc_result.
    // Thankfully, the liveset is embedded in the statepoint (and updated), so
    // we just grab that.
    Statepoint statepoint(info.StatepointToken);
    live.insert(live.end(), statepoint.gc_args_begin(),
                statepoint.gc_args_end());
#ifndef NDEBUG
    // Do some basic sanity checks on our liveness results before performing
    // relocation.  Relocation can and will turn mistakes in liveness results
    // into non-sensical code which is much harder to debug.
    // TODO: It would be nice to test consistency as well
    assert(DT.isReachableFromEntry(info.StatepointToken->getParent()) &&
           "statepoint must be reachable or liveness is meaningless");
    for (Value *V : statepoint.gc_args()) {
      if (!isa<Instruction>(V))
        // Non-instruction values trivially dominate all possible uses
        continue;
      auto LiveInst = cast<Instruction>(V);
      assert(DT.isReachableFromEntry(LiveInst->getParent()) &&
             "unreachable values should never be live");
      assert(DT.dominates(LiveInst, info.StatepointToken) &&
             "basic SSA liveness expectation violated by liveness analysis");
    }
#endif
  }
  unique_unsorted(live);

#ifndef NDEBUG
  // sanity check
  for (auto ptr : live) {
    assert(isGCPointerType(ptr->getType()) && "must be a gc pointer type");
  }
#endif

  relocationViaAlloca(F, DT, live, records);
  return !records.empty();
}

// Handles both return values and arguments for Functions and CallSites.
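// (Editorial note: Index follows the AttributeSet convention where parameter N
// uses index N + 1 and the return value uses AttributeSet::ReturnIndex; the
// builder below collects whichever of dereferenceable /
// dereferenceable_or_null is present at that index and removes it.)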
template <typename AttrHolder> static void RemoveDerefAttrAtIndex(LLVMContext &Ctx, AttrHolder &AH, unsigned Index) { AttrBuilder R; if (AH.getDereferenceableBytes(Index)) R.addAttribute(Attribute::get(Ctx, Attribute::Dereferenceable, AH.getDereferenceableBytes(Index))); if (AH.getDereferenceableOrNullBytes(Index)) R.addAttribute(Attribute::get(Ctx, Attribute::DereferenceableOrNull, AH.getDereferenceableOrNullBytes(Index))); if (!R.empty()) AH.setAttributes(AH.getAttributes().removeAttributes( Ctx, Index, AttributeSet::get(Ctx, Index, R))); } void RewriteStatepointsForGC::stripDereferenceabilityInfoFromPrototype(Function &F) { LLVMContext &Ctx = F.getContext(); for (Argument &A : F.args()) if (isa<PointerType>(A.getType())) RemoveDerefAttrAtIndex(Ctx, F, A.getArgNo() + 1); if (isa<PointerType>(F.getReturnType())) RemoveDerefAttrAtIndex(Ctx, F, AttributeSet::ReturnIndex); } void RewriteStatepointsForGC::stripDereferenceabilityInfoFromBody(Function &F) { if (F.empty()) return; LLVMContext &Ctx = F.getContext(); MDBuilder Builder(Ctx); for (Instruction &I : inst_range(F)) { if (const MDNode *MD = I.getMetadata(LLVMContext::MD_tbaa)) { assert(MD->getNumOperands() < 5 && "unrecognized metadata shape!"); bool IsImmutableTBAA = MD->getNumOperands() == 4 && mdconst::extract<ConstantInt>(MD->getOperand(3))->getValue() == 1; if (!IsImmutableTBAA) continue; // no work to do, MD_tbaa is already marked mutable MDNode *Base = cast<MDNode>(MD->getOperand(0)); MDNode *Access = cast<MDNode>(MD->getOperand(1)); uint64_t Offset = mdconst::extract<ConstantInt>(MD->getOperand(2))->getZExtValue(); MDNode *MutableTBAA = Builder.createTBAAStructTagNode(Base, Access, Offset); I.setMetadata(LLVMContext::MD_tbaa, MutableTBAA); } if (CallSite CS = CallSite(&I)) { for (int i = 0, e = CS.arg_size(); i != e; i++) if (isa<PointerType>(CS.getArgument(i)->getType())) RemoveDerefAttrAtIndex(Ctx, CS, i + 1); if (isa<PointerType>(CS.getType())) RemoveDerefAttrAtIndex(Ctx, CS, AttributeSet::ReturnIndex); } } } /// Returns true if this function should be rewritten by this pass. The main /// point of this function is as an extension point for custom logic. static bool shouldRewriteStatepointsIn(Function &F) { // TODO: This should check the GCStrategy if (F.hasGC()) { const char *FunctionGCName = F.getGC(); const StringRef StatepointExampleName("statepoint-example"); const StringRef CoreCLRName("coreclr"); return (StatepointExampleName == FunctionGCName) || (CoreCLRName == FunctionGCName); } else return false; } void RewriteStatepointsForGC::stripDereferenceabilityInfo(Module &M) { #ifndef NDEBUG assert(std::any_of(M.begin(), M.end(), shouldRewriteStatepointsIn) && "precondition!"); #endif for (Function &F : M) stripDereferenceabilityInfoFromPrototype(F); for (Function &F : M) stripDereferenceabilityInfoFromBody(F); } bool RewriteStatepointsForGC::runOnFunction(Function &F) { // Nothing to do for declarations. if (F.isDeclaration() || F.empty()) return false; // Policy choice says not to rewrite - the most common reason is that we're // compiling code without a GCStrategy. if (!shouldRewriteStatepointsIn(F)) return false; DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>(F).getDomTree(); // Gather all the statepoints which need rewritten. Be careful to only // consider those in reachable code since we need to ask dominance queries // when rewriting. We'll delete the unreachable ones in a moment. 
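  // (Clarifying sketch of the flow below, phrased editorially: collect the
  // reachable gc.statepoint call sites, strip unreachable blocks if any
  // statepoint sat in one, fold single-entry PHIs to keep liveness sets small,
  // then hand the remaining call sites to insertParsePoints for rewriting.)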
SmallVector<CallSite, 64> ParsePointNeeded; bool HasUnreachableStatepoint = false; for (Instruction &I : inst_range(F)) { // TODO: only the ones with the flag set! if (isStatepoint(I)) { if (DT.isReachableFromEntry(I.getParent())) ParsePointNeeded.push_back(CallSite(&I)); else HasUnreachableStatepoint = true; } } bool MadeChange = false; // Delete any unreachable statepoints so that we don't have unrewritten // statepoints surviving this pass. This makes testing easier and the // resulting IR less confusing to human readers. Rather than be fancy, we // just reuse a utility function which removes the unreachable blocks. if (HasUnreachableStatepoint) MadeChange |= removeUnreachableBlocks(F); // Return early if no work to do. if (ParsePointNeeded.empty()) return MadeChange; // As a prepass, go ahead and aggressively destroy single entry phi nodes. // These are created by LCSSA. They have the effect of increasing the size // of liveness sets for no good reason. It may be harder to do this post // insertion since relocations and base phis can confuse things. for (BasicBlock &BB : F) if (BB.getUniquePredecessor()) { MadeChange = true; FoldSingleEntryPHINodes(&BB); } MadeChange |= insertParsePoints(F, DT, this, ParsePointNeeded); return MadeChange; } // liveness computation via standard dataflow // ------------------------------------------------------------------- // TODO: Consider using bitvectors for liveness, the set of potentially // interesting values should be small and easy to pre-compute. /// Compute the live-in set for the location rbegin starting from /// the live-out set of the basic block static void computeLiveInValues(BasicBlock::reverse_iterator rbegin, BasicBlock::reverse_iterator rend, DenseSet<Value *> &LiveTmp) { for (BasicBlock::reverse_iterator ritr = rbegin; ritr != rend; ritr++) { Instruction *I = &*ritr; // KILL/Def - Remove this definition from LiveIn LiveTmp.erase(I); // Don't consider *uses* in PHI nodes, we handle their contribution to // predecessor blocks when we seed the LiveOut sets if (isa<PHINode>(I)) continue; // USE - Add to the LiveIn set for this instruction for (Value *V : I->operands()) { assert(!isUnhandledGCPointerType(V->getType()) && "support for FCA unimplemented"); if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V)) { // The choice to exclude all things constant here is slightly subtle. // There are two idependent reasons: // - We assume that things which are constant (from LLVM's definition) // do not move at runtime. For example, the address of a global // variable is fixed, even though it's contents may not be. // - Second, we can't disallow arbitrary inttoptr constants even // if the language frontend does. Optimization passes are free to // locally exploit facts without respect to global reachability. This // can create sections of code which are dynamically unreachable and // contain just about anything. 
(see constants.ll in tests) LiveTmp.insert(V); } } } } static void computeLiveOutSeed(BasicBlock *BB, DenseSet<Value *> &LiveTmp) { for (BasicBlock *Succ : successors(BB)) { const BasicBlock::iterator E(Succ->getFirstNonPHI()); for (BasicBlock::iterator I = Succ->begin(); I != E; I++) { PHINode *Phi = cast<PHINode>(&*I); Value *V = Phi->getIncomingValueForBlock(BB); assert(!isUnhandledGCPointerType(V->getType()) && "support for FCA unimplemented"); if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V)) { LiveTmp.insert(V); } } } } static DenseSet<Value *> computeKillSet(BasicBlock *BB) { DenseSet<Value *> KillSet; for (Instruction &I : *BB) if (isHandledGCPointerType(I.getType())) KillSet.insert(&I); return KillSet; } #ifndef NDEBUG /// Check that the items in 'Live' dominate 'TI'. This is used as a basic /// sanity check for the liveness computation. static void checkBasicSSA(DominatorTree &DT, DenseSet<Value *> &Live, TerminatorInst *TI, bool TermOkay = false) { for (Value *V : Live) { if (auto *I = dyn_cast<Instruction>(V)) { // The terminator can be a member of the LiveOut set. LLVM's definition // of instruction dominance states that V does not dominate itself. As // such, we need to special case this to allow it. if (TermOkay && TI == I) continue; assert(DT.dominates(I, TI) && "basic SSA liveness expectation violated by liveness analysis"); } } } /// Check that all the liveness sets used during the computation of liveness /// obey basic SSA properties. This is useful for finding cases where we miss /// a def. static void checkBasicSSA(DominatorTree &DT, GCPtrLivenessData &Data, BasicBlock &BB) { checkBasicSSA(DT, Data.LiveSet[&BB], BB.getTerminator()); checkBasicSSA(DT, Data.LiveOut[&BB], BB.getTerminator(), true); checkBasicSSA(DT, Data.LiveIn[&BB], BB.getTerminator()); } #endif static void computeLiveInValues(DominatorTree &DT, Function &F, GCPtrLivenessData &Data) { SmallSetVector<BasicBlock *, 200> Worklist; auto AddPredsToWorklist = [&](BasicBlock *BB) { // We use a SetVector so that we don't have duplicates in the worklist. Worklist.insert(pred_begin(BB), pred_end(BB)); }; auto NextItem = [&]() { BasicBlock *BB = Worklist.back(); Worklist.pop_back(); return BB; }; // Seed the liveness for each individual block for (BasicBlock &BB : F) { Data.KillSet[&BB] = computeKillSet(&BB); Data.LiveSet[&BB].clear(); computeLiveInValues(BB.rbegin(), BB.rend(), Data.LiveSet[&BB]); #ifndef NDEBUG for (Value *Kill : Data.KillSet[&BB]) assert(!Data.LiveSet[&BB].count(Kill) && "live set contains kill"); #endif Data.LiveOut[&BB] = DenseSet<Value *>(); computeLiveOutSeed(&BB, Data.LiveOut[&BB]); Data.LiveIn[&BB] = Data.LiveSet[&BB]; set_union(Data.LiveIn[&BB], Data.LiveOut[&BB]); set_subtract(Data.LiveIn[&BB], Data.KillSet[&BB]); if (!Data.LiveIn[&BB].empty()) AddPredsToWorklist(&BB); } // Propagate that liveness until stable while (!Worklist.empty()) { BasicBlock *BB = NextItem(); // Compute our new liveout set, then exit early if it hasn't changed // despite the contribution of our successor. DenseSet<Value *> LiveOut = Data.LiveOut[BB]; const auto OldLiveOutSize = LiveOut.size(); for (BasicBlock *Succ : successors(BB)) { assert(Data.LiveIn.count(Succ)); set_union(LiveOut, Data.LiveIn[Succ]); } // assert OutLiveOut is a subset of LiveOut if (OldLiveOutSize == LiveOut.size()) { // If the sets are the same size, then we didn't actually add anything // when unioning our successors LiveIn Thus, the LiveIn of this block // hasn't changed. 
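      // (Editorial note: comparing sizes is sufficient because the transfer
      // functions only ever add values; if LiveOut was {a, b} and the union
      // with every successor's LiveIn still has two elements, it is still
      // {a, b}.)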
continue; } Data.LiveOut[BB] = LiveOut; // Apply the effects of this basic block DenseSet<Value *> LiveTmp = LiveOut; set_union(LiveTmp, Data.LiveSet[BB]); set_subtract(LiveTmp, Data.KillSet[BB]); assert(Data.LiveIn.count(BB)); const DenseSet<Value *> &OldLiveIn = Data.LiveIn[BB]; // assert: OldLiveIn is a subset of LiveTmp if (OldLiveIn.size() != LiveTmp.size()) { Data.LiveIn[BB] = LiveTmp; AddPredsToWorklist(BB); } } // while( !worklist.empty() ) #ifndef NDEBUG // Sanity check our ouput against SSA properties. This helps catch any // missing kills during the above iteration. for (BasicBlock &BB : F) { checkBasicSSA(DT, Data, BB); } #endif } static void findLiveSetAtInst(Instruction *Inst, GCPtrLivenessData &Data, StatepointLiveSetTy &Out) { BasicBlock *BB = Inst->getParent(); // Note: The copy is intentional and required assert(Data.LiveOut.count(BB)); DenseSet<Value *> LiveOut = Data.LiveOut[BB]; // We want to handle the statepoint itself oddly. It's // call result is not live (normal), nor are it's arguments // (unless they're used again later). This adjustment is // specifically what we need to relocate BasicBlock::reverse_iterator rend(Inst); computeLiveInValues(BB->rbegin(), rend, LiveOut); LiveOut.erase(Inst); Out.insert(LiveOut.begin(), LiveOut.end()); } static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData, const CallSite &CS, PartiallyConstructedSafepointRecord &Info) { Instruction *Inst = CS.getInstruction(); StatepointLiveSetTy Updated; findLiveSetAtInst(Inst, RevisedLivenessData, Updated); #ifndef NDEBUG DenseSet<Value *> Bases; for (auto KVPair : Info.PointerToBase) { Bases.insert(KVPair.second); } #endif // We may have base pointers which are now live that weren't before. We need // to update the PointerToBase structure to reflect this. for (auto V : Updated) if (!Info.PointerToBase.count(V)) { assert(Bases.count(V) && "can't find base for unexpected live value"); Info.PointerToBase[V] = V; continue; } #ifndef NDEBUG for (auto V : Updated) { assert(Info.PointerToBase.count(V) && "must be able to find base for live value"); } #endif // Remove any stale base mappings - this can happen since our liveness is // more precise then the one inherent in the base pointer analysis DenseSet<Value *> ToErase; for (auto KVPair : Info.PointerToBase) if (!Updated.count(KVPair.first)) ToErase.insert(KVPair.first); for (auto V : ToErase) Info.PointerToBase.erase(V); #ifndef NDEBUG for (auto KVPair : Info.PointerToBase) assert(Updated.count(KVPair.first) && "record for non-live value"); #endif Info.liveset = Updated; }
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/DeadStoreElimination.cpp
//===- DeadStoreElimination.cpp - Fast Dead Store Elimination -------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements a trivial dead store elimination that only considers // basic-block local redundant stores. // // FIXME: This should eventually be extended to be a post-dominator tree // traversal. Doing so would be pretty trivial. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/CaptureTracking.h" #include "llvm/Analysis/MemoryBuiltins.h" #include "llvm/Analysis/MemoryDependenceAnalysis.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/Pass.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/Local.h" using namespace llvm; #define DEBUG_TYPE "dse" STATISTIC(NumFastStores, "Number of stores deleted"); STATISTIC(NumFastOther , "Number of other instrs removed"); namespace { struct DSE : public FunctionPass { AliasAnalysis *AA; MemoryDependenceAnalysis *MD; DominatorTree *DT; const TargetLibraryInfo *TLI; unsigned ScanLimit; // HLSL Change - Add ScanLimit static char ID; // Pass identification, replacement for typeid DSE(unsigned ScanLimit = 0) : FunctionPass(ID), AA(nullptr), MD(nullptr), DT(nullptr), ScanLimit(ScanLimit) {// HLSL Change - Add ScanLimit initializeDSEPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function &F) override { if (skipOptnoneFunction(F)) return false; AA = &getAnalysis<AliasAnalysis>(); MD = &getAnalysis<MemoryDependenceAnalysis>(); DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); TLI = AA->getTargetLibraryInfo(); bool Changed = false; for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) // Only check non-dead blocks. Dead blocks may have strange pointer // cycles that will confuse alias analysis. 
if (DT->isReachableFromEntry(I)) Changed |= runOnBasicBlock(*I); AA = nullptr; MD = nullptr; DT = nullptr; return Changed; } bool runOnBasicBlock(BasicBlock &BB); bool HandleFree(CallInst *F); bool handleEndBlock(BasicBlock &BB); void RemoveAccessedObjects(const MemoryLocation &LoadedLoc, SmallSetVector<Value *, 16> &DeadStackObjects, const DataLayout &DL); void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesCFG(); AU.addRequired<DominatorTreeWrapperPass>(); AU.addRequired<AliasAnalysis>(); AU.addRequired<MemoryDependenceAnalysis>(); AU.addPreserved<AliasAnalysis>(); AU.addPreserved<DominatorTreeWrapperPass>(); AU.addPreserved<MemoryDependenceAnalysis>(); } }; } char DSE::ID = 0; INITIALIZE_PASS_BEGIN(DSE, "dse", "Dead Store Elimination", false, false) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_END(DSE, "dse", "Dead Store Elimination", false, false) FunctionPass *llvm::createDeadStoreEliminationPass(unsigned ScanLimit) { return new DSE(ScanLimit); } // HLSL Change - add ScanLimit //===----------------------------------------------------------------------===// // Helper functions //===----------------------------------------------------------------------===// /// DeleteDeadInstruction - Delete this instruction. Before we do, go through /// and zero out all the operands of this instruction. If any of them become /// dead, delete them and the computation tree that feeds them. /// /// If ValueSet is non-null, remove any deleted instructions from it as well. /// static void DeleteDeadInstruction(Instruction *I, MemoryDependenceAnalysis &MD, const TargetLibraryInfo *TLI, SmallSetVector<Value*, 16> *ValueSet = nullptr) { SmallVector<Instruction*, 32> NowDeadInsts; NowDeadInsts.push_back(I); --NumFastOther; // Before we touch this instruction, remove it from memdep! do { Instruction *DeadInst = NowDeadInsts.pop_back_val(); ++NumFastOther; // This instruction is dead, zap it, in stages. Start by removing it from // MemDep, which needs to know the operands and needs it to be in the // function. MD.removeInstruction(DeadInst); for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) { Value *Op = DeadInst->getOperand(op); DeadInst->setOperand(op, nullptr); // If this operand just became dead, add it to the NowDeadInsts list. if (!Op->use_empty()) continue; if (Instruction *OpI = dyn_cast<Instruction>(Op)) if (isInstructionTriviallyDead(OpI, TLI)) NowDeadInsts.push_back(OpI); } DeadInst->eraseFromParent(); if (ValueSet) ValueSet->remove(DeadInst); } while (!NowDeadInsts.empty()); } /// hasMemoryWrite - Does this instruction write some memory? This only returns /// true for things that we can analyze with other helpers below. 
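// (Editorial note: "writes some memory" is deliberately narrow here: plain
// stores, a few memory intrinsics, and the strcpy/strncpy/strcat/strncat
// libcalls, because the helpers below - getLocForWrite, isRemovable,
// getStoredPointerOperand - only know how to describe those cases.)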
static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo *TLI) { if (isa<StoreInst>(I)) return true; if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { switch (II->getIntrinsicID()) { default: return false; case Intrinsic::memset: case Intrinsic::memmove: case Intrinsic::memcpy: case Intrinsic::init_trampoline: case Intrinsic::lifetime_end: return true; } } if (auto CS = CallSite(I)) { if (Function *F = CS.getCalledFunction()) { if (TLI && TLI->has(LibFunc::strcpy) && F->getName() == TLI->getName(LibFunc::strcpy)) { return true; } if (TLI && TLI->has(LibFunc::strncpy) && F->getName() == TLI->getName(LibFunc::strncpy)) { return true; } if (TLI && TLI->has(LibFunc::strcat) && F->getName() == TLI->getName(LibFunc::strcat)) { return true; } if (TLI && TLI->has(LibFunc::strncat) && F->getName() == TLI->getName(LibFunc::strncat)) { return true; } } } return false; } /// getLocForWrite - Return a Location stored to by the specified instruction. /// If isRemovable returns true, this function and getLocForRead completely /// describe the memory operations for this instruction. static MemoryLocation getLocForWrite(Instruction *Inst, AliasAnalysis &AA) { if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) return MemoryLocation::get(SI); if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) { // memcpy/memmove/memset. MemoryLocation Loc = MemoryLocation::getForDest(MI); return Loc; } IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst); if (!II) return MemoryLocation(); switch (II->getIntrinsicID()) { default: return MemoryLocation(); // Unhandled intrinsic. case Intrinsic::init_trampoline: // FIXME: We don't know the size of the trampoline, so we can't really // handle it here. return MemoryLocation(II->getArgOperand(0)); case Intrinsic::lifetime_end: { uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue(); return MemoryLocation(II->getArgOperand(1), Len); } } } /// getLocForRead - Return the location read by the specified "hasMemoryWrite" /// instruction if any. static MemoryLocation getLocForRead(Instruction *Inst, AliasAnalysis &AA) { assert(hasMemoryWrite(Inst, AA.getTargetLibraryInfo()) && "Unknown instruction case"); // The only instructions that both read and write are the mem transfer // instructions (memcpy/memmove). if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst)) return MemoryLocation::getForSource(MTI); return MemoryLocation(); } /// isRemovable - If the value of this instruction and the memory it writes to /// is unused, may we delete this instruction? static bool isRemovable(Instruction *I) { // Don't remove volatile/atomic stores. if (StoreInst *SI = dyn_cast<StoreInst>(I)) return SI->isUnordered(); if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { switch (II->getIntrinsicID()) { default: llvm_unreachable("doesn't pass 'hasMemoryWrite' predicate"); case Intrinsic::lifetime_end: // Never remove dead lifetime_end's, e.g. because it is followed by a // free. return false; case Intrinsic::init_trampoline: // Always safe to remove init_trampoline. return true; case Intrinsic::memset: case Intrinsic::memmove: case Intrinsic::memcpy: // Don't remove volatile memory intrinsics. return !cast<MemIntrinsic>(II)->isVolatile(); } } if (auto CS = CallSite(I)) return CS.getInstruction()->use_empty(); return false; } /// isShortenable - Returns true if this instruction can be safely shortened in /// length. 
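// (Illustrative example, assumed IR: a memset(p, 0, 32) whose tail is later
// overwritten can be trimmed to memset(p, 0, 16) by the OverwriteEnd handling
// in runOnBasicBlock; plain StoreInsts and libcalls are never shortened here.)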
static bool isShortenable(Instruction *I) { // Don't shorten stores for now if (isa<StoreInst>(I)) return false; if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { switch (II->getIntrinsicID()) { default: return false; case Intrinsic::memset: case Intrinsic::memcpy: // Do shorten memory intrinsics. return true; } } // Don't shorten libcalls calls for now. return false; } /// getStoredPointerOperand - Return the pointer that is being written to. static Value *getStoredPointerOperand(Instruction *I) { if (StoreInst *SI = dyn_cast<StoreInst>(I)) return SI->getPointerOperand(); if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) return MI->getDest(); if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { switch (II->getIntrinsicID()) { default: llvm_unreachable("Unexpected intrinsic!"); case Intrinsic::init_trampoline: return II->getArgOperand(0); } } CallSite CS(I); // All the supported functions so far happen to have dest as their first // argument. return CS.getArgument(0); } static uint64_t getPointerSize(const Value *V, const DataLayout &DL, const TargetLibraryInfo *TLI) { uint64_t Size; if (getObjectSize(V, Size, DL, TLI)) return Size; return MemoryLocation::UnknownSize; } namespace { enum OverwriteResult { OverwriteComplete, OverwriteEnd, OverwriteUnknown }; } /// isOverwrite - Return 'OverwriteComplete' if a store to the 'Later' location /// completely overwrites a store to the 'Earlier' location. /// 'OverwriteEnd' if the end of the 'Earlier' location is completely /// overwritten by 'Later', or 'OverwriteUnknown' if nothing can be determined static OverwriteResult isOverwrite(const MemoryLocation &Later, const MemoryLocation &Earlier, const DataLayout &DL, const TargetLibraryInfo *TLI, int64_t &EarlierOff, int64_t &LaterOff) { const Value *P1 = Earlier.Ptr->stripPointerCasts(); const Value *P2 = Later.Ptr->stripPointerCasts(); // If the start pointers are the same, we just have to compare sizes to see if // the later store was larger than the earlier store. if (P1 == P2) { // If we don't know the sizes of either access, then we can't do a // comparison. if (Later.Size == MemoryLocation::UnknownSize || Earlier.Size == MemoryLocation::UnknownSize) return OverwriteUnknown; // Make sure that the Later size is >= the Earlier size. if (Later.Size >= Earlier.Size) return OverwriteComplete; } // Otherwise, we have to have size information, and the later store has to be // larger than the earlier one. if (Later.Size == MemoryLocation::UnknownSize || Earlier.Size == MemoryLocation::UnknownSize) return OverwriteUnknown; // Check to see if the later store is to the entire object (either a global, // an alloca, or a byval/inalloca argument). If so, then it clearly // overwrites any other store to the same object. const Value *UO1 = GetUnderlyingObject(P1, DL), *UO2 = GetUnderlyingObject(P2, DL); // If we can't resolve the same pointers to the same object, then we can't // analyze them at all. if (UO1 != UO2) return OverwriteUnknown; // If the "Later" store is to a recognizable object, get its size. uint64_t ObjectSize = getPointerSize(UO2, DL, TLI); if (ObjectSize != MemoryLocation::UnknownSize) if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size) return OverwriteComplete; // Okay, we have stores to two completely different pointers. Try to // decompose the pointer into a "base + constant_offset" form. If the base // pointers are equal, then we can reason about the two stores. 
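  // (Worked example with hypothetical offsets: for byte ranges relative to the
  // same base, Earlier = [4, 12) and Later = [0, 16) give EarlierOff = 4,
  // LaterOff = 0, and 4 - 0 + 8 <= 16, so the earlier store is reported as
  // OverwriteComplete by the check below.)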
EarlierOff = 0; LaterOff = 0; const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL); const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL); // If the base pointers still differ, we have two completely different stores. if (BP1 != BP2) return OverwriteUnknown; // The later store completely overlaps the earlier store if: // // 1. Both start at the same offset and the later one's size is greater than // or equal to the earlier one's, or // // |--earlier--| // |-- later --| // // 2. The earlier store has an offset greater than the later offset, but which // still lies completely within the later store. // // |--earlier--| // |----- later ------| // // We have to be careful here as *Off is signed while *.Size is unsigned. if (EarlierOff >= LaterOff && Later.Size >= Earlier.Size && uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size) return OverwriteComplete; // The other interesting case is if the later store overwrites the end of // the earlier store // // |--earlier--| // |-- later --| // // In this case we may want to trim the size of earlier to avoid generating // writes to addresses which will definitely be overwritten later if (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + Earlier.Size) && int64_t(LaterOff + Later.Size) >= int64_t(EarlierOff + Earlier.Size)) return OverwriteEnd; // Otherwise, they don't completely overlap. return OverwriteUnknown; } /// isPossibleSelfRead - If 'Inst' might be a self read (i.e. a noop copy of a /// memory region into an identical pointer) then it doesn't actually make its /// input dead in the traditional sense. Consider this case: /// /// memcpy(A <- B) /// memcpy(A <- A) /// /// In this case, the second store to A does not make the first store to A dead. /// The usual situation isn't an explicit A<-A store like this (which can be /// trivially removed) but a case where two pointers may alias. /// /// This function detects when it is unsafe to remove a dependent instruction /// because the DSE inducing instruction may be a self-read. static bool isPossibleSelfRead(Instruction *Inst, const MemoryLocation &InstStoreLoc, Instruction *DepWrite, AliasAnalysis &AA) { // Self reads can only happen for instructions that read memory. Get the // location read. MemoryLocation InstReadLoc = getLocForRead(Inst, AA); if (!InstReadLoc.Ptr) return false; // Not a reading instruction. // If the read and written loc obviously don't alias, it isn't a read. if (AA.isNoAlias(InstReadLoc, InstStoreLoc)) return false; // Okay, 'Inst' may copy over itself. However, we can still remove a the // DepWrite instruction if we can prove that it reads from the same location // as Inst. This handles useful cases like: // memcpy(A <- B) // memcpy(A <- B) // Here we don't know if A/B may alias, but we do know that B/B are must // aliases, so removing the first memcpy is safe (assuming it writes <= # // bytes as the second one. MemoryLocation DepReadLoc = getLocForRead(DepWrite, AA); if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr)) return false; // If DepWrite doesn't read memory or if we can't prove it is a must alias, // then it can't be considered dead. return true; } //===----------------------------------------------------------------------===// // DSE Pass //===----------------------------------------------------------------------===// bool DSE::runOnBasicBlock(BasicBlock &BB) { bool MadeChange = false; // Do a top-down walk on the BB. 
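  // (Illustrative example, hypothetical IR: given
  //   store i32 1, i32* %p
  //   store i32 2, i32* %p
  // with no intervening read of %p, the second store's local dependence is the
  // first store, which it completely overwrites, so the first store is deleted
  // by the logic below.)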
for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) { Instruction *Inst = BBI++; // Handle 'free' calls specially. if (CallInst *F = isFreeCall(Inst, TLI)) { MadeChange |= HandleFree(F); continue; } // If we find something that writes memory, get its memory dependence. if (!hasMemoryWrite(Inst, TLI)) continue; MemDepResult InstDep = MD->getDependency(Inst, ScanLimit); // Ignore any store where we can't find a local dependence. // FIXME: cross-block DSE would be fun. :) if (!InstDep.isDef() && !InstDep.isClobber()) continue; // If we're storing the same value back to a pointer that we just // loaded from, then the store can be removed. if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) { if (LoadInst *DepLoad = dyn_cast<LoadInst>(InstDep.getInst())) { if (SI->getPointerOperand() == DepLoad->getPointerOperand() && SI->getOperand(0) == DepLoad && isRemovable(SI)) { DEBUG(dbgs() << "DSE: Remove Store Of Load from same pointer:\n " << "LOAD: " << *DepLoad << "\n STORE: " << *SI << '\n'); // DeleteDeadInstruction can delete the current instruction. Save BBI // in case we need it. WeakTrackingVH NextInst(BBI); DeleteDeadInstruction(SI, *MD, TLI); if (!NextInst) // Next instruction deleted. BBI = BB.begin(); else if (BBI != BB.begin()) // Revisit this instruction if possible. --BBI; ++NumFastStores; MadeChange = true; continue; } } } // Figure out what location is being stored to. MemoryLocation Loc = getLocForWrite(Inst, *AA); // If we didn't get a useful location, fail. if (!Loc.Ptr) continue; while (InstDep.isDef() || InstDep.isClobber()) { // Get the memory clobbered by the instruction we depend on. MemDep will // skip any instructions that 'Loc' clearly doesn't interact with. If we // end up depending on a may- or must-aliased load, then we can't optimize // away the store and we bail out. However, if we depend on on something // that overwrites the memory location we *can* potentially optimize it. // // Find out what memory location the dependent instruction stores. Instruction *DepWrite = InstDep.getInst(); MemoryLocation DepLoc = getLocForWrite(DepWrite, *AA); // If we didn't get a useful location, or if it isn't a size, bail out. if (!DepLoc.Ptr) break; // If we find a write that is a) removable (i.e., non-volatile), b) is // completely obliterated by the store to 'Loc', and c) which we know that // 'Inst' doesn't load from, then we can remove it. if (isRemovable(DepWrite) && !isPossibleSelfRead(Inst, Loc, DepWrite, *AA)) { int64_t InstWriteOffset, DepWriteOffset; const DataLayout &DL = BB.getModule()->getDataLayout(); OverwriteResult OR = isOverwrite(Loc, DepLoc, DL, AA->getTargetLibraryInfo(), DepWriteOffset, InstWriteOffset); if (OR == OverwriteComplete) { DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *DepWrite << "\n KILLER: " << *Inst << '\n'); // Delete the store and now-dead instructions that feed it. DeleteDeadInstruction(DepWrite, *MD, TLI); ++NumFastStores; MadeChange = true; // DeleteDeadInstruction can delete the current instruction in loop // cases, reset BBI. 
BBI = Inst; if (BBI != BB.begin()) --BBI; break; } else if (OR == OverwriteEnd && isShortenable(DepWrite)) { // TODO: base this on the target vector size so that if the earlier // store was too small to get vector writes anyway then its likely // a good idea to shorten it // Power of 2 vector writes are probably always a bad idea to optimize // as any store/memset/memcpy is likely using vector instructions so // shortening it to not vector size is likely to be slower MemIntrinsic* DepIntrinsic = cast<MemIntrinsic>(DepWrite); unsigned DepWriteAlign = DepIntrinsic->getAlignment(); if (llvm::isPowerOf2_64(InstWriteOffset) || ((DepWriteAlign != 0) && InstWriteOffset % DepWriteAlign == 0)) { DEBUG(dbgs() << "DSE: Remove Dead Store:\n OW END: " << *DepWrite << "\n KILLER (offset " << InstWriteOffset << ", " << DepLoc.Size << ")" << *Inst << '\n'); Value* DepWriteLength = DepIntrinsic->getLength(); Value* TrimmedLength = ConstantInt::get(DepWriteLength->getType(), InstWriteOffset - DepWriteOffset); DepIntrinsic->setLength(TrimmedLength); MadeChange = true; } } } // If this is a may-aliased store that is clobbering the store value, we // can keep searching past it for another must-aliased pointer that stores // to the same location. For example, in: // store -> P // store -> Q // store -> P // we can remove the first store to P even though we don't know if P and Q // alias. if (DepWrite == &BB.front()) break; // Can't look past this instruction if it might read 'Loc'. if (AA->getModRefInfo(DepWrite, Loc) & AliasAnalysis::Ref) break; InstDep = MD->getPointerDependencyFrom(Loc, false, DepWrite, &BB); } } // If this block ends in a return, unwind, or unreachable, all allocas are // dead at its end, which means stores to them are also dead. if (BB.getTerminator()->getNumSuccessors() == 0) MadeChange |= handleEndBlock(BB); return MadeChange; } /// Find all blocks that will unconditionally lead to the block BB and append /// them to F. static void FindUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks, BasicBlock *BB, DominatorTree *DT) { for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) { BasicBlock *Pred = *I; if (Pred == BB) continue; TerminatorInst *PredTI = Pred->getTerminator(); if (PredTI->getNumSuccessors() != 1) continue; if (DT->isReachableFromEntry(Pred)) Blocks.push_back(Pred); } } /// HandleFree - Handle frees of entire structures whose dependency is a store /// to a field of that structure. bool DSE::HandleFree(CallInst *F) { bool MadeChange = false; MemoryLocation Loc = MemoryLocation(F->getOperand(0)); SmallVector<BasicBlock *, 16> Blocks; Blocks.push_back(F->getParent()); const DataLayout &DL = F->getModule()->getDataLayout(); while (!Blocks.empty()) { BasicBlock *BB = Blocks.pop_back_val(); Instruction *InstPt = BB->getTerminator(); if (BB == F->getParent()) InstPt = F; MemDepResult Dep = MD->getPointerDependencyFrom(Loc, false, InstPt, BB); while (Dep.isDef() || Dep.isClobber()) { Instruction *Dependency = Dep.getInst(); if (!hasMemoryWrite(Dependency, TLI) || !isRemovable(Dependency)) break; Value *DepPointer = GetUnderlyingObject(getStoredPointerOperand(Dependency), DL); // Check for aliasing. if (!AA->isMustAlias(F->getArgOperand(0), DepPointer)) break; Instruction *Next = std::next(BasicBlock::iterator(Dependency)); // DCE instructions only used to calculate that store DeleteDeadInstruction(Dependency, *MD, TLI); ++NumFastStores; MadeChange = true; // Inst's old Dependency is now deleted. 
Compute the next dependency, // which may also be dead, as in // s[0] = 0; // s[1] = 0; // This has just been deleted. // free(s); Dep = MD->getPointerDependencyFrom(Loc, false, Next, BB); } if (Dep.isNonLocal()) FindUnconditionalPreds(Blocks, BB, DT); } return MadeChange; } /// handleEndBlock - Remove dead stores to stack-allocated locations in the /// function end block. Ex: /// %A = alloca i32 /// ... /// store i32 1, i32* %A /// ret void bool DSE::handleEndBlock(BasicBlock &BB) { bool MadeChange = false; // Keep track of all of the stack objects that are dead at the end of the // function. SmallSetVector<Value*, 16> DeadStackObjects; // Find all of the alloca'd pointers in the entry block. BasicBlock *Entry = BB.getParent()->begin(); for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I) { if (isa<AllocaInst>(I)) DeadStackObjects.insert(I); // Okay, so these are dead heap objects, but if the pointer never escapes // then it's leaked by this function anyways. else if (isAllocLikeFn(I, TLI) && !PointerMayBeCaptured(I, true, true)) DeadStackObjects.insert(I); } // Treat byval or inalloca arguments the same, stores to them are dead at the // end of the function. for (Function::arg_iterator AI = BB.getParent()->arg_begin(), AE = BB.getParent()->arg_end(); AI != AE; ++AI) if (AI->hasByValOrInAllocaAttr()) DeadStackObjects.insert(AI); const DataLayout &DL = BB.getModule()->getDataLayout(); // Scan the basic block backwards for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){ --BBI; // If we find a store, check to see if it points into a dead stack value. if (hasMemoryWrite(BBI, TLI) && isRemovable(BBI)) { // See through pointer-to-pointer bitcasts SmallVector<Value *, 4> Pointers; GetUnderlyingObjects(getStoredPointerOperand(BBI), Pointers, DL); // Stores to stack values are valid candidates for removal. bool AllDead = true; for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(), E = Pointers.end(); I != E; ++I) if (!DeadStackObjects.count(*I)) { AllDead = false; break; } if (AllDead) { Instruction *Dead = BBI++; DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n DEAD: " << *Dead << "\n Objects: "; for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(), E = Pointers.end(); I != E; ++I) { dbgs() << **I; if (std::next(I) != E) dbgs() << ", "; } dbgs() << '\n'); // DCE instructions only used to calculate that store. DeleteDeadInstruction(Dead, *MD, TLI, &DeadStackObjects); ++NumFastStores; MadeChange = true; continue; } } // Remove any dead non-memory-mutating instructions. if (isInstructionTriviallyDead(BBI, TLI)) { Instruction *Inst = BBI++; DeleteDeadInstruction(Inst, *MD, TLI, &DeadStackObjects); ++NumFastOther; MadeChange = true; continue; } if (isa<AllocaInst>(BBI)) { // Remove allocas from the list of dead stack objects; there can't be // any references before the definition. DeadStackObjects.remove(BBI); continue; } if (auto CS = CallSite(BBI)) { // Remove allocation function calls from the list of dead stack objects; // there can't be any references before the definition. if (isAllocLikeFn(BBI, TLI)) DeadStackObjects.remove(BBI); // If this call does not access memory, it can't be loading any of our // pointers. if (AA->doesNotAccessMemory(CS)) continue; // If the call might load from any of our allocas, then any store above // the call is live. DeadStackObjects.remove_if([&](Value *I) { // See if the call site touches the value. 
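      // getModRefInfo conservatively answers whether the call may read (Ref)
      // and/or write (Mod) the object I; any possible read keeps the stores
      // above the call alive, so I must be dropped from the dead set.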
AliasAnalysis::ModRefResult A = AA->getModRefInfo( CS, I, getPointerSize(I, DL, AA->getTargetLibraryInfo())); return A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref; }); // If all of the allocas were clobbered by the call then we're not going // to find anything else to process. if (DeadStackObjects.empty()) break; continue; } MemoryLocation LoadedLoc; // If we encounter a use of the pointer, it is no longer considered dead if (LoadInst *L = dyn_cast<LoadInst>(BBI)) { if (!L->isUnordered()) // Be conservative with atomic/volatile load break; LoadedLoc = MemoryLocation::get(L); } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) { LoadedLoc = MemoryLocation::get(V); } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) { LoadedLoc = MemoryLocation::getForSource(MTI); } else if (!BBI->mayReadFromMemory()) { // Instruction doesn't read memory. Note that stores that weren't removed // above will hit this case. continue; } else { // Unknown inst; assume it clobbers everything. break; } // Remove any allocas from the DeadPointer set that are loaded, as this // makes any stores above the access live. RemoveAccessedObjects(LoadedLoc, DeadStackObjects, DL); // If all of the allocas were clobbered by the access then we're not going // to find anything else to process. if (DeadStackObjects.empty()) break; } return MadeChange; } /// RemoveAccessedObjects - Check to see if the specified location may alias any /// of the stack objects in the DeadStackObjects set. If so, they become live /// because the location is being loaded. void DSE::RemoveAccessedObjects(const MemoryLocation &LoadedLoc, SmallSetVector<Value *, 16> &DeadStackObjects, const DataLayout &DL) { const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL); // A constant can't be in the dead pointer set. if (isa<Constant>(UnderlyingPointer)) return; // If the kill pointer can be easily reduced to an alloca, don't bother doing // extraneous AA queries. if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) { DeadStackObjects.remove(const_cast<Value*>(UnderlyingPointer)); return; } // Remove objects that could alias LoadedLoc. DeadStackObjects.remove_if([&](Value *I) { // See if the loaded location could alias the stack location. MemoryLocation StackLoc(I, getPointerSize(I, DL, AA->getTargetLibraryInfo())); return !AA->isNoAlias(StackLoc, LoadedLoc); }); }
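
// Illustrative example: in a block such as
//
//   %a = alloca i32
//   store i32 1, i32* %a   ; completely overwritten by the next store
//   store i32 2, i32* %a   ; never read before the no-successor terminator
//   ret void
//
// runOnBasicBlock deletes the first store (OverwriteComplete against the
// second), and handleEndBlock deletes the second, since stores to a
// non-escaping stack object are dead at the end of the function.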
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/LoopStrengthReduce.cpp
//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This transformation analyzes and transforms the induction variables (and // computations derived from them) into forms suitable for efficient execution // on the target. // // This pass performs a strength reduction on array references inside loops that // have as one or more of their components the loop induction variable, it // rewrites expressions to take advantage of scaled-index addressing modes // available on the target, and it performs a variety of other optimizations // related to loop induction variables. // // Terminology note: this code has a lot of handling for "post-increment" or // "post-inc" users. This is not talking about post-increment addressing modes; // it is instead talking about code like this: // // %i = phi [ 0, %entry ], [ %i.next, %latch ] // ... // %i.next = add %i, 1 // %c = icmp eq %i.next, %n // // The SCEV for %i is {0,+,1}<%L>. The SCEV for %i.next is {1,+,1}<%L>, however // it's useful to think about these as the same register, with some uses using // the value of the register before the add and some using it after. In this // example, the icmp is a post-increment user, since it uses %i.next, which is // the value of the induction variable after the increment. The other common // case of post-increment users is users outside the loop. // // TODO: More sophistication in the way Formulae are generated and filtered. // // TODO: Handle multiple loops at a time. // // TODO: Should the addressing mode BaseGV be changed to a ConstantExpr instead // of a GlobalValue? // // TODO: When truncation is free, truncate ICmp users' operands to make it a // smaller encoding (on x86 at least). // // TODO: When a negated register is used by an add (such as in a list of // multiple base registers, or as the increment expression in an addrec), // we may not actually need both reg and (-1 * reg) in registers; the // negation can be implemented by using a sub instead of an add. The // lack of support for taking this into consideration when making // register pressure decisions is partly worked around by the "Special" // use kind. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/Hashing.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/Analysis/IVUsers.h" #include "llvm/Analysis/LoopPass.h" #include "llvm/Analysis/ScalarEvolutionExpander.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/IR/ValueHandle.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Local.h" #include <algorithm> #include <memory> // HLSL Change using namespace llvm; #define DEBUG_TYPE "loop-reduce" /// MaxIVUsers is an arbitrary threshold that provides an early opportunitiy for /// bail out. 
This threshold is far beyond the number of users that LSR can /// conceivably solve, so it should not affect generated code, but catches the /// worst cases before LSR burns too much compile time and stack space. static const unsigned MaxIVUsers = 200; // Temporary flag to cleanup congruent phis after LSR phi expansion. // It's currently disabled until we can determine whether it's truly useful or // not. The flag should be removed after the v3.0 release. // This is now needed for ivchains. static cl::opt<bool> EnablePhiElim( "enable-lsr-phielim", cl::Hidden, cl::init(true), cl::desc("Enable LSR phi elimination")); #ifndef NDEBUG // Stress test IV chain generation. static cl::opt<bool> StressIVChain( "stress-ivchain", cl::Hidden, cl::init(false), cl::desc("Stress test LSR IV chains")); #else static bool StressIVChain = false; #endif namespace { /// RegSortData - This class holds data which is used to order reuse candidates. class RegSortData { public: /// UsedByIndices - This represents the set of LSRUse indices which reference /// a particular register. SmallBitVector UsedByIndices; void print(raw_ostream &OS) const; void dump() const; }; } void RegSortData::print(raw_ostream &OS) const { OS << "[NumUses=" << UsedByIndices.count() << ']'; } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void RegSortData::dump() const { print(errs()); errs() << '\n'; } #endif namespace { /// RegUseTracker - Map register candidates to information about how they are /// used. class RegUseTracker { typedef DenseMap<const SCEV *, RegSortData> RegUsesTy; RegUsesTy RegUsesMap; SmallVector<const SCEV *, 16> RegSequence; public: void CountRegister(const SCEV *Reg, size_t LUIdx); void DropRegister(const SCEV *Reg, size_t LUIdx); void SwapAndDropUse(size_t LUIdx, size_t LastLUIdx); bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const; const SmallBitVector &getUsedByIndices(const SCEV *Reg) const; void clear(); typedef SmallVectorImpl<const SCEV *>::iterator iterator; typedef SmallVectorImpl<const SCEV *>::const_iterator const_iterator; iterator begin() { return RegSequence.begin(); } iterator end() { return RegSequence.end(); } const_iterator begin() const { return RegSequence.begin(); } const_iterator end() const { return RegSequence.end(); } }; } void RegUseTracker::CountRegister(const SCEV *Reg, size_t LUIdx) { std::pair<RegUsesTy::iterator, bool> Pair = RegUsesMap.insert(std::make_pair(Reg, RegSortData())); RegSortData &RSD = Pair.first->second; if (Pair.second) RegSequence.push_back(Reg); RSD.UsedByIndices.resize(std::max(RSD.UsedByIndices.size(), LUIdx + 1)); RSD.UsedByIndices.set(LUIdx); } void RegUseTracker::DropRegister(const SCEV *Reg, size_t LUIdx) { RegUsesTy::iterator It = RegUsesMap.find(Reg); assert(It != RegUsesMap.end()); RegSortData &RSD = It->second; assert(RSD.UsedByIndices.size() > LUIdx); RSD.UsedByIndices.reset(LUIdx); } void RegUseTracker::SwapAndDropUse(size_t LUIdx, size_t LastLUIdx) { assert(LUIdx <= LastLUIdx); // Update RegUses. The data structure is not optimized for this purpose; // we must iterate through it and update each of the bit vectors. for (auto &Pair : RegUsesMap) { SmallBitVector &UsedByIndices = Pair.second.UsedByIndices; if (LUIdx < UsedByIndices.size()) UsedByIndices[LUIdx] = LastLUIdx < UsedByIndices.size() ? 
UsedByIndices[LastLUIdx] : 0; UsedByIndices.resize(std::min(UsedByIndices.size(), LastLUIdx)); } } bool RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const { RegUsesTy::const_iterator I = RegUsesMap.find(Reg); if (I == RegUsesMap.end()) return false; const SmallBitVector &UsedByIndices = I->second.UsedByIndices; int i = UsedByIndices.find_first(); if (i == -1) return false; if ((size_t)i != LUIdx) return true; return UsedByIndices.find_next(i) != -1; } const SmallBitVector &RegUseTracker::getUsedByIndices(const SCEV *Reg) const { RegUsesTy::const_iterator I = RegUsesMap.find(Reg); assert(I != RegUsesMap.end() && "Unknown register!"); return I->second.UsedByIndices; } void RegUseTracker::clear() { RegUsesMap.clear(); RegSequence.clear(); } namespace { /// Formula - This class holds information that describes a formula for /// computing satisfying a use. It may include broken-out immediates and scaled /// registers. struct Formula { /// Global base address used for complex addressing. GlobalValue *BaseGV; /// Base offset for complex addressing. int64_t BaseOffset; /// Whether any complex addressing has a base register. bool HasBaseReg; /// The scale of any complex addressing. int64_t Scale; /// BaseRegs - The list of "base" registers for this use. When this is /// non-empty. The canonical representation of a formula is /// 1. BaseRegs.size > 1 implies ScaledReg != NULL and /// 2. ScaledReg != NULL implies Scale != 1 || !BaseRegs.empty(). /// #1 enforces that the scaled register is always used when at least two /// registers are needed by the formula: e.g., reg1 + reg2 is reg1 + 1 * reg2. /// #2 enforces that 1 * reg is reg. /// This invariant can be temporarly broken while building a formula. /// However, every formula inserted into the LSRInstance must be in canonical /// form. SmallVector<const SCEV *, 4> BaseRegs; /// ScaledReg - The 'scaled' register for this use. This should be non-null /// when Scale is not zero. const SCEV *ScaledReg; /// UnfoldedOffset - An additional constant offset which added near the /// use. This requires a temporary register, but the offset itself can /// live in an add immediate field rather than a register. int64_t UnfoldedOffset; Formula() : BaseGV(nullptr), BaseOffset(0), HasBaseReg(false), Scale(0), ScaledReg(nullptr), UnfoldedOffset(0) {} void InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE); bool isCanonical() const; void Canonicalize(); bool Unscale(); size_t getNumRegs() const; Type *getType() const; void DeleteBaseReg(const SCEV *&S); bool referencesReg(const SCEV *S) const; bool hasRegsUsedByUsesOtherThan(size_t LUIdx, const RegUseTracker &RegUses) const; void print(raw_ostream &OS) const; void dump() const; }; } /// DoInitialMatch - Recursion helper for InitialMatch. static void DoInitialMatch(const SCEV *S, Loop *L, SmallVectorImpl<const SCEV *> &Good, SmallVectorImpl<const SCEV *> &Bad, ScalarEvolution &SE) { // Collect expressions which properly dominate the loop header. if (SE.properlyDominates(S, L->getHeader())) { Good.push_back(S); return; } // Look at add operands. if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { for (const SCEV *S : Add->operands()) DoInitialMatch(S, L, Good, Bad, SE); return; } // Look at addrec operands. 
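  // For example, an addrec {Start,+,Step}<L> with a non-zero Start is split
  // into its loop-invariant Start and a zero-based {0,+,Step}<L> addrec, so
  // the invariant part can later be folded into a base register.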
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) if (!AR->getStart()->isZero()) { DoInitialMatch(AR->getStart(), L, Good, Bad, SE); DoInitialMatch(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0), AR->getStepRecurrence(SE), // FIXME: AR->getNoWrapFlags() AR->getLoop(), SCEV::FlagAnyWrap), L, Good, Bad, SE); return; } // Handle a multiplication by -1 (negation) if it didn't fold. if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) if (Mul->getOperand(0)->isAllOnesValue()) { SmallVector<const SCEV *, 4> Ops(Mul->op_begin()+1, Mul->op_end()); const SCEV *NewMul = SE.getMulExpr(Ops); SmallVector<const SCEV *, 4> MyGood; SmallVector<const SCEV *, 4> MyBad; DoInitialMatch(NewMul, L, MyGood, MyBad, SE); const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue( SE.getEffectiveSCEVType(NewMul->getType()))); for (const SCEV *S : MyGood) Good.push_back(SE.getMulExpr(NegOne, S)); for (const SCEV *S : MyBad) Bad.push_back(SE.getMulExpr(NegOne, S)); return; } // Ok, we can't do anything interesting. Just stuff the whole thing into a // register and hope for the best. Bad.push_back(S); } /// InitialMatch - Incorporate loop-variant parts of S into this Formula, /// attempting to keep all loop-invariant and loop-computable values in a /// single base register. void Formula::InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) { SmallVector<const SCEV *, 4> Good; SmallVector<const SCEV *, 4> Bad; DoInitialMatch(S, L, Good, Bad, SE); if (!Good.empty()) { const SCEV *Sum = SE.getAddExpr(Good); if (!Sum->isZero()) BaseRegs.push_back(Sum); HasBaseReg = true; } if (!Bad.empty()) { const SCEV *Sum = SE.getAddExpr(Bad); if (!Sum->isZero()) BaseRegs.push_back(Sum); HasBaseReg = true; } Canonicalize(); } /// \brief Check whether or not this formula statisfies the canonical /// representation. /// \see Formula::BaseRegs. bool Formula::isCanonical() const { if (ScaledReg) return Scale != 1 || !BaseRegs.empty(); return BaseRegs.size() <= 1; } /// \brief Helper method to morph a formula into its canonical representation. /// \see Formula::BaseRegs. /// Every formula having more than one base register, must use the ScaledReg /// field. Otherwise, we would have to do special cases everywhere in LSR /// to treat reg1 + reg2 + ... the same way as reg1 + 1*reg2 + ... /// On the other hand, 1*reg should be canonicalized into reg. void Formula::Canonicalize() { if (isCanonical()) return; // So far we did not need this case. This is easy to implement but it is // useless to maintain dead code. Beside it could hurt compile time. assert(!BaseRegs.empty() && "1*reg => reg, should not be needed."); // Keep the invariant sum in BaseRegs and one of the variant sum in ScaledReg. ScaledReg = BaseRegs.back(); BaseRegs.pop_back(); Scale = 1; size_t BaseRegsSize = BaseRegs.size(); size_t Try = 0; // If ScaledReg is an invariant, try to find a variant expression. while (Try < BaseRegsSize && !isa<SCEVAddRecExpr>(ScaledReg)) std::swap(ScaledReg, BaseRegs[Try++]); } /// \brief Get rid of the scale in the formula. /// In other words, this method morphes reg1 + 1*reg2 into reg1 + reg2. /// \return true if it was possible to get rid of the scale, false otherwise. /// \note After this operation the formula may not be in the canonical form. bool Formula::Unscale() { if (Scale != 1) return false; Scale = 0; BaseRegs.push_back(ScaledReg); ScaledReg = nullptr; return true; } /// getNumRegs - Return the total number of register operands used by this /// formula. 
This does not include register uses implied by non-constant /// addrec strides. size_t Formula::getNumRegs() const { return !!ScaledReg + BaseRegs.size(); } /// getType - Return the type of this formula, if it has one, or null /// otherwise. This type is meaningless except for the bit size. Type *Formula::getType() const { return !BaseRegs.empty() ? BaseRegs.front()->getType() : ScaledReg ? ScaledReg->getType() : BaseGV ? BaseGV->getType() : nullptr; } /// DeleteBaseReg - Delete the given base reg from the BaseRegs list. void Formula::DeleteBaseReg(const SCEV *&S) { if (&S != &BaseRegs.back()) std::swap(S, BaseRegs.back()); BaseRegs.pop_back(); } /// referencesReg - Test if this formula references the given register. bool Formula::referencesReg(const SCEV *S) const { return S == ScaledReg || std::find(BaseRegs.begin(), BaseRegs.end(), S) != BaseRegs.end(); } /// hasRegsUsedByUsesOtherThan - Test whether this formula uses registers /// which are used by uses other than the use with the given index. bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx, const RegUseTracker &RegUses) const { if (ScaledReg) if (RegUses.isRegUsedByUsesOtherThan(ScaledReg, LUIdx)) return true; for (const SCEV *BaseReg : BaseRegs) if (RegUses.isRegUsedByUsesOtherThan(BaseReg, LUIdx)) return true; return false; } void Formula::print(raw_ostream &OS) const { bool First = true; if (BaseGV) { if (!First) OS << " + "; else First = false; BaseGV->printAsOperand(OS, /*PrintType=*/false); } if (BaseOffset != 0) { if (!First) OS << " + "; else First = false; OS << BaseOffset; } for (const SCEV *BaseReg : BaseRegs) { if (!First) OS << " + "; else First = false; OS << "reg(" << *BaseReg << ')'; } if (HasBaseReg && BaseRegs.empty()) { if (!First) OS << " + "; else First = false; OS << "**error: HasBaseReg**"; } else if (!HasBaseReg && !BaseRegs.empty()) { if (!First) OS << " + "; else First = false; OS << "**error: !HasBaseReg**"; } if (Scale != 0) { if (!First) OS << " + "; else First = false; OS << Scale << "*reg("; if (ScaledReg) OS << *ScaledReg; else OS << "<unknown>"; OS << ')'; } if (UnfoldedOffset != 0) { if (!First) OS << " + "; OS << "imm(" << UnfoldedOffset << ')'; } } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void Formula::dump() const { print(errs()); errs() << '\n'; } #endif /// isAddRecSExtable - Return true if the given addrec can be sign-extended /// without changing its value. static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) { Type *WideTy = IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(AR->getType()) + 1); return isa<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy)); } /// isAddSExtable - Return true if the given add can be sign-extended /// without changing its value. static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) { Type *WideTy = IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(A->getType()) + 1); return isa<SCEVAddExpr>(SE.getSignExtendExpr(A, WideTy)); } /// isMulSExtable - Return true if the given mul can be sign-extended /// without changing its value. static bool isMulSExtable(const SCEVMulExpr *M, ScalarEvolution &SE) { Type *WideTy = IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(M->getType()) * M->getNumOperands()); return isa<SCEVMulExpr>(SE.getSignExtendExpr(M, WideTy)); } /// getExactSDiv - Return an expression for LHS /s RHS, if it can be determined /// and if the remainder is known to be zero, or null otherwise. 
If /// IgnoreSignificantBits is true, expressions like (X * Y) /s Y are simplified /// to Y, ignoring that the multiplication may overflow, which is useful when /// the result will be used in a context where the most significant bits are /// ignored. static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS, ScalarEvolution &SE, bool IgnoreSignificantBits = false) { // Handle the trivial case, which works for any SCEV type. if (LHS == RHS) return SE.getConstant(LHS->getType(), 1); // Handle a few RHS special cases. const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS); if (RC) { const APInt &RA = RC->getValue()->getValue(); // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do // some folding. if (RA.isAllOnesValue()) return SE.getMulExpr(LHS, RC); // Handle x /s 1 as x. if (RA == 1) return LHS; } // Check for a division of a constant by a constant. if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) { if (!RC) return nullptr; const APInt &LA = C->getValue()->getValue(); const APInt &RA = RC->getValue()->getValue(); if (LA.srem(RA) != 0) return nullptr; return SE.getConstant(LA.sdiv(RA)); } // Distribute the sdiv over addrec operands, if the addrec doesn't overflow. if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) { if (IgnoreSignificantBits || isAddRecSExtable(AR, SE)) { const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE, IgnoreSignificantBits); if (!Step) return nullptr; const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE, IgnoreSignificantBits); if (!Start) return nullptr; // FlagNW is independent of the start value, step direction, and is // preserved with smaller magnitude steps. // FIXME: AR->getNoWrapFlags(SCEV::FlagNW) return SE.getAddRecExpr(Start, Step, AR->getLoop(), SCEV::FlagAnyWrap); } return nullptr; } // Distribute the sdiv over add operands, if the add doesn't overflow. if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS)) { if (IgnoreSignificantBits || isAddSExtable(Add, SE)) { SmallVector<const SCEV *, 8> Ops; for (const SCEV *S : Add->operands()) { const SCEV *Op = getExactSDiv(S, RHS, SE, IgnoreSignificantBits); if (!Op) return nullptr; Ops.push_back(Op); } return SE.getAddExpr(Ops); } return nullptr; } // Check for a multiply operand that we can pull RHS out of. if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS)) { if (IgnoreSignificantBits || isMulSExtable(Mul, SE)) { SmallVector<const SCEV *, 4> Ops; bool Found = false; for (const SCEV *S : Mul->operands()) { if (!Found) if (const SCEV *Q = getExactSDiv(S, RHS, SE, IgnoreSignificantBits)) { S = Q; Found = true; } Ops.push_back(S); } return Found ? SE.getMulExpr(Ops) : nullptr; } return nullptr; } // Otherwise we don't know. return nullptr; } /// ExtractImmediate - If S involves the addition of a constant integer value, /// return that integer value, and mutate S to point to a new SCEV with that /// value excluded. 
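/// For example, given S = (4 + %x) this returns 4 and leaves S as %x;
/// given {4,+,1}<L> it returns 4 and leaves S as {0,+,1}<L>.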
static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) { if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) { if (C->getValue()->getValue().getMinSignedBits() <= 64) { S = SE.getConstant(C->getType(), 0); return C->getValue()->getSExtValue(); } } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end()); int64_t Result = ExtractImmediate(NewOps.front(), SE); if (Result != 0) S = SE.getAddExpr(NewOps); return Result; } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end()); int64_t Result = ExtractImmediate(NewOps.front(), SE); if (Result != 0) S = SE.getAddRecExpr(NewOps, AR->getLoop(), // FIXME: AR->getNoWrapFlags(SCEV::FlagNW) SCEV::FlagAnyWrap); return Result; } return 0; } /// ExtractSymbol - If S involves the addition of a GlobalValue address, /// return that symbol, and mutate S to point to a new SCEV with that /// value excluded. static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) { if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) { S = SE.getConstant(GV->getType(), 0); return GV; } } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end()); GlobalValue *Result = ExtractSymbol(NewOps.back(), SE); if (Result) S = SE.getAddExpr(NewOps); return Result; } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end()); GlobalValue *Result = ExtractSymbol(NewOps.front(), SE); if (Result) S = SE.getAddRecExpr(NewOps, AR->getLoop(), // FIXME: AR->getNoWrapFlags(SCEV::FlagNW) SCEV::FlagAnyWrap); return Result; } return nullptr; } /// isAddressUse - Returns true if the specified instruction is using the /// specified value as an address. static bool isAddressUse(Instruction *Inst, Value *OperandVal) { bool isAddress = isa<LoadInst>(Inst); if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) { if (SI->getOperand(1) == OperandVal) isAddress = true; } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) { // Addressing modes can also be folded into prefetches and a variety // of intrinsics. switch (II->getIntrinsicID()) { default: break; case Intrinsic::prefetch: #if 0 // HLSL Change - remove platform intrinsics case Intrinsic::x86_sse_storeu_ps: case Intrinsic::x86_sse2_storeu_pd: case Intrinsic::x86_sse2_storeu_dq: case Intrinsic::x86_sse2_storel_dq: #endif // HLSL Change - remove platform intrinsics if (II->getArgOperand(0) == OperandVal) isAddress = true; break; } } return isAddress; } /// getAccessType - Return the type of the memory being accessed. static Type *getAccessType(const Instruction *Inst) { Type *AccessTy = Inst->getType(); if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) { AccessTy = SI->getOperand(0)->getType(); #if 0 // HLSL Change - remove platform intrinsics } else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) { // Addressing modes can also be folded into prefetches and a variety // of intrinsics. 
switch (II->getIntrinsicID()) { default: break; case Intrinsic::x86_sse_storeu_ps: case Intrinsic::x86_sse2_storeu_pd: case Intrinsic::x86_sse2_storeu_dq: case Intrinsic::x86_sse2_storel_dq: AccessTy = II->getArgOperand(0)->getType(); break; } #endif // HLSL Change - remove platform intrinsics } // All pointers have the same requirements, so canonicalize them to an // arbitrary pointer type to minimize variation. if (PointerType *PTy = dyn_cast<PointerType>(AccessTy)) AccessTy = PointerType::get(IntegerType::get(PTy->getContext(), 1), PTy->getAddressSpace()); return AccessTy; } /// isExistingPhi - Return true if this AddRec is already a phi in its loop. static bool isExistingPhi(const SCEVAddRecExpr *AR, ScalarEvolution &SE) { for (BasicBlock::iterator I = AR->getLoop()->getHeader()->begin(); PHINode *PN = dyn_cast<PHINode>(I); ++I) { if (SE.isSCEVable(PN->getType()) && (SE.getEffectiveSCEVType(PN->getType()) == SE.getEffectiveSCEVType(AR->getType())) && SE.getSCEV(PN) == AR) return true; } return false; } /// Check if expanding this expression is likely to incur significant cost. This /// is tricky because SCEV doesn't track which expressions are actually computed /// by the current IR. /// /// We currently allow expansion of IV increments that involve adds, /// multiplication by constants, and AddRecs from existing phis. /// /// TODO: Allow UDivExpr if we can find an existing IV increment that is an /// obvious multiple of the UDivExpr. static bool isHighCostExpansion(const SCEV *S, SmallPtrSetImpl<const SCEV*> &Processed, ScalarEvolution &SE) { // Zero/One operand expressions switch (S->getSCEVType()) { case scUnknown: case scConstant: return false; case scTruncate: return isHighCostExpansion(cast<SCEVTruncateExpr>(S)->getOperand(), Processed, SE); case scZeroExtend: return isHighCostExpansion(cast<SCEVZeroExtendExpr>(S)->getOperand(), Processed, SE); case scSignExtend: return isHighCostExpansion(cast<SCEVSignExtendExpr>(S)->getOperand(), Processed, SE); } if (!Processed.insert(S).second) return false; if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { for (const SCEV *S : Add->operands()) { if (isHighCostExpansion(S, Processed, SE)) return true; } return false; } if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { if (Mul->getNumOperands() == 2) { // Multiplication by a constant is ok if (isa<SCEVConstant>(Mul->getOperand(0))) return isHighCostExpansion(Mul->getOperand(1), Processed, SE); // If we have the value of one operand, check if an existing // multiplication already generates this expression. if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Mul->getOperand(1))) { Value *UVal = U->getValue(); for (User *UR : UVal->users()) { // If U is a constant, it may be used by a ConstantExpr. Instruction *UI = dyn_cast<Instruction>(UR); if (UI && UI->getOpcode() == Instruction::Mul && SE.isSCEVable(UI->getType())) { return SE.getSCEV(UI) == Mul; } } } } } if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { if (isExistingPhi(AR, SE)) return false; } // Fow now, consider any other type of expression (div/mul/min/max) high cost. return true; } /// DeleteTriviallyDeadInstructions - If any of the instructions is the /// specified set are trivially dead, delete them and see if this makes any of /// their operands subsequently dead. 
static bool DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakTrackingVH> &DeadInsts) { bool Changed = false; while (!DeadInsts.empty()) { Value *V = DeadInsts.pop_back_val(); Instruction *I = dyn_cast_or_null<Instruction>(V); if (!I || !isInstructionTriviallyDead(I)) continue; for (Use &O : I->operands()) if (Instruction *U = dyn_cast<Instruction>(O)) { O = nullptr; if (U->use_empty()) DeadInsts.emplace_back(U); } I->eraseFromParent(); Changed = true; } return Changed; } namespace { class LSRUse; } /// \brief Check if the addressing mode defined by \p F is completely /// folded in \p LU at isel time. /// This includes address-mode folding and special icmp tricks. /// This function returns true if \p LU can accommodate what \p F /// defines and up to 1 base + 1 scaled + offset. /// In other words, if \p F has several base registers, this function may /// still return true. Therefore, users still need to account for /// additional base registers and/or unfolded offsets to derive an /// accurate cost model. static bool isAMCompletelyFolded(const TargetTransformInfo &TTI, const LSRUse &LU, const Formula &F); // Get the cost of the scaling factor used in F for LU. static unsigned getScalingFactorCost(const TargetTransformInfo &TTI, const LSRUse &LU, const Formula &F); namespace { /// Cost - This class is used to measure and compare candidate formulae. class Cost { /// TODO: Some of these could be merged. Also, a lexical ordering /// isn't always optimal. unsigned NumRegs; unsigned AddRecCost; unsigned NumIVMuls; unsigned NumBaseAdds; unsigned ImmCost; unsigned SetupCost; unsigned ScaleCost; public: Cost() : NumRegs(0), AddRecCost(0), NumIVMuls(0), NumBaseAdds(0), ImmCost(0), SetupCost(0), ScaleCost(0) {} bool operator<(const Cost &Other) const; void Lose(); #ifndef NDEBUG // Once any of the metrics loses, they must all remain losers. bool isValid() { return ((NumRegs | AddRecCost | NumIVMuls | NumBaseAdds | ImmCost | SetupCost | ScaleCost) != ~0u) || ((NumRegs & AddRecCost & NumIVMuls & NumBaseAdds & ImmCost & SetupCost & ScaleCost) == ~0u); } #endif bool isLoser() { assert(isValid() && "invalid cost"); return NumRegs == ~0u; } void RateFormula(const TargetTransformInfo &TTI, const Formula &F, SmallPtrSetImpl<const SCEV *> &Regs, const DenseSet<const SCEV *> &VisitedRegs, const Loop *L, const SmallVectorImpl<int64_t> &Offsets, ScalarEvolution &SE, DominatorTree &DT, const LSRUse &LU, SmallPtrSetImpl<const SCEV *> *LoserRegs = nullptr); void print(raw_ostream &OS) const; void dump() const; private: void RateRegister(const SCEV *Reg, SmallPtrSetImpl<const SCEV *> &Regs, const Loop *L, ScalarEvolution &SE, DominatorTree &DT); void RatePrimaryRegister(const SCEV *Reg, SmallPtrSetImpl<const SCEV *> &Regs, const Loop *L, ScalarEvolution &SE, DominatorTree &DT, SmallPtrSetImpl<const SCEV *> *LoserRegs); }; } /// RateRegister - Tally up interesting quantities from the given register. void Cost::RateRegister(const SCEV *Reg, SmallPtrSetImpl<const SCEV *> &Regs, const Loop *L, ScalarEvolution &SE, DominatorTree &DT) { if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) { // If this is an addrec for another loop, don't second-guess its addrec phi // nodes. LSR isn't currently smart enough to reason about more than one // loop at a time. LSR has already run on inner loops, will not run on outer // loops, and cannot be expected to change sibling loops. if (AR->getLoop() != L) { // If the AddRec exists, consider it's register free and leave it alone. 
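      // An existing phi means the value is already materialized, so
      // referencing it adds no new code in this loop.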
if (isExistingPhi(AR, SE)) return; // Otherwise, do not consider this formula at all. Lose(); return; } AddRecCost += 1; /// TODO: This should be a function of the stride. // Add the step value register, if it needs one. // TODO: The non-affine case isn't precisely modeled here. if (!AR->isAffine() || !isa<SCEVConstant>(AR->getOperand(1))) { if (!Regs.count(AR->getOperand(1))) { RateRegister(AR->getOperand(1), Regs, L, SE, DT); if (isLoser()) return; } } } ++NumRegs; // Rough heuristic; favor registers which don't require extra setup // instructions in the preheader. if (!isa<SCEVUnknown>(Reg) && !isa<SCEVConstant>(Reg) && !(isa<SCEVAddRecExpr>(Reg) && (isa<SCEVUnknown>(cast<SCEVAddRecExpr>(Reg)->getStart()) || isa<SCEVConstant>(cast<SCEVAddRecExpr>(Reg)->getStart())))) ++SetupCost; NumIVMuls += isa<SCEVMulExpr>(Reg) && SE.hasComputableLoopEvolution(Reg, L); } /// RatePrimaryRegister - Record this register in the set. If we haven't seen it /// before, rate it. Optional LoserRegs provides a way to declare any formula /// that refers to one of those regs an instant loser. void Cost::RatePrimaryRegister(const SCEV *Reg, SmallPtrSetImpl<const SCEV *> &Regs, const Loop *L, ScalarEvolution &SE, DominatorTree &DT, SmallPtrSetImpl<const SCEV *> *LoserRegs) { if (LoserRegs && LoserRegs->count(Reg)) { Lose(); return; } if (Regs.insert(Reg).second) { RateRegister(Reg, Regs, L, SE, DT); if (LoserRegs && isLoser()) LoserRegs->insert(Reg); } } void Cost::RateFormula(const TargetTransformInfo &TTI, const Formula &F, SmallPtrSetImpl<const SCEV *> &Regs, const DenseSet<const SCEV *> &VisitedRegs, const Loop *L, const SmallVectorImpl<int64_t> &Offsets, ScalarEvolution &SE, DominatorTree &DT, const LSRUse &LU, SmallPtrSetImpl<const SCEV *> *LoserRegs) { assert(F.isCanonical() && "Cost is accurate only for canonical formula"); // Tally up the registers. if (const SCEV *ScaledReg = F.ScaledReg) { if (VisitedRegs.count(ScaledReg)) { Lose(); return; } RatePrimaryRegister(ScaledReg, Regs, L, SE, DT, LoserRegs); if (isLoser()) return; } for (const SCEV *BaseReg : F.BaseRegs) { if (VisitedRegs.count(BaseReg)) { Lose(); return; } RatePrimaryRegister(BaseReg, Regs, L, SE, DT, LoserRegs); if (isLoser()) return; } // Determine how many (unfolded) adds we'll need inside the loop. size_t NumBaseParts = F.getNumRegs(); if (NumBaseParts > 1) // Do not count the base and a possible second register if the target // allows to fold 2 registers. NumBaseAdds += NumBaseParts - (1 + (F.Scale && isAMCompletelyFolded(TTI, LU, F))); NumBaseAdds += (F.UnfoldedOffset != 0); // Accumulate non-free scaling amounts. ScaleCost += getScalingFactorCost(TTI, LU, F); // Tally up the non-zero immediates. for (int64_t O : Offsets) { int64_t Offset = (uint64_t)O + F.BaseOffset; if (F.BaseGV) ImmCost += 64; // Handle symbolic values conservatively. // TODO: This should probably be the pointer size. else if (Offset != 0) ImmCost += APInt(64, Offset, true).getMinSignedBits(); } assert(isValid() && "invalid cost"); } /// Lose - Set this cost to a losing value. void Cost::Lose() { NumRegs = ~0u; AddRecCost = ~0u; NumIVMuls = ~0u; NumBaseAdds = ~0u; ImmCost = ~0u; SetupCost = ~0u; ScaleCost = ~0u; } /// operator< - Choose the lower cost. 
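/// The comparison is lexicographic: fewer registers wins first, then lower
/// addrec cost, fewer IV multiplies, fewer base adds, lower scale cost,
/// lower immediate cost, and finally lower setup cost.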
bool Cost::operator<(const Cost &Other) const { return std::tie(NumRegs, AddRecCost, NumIVMuls, NumBaseAdds, ScaleCost, ImmCost, SetupCost) < std::tie(Other.NumRegs, Other.AddRecCost, Other.NumIVMuls, Other.NumBaseAdds, Other.ScaleCost, Other.ImmCost, Other.SetupCost); } void Cost::print(raw_ostream &OS) const { OS << NumRegs << " reg" << (NumRegs == 1 ? "" : "s"); if (AddRecCost != 0) OS << ", with addrec cost " << AddRecCost; if (NumIVMuls != 0) OS << ", plus " << NumIVMuls << " IV mul" << (NumIVMuls == 1 ? "" : "s"); if (NumBaseAdds != 0) OS << ", plus " << NumBaseAdds << " base add" << (NumBaseAdds == 1 ? "" : "s"); if (ScaleCost != 0) OS << ", plus " << ScaleCost << " scale cost"; if (ImmCost != 0) OS << ", plus " << ImmCost << " imm cost"; if (SetupCost != 0) OS << ", plus " << SetupCost << " setup cost"; } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void Cost::dump() const { print(errs()); errs() << '\n'; } #endif namespace { /// LSRFixup - An operand value in an instruction which is to be replaced /// with some equivalent, possibly strength-reduced, replacement. struct LSRFixup { /// UserInst - The instruction which will be updated. Instruction *UserInst; /// OperandValToReplace - The operand of the instruction which will /// be replaced. The operand may be used more than once; every instance /// will be replaced. Value *OperandValToReplace; /// PostIncLoops - If this user is to use the post-incremented value of an /// induction variable, this variable is non-null and holds the loop /// associated with the induction variable. PostIncLoopSet PostIncLoops; /// LUIdx - The index of the LSRUse describing the expression which /// this fixup needs, minus an offset (below). size_t LUIdx; /// Offset - A constant offset to be added to the LSRUse expression. /// This allows multiple fixups to share the same LSRUse with different /// offsets, for example in an unrolled loop. int64_t Offset; bool isUseFullyOutsideLoop(const Loop *L) const; LSRFixup(); void print(raw_ostream &OS) const; void dump() const; }; } LSRFixup::LSRFixup() : UserInst(nullptr), OperandValToReplace(nullptr), LUIdx(~size_t(0)), Offset(0) {} /// isUseFullyOutsideLoop - Test whether this fixup always uses its /// value outside of the given loop. bool LSRFixup::isUseFullyOutsideLoop(const Loop *L) const { // PHI nodes use their value in their incoming blocks. if (const PHINode *PN = dyn_cast<PHINode>(UserInst)) { for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) if (PN->getIncomingValue(i) == OperandValToReplace && L->contains(PN->getIncomingBlock(i))) return false; return true; } return !L->contains(UserInst); } void LSRFixup::print(raw_ostream &OS) const { OS << "UserInst="; // Store is common and interesting enough to be worth special-casing. 
if (StoreInst *Store = dyn_cast<StoreInst>(UserInst)) { OS << "store "; Store->getOperand(0)->printAsOperand(OS, /*PrintType=*/false); } else if (UserInst->getType()->isVoidTy()) OS << UserInst->getOpcodeName(); else UserInst->printAsOperand(OS, /*PrintType=*/false); OS << ", OperandValToReplace="; OperandValToReplace->printAsOperand(OS, /*PrintType=*/false); for (const Loop *PIL : PostIncLoops) { OS << ", PostIncLoop="; PIL->getHeader()->printAsOperand(OS, /*PrintType=*/false); } if (LUIdx != ~size_t(0)) OS << ", LUIdx=" << LUIdx; if (Offset != 0) OS << ", Offset=" << Offset; } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void LSRFixup::dump() const { print(errs()); errs() << '\n'; } #endif namespace { /// UniquifierDenseMapInfo - A DenseMapInfo implementation for holding /// DenseMaps and DenseSets of sorted SmallVectors of const SCEV*. struct UniquifierDenseMapInfo { static SmallVector<const SCEV *, 4> getEmptyKey() { SmallVector<const SCEV *, 4> V; V.push_back(reinterpret_cast<const SCEV *>(-1)); return V; } static SmallVector<const SCEV *, 4> getTombstoneKey() { SmallVector<const SCEV *, 4> V; V.push_back(reinterpret_cast<const SCEV *>(-2)); return V; } static unsigned getHashValue(const SmallVector<const SCEV *, 4> &V) { return static_cast<unsigned>(hash_combine_range(V.begin(), V.end())); } static bool isEqual(const SmallVector<const SCEV *, 4> &LHS, const SmallVector<const SCEV *, 4> &RHS) { return LHS == RHS; } }; /// LSRUse - This class holds the state that LSR keeps for each use in /// IVUsers, as well as uses invented by LSR itself. It includes information /// about what kinds of things can be folded into the user, information about /// the user itself, and information about how the use may be satisfied. /// TODO: Represent multiple users of the same expression in common? class LSRUse { DenseSet<SmallVector<const SCEV *, 4>, UniquifierDenseMapInfo> Uniquifier; public: /// KindType - An enum for a kind of use, indicating what types of /// scaled and immediate operands it might support. enum KindType { Basic, ///< A normal use, with no folding. Special, ///< A special case of basic, allowing -1 scales. Address, ///< An address use; folding according to TargetLowering ICmpZero ///< An equality icmp with both operands folded into one. // TODO: Add a generic icmp too? }; typedef PointerIntPair<const SCEV *, 2, KindType> SCEVUseKindPair; KindType Kind; Type *AccessTy; SmallVector<int64_t, 8> Offsets; int64_t MinOffset; int64_t MaxOffset; /// AllFixupsOutsideLoop - This records whether all of the fixups using this /// LSRUse are outside of the loop, in which case some special-case heuristics /// may be used. bool AllFixupsOutsideLoop; /// RigidFormula is set to true to guarantee that this use will be associated /// with a single formula--the one that initially matched. Some SCEV /// expressions cannot be expanded. This allows LSR to consider the registers /// used by those expressions without the need to expand them later after /// changing the formula. bool RigidFormula; /// WidestFixupType - This records the widest use type for any fixup using /// this LSRUse. FindUseWithSimilarFormula can't consider uses with different /// max fixup widths to be equivalent, because the narrower one may be relying /// on the implicit truncation to truncate away bogus bits. Type *WidestFixupType; /// Formulae - A list of ways to build a value that can satisfy this user. 
/// After the list is populated, one of these is selected heuristically and /// used to formulate a replacement for OperandValToReplace in UserInst. SmallVector<Formula, 12> Formulae; /// Regs - The set of register candidates used by all formulae in this LSRUse. SmallPtrSet<const SCEV *, 4> Regs; LSRUse(KindType K, Type *T) : Kind(K), AccessTy(T), MinOffset(INT64_MAX), MaxOffset(INT64_MIN), AllFixupsOutsideLoop(true), RigidFormula(false), WidestFixupType(nullptr) {} bool HasFormulaWithSameRegs(const Formula &F) const; bool InsertFormula(const Formula &F); void DeleteFormula(Formula &F); void RecomputeRegs(size_t LUIdx, RegUseTracker &Reguses); void print(raw_ostream &OS) const; void dump() const; }; } /// HasFormula - Test whether this use as a formula which has the same /// registers as the given formula. bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const { SmallVector<const SCEV *, 4> Key = F.BaseRegs; if (F.ScaledReg) Key.push_back(F.ScaledReg); // Unstable sort by host order ok, because this is only used for uniquifying. std::sort(Key.begin(), Key.end()); return Uniquifier.count(Key); } /// InsertFormula - If the given formula has not yet been inserted, add it to /// the list, and return true. Return false otherwise. /// The formula must be in canonical form. bool LSRUse::InsertFormula(const Formula &F) { assert(F.isCanonical() && "Invalid canonical representation"); if (!Formulae.empty() && RigidFormula) return false; SmallVector<const SCEV *, 4> Key = F.BaseRegs; if (F.ScaledReg) Key.push_back(F.ScaledReg); // Unstable sort by host order ok, because this is only used for uniquifying. std::sort(Key.begin(), Key.end()); if (!Uniquifier.insert(Key).second) return false; // Using a register to hold the value of 0 is not profitable. assert((!F.ScaledReg || !F.ScaledReg->isZero()) && "Zero allocated in a scaled register!"); #ifndef NDEBUG for (const SCEV *BaseReg : F.BaseRegs) assert(!BaseReg->isZero() && "Zero allocated in a base register!"); #endif // Add the formula to the list. Formulae.push_back(F); // Record registers now being used by this use. Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end()); if (F.ScaledReg) Regs.insert(F.ScaledReg); return true; } /// DeleteFormula - Remove the given formula from this use's list. void LSRUse::DeleteFormula(Formula &F) { if (&F != &Formulae.back()) std::swap(F, Formulae.back()); Formulae.pop_back(); } /// RecomputeRegs - Recompute the Regs field, and update RegUses. void LSRUse::RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses) { // Now that we've filtered out some formulae, recompute the Regs set. SmallPtrSet<const SCEV *, 4> OldRegs = std::move(Regs); Regs.clear(); for (const Formula &F : Formulae) { if (F.ScaledReg) Regs.insert(F.ScaledReg); Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end()); } // Update the RegTracker. 
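  // Drop this use's claim on any register that no surviving formula
  // references, so later pruning decisions see accurate use counts.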
for (const SCEV *S : OldRegs) if (!Regs.count(S)) RegUses.DropRegister(S, LUIdx); } void LSRUse::print(raw_ostream &OS) const { OS << "LSR Use: Kind="; switch (Kind) { case Basic: OS << "Basic"; break; case Special: OS << "Special"; break; case ICmpZero: OS << "ICmpZero"; break; case Address: OS << "Address of "; if (AccessTy->isPointerTy()) OS << "pointer"; // the full pointer type could be really verbose else OS << *AccessTy; } OS << ", Offsets={"; bool NeedComma = false; for (int64_t O : Offsets) { if (NeedComma) OS << ','; OS << O; NeedComma = true; } OS << '}'; if (AllFixupsOutsideLoop) OS << ", all-fixups-outside-loop"; if (WidestFixupType) OS << ", widest fixup type: " << *WidestFixupType; } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void LSRUse::dump() const { print(errs()); errs() << '\n'; } #endif static bool isAMCompletelyFolded(const TargetTransformInfo &TTI, LSRUse::KindType Kind, Type *AccessTy, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale) { switch (Kind) { case LSRUse::Address: return TTI.isLegalAddressingMode(AccessTy, BaseGV, BaseOffset, HasBaseReg, Scale); case LSRUse::ICmpZero: // There's not even a target hook for querying whether it would be legal to // fold a GV into an ICmp. if (BaseGV) return false; // ICmp only has two operands; don't allow more than two non-trivial parts. if (Scale != 0 && HasBaseReg && BaseOffset != 0) return false; // ICmp only supports no scale or a -1 scale, as we can "fold" a -1 scale by // putting the scaled register in the other operand of the icmp. if (Scale != 0 && Scale != -1) return false; // If we have low-level target information, ask the target if it can fold an // integer immediate on an icmp. if (BaseOffset != 0) { // We have one of: // ICmpZero BaseReg + BaseOffset => ICmp BaseReg, -BaseOffset // ICmpZero -1*ScaleReg + BaseOffset => ICmp ScaleReg, BaseOffset // Offs is the ICmp immediate. if (Scale == 0) // The cast does the right thing with INT64_MIN. BaseOffset = -(uint64_t)BaseOffset; return TTI.isLegalICmpImmediate(BaseOffset); } // ICmpZero BaseReg + -1*ScaleReg => ICmp BaseReg, ScaleReg return true; case LSRUse::Basic: // Only handle single-register values. return !BaseGV && Scale == 0 && BaseOffset == 0; case LSRUse::Special: // Special case Basic to handle -1 scales. return !BaseGV && (Scale == 0 || Scale == -1) && BaseOffset == 0; } llvm_unreachable("Invalid LSRUse Kind!"); } static bool isAMCompletelyFolded(const TargetTransformInfo &TTI, int64_t MinOffset, int64_t MaxOffset, LSRUse::KindType Kind, Type *AccessTy, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale) { // Check for overflow. if (((int64_t)((uint64_t)BaseOffset + MinOffset) > BaseOffset) != (MinOffset > 0)) return false; MinOffset = (uint64_t)BaseOffset + MinOffset; if (((int64_t)((uint64_t)BaseOffset + MaxOffset) > BaseOffset) != (MaxOffset > 0)) return false; MaxOffset = (uint64_t)BaseOffset + MaxOffset; return isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, MinOffset, HasBaseReg, Scale) && isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, MaxOffset, HasBaseReg, Scale); } static bool isAMCompletelyFolded(const TargetTransformInfo &TTI, int64_t MinOffset, int64_t MaxOffset, LSRUse::KindType Kind, Type *AccessTy, const Formula &F) { // For the purpose of isAMCompletelyFolded either having a canonical formula // or a scale not equal to zero is correct. // Problems may arise from non canonical formulae having a scale == 0. // Strictly speaking it would best to just rely on canonical formulae. 
// However, when we generate the scaled formulae, we first check that the // scaling factor is profitable before computing the actual ScaledReg for // compile time sake. assert((F.isCanonical() || F.Scale != 0)); return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, F.BaseGV, F.BaseOffset, F.HasBaseReg, F.Scale); } /// isLegalUse - Test whether we know how to expand the current formula. static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset, int64_t MaxOffset, LSRUse::KindType Kind, Type *AccessTy, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale) { // We know how to expand completely foldable formulae. return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV, BaseOffset, HasBaseReg, Scale) || // Or formulae that use a base register produced by a sum of base // registers. (Scale == 1 && isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV, BaseOffset, true, 0)); } static bool isLegalUse(const TargetTransformInfo &TTI, int64_t MinOffset, int64_t MaxOffset, LSRUse::KindType Kind, Type *AccessTy, const Formula &F) { return isLegalUse(TTI, MinOffset, MaxOffset, Kind, AccessTy, F.BaseGV, F.BaseOffset, F.HasBaseReg, F.Scale); } static bool isAMCompletelyFolded(const TargetTransformInfo &TTI, const LSRUse &LU, const Formula &F) { return isAMCompletelyFolded(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F.BaseGV, F.BaseOffset, F.HasBaseReg, F.Scale); } static unsigned getScalingFactorCost(const TargetTransformInfo &TTI, const LSRUse &LU, const Formula &F) { if (!F.Scale) return 0; // If the use is not completely folded in that instruction, we will have to // pay an extra cost only for scale != 1. if (!isAMCompletelyFolded(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F)) return F.Scale != 1; switch (LU.Kind) { case LSRUse::Address: { // Check the scaling factor cost with both the min and max offsets. int ScaleCostMinOffset = TTI.getScalingFactorCost(LU.AccessTy, F.BaseGV, F.BaseOffset + LU.MinOffset, F.HasBaseReg, F.Scale); int ScaleCostMaxOffset = TTI.getScalingFactorCost(LU.AccessTy, F.BaseGV, F.BaseOffset + LU.MaxOffset, F.HasBaseReg, F.Scale); assert(ScaleCostMinOffset >= 0 && ScaleCostMaxOffset >= 0 && "Legal addressing mode has an illegal cost!"); return std::max(ScaleCostMinOffset, ScaleCostMaxOffset); } case LSRUse::ICmpZero: case LSRUse::Basic: case LSRUse::Special: // The use is completely folded, i.e., everything is folded into the // instruction. return 0; } llvm_unreachable("Invalid LSRUse Kind!"); } static bool isAlwaysFoldable(const TargetTransformInfo &TTI, LSRUse::KindType Kind, Type *AccessTy, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg) { // Fast-path: zero is always foldable. if (BaseOffset == 0 && !BaseGV) return true; // Conservatively, create an address with an immediate and a // base and a scale. int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1; // Canonicalize a scale of 1 to a base register if the formula doesn't // already have a base register. if (!HasBaseReg && Scale == 1) { Scale = 0; HasBaseReg = true; } return isAMCompletelyFolded(TTI, Kind, AccessTy, BaseGV, BaseOffset, HasBaseReg, Scale); } static bool isAlwaysFoldable(const TargetTransformInfo &TTI, ScalarEvolution &SE, int64_t MinOffset, int64_t MaxOffset, LSRUse::KindType Kind, Type *AccessTy, const SCEV *S, bool HasBaseReg) { // Fast-path: zero is always foldable. if (S->isZero()) return true; // Conservatively, create an address with an immediate and a // base and a scale. 
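  // Peel a constant offset and, if present, a symbol off of S; anything
  // left over would need its own register, so it is not always-foldable.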
int64_t BaseOffset = ExtractImmediate(S, SE); GlobalValue *BaseGV = ExtractSymbol(S, SE); // If there's anything else involved, it's not foldable. if (!S->isZero()) return false; // Fast-path: zero is always foldable. if (BaseOffset == 0 && !BaseGV) return true; // Conservatively, create an address with an immediate and a // base and a scale. int64_t Scale = Kind == LSRUse::ICmpZero ? -1 : 1; return isAMCompletelyFolded(TTI, MinOffset, MaxOffset, Kind, AccessTy, BaseGV, BaseOffset, HasBaseReg, Scale); } namespace { /// IVInc - An individual increment in a Chain of IV increments. /// Relate an IV user to an expression that computes the IV it uses from the IV /// used by the previous link in the Chain. /// /// For the head of a chain, IncExpr holds the absolute SCEV expression for the /// original IVOperand. The head of the chain's IVOperand is only valid during /// chain collection, before LSR replaces IV users. During chain generation, /// IncExpr can be used to find the new IVOperand that computes the same /// expression. struct IVInc { Instruction *UserInst; Value* IVOperand; const SCEV *IncExpr; IVInc(Instruction *U, Value *O, const SCEV *E): UserInst(U), IVOperand(O), IncExpr(E) {} }; // IVChain - The list of IV increments in program order. // We typically add the head of a chain without finding subsequent links. struct IVChain { SmallVector<IVInc,1> Incs; const SCEV *ExprBase; IVChain() : ExprBase(nullptr) {} IVChain(const IVInc &Head, const SCEV *Base) : Incs(1, Head), ExprBase(Base) {} typedef SmallVectorImpl<IVInc>::const_iterator const_iterator; // begin - return the first increment in the chain. const_iterator begin() const { assert(!Incs.empty()); return std::next(Incs.begin()); } const_iterator end() const { return Incs.end(); } // hasIncs - Returns true if this chain contains any increments. bool hasIncs() const { return Incs.size() >= 2; } // add - Add an IVInc to the end of this chain. void add(const IVInc &X) { Incs.push_back(X); } // tailUserInst - Returns the last UserInst in the chain. Instruction *tailUserInst() const { return Incs.back().UserInst; } // isProfitableIncrement - Returns true if IncExpr can be profitably added to // this chain. bool isProfitableIncrement(const SCEV *OperExpr, const SCEV *IncExpr, ScalarEvolution&); }; /// ChainUsers - Helper for CollectChains to track multiple IV increment uses. /// Distinguish between FarUsers that definitely cross IV increments and /// NearUsers that may be used between IV increments. struct ChainUsers { SmallPtrSet<Instruction*, 4> FarUsers; SmallPtrSet<Instruction*, 4> NearUsers; }; /// LSRInstance - This class holds state for the main loop strength reduction /// logic. class LSRInstance { IVUsers &IU; ScalarEvolution &SE; DominatorTree &DT; LoopInfo &LI; const TargetTransformInfo &TTI; Loop *const L; bool Changed; /// IVIncInsertPos - This is the insert position that the current loop's /// induction variable increment should be placed. In simple loops, this is /// the latch block's terminator. But in more complicated cases, this is a /// position which will dominate all the in-loop post-increment users. Instruction *IVIncInsertPos; /// Factors - Interesting factors between use strides. SmallSetVector<int64_t, 8> Factors; /// Types - Interesting use types, to facilitate truncation reuse. SmallSetVector<Type *, 4> Types; /// Fixups - The list of operands which are to be replaced. SmallVector<LSRFixup, 16> Fixups; /// Uses - The list of interesting uses. 
SmallVector<LSRUse, 16> Uses; /// RegUses - Track which uses use which register candidates. RegUseTracker RegUses; // Limit the number of chains to avoid quadratic behavior. We don't expect to // have more than a few IV increment chains in a loop. Missing a Chain falls // back to normal LSR behavior for those uses. static const unsigned MaxChains = 8; /// IVChainVec - IV users can form a chain of IV increments. SmallVector<IVChain, MaxChains> IVChainVec; /// IVIncSet - IV users that belong to profitable IVChains. SmallPtrSet<Use*, MaxChains> IVIncSet; void OptimizeShadowIV(); bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse); ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse); void OptimizeLoopTermCond(); void ChainInstruction(Instruction *UserInst, Instruction *IVOper, SmallVectorImpl<ChainUsers> &ChainUsersVec); void FinalizeChain(IVChain &Chain); void CollectChains(); void GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter, SmallVectorImpl<WeakTrackingVH> &DeadInsts); void CollectInterestingTypesAndFactors(); void CollectFixupsAndInitialFormulae(); LSRFixup &getNewFixup() { Fixups.push_back(LSRFixup()); return Fixups.back(); } // Support for sharing of LSRUses between LSRFixups. typedef DenseMap<LSRUse::SCEVUseKindPair, size_t> UseMapTy; UseMapTy UseMap; bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg, LSRUse::KindType Kind, Type *AccessTy); std::pair<size_t, int64_t> getUse(const SCEV *&Expr, LSRUse::KindType Kind, Type *AccessTy); void DeleteUse(LSRUse &LU, size_t LUIdx); LSRUse *FindUseWithSimilarFormula(const Formula &F, const LSRUse &OrigLU); void InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx); void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx); void CountRegisters(const Formula &F, size_t LUIdx); bool InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F); void CollectLoopInvariantFixupsAndFormulae(); void GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base, unsigned Depth = 0); void GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx, const Formula &Base, unsigned Depth, size_t Idx, bool IsScaledReg = false); void GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base); void GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx, const Formula &Base, size_t Idx, bool IsScaledReg = false); void GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base); void GenerateConstantOffsetsImpl(LSRUse &LU, unsigned LUIdx, const Formula &Base, const SmallVectorImpl<int64_t> &Worklist, size_t Idx, bool IsScaledReg = false); void GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base); void GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base); void GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base); void GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base); void GenerateCrossUseConstantOffsets(); void GenerateAllReuseFormulae(); void FilterOutUndesirableDedicatedRegisters(); size_t EstimateSearchSpaceComplexity() const; void NarrowSearchSpaceByDetectingSupersets(); void NarrowSearchSpaceByCollapsingUnrolledCode(); void NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(); void NarrowSearchSpaceByPickingWinnerRegs(); void NarrowSearchSpaceUsingHeuristics(); void SolveRecurse(SmallVectorImpl<const Formula *> &Solution, Cost &SolutionCost, SmallVectorImpl<const Formula *> &Workspace, const Cost &CurCost, const SmallPtrSet<const SCEV *, 16> &CurRegs, DenseSet<const SCEV *> &VisitedRegs) const; void Solve(SmallVectorImpl<const 
Formula *> &Solution) const; BasicBlock::iterator HoistInsertPosition(BasicBlock::iterator IP, const SmallVectorImpl<Instruction *> &Inputs) const; BasicBlock::iterator AdjustInsertPositionForExpand(BasicBlock::iterator IP, const LSRFixup &LF, const LSRUse &LU, SCEVExpander &Rewriter) const; Value *Expand(const LSRFixup &LF, const Formula &F, BasicBlock::iterator IP, SCEVExpander &Rewriter, SmallVectorImpl<WeakTrackingVH> &DeadInsts) const; void RewriteForPHI(PHINode *PN, const LSRFixup &LF, const Formula &F, SCEVExpander &Rewriter, SmallVectorImpl<WeakTrackingVH> &DeadInsts, Pass *P) const; void Rewrite(const LSRFixup &LF, const Formula &F, SCEVExpander &Rewriter, SmallVectorImpl<WeakTrackingVH> &DeadInsts, Pass *P) const; void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution, Pass *P); public: LSRInstance(Loop *L, Pass *P); bool getChanged() const { return Changed; } void print_factors_and_types(raw_ostream &OS) const; void print_fixups(raw_ostream &OS) const; void print_uses(raw_ostream &OS) const; void print(raw_ostream &OS) const; void dump() const; }; } /// OptimizeShadowIV - If IV is used in a int-to-float cast /// inside the loop then try to eliminate the cast operation. void LSRInstance::OptimizeShadowIV() { const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L); if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) return; for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; /* empty */) { IVUsers::const_iterator CandidateUI = UI; ++UI; Instruction *ShadowUse = CandidateUI->getUser(); Type *DestTy = nullptr; bool IsSigned = false; /* If shadow use is a int->float cast then insert a second IV to eliminate this cast. for (unsigned i = 0; i < n; ++i) foo((double)i); is transformed into double d = 0.0; for (unsigned i = 0; i < n; ++i, ++d) foo(d); */ if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser())) { IsSigned = false; DestTy = UCast->getDestTy(); } else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser())) { IsSigned = true; DestTy = SCast->getDestTy(); } if (!DestTy) continue; // If target does not support DestTy natively then do not apply // this transformation. if (!TTI.isTypeLegal(DestTy)) continue; PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0)); if (!PH) continue; if (PH->getNumIncomingValues() != 2) continue; Type *SrcTy = PH->getType(); int Mantissa = DestTy->getFPMantissaWidth(); if (Mantissa == -1) continue; if ((int)SE.getTypeSizeInBits(SrcTy) > Mantissa) continue; unsigned Entry, Latch; if (PH->getIncomingBlock(0) == L->getLoopPreheader()) { Entry = 0; Latch = 1; } else { Entry = 1; Latch = 0; } ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry)); if (!Init) continue; Constant *NewInit = ConstantFP::get(DestTy, IsSigned ? (double)Init->getSExtValue() : (double)Init->getZExtValue()); BinaryOperator *Incr = dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch)); if (!Incr) continue; if (Incr->getOpcode() != Instruction::Add && Incr->getOpcode() != Instruction::Sub) continue; /* Initialize new IV, double d = 0.0 in above example. */ ConstantInt *C = nullptr; if (Incr->getOperand(0) == PH) C = dyn_cast<ConstantInt>(Incr->getOperand(1)); else if (Incr->getOperand(1) == PH) C = dyn_cast<ConstantInt>(Incr->getOperand(0)); else continue; if (!C) continue; // Ignore negative constants, as the code below doesn't handle them // correctly. TODO: Remove this restriction. if (!C->getValue().isStrictlyPositive()) continue; /* Add new PHINode. 
*/ PHINode *NewPH = PHINode::Create(DestTy, 2, "IV.S.", PH); /* create new increment. '++d' in above example. */ Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue()); BinaryOperator *NewIncr = BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ? Instruction::FAdd : Instruction::FSub, NewPH, CFP, "IV.S.next.", Incr); NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry)); NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch)); /* Remove cast operation */ ShadowUse->replaceAllUsesWith(NewPH); ShadowUse->eraseFromParent(); Changed = true; break; } } /// FindIVUserForCond - If Cond has an operand that is an expression of an IV, /// set the IV user and stride information and return true, otherwise return /// false. bool LSRInstance::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse) { for (IVStrideUse &U : IU) if (U.getUser() == Cond) { // NOTE: we could handle setcc instructions with multiple uses here, but // InstCombine does it as well for simple uses, it's not clear that it // occurs enough in real life to handle. CondUse = &U; return true; } return false; } /// OptimizeMax - Rewrite the loop's terminating condition if it uses /// a max computation. /// /// This is a narrow solution to a specific, but acute, problem. For loops /// like this: /// /// i = 0; /// do { /// p[i] = 0.0; /// } while (++i < n); /// /// the trip count isn't just 'n', because 'n' might not be positive. And /// unfortunately this can come up even for loops where the user didn't use /// a C do-while loop. For example, seemingly well-behaved top-test loops /// will commonly be lowered like this: // /// if (n > 0) { /// i = 0; /// do { /// p[i] = 0.0; /// } while (++i < n); /// } /// /// and then it's possible for subsequent optimization to obscure the if /// test in such a way that indvars can't find it. /// /// When indvars can't find the if test in loops like this, it creates a /// max expression, which allows it to give the loop a canonical /// induction variable: /// /// i = 0; /// max = n < 1 ? 1 : n; /// do { /// p[i] = 0.0; /// } while (++i != max); /// /// Canonical induction variables are necessary because the loop passes /// are designed around them. The most obvious example of this is the /// LoopInfo analysis, which doesn't remember trip count values. It /// expects to be able to rediscover the trip count each time it is /// needed, and it does this using a simple analysis that only succeeds if /// the loop has a canonical induction variable. /// /// However, when it comes time to generate code, the maximum operation /// can be quite costly, especially if it's inside of an outer loop. /// /// This function solves this problem by detecting this type of loop and /// rewriting their conditions from ICMP_NE back to ICMP_SLT, and deleting /// the instructions for the maximum computation. /// ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) { // Check that the loop matches the pattern we're looking for. if (Cond->getPredicate() != CmpInst::ICMP_EQ && Cond->getPredicate() != CmpInst::ICMP_NE) return Cond; SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1)); if (!Sel || !Sel->hasOneUse()) return Cond; const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L); if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) return Cond; const SCEV *One = SE.getConstant(BackedgeTakenCount->getType(), 1); // Add one to the backedge-taken count to get the trip count. 
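  // (For the canonical loop in the comment above, the backedge-taken count is
  // smax(n, 1) - 1, so IterationCount becomes smax(n, 1), which is exactly
  // what the select feeding the exit compare computes.)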
const SCEV *IterationCount = SE.getAddExpr(One, BackedgeTakenCount); if (IterationCount != SE.getSCEV(Sel)) return Cond; // Check for a max calculation that matches the pattern. There's no check // for ICMP_ULE here because the comparison would be with zero, which // isn't interesting. CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE; const SCEVNAryExpr *Max = nullptr; if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(BackedgeTakenCount)) { Pred = ICmpInst::ICMP_SLE; Max = S; } else if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(IterationCount)) { Pred = ICmpInst::ICMP_SLT; Max = S; } else if (const SCEVUMaxExpr *U = dyn_cast<SCEVUMaxExpr>(IterationCount)) { Pred = ICmpInst::ICMP_ULT; Max = U; } else { // No match; bail. return Cond; } // To handle a max with more than two operands, this optimization would // require additional checking and setup. if (Max->getNumOperands() != 2) return Cond; const SCEV *MaxLHS = Max->getOperand(0); const SCEV *MaxRHS = Max->getOperand(1); // ScalarEvolution canonicalizes constants to the left. For < and >, look // for a comparison with 1. For <= and >=, a comparison with zero. if (!MaxLHS || (ICmpInst::isTrueWhenEqual(Pred) ? !MaxLHS->isZero() : (MaxLHS != One))) return Cond; // Check the relevant induction variable for conformance to // the pattern. const SCEV *IV = SE.getSCEV(Cond->getOperand(0)); const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV); if (!AR || !AR->isAffine() || AR->getStart() != One || AR->getStepRecurrence(SE) != One) return Cond; assert(AR->getLoop() == L && "Loop condition operand is an addrec in a different loop!"); // Check the right operand of the select, and remember it, as it will // be used in the new comparison instruction. Value *NewRHS = nullptr; if (ICmpInst::isTrueWhenEqual(Pred)) { // Look for n+1, and grab n. if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(1))) if (ConstantInt *BO1 = dyn_cast<ConstantInt>(BO->getOperand(1))) if (BO1->isOne() && SE.getSCEV(BO->getOperand(0)) == MaxRHS) NewRHS = BO->getOperand(0); if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(2))) if (ConstantInt *BO1 = dyn_cast<ConstantInt>(BO->getOperand(1))) if (BO1->isOne() && SE.getSCEV(BO->getOperand(0)) == MaxRHS) NewRHS = BO->getOperand(0); if (!NewRHS) return Cond; } else if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS) NewRHS = Sel->getOperand(1); else if (SE.getSCEV(Sel->getOperand(2)) == MaxRHS) NewRHS = Sel->getOperand(2); else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(MaxRHS)) NewRHS = SU->getValue(); else // Max doesn't match expected pattern. return Cond; // Determine the new comparison opcode. It may be signed or unsigned, // and the original comparison may be either equality or inequality. if (Cond->getPredicate() == CmpInst::ICMP_EQ) Pred = CmpInst::getInversePredicate(Pred); // Ok, everything looks ok to change the condition into an SLT or SGE and // delete the max calculation. ICmpInst *NewCond = new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp"); // Delete the max calculation instructions. Cond->replaceAllUsesWith(NewCond); CondUse->setUser(NewCond); Instruction *Cmp = cast<Instruction>(Sel->getOperand(0)); Cond->eraseFromParent(); Sel->eraseFromParent(); if (Cmp->use_empty()) Cmp->eraseFromParent(); return NewCond; } /// OptimizeLoopTermCond - Change loop terminating condition to use the /// postinc iv when possible. 
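///
/// As a minimal sketch (IR names are illustrative only), a latch such as
///   %i.next = add i64 %i, 1
///   %exit = icmp eq i64 %i, %n
///   br i1 %exit, label %out, label %loop
/// keeps both %i and %i.next live across the compare; testing the
/// post-incremented value instead lets the two live ranges be coalesced into
/// one register.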
void LSRInstance::OptimizeLoopTermCond() { SmallPtrSet<Instruction *, 4> PostIncs; BasicBlock *LatchBlock = L->getLoopLatch(); SmallVector<BasicBlock*, 8> ExitingBlocks; L->getExitingBlocks(ExitingBlocks); for (BasicBlock *ExitingBlock : ExitingBlocks) { // Get the terminating condition for the loop if possible. If we // can, we want to change it to use a post-incremented version of its // induction variable, to allow coalescing the live ranges for the IV into // one register value. BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator()); if (!TermBr) continue; // FIXME: Overly conservative, termination condition could be an 'or' etc.. if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition())) continue; // Search IVUsesByStride to find Cond's IVUse if there is one. IVStrideUse *CondUse = nullptr; ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition()); if (!FindIVUserForCond(Cond, CondUse)) continue; // If the trip count is computed in terms of a max (due to ScalarEvolution // being unable to find a sufficient guard, for example), change the loop // comparison to use SLT or ULT instead of NE. // One consequence of doing this now is that it disrupts the count-down // optimization. That's not always a bad thing though, because in such // cases it may still be worthwhile to avoid a max. Cond = OptimizeMax(Cond, CondUse); // If this exiting block dominates the latch block, it may also use // the post-inc value if it won't be shared with other uses. // Check for dominance. if (!DT.dominates(ExitingBlock, LatchBlock)) continue; // Conservatively avoid trying to use the post-inc value in non-latch // exits if there may be pre-inc users in intervening blocks. if (LatchBlock != ExitingBlock) for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) // Test if the use is reachable from the exiting block. This dominator // query is a conservative approximation of reachability. if (&*UI != CondUse && !DT.properlyDominates(UI->getUser()->getParent(), ExitingBlock)) { // Conservatively assume there may be reuse if the quotient of their // strides could be a legal scale. const SCEV *A = IU.getStride(*CondUse, L); const SCEV *B = IU.getStride(*UI, L); if (!A || !B) continue; if (SE.getTypeSizeInBits(A->getType()) != SE.getTypeSizeInBits(B->getType())) { if (SE.getTypeSizeInBits(A->getType()) > SE.getTypeSizeInBits(B->getType())) B = SE.getSignExtendExpr(B, A->getType()); else A = SE.getSignExtendExpr(A, B->getType()); } if (const SCEVConstant *D = dyn_cast_or_null<SCEVConstant>(getExactSDiv(B, A, SE))) { const ConstantInt *C = D->getValue(); // Stride of one or negative one can have reuse with non-addresses. if (C->isOne() || C->isAllOnesValue()) goto decline_post_inc; // Avoid weird situations. if (C->getValue().getMinSignedBits() >= 64 || C->getValue().isMinSignedValue()) goto decline_post_inc; // Check for possible scaled-address reuse. Type *AccessTy = getAccessType(UI->getUser()); int64_t Scale = C->getSExtValue(); if (TTI.isLegalAddressingMode(AccessTy, /*BaseGV=*/ nullptr, /*BaseOffset=*/ 0, /*HasBaseReg=*/ false, Scale)) goto decline_post_inc; Scale = -Scale; if (TTI.isLegalAddressingMode(AccessTy, /*BaseGV=*/ nullptr, /*BaseOffset=*/ 0, /*HasBaseReg=*/ false, Scale)) goto decline_post_inc; } } DEBUG(dbgs() << " Change loop exiting icmp to use postinc iv: " << *Cond << '\n'); // It's possible for the setcc instruction to be anywhere in the loop, and // possible for it to have multiple users. 
If it is not immediately before // the exiting block branch, move it. if (&*++BasicBlock::iterator(Cond) != TermBr) { if (Cond->hasOneUse()) { Cond->moveBefore(TermBr); } else { // Clone the terminating condition and insert into the loopend. ICmpInst *OldCond = Cond; Cond = cast<ICmpInst>(Cond->clone()); Cond->setName(L->getHeader()->getName() + ".termcond"); ExitingBlock->getInstList().insert(TermBr, Cond); // Clone the IVUse, as the old use still exists! CondUse = &IU.AddUser(Cond, CondUse->getOperandValToReplace()); TermBr->replaceUsesOfWith(OldCond, Cond); } } // If we get to here, we know that we can transform the setcc instruction to // use the post-incremented version of the IV, allowing us to coalesce the // live ranges for the IV correctly. CondUse->transformToPostInc(L); Changed = true; PostIncs.insert(Cond); decline_post_inc:; } // Determine an insertion point for the loop induction variable increment. It // must dominate all the post-inc comparisons we just set up, and it must // dominate the loop latch edge. IVIncInsertPos = L->getLoopLatch()->getTerminator(); for (Instruction *Inst : PostIncs) { BasicBlock *BB = DT.findNearestCommonDominator(IVIncInsertPos->getParent(), Inst->getParent()); if (BB == Inst->getParent()) IVIncInsertPos = Inst; else if (BB != IVIncInsertPos->getParent()) IVIncInsertPos = BB->getTerminator(); } } /// reconcileNewOffset - Determine if the given use can accommodate a fixup /// at the given offset and other details. If so, update the use and /// return true. bool LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg, LSRUse::KindType Kind, Type *AccessTy) { int64_t NewMinOffset = LU.MinOffset; int64_t NewMaxOffset = LU.MaxOffset; Type *NewAccessTy = AccessTy; // Check for a mismatched kind. It's tempting to collapse mismatched kinds to // something conservative, however this can pessimize in the case that one of // the uses will have all its uses outside the loop, for example. if (LU.Kind != Kind) return false; // Check for a mismatched access type, and fall back conservatively as needed. // TODO: Be less conservative when the type is similar and can use the same // addressing modes. if (Kind == LSRUse::Address && AccessTy != LU.AccessTy) NewAccessTy = Type::getVoidTy(AccessTy->getContext()); // Conservatively assume HasBaseReg is true for now. if (NewOffset < LU.MinOffset) { if (!isAlwaysFoldable(TTI, Kind, NewAccessTy, /*BaseGV=*/nullptr, LU.MaxOffset - NewOffset, HasBaseReg)) return false; NewMinOffset = NewOffset; } else if (NewOffset > LU.MaxOffset) { if (!isAlwaysFoldable(TTI, Kind, NewAccessTy, /*BaseGV=*/nullptr, NewOffset - LU.MinOffset, HasBaseReg)) return false; NewMaxOffset = NewOffset; } // Update the use. LU.MinOffset = NewMinOffset; LU.MaxOffset = NewMaxOffset; LU.AccessTy = NewAccessTy; if (NewOffset != LU.Offsets.back()) LU.Offsets.push_back(NewOffset); return true; } /// getUse - Return an LSRUse index and an offset value for a fixup which /// needs the given expression, with the given kind and optional access type. /// Either reuse an existing use or create a new one, as needed. std::pair<size_t, int64_t> LSRInstance::getUse(const SCEV *&Expr, LSRUse::KindType Kind, Type *AccessTy) { const SCEV *Copy = Expr; int64_t Offset = ExtractImmediate(Expr, SE); // Basic uses can't accept any offset, for example. 
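  // If the target cannot fold this offset unconditionally for this kind of
  // use, key the use on the full expression instead: e.g. a Basic use of
  // {16,+,4} keeps the 16 inside Expr rather than splitting it out as Offset.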
if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ nullptr, Offset, /*HasBaseReg=*/ true)) { Expr = Copy; Offset = 0; } std::pair<UseMapTy::iterator, bool> P = UseMap.insert(std::make_pair(LSRUse::SCEVUseKindPair(Expr, Kind), 0)); if (!P.second) { // A use already existed with this base. size_t LUIdx = P.first->second; LSRUse &LU = Uses[LUIdx]; if (reconcileNewOffset(LU, Offset, /*HasBaseReg=*/true, Kind, AccessTy)) // Reuse this use. return std::make_pair(LUIdx, Offset); } // Create a new use. size_t LUIdx = Uses.size(); P.first->second = LUIdx; Uses.push_back(LSRUse(Kind, AccessTy)); LSRUse &LU = Uses[LUIdx]; // We don't need to track redundant offsets, but we don't need to go out // of our way here to avoid them. if (LU.Offsets.empty() || Offset != LU.Offsets.back()) LU.Offsets.push_back(Offset); LU.MinOffset = Offset; LU.MaxOffset = Offset; return std::make_pair(LUIdx, Offset); } /// DeleteUse - Delete the given use from the Uses list. void LSRInstance::DeleteUse(LSRUse &LU, size_t LUIdx) { if (&LU != &Uses.back()) std::swap(LU, Uses.back()); Uses.pop_back(); // Update RegUses. RegUses.SwapAndDropUse(LUIdx, Uses.size()); } /// FindUseWithSimilarFormula - Look for a use distinct from OrigLU which has /// a formula that has the same registers as the given formula. LSRUse * LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF, const LSRUse &OrigLU) { // Search all uses for the formula. This could be more clever. for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { LSRUse &LU = Uses[LUIdx]; // Check whether this use is close enough to OrigLU, to see whether it's // worthwhile looking through its formulae. // Ignore ICmpZero uses because they may contain formulae generated by // GenerateICmpZeroScales, in which case adding fixup offsets may // be invalid. if (&LU != &OrigLU && LU.Kind != LSRUse::ICmpZero && LU.Kind == OrigLU.Kind && OrigLU.AccessTy == LU.AccessTy && LU.WidestFixupType == OrigLU.WidestFixupType && LU.HasFormulaWithSameRegs(OrigF)) { // Scan through this use's formulae. for (const Formula &F : LU.Formulae) { // Check to see if this formula has the same registers and symbols // as OrigF. if (F.BaseRegs == OrigF.BaseRegs && F.ScaledReg == OrigF.ScaledReg && F.BaseGV == OrigF.BaseGV && F.Scale == OrigF.Scale && F.UnfoldedOffset == OrigF.UnfoldedOffset) { if (F.BaseOffset == 0) return &LU; // This is the formula where all the registers and symbols matched; // there aren't going to be any others. Since we declined it, we // can skip the rest of the formulae and proceed to the next LSRUse. break; } } } } // Nothing looked good. return nullptr; } void LSRInstance::CollectInterestingTypesAndFactors() { SmallSetVector<const SCEV *, 4> Strides; // Collect interesting types and strides. SmallVector<const SCEV *, 4> Worklist; for (const IVStrideUse &U : IU) { const SCEV *Expr = IU.getExpr(U); // Collect interesting types. Types.insert(SE.getEffectiveSCEVType(Expr->getType())); // Add strides for mentioned loops. Worklist.push_back(Expr); do { const SCEV *S = Worklist.pop_back_val(); if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { if (AR->getLoop() == L) Strides.insert(AR->getStepRecurrence(SE)); Worklist.push_back(AR->getStart()); } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { Worklist.append(Add->op_begin(), Add->op_end()); } } while (!Worklist.empty()); } // Compute interesting factors from the set of interesting strides.
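  // For example (illustrative values only): strides of 4 and 8 contribute the
  // factor 2, since one IV can then be rewritten as a scaled multiple of the
  // other; factors that need more than 64 signed bits are ignored below.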
for (SmallSetVector<const SCEV *, 4>::const_iterator I = Strides.begin(), E = Strides.end(); I != E; ++I) for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter = std::next(I); NewStrideIter != E; ++NewStrideIter) { const SCEV *OldStride = *I; const SCEV *NewStride = *NewStrideIter; if (SE.getTypeSizeInBits(OldStride->getType()) != SE.getTypeSizeInBits(NewStride->getType())) { if (SE.getTypeSizeInBits(OldStride->getType()) > SE.getTypeSizeInBits(NewStride->getType())) NewStride = SE.getSignExtendExpr(NewStride, OldStride->getType()); else OldStride = SE.getSignExtendExpr(OldStride, NewStride->getType()); } if (const SCEVConstant *Factor = dyn_cast_or_null<SCEVConstant>(getExactSDiv(NewStride, OldStride, SE, true))) { if (Factor->getValue()->getValue().getMinSignedBits() <= 64) Factors.insert(Factor->getValue()->getValue().getSExtValue()); } else if (const SCEVConstant *Factor = dyn_cast_or_null<SCEVConstant>(getExactSDiv(OldStride, NewStride, SE, true))) { if (Factor->getValue()->getValue().getMinSignedBits() <= 64) Factors.insert(Factor->getValue()->getValue().getSExtValue()); } } // If all uses use the same type, don't bother looking for truncation-based // reuse. if (Types.size() == 1) Types.clear(); DEBUG(print_factors_and_types(dbgs())); } /// findIVOperand - Helper for CollectChains that finds an IV operand (computed /// by an AddRec in this loop) within [OI,OE) or returns OE. If IVUsers mapped /// Instructions to IVStrideUses, we could partially skip this. static User::op_iterator findIVOperand(User::op_iterator OI, User::op_iterator OE, Loop *L, ScalarEvolution &SE) { for(; OI != OE; ++OI) { if (Instruction *Oper = dyn_cast<Instruction>(*OI)) { if (!SE.isSCEVable(Oper->getType())) continue; if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Oper))) { if (AR->getLoop() == L) break; } } } return OI; } /// getWideOperand - IVChain logic must consistenctly peek base TruncInst /// operands, so wrap it in a convenient helper. static Value *getWideOperand(Value *Oper) { if (TruncInst *Trunc = dyn_cast<TruncInst>(Oper)) return Trunc->getOperand(0); return Oper; } /// isCompatibleIVType - Return true if we allow an IV chain to include both /// types. static bool isCompatibleIVType(Value *LVal, Value *RVal) { Type *LType = LVal->getType(); Type *RType = RVal->getType(); return (LType == RType) || (LType->isPointerTy() && RType->isPointerTy()); } /// getExprBase - Return an approximation of this SCEV expression's "base", or /// NULL for any constant. Returning the expression itself is /// conservative. Returning a deeper subexpression is more precise and valid as /// long as it isn't less complex than another subexpression. For expressions /// involving multiple unscaled values, we need to return the pointer-type /// SCEVUnknown. This avoids forming chains across objects, such as: /// PrevOper==a[i], IVOper==b[i], IVInc==b-a. /// /// Since SCEVUnknown is the rightmost type, and pointers are the rightmost /// SCEVUnknown, we simply return the rightmost SCEV operand. static const SCEV *getExprBase(const SCEV *S) { switch (S->getSCEVType()) { default: // uncluding scUnknown. 
return S; case scConstant: return nullptr; case scTruncate: return getExprBase(cast<SCEVTruncateExpr>(S)->getOperand()); case scZeroExtend: return getExprBase(cast<SCEVZeroExtendExpr>(S)->getOperand()); case scSignExtend: return getExprBase(cast<SCEVSignExtendExpr>(S)->getOperand()); case scAddExpr: { // Skip over scaled operands (scMulExpr) to follow add operands as long as // there's nothing more complex. // FIXME: not sure if we want to recognize negation. const SCEVAddExpr *Add = cast<SCEVAddExpr>(S); for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(Add->op_end()), E(Add->op_begin()); I != E; ++I) { const SCEV *SubExpr = *I; if (SubExpr->getSCEVType() == scAddExpr) return getExprBase(SubExpr); if (SubExpr->getSCEVType() != scMulExpr) return SubExpr; } return S; // all operands are scaled, be conservative. } case scAddRecExpr: return getExprBase(cast<SCEVAddRecExpr>(S)->getStart()); } } /// Return true if the chain increment is profitable to expand into a loop /// invariant value, which may require its own register. A profitable chain /// increment will be an offset relative to the same base. We allow such offsets /// to potentially be used as chain increment as long as it's not obviously /// expensive to expand using real instructions. bool IVChain::isProfitableIncrement(const SCEV *OperExpr, const SCEV *IncExpr, ScalarEvolution &SE) { // Aggressively form chains when -stress-ivchain. if (StressIVChain) return true; // Do not replace a constant offset from IV head with a nonconstant IV // increment. if (!isa<SCEVConstant>(IncExpr)) { const SCEV *HeadExpr = SE.getSCEV(getWideOperand(Incs[0].IVOperand)); if (isa<SCEVConstant>(SE.getMinusSCEV(OperExpr, HeadExpr))) return 0; } SmallPtrSet<const SCEV*, 8> Processed; return !isHighCostExpansion(IncExpr, Processed, SE); } /// Return true if the number of registers needed for the chain is estimated to /// be less than the number required for the individual IV users. First prohibit /// any IV users that keep the IV live across increments (the Users set should /// be empty). Next count the number and type of increments in the chain. /// /// Chaining IVs can lead to considerable code bloat if ISEL doesn't /// effectively use postinc addressing modes. Only consider it profitable it the /// increments can be computed in fewer registers when chained. /// /// TODO: Consider IVInc free if it's already used in another chains. static bool isProfitableChain(IVChain &Chain, SmallPtrSetImpl<Instruction*> &Users, ScalarEvolution &SE, const TargetTransformInfo &TTI) { if (StressIVChain) return true; if (!Chain.hasIncs()) return false; if (!Users.empty()) { DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " users:\n"; for (Instruction *Inst : Users) { dbgs() << " " << *Inst << "\n"; }); return false; } assert(!Chain.Incs.empty() && "empty IV chains are not allowed"); // The chain itself may require a register, so intialize cost to 1. int cost = 1; // A complete chain likely eliminates the need for keeping the original IV in // a register. LSR does not currently know how to form a complete chain unless // the header phi already exists. if (isa<PHINode>(Chain.tailUserInst()) && SE.getSCEV(Chain.tailUserInst()) == Chain.Incs[0].IncExpr) { --cost; } const SCEV *LastIncExpr = nullptr; unsigned NumConstIncrements = 0; unsigned NumVarIncrements = 0; unsigned NumReusedIncrements = 0; for (const IVInc &Inc : Chain) { if (Inc.IncExpr->isZero()) continue; // Incrementing by zero or some constant is neutral. 
We assume constants can // be folded into an addressing mode or an add's immediate operand. if (isa<SCEVConstant>(Inc.IncExpr)) { ++NumConstIncrements; continue; } if (Inc.IncExpr == LastIncExpr) ++NumReusedIncrements; else ++NumVarIncrements; LastIncExpr = Inc.IncExpr; } // An IV chain with a single increment is handled by LSR's postinc // uses. However, a chain with multiple increments requires keeping the IV's // value live longer than it needs to be if chained. if (NumConstIncrements > 1) --cost; // Materializing increment expressions in the preheader that didn't exist in // the original code may cost a register. For example, sign-extended array // indices can produce ridiculous increments like this: // IV + ((sext i32 (2 * %s) to i64) + (-1 * (sext i32 %s to i64))) cost += NumVarIncrements; // Reusing variable increments likely saves a register to hold the multiple of // the stride. cost -= NumReusedIncrements; DEBUG(dbgs() << "Chain: " << *Chain.Incs[0].UserInst << " Cost: " << cost << "\n"); return cost < 0; } /// ChainInstruction - Add this IV user to an existing chain or make it the head /// of a new chain. void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper, SmallVectorImpl<ChainUsers> &ChainUsersVec) { // When IVs are used as types of varying widths, they are generally converted // to a wider type with some uses remaining narrow under a (free) trunc. Value *const NextIV = getWideOperand(IVOper); const SCEV *const OperExpr = SE.getSCEV(NextIV); const SCEV *const OperExprBase = getExprBase(OperExpr); // Visit all existing chains. Check if its IVOper can be computed as a // profitable loop invariant increment from the last link in the Chain. unsigned ChainIdx = 0, NChains = IVChainVec.size(); const SCEV *LastIncExpr = nullptr; for (; ChainIdx < NChains; ++ChainIdx) { IVChain &Chain = IVChainVec[ChainIdx]; // Prune the solution space aggressively by checking that both IV operands // are expressions that operate on the same unscaled SCEVUnknown. This // "base" will be canceled by the subsequent getMinusSCEV call. Checking // first avoids creating extra SCEV expressions. if (!StressIVChain && Chain.ExprBase != OperExprBase) continue; Value *PrevIV = getWideOperand(Chain.Incs.back().IVOperand); if (!isCompatibleIVType(PrevIV, NextIV)) continue; // A phi node terminates a chain. if (isa<PHINode>(UserInst) && isa<PHINode>(Chain.tailUserInst())) continue; // The increment must be loop-invariant so it can be kept in a register. const SCEV *PrevExpr = SE.getSCEV(PrevIV); const SCEV *IncExpr = SE.getMinusSCEV(OperExpr, PrevExpr); if (!SE.isLoopInvariant(IncExpr, L)) continue; if (Chain.isProfitableIncrement(OperExpr, IncExpr, SE)) { LastIncExpr = IncExpr; break; } } // If we haven't found a chain, create a new one, unless we hit the max. Don't // bother for phi nodes, because they must be last in the chain. if (ChainIdx == NChains) { if (isa<PHINode>(UserInst)) return; if (NChains >= MaxChains && !StressIVChain) { DEBUG(dbgs() << "IV Chain Limit\n"); return; } LastIncExpr = OperExpr; // IVUsers may have skipped over sign/zero extensions. We don't currently // attempt to form chains involving extensions unless they can be hoisted // into this loop's AddRec. 
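    // (So a would-be chain head whose operand expression is not an AddRec in
    // this loop, for instance a loop-invariant pointer, simply does not start
    // a chain here.)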
if (!isa<SCEVAddRecExpr>(LastIncExpr)) return; ++NChains; IVChainVec.push_back(IVChain(IVInc(UserInst, IVOper, LastIncExpr), OperExprBase)); ChainUsersVec.resize(NChains); DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Head: (" << *UserInst << ") IV=" << *LastIncExpr << "\n"); } else { DEBUG(dbgs() << "IV Chain#" << ChainIdx << " Inc: (" << *UserInst << ") IV+" << *LastIncExpr << "\n"); // Add this IV user to the end of the chain. IVChainVec[ChainIdx].add(IVInc(UserInst, IVOper, LastIncExpr)); } IVChain &Chain = IVChainVec[ChainIdx]; SmallPtrSet<Instruction*,4> &NearUsers = ChainUsersVec[ChainIdx].NearUsers; // This chain's NearUsers become FarUsers. if (!LastIncExpr->isZero()) { ChainUsersVec[ChainIdx].FarUsers.insert(NearUsers.begin(), NearUsers.end()); NearUsers.clear(); } // All other uses of IVOperand become near uses of the chain. // We currently ignore intermediate values within SCEV expressions, assuming // they will eventually be used be the current chain, or can be computed // from one of the chain increments. To be more precise we could // transitively follow its user and only add leaf IV users to the set. for (User *U : IVOper->users()) { Instruction *OtherUse = dyn_cast<Instruction>(U); if (!OtherUse) continue; // Uses in the chain will no longer be uses if the chain is formed. // Include the head of the chain in this iteration (not Chain.begin()). IVChain::const_iterator IncIter = Chain.Incs.begin(); IVChain::const_iterator IncEnd = Chain.Incs.end(); for( ; IncIter != IncEnd; ++IncIter) { if (IncIter->UserInst == OtherUse) break; } if (IncIter != IncEnd) continue; if (SE.isSCEVable(OtherUse->getType()) && !isa<SCEVUnknown>(SE.getSCEV(OtherUse)) && IU.isIVUserOrOperand(OtherUse)) { continue; } NearUsers.insert(OtherUse); } // Since this user is part of the chain, it's no longer considered a use // of the chain. ChainUsersVec[ChainIdx].FarUsers.erase(UserInst); } /// CollectChains - Populate the vector of Chains. /// /// This decreases ILP at the architecture level. Targets with ample registers, /// multiple memory ports, and no register renaming probably don't want /// this. However, such targets should probably disable LSR altogether. /// /// The job of LSR is to make a reasonable choice of induction variables across /// the loop. Subsequent passes can easily "unchain" computation exposing more /// ILP *within the loop* if the target wants it. /// /// Finding the best IV chain is potentially a scheduling problem. Since LSR /// will not reorder memory operations, it will recognize this as a chain, but /// will generate redundant IV increments. Ideally this would be corrected later /// by a smart scheduler: /// = A[i] /// = A[i+x] /// A[i] = /// A[i+x] = /// /// TODO: Walk the entire domtree within this loop, not just the path to the /// loop latch. This will discover chains on side paths, but requires /// maintaining multiple copies of the Chains state. void LSRInstance::CollectChains() { DEBUG(dbgs() << "Collecting IV Chains.\n"); SmallVector<ChainUsers, 8> ChainUsersVec; SmallVector<BasicBlock *,8> LatchPath; BasicBlock *LoopHeader = L->getHeader(); for (DomTreeNode *Rung = DT.getNode(L->getLoopLatch()); Rung->getBlock() != LoopHeader; Rung = Rung->getIDom()) { LatchPath.push_back(Rung->getBlock()); } LatchPath.push_back(LoopHeader); // Walk the instruction stream from the loop header to the loop latch. 
for (SmallVectorImpl<BasicBlock *>::reverse_iterator BBIter = LatchPath.rbegin(), BBEnd = LatchPath.rend(); BBIter != BBEnd; ++BBIter) { for (BasicBlock::iterator I = (*BBIter)->begin(), E = (*BBIter)->end(); I != E; ++I) { // Skip instructions that weren't seen by IVUsers analysis. if (isa<PHINode>(I) || !IU.isIVUserOrOperand(I)) continue; // Ignore users that are part of a SCEV expression. This way we only // consider leaf IV Users. This effectively rediscovers a portion of // IVUsers analysis but in program order this time. if (SE.isSCEVable(I->getType()) && !isa<SCEVUnknown>(SE.getSCEV(I))) continue; // Remove this instruction from any NearUsers set it may be in. for (unsigned ChainIdx = 0, NChains = IVChainVec.size(); ChainIdx < NChains; ++ChainIdx) { ChainUsersVec[ChainIdx].NearUsers.erase(I); } // Search for operands that can be chained. SmallPtrSet<Instruction*, 4> UniqueOperands; User::op_iterator IVOpEnd = I->op_end(); User::op_iterator IVOpIter = findIVOperand(I->op_begin(), IVOpEnd, L, SE); while (IVOpIter != IVOpEnd) { Instruction *IVOpInst = cast<Instruction>(*IVOpIter); if (UniqueOperands.insert(IVOpInst).second) ChainInstruction(I, IVOpInst, ChainUsersVec); IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE); } } // Continue walking down the instructions. } // Continue walking down the domtree. // Visit phi backedges to determine if the chain can generate the IV postinc. for (BasicBlock::iterator I = L->getHeader()->begin(); PHINode *PN = dyn_cast<PHINode>(I); ++I) { if (!SE.isSCEVable(PN->getType())) continue; Instruction *IncV = dyn_cast<Instruction>(PN->getIncomingValueForBlock(L->getLoopLatch())); if (IncV) ChainInstruction(PN, IncV, ChainUsersVec); } // Remove any unprofitable chains. unsigned ChainIdx = 0; for (unsigned UsersIdx = 0, NChains = IVChainVec.size(); UsersIdx < NChains; ++UsersIdx) { if (!isProfitableChain(IVChainVec[UsersIdx], ChainUsersVec[UsersIdx].FarUsers, SE, TTI)) continue; // Preserve the chain at UsersIdx. if (ChainIdx != UsersIdx) IVChainVec[ChainIdx] = IVChainVec[UsersIdx]; FinalizeChain(IVChainVec[ChainIdx]); ++ChainIdx; } IVChainVec.resize(ChainIdx); } void LSRInstance::FinalizeChain(IVChain &Chain) { assert(!Chain.Incs.empty() && "empty IV chains are not allowed"); DEBUG(dbgs() << "Final Chain: " << *Chain.Incs[0].UserInst << "\n"); for (const IVInc &Inc : Chain) { DEBUG(dbgs() << " Inc: " << Inc.UserInst << "\n"); auto UseI = std::find(Inc.UserInst->op_begin(), Inc.UserInst->op_end(), Inc.IVOperand); assert(UseI != Inc.UserInst->op_end() && "cannot find IV operand"); IVIncSet.insert(UseI); } } /// Return true if the IVInc can be folded into an addressing mode. static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst, Value *Operand, const TargetTransformInfo &TTI) { const SCEVConstant *IncConst = dyn_cast<SCEVConstant>(IncExpr); if (!IncConst || !isAddressUse(UserInst, Operand)) return false; if (IncConst->getValue()->getValue().getMinSignedBits() > 64) return false; int64_t IncOffset = IncConst->getValue()->getSExtValue(); if (!isAlwaysFoldable(TTI, LSRUse::Address, getAccessType(UserInst), /*BaseGV=*/ nullptr, IncOffset, /*HasBaseReg=*/ false)) return false; return true; } /// GenerateIVChain - Generate an add or subtract for each IVInc in a chain to /// materialize the IV user's operand from the previous IV user's operand. void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter, SmallVectorImpl<WeakTrackingVH> &DeadInsts) { // Find the new IVOperand for the head of the chain.
It may have been replaced // by LSR. const IVInc &Head = Chain.Incs[0]; User::op_iterator IVOpEnd = Head.UserInst->op_end(); // findIVOperand returns IVOpEnd if it can no longer find a valid IV user. User::op_iterator IVOpIter = findIVOperand(Head.UserInst->op_begin(), IVOpEnd, L, SE); Value *IVSrc = nullptr; while (IVOpIter != IVOpEnd) { IVSrc = getWideOperand(*IVOpIter); // If this operand computes the expression that the chain needs, we may use // it. (Check this after setting IVSrc which is used below.) // // Note that if Head.IncExpr is wider than IVSrc, then this phi is too // narrow for the chain, so we can no longer use it. We do allow using a // wider phi, assuming the LSR checked for free truncation. In that case we // should already have a truncate on this operand such that // getSCEV(IVSrc) == IncExpr. if (SE.getSCEV(*IVOpIter) == Head.IncExpr || SE.getSCEV(IVSrc) == Head.IncExpr) { break; } IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE); } if (IVOpIter == IVOpEnd) { // Gracefully give up on this chain. DEBUG(dbgs() << "Concealed chain head: " << *Head.UserInst << "\n"); return; } DEBUG(dbgs() << "Generate chain at: " << *IVSrc << "\n"); Type *IVTy = IVSrc->getType(); Type *IntTy = SE.getEffectiveSCEVType(IVTy); const SCEV *LeftOverExpr = nullptr; for (const IVInc &Inc : Chain) { Instruction *InsertPt = Inc.UserInst; if (isa<PHINode>(InsertPt)) InsertPt = L->getLoopLatch()->getTerminator(); // IVOper will replace the current IV User's operand. IVSrc is the IV // value currently held in a register. Value *IVOper = IVSrc; if (!Inc.IncExpr->isZero()) { // IncExpr was the result of subtraction of two narrow values, so must // be signed. const SCEV *IncExpr = SE.getNoopOrSignExtend(Inc.IncExpr, IntTy); LeftOverExpr = LeftOverExpr ? SE.getAddExpr(LeftOverExpr, IncExpr) : IncExpr; } if (LeftOverExpr && !LeftOverExpr->isZero()) { // Expand the IV increment. Rewriter.clearPostInc(); Value *IncV = Rewriter.expandCodeFor(LeftOverExpr, IntTy, InsertPt); const SCEV *IVOperExpr = SE.getAddExpr(SE.getUnknown(IVSrc), SE.getUnknown(IncV)); IVOper = Rewriter.expandCodeFor(IVOperExpr, IVTy, InsertPt); // If an IV increment can't be folded, use it as the next IV value. if (!canFoldIVIncExpr(LeftOverExpr, Inc.UserInst, Inc.IVOperand, TTI)) { assert(IVTy == IVOper->getType() && "inconsistent IV increment type"); IVSrc = IVOper; LeftOverExpr = nullptr; } } Type *OperTy = Inc.IVOperand->getType(); if (IVTy != OperTy) { assert(SE.getTypeSizeInBits(IVTy) >= SE.getTypeSizeInBits(OperTy) && "cannot extend a chained IV"); IRBuilder<> Builder(InsertPt); IVOper = Builder.CreateTruncOrBitCast(IVOper, OperTy, "lsr.chain"); } Inc.UserInst->replaceUsesOfWith(Inc.IVOperand, IVOper); DeadInsts.emplace_back(Inc.IVOperand); } // If LSR created a new, wider phi, we may also replace its postinc. We only // do this if we also found a wide value for the head of the chain. 
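  // That is, when the chain ends at a header phi, any other header phi whose
  // latch (post-increment) value computes the same SCEV as IVSrc gets that
  // latch value replaced with the chained increment, so the old increment
  // instruction can go dead.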
if (isa<PHINode>(Chain.tailUserInst())) { for (BasicBlock::iterator I = L->getHeader()->begin(); PHINode *Phi = dyn_cast<PHINode>(I); ++I) { if (!isCompatibleIVType(Phi, IVSrc)) continue; Instruction *PostIncV = dyn_cast<Instruction>( Phi->getIncomingValueForBlock(L->getLoopLatch())); if (!PostIncV || (SE.getSCEV(PostIncV) != SE.getSCEV(IVSrc))) continue; Value *IVOper = IVSrc; Type *PostIncTy = PostIncV->getType(); if (IVTy != PostIncTy) { assert(PostIncTy->isPointerTy() && "mixing int/ptr IV types"); IRBuilder<> Builder(L->getLoopLatch()->getTerminator()); Builder.SetCurrentDebugLocation(PostIncV->getDebugLoc()); IVOper = Builder.CreatePointerCast(IVSrc, PostIncTy, "lsr.chain"); } Phi->replaceUsesOfWith(PostIncV, IVOper); DeadInsts.emplace_back(PostIncV); } } } void LSRInstance::CollectFixupsAndInitialFormulae() { for (const IVStrideUse &U : IU) { Instruction *UserInst = U.getUser(); // Skip IV users that are part of profitable IV Chains. User::op_iterator UseI = std::find(UserInst->op_begin(), UserInst->op_end(), U.getOperandValToReplace()); assert(UseI != UserInst->op_end() && "cannot find IV operand"); if (IVIncSet.count(UseI)) continue; // Record the uses. LSRFixup &LF = getNewFixup(); LF.UserInst = UserInst; LF.OperandValToReplace = U.getOperandValToReplace(); LF.PostIncLoops = U.getPostIncLoops(); LSRUse::KindType Kind = LSRUse::Basic; Type *AccessTy = nullptr; if (isAddressUse(LF.UserInst, LF.OperandValToReplace)) { Kind = LSRUse::Address; AccessTy = getAccessType(LF.UserInst); } const SCEV *S = IU.getExpr(U); // Equality (== and !=) ICmps are special. We can rewrite (i == N) as // (N - i == 0), and this allows (N - i) to be the expression that we work // with rather than just N or i, so we can consider the register // requirements for both N and i at the same time. Limiting this code to // equality icmps is not a problem because all interesting loops use // equality icmps, thanks to IndVarSimplify. if (ICmpInst *CI = dyn_cast<ICmpInst>(LF.UserInst)) if (CI->isEquality()) { // Swap the operands if needed to put the OperandValToReplace on the // left, for consistency. Value *NV = CI->getOperand(1); if (NV == LF.OperandValToReplace) { CI->setOperand(1, CI->getOperand(0)); CI->setOperand(0, NV); NV = CI->getOperand(1); Changed = true; } // x == y --> x - y == 0 const SCEV *N = SE.getSCEV(NV); if (SE.isLoopInvariant(N, L) && isSafeToExpand(N, SE)) { // S is normalized, so normalize N before folding it into S // to keep the result normalized. N = TransformForPostIncUse(Normalize, N, CI, nullptr, LF.PostIncLoops, SE, DT); Kind = LSRUse::ICmpZero; S = SE.getMinusSCEV(N, S); } // -1 and the negations of all interesting strides (except the negation // of -1) are now also interesting. for (size_t i = 0, e = Factors.size(); i != e; ++i) if (Factors[i] != -1) Factors.insert(-(uint64_t)Factors[i]); Factors.insert(-1); } // Set up the initial formula for this use. std::pair<size_t, int64_t> P = getUse(S, Kind, AccessTy); LF.LUIdx = P.first; LF.Offset = P.second; LSRUse &LU = Uses[LF.LUIdx]; LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L); if (!LU.WidestFixupType || SE.getTypeSizeInBits(LU.WidestFixupType) < SE.getTypeSizeInBits(LF.OperandValToReplace->getType())) LU.WidestFixupType = LF.OperandValToReplace->getType(); // If this is the first use of this LSRUse, give it a formula. 
if (LU.Formulae.empty()) { InsertInitialFormula(S, LU, LF.LUIdx); CountRegisters(LU.Formulae.back(), LF.LUIdx); } } DEBUG(print_fixups(dbgs())); } /// InsertInitialFormula - Insert a formula for the given expression into /// the given use, separating out loop-variant portions from loop-invariant /// and loop-computable portions. void LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) { // Mark uses whose expressions cannot be expanded. if (!isSafeToExpand(S, SE)) LU.RigidFormula = true; Formula F; F.InitialMatch(S, L, SE); bool Inserted = InsertFormula(LU, LUIdx, F); assert(Inserted && "Initial formula already exists!"); (void)Inserted; } /// InsertSupplementalFormula - Insert a simple single-register formula for /// the given expression into the given use. void LSRInstance::InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) { Formula F; F.BaseRegs.push_back(S); F.HasBaseReg = true; bool Inserted = InsertFormula(LU, LUIdx, F); assert(Inserted && "Supplemental formula already exists!"); (void)Inserted; } /// CountRegisters - Note which registers are used by the given formula, /// updating RegUses. void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) { if (F.ScaledReg) RegUses.CountRegister(F.ScaledReg, LUIdx); for (const SCEV *BaseReg : F.BaseRegs) RegUses.CountRegister(BaseReg, LUIdx); } /// InsertFormula - If the given formula has not yet been inserted, add it to /// the list, and return true. Return false otherwise. bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) { // Do not insert formula that we will not be able to expand. assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F) && "Formula is illegal"); if (!LU.InsertFormula(F)) return false; CountRegisters(F, LUIdx); return true; } /// CollectLoopInvariantFixupsAndFormulae - Check for other uses of /// loop-invariant values which we're tracking. These other uses will pin these /// values in registers, making them less profitable for elimination. /// TODO: This currently misses non-constant addrec step registers. /// TODO: Should this give more weight to users inside the loop? void LSRInstance::CollectLoopInvariantFixupsAndFormulae() { SmallVector<const SCEV *, 8> Worklist(RegUses.begin(), RegUses.end()); SmallPtrSet<const SCEV *, 32> Visited; while (!Worklist.empty()) { const SCEV *S = Worklist.pop_back_val(); // Don't process the same SCEV twice if (!Visited.insert(S).second) continue; if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) Worklist.append(N->op_begin(), N->op_end()); else if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) Worklist.push_back(C->getOperand()); else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) { Worklist.push_back(D->getLHS()); Worklist.push_back(D->getRHS()); } else if (const SCEVUnknown *US = dyn_cast<SCEVUnknown>(S)) { const Value *V = US->getValue(); if (const Instruction *Inst = dyn_cast<Instruction>(V)) { // Look for instructions defined outside the loop. if (L->contains(Inst)) continue; } else if (isa<UndefValue>(V)) // Undef doesn't have a live range, so it doesn't matter. continue; for (const Use &U : V->uses()) { const Instruction *UserInst = dyn_cast<Instruction>(U.getUser()); // Ignore non-instructions. if (!UserInst) continue; // Ignore instructions in other functions (as can happen with // Constants). if (UserInst->getParent()->getParent() != L->getHeader()->getParent()) continue; // Ignore instructions not dominated by the loop. 
const BasicBlock *UseBB = !isa<PHINode>(UserInst) ? UserInst->getParent() : cast<PHINode>(UserInst)->getIncomingBlock( PHINode::getIncomingValueNumForOperand(U.getOperandNo())); if (!DT.dominates(L->getHeader(), UseBB)) continue; // Ignore uses which are part of other SCEV expressions, to avoid // analyzing them multiple times. if (SE.isSCEVable(UserInst->getType())) { const SCEV *UserS = SE.getSCEV(const_cast<Instruction *>(UserInst)); // If the user is a no-op, look through to its uses. if (!isa<SCEVUnknown>(UserS)) continue; if (UserS == US) { Worklist.push_back( SE.getUnknown(const_cast<Instruction *>(UserInst))); continue; } } // Ignore icmp instructions which are already being analyzed. if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) { unsigned OtherIdx = !U.getOperandNo(); Value *OtherOp = const_cast<Value *>(ICI->getOperand(OtherIdx)); if (SE.hasComputableLoopEvolution(SE.getSCEV(OtherOp), L)) continue; } LSRFixup &LF = getNewFixup(); LF.UserInst = const_cast<Instruction *>(UserInst); LF.OperandValToReplace = U; std::pair<size_t, int64_t> P = getUse(S, LSRUse::Basic, nullptr); LF.LUIdx = P.first; LF.Offset = P.second; LSRUse &LU = Uses[LF.LUIdx]; LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L); if (!LU.WidestFixupType || SE.getTypeSizeInBits(LU.WidestFixupType) < SE.getTypeSizeInBits(LF.OperandValToReplace->getType())) LU.WidestFixupType = LF.OperandValToReplace->getType(); InsertSupplementalFormula(US, LU, LF.LUIdx); CountRegisters(LU.Formulae.back(), Uses.size() - 1); break; } } } } /// CollectSubexprs - Split S into subexpressions which can be pulled out into /// separate registers. If C is non-null, multiply each subexpression by C. /// /// Return remainder expression after factoring the subexpressions captured by /// Ops. If Ops is complete, return NULL. static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C, SmallVectorImpl<const SCEV *> &Ops, const Loop *L, ScalarEvolution &SE, unsigned Depth = 0) { // Arbitrarily cap recursion to protect compile time. if (Depth >= 3) return S; if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { // Break out add operands. for (const SCEV *S : Add->operands()) { const SCEV *Remainder = CollectSubexprs(S, C, Ops, L, SE, Depth+1); if (Remainder) Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder); } return nullptr; } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { // Split a non-zero base out of an addrec. if (AR->getStart()->isZero()) return S; const SCEV *Remainder = CollectSubexprs(AR->getStart(), C, Ops, L, SE, Depth+1); // Split the non-zero AddRec unless it is part of a nested recurrence that // does not pertain to this loop. if (Remainder && (AR->getLoop() == L || !isa<SCEVAddRecExpr>(Remainder))) { Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder); Remainder = nullptr; } if (Remainder != AR->getStart()) { if (!Remainder) Remainder = SE.getConstant(AR->getType(), 0); return SE.getAddRecExpr(Remainder, AR->getStepRecurrence(SE), AR->getLoop(), //FIXME: AR->getNoWrapFlags(SCEV::FlagNW) SCEV::FlagAnyWrap); } } else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { // Break (C * (a + b + c)) into C*a + C*b + C*c. if (Mul->getNumOperands() != 2) return S; if (const SCEVConstant *Op0 = dyn_cast<SCEVConstant>(Mul->getOperand(0))) { C = C ? 
cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0; const SCEV *Remainder = CollectSubexprs(Mul->getOperand(1), C, Ops, L, SE, Depth+1); if (Remainder) Ops.push_back(SE.getMulExpr(C, Remainder)); return nullptr; } } return S; } /// \brief Helper function for LSRInstance::GenerateReassociations. void LSRInstance::GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx, const Formula &Base, unsigned Depth, size_t Idx, bool IsScaledReg) { const SCEV *BaseReg = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx]; SmallVector<const SCEV *, 8> AddOps; const SCEV *Remainder = CollectSubexprs(BaseReg, nullptr, AddOps, L, SE); if (Remainder) AddOps.push_back(Remainder); if (AddOps.size() == 1) return; for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(), JE = AddOps.end(); J != JE; ++J) { // Loop-variant "unknown" values are uninteresting; we won't be able to // do anything meaningful with them. if (isa<SCEVUnknown>(*J) && !SE.isLoopInvariant(*J, L)) continue; // Don't pull a constant into a register if the constant could be folded // into an immediate field. if (isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, *J, Base.getNumRegs() > 1)) continue; // Collect all operands except *J. SmallVector<const SCEV *, 8> InnerAddOps( ((const SmallVector<const SCEV *, 8> &)AddOps).begin(), J); InnerAddOps.append(std::next(J), ((const SmallVector<const SCEV *, 8> &)AddOps).end()); // Don't leave just a constant behind in a register if the constant could // be folded into an immediate field. if (InnerAddOps.size() == 1 && isAlwaysFoldable(TTI, SE, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, InnerAddOps[0], Base.getNumRegs() > 1)) continue; const SCEV *InnerSum = SE.getAddExpr(InnerAddOps); if (InnerSum->isZero()) continue; Formula F = Base; // Add the remaining pieces of the add back into the new formula. const SCEVConstant *InnerSumSC = dyn_cast<SCEVConstant>(InnerSum); if (InnerSumSC && SE.getTypeSizeInBits(InnerSumSC->getType()) <= 64 && TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset + InnerSumSC->getValue()->getZExtValue())) { F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset + InnerSumSC->getValue()->getZExtValue(); if (IsScaledReg) F.ScaledReg = nullptr; else F.BaseRegs.erase(F.BaseRegs.begin() + Idx); } else if (IsScaledReg) F.ScaledReg = InnerSum; else F.BaseRegs[Idx] = InnerSum; // Add J as its own register, or an unfolded immediate. const SCEVConstant *SC = dyn_cast<SCEVConstant>(*J); if (SC && SE.getTypeSizeInBits(SC->getType()) <= 64 && TTI.isLegalAddImmediate((uint64_t)F.UnfoldedOffset + SC->getValue()->getZExtValue())) F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset + SC->getValue()->getZExtValue(); else F.BaseRegs.push_back(*J); // We may have changed the number of register in base regs, adjust the // formula accordingly. F.Canonicalize(); if (InsertFormula(LU, LUIdx, F)) // If that formula hadn't been seen before, recurse to find more like // it. GenerateReassociations(LU, LUIdx, LU.Formulae.back(), Depth + 1); } } /// GenerateReassociations - Split out subexpressions from adds and the bases of /// addrecs. void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base, unsigned Depth) { assert(Base.isCanonical() && "Input must be in the canonical form"); // Arbitrarily cap recursion to protect compile time. 
if (Depth >= 3) return; for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) GenerateReassociationsImpl(LU, LUIdx, Base, Depth, i); if (Base.Scale == 1) GenerateReassociationsImpl(LU, LUIdx, Base, Depth, /* Idx */ -1, /* IsScaledReg */ true); } /// GenerateCombinations - Generate a formula consisting of all of the /// loop-dominating registers added into a single register. void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base) { // This method is only interesting on a plurality of registers. if (Base.BaseRegs.size() + (Base.Scale == 1) <= 1) return; // Flatten the representation, i.e., reg1 + 1*reg2 => reg1 + reg2, before // processing the formula. Base.Unscale(); Formula F = Base; F.BaseRegs.clear(); SmallVector<const SCEV *, 4> Ops; for (const SCEV *BaseReg : Base.BaseRegs) { if (SE.properlyDominates(BaseReg, L->getHeader()) && !SE.hasComputableLoopEvolution(BaseReg, L)) Ops.push_back(BaseReg); else F.BaseRegs.push_back(BaseReg); } if (Ops.size() > 1) { const SCEV *Sum = SE.getAddExpr(Ops); // TODO: If Sum is zero, it probably means ScalarEvolution missed an // opportunity to fold something. For now, just ignore such cases // rather than proceed with zero in a register. if (!Sum->isZero()) { F.BaseRegs.push_back(Sum); F.Canonicalize(); (void)InsertFormula(LU, LUIdx, F); } } } /// \brief Helper function for LSRInstance::GenerateSymbolicOffsets. void LSRInstance::GenerateSymbolicOffsetsImpl(LSRUse &LU, unsigned LUIdx, const Formula &Base, size_t Idx, bool IsScaledReg) { const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx]; GlobalValue *GV = ExtractSymbol(G, SE); if (G->isZero() || !GV) return; Formula F = Base; F.BaseGV = GV; if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F)) return; if (IsScaledReg) F.ScaledReg = G; else F.BaseRegs[Idx] = G; (void)InsertFormula(LU, LUIdx, F); } /// GenerateSymbolicOffsets - Generate reuse formulae using symbolic offsets. void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base) { // We can't add a symbolic offset if the address already contains one. if (Base.BaseGV) return; for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) GenerateSymbolicOffsetsImpl(LU, LUIdx, Base, i); if (Base.Scale == 1) GenerateSymbolicOffsetsImpl(LU, LUIdx, Base, /* Idx */ -1, /* IsScaledReg */ true); } /// \brief Helper function for LSRInstance::GenerateConstantOffsets. void LSRInstance::GenerateConstantOffsetsImpl( LSRUse &LU, unsigned LUIdx, const Formula &Base, const SmallVectorImpl<int64_t> &Worklist, size_t Idx, bool IsScaledReg) { const SCEV *G = IsScaledReg ? Base.ScaledReg : Base.BaseRegs[Idx]; for (int64_t Offset : Worklist) { Formula F = Base; F.BaseOffset = (uint64_t)Base.BaseOffset - Offset; if (isLegalUse(TTI, LU.MinOffset - Offset, LU.MaxOffset - Offset, LU.Kind, LU.AccessTy, F)) { // Add the offset to the base register. const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), Offset), G); // If it cancelled out, drop the base register, otherwise update it. 
if (NewG->isZero()) { if (IsScaledReg) { F.Scale = 0; F.ScaledReg = nullptr; } else F.DeleteBaseReg(F.BaseRegs[Idx]); F.Canonicalize(); } else if (IsScaledReg) F.ScaledReg = NewG; else F.BaseRegs[Idx] = NewG; (void)InsertFormula(LU, LUIdx, F); } } int64_t Imm = ExtractImmediate(G, SE); if (G->isZero() || Imm == 0) return; Formula F = Base; F.BaseOffset = (uint64_t)F.BaseOffset + Imm; if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F)) return; if (IsScaledReg) F.ScaledReg = G; else F.BaseRegs[Idx] = G; (void)InsertFormula(LU, LUIdx, F); } /// GenerateConstantOffsets - Generate reuse formulae using symbolic offsets. void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base) { // TODO: For now, just add the min and max offset, because it usually isn't // worthwhile looking at everything inbetween. SmallVector<int64_t, 2> Worklist; Worklist.push_back(LU.MinOffset); if (LU.MaxOffset != LU.MinOffset) Worklist.push_back(LU.MaxOffset); for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) GenerateConstantOffsetsImpl(LU, LUIdx, Base, Worklist, i); if (Base.Scale == 1) GenerateConstantOffsetsImpl(LU, LUIdx, Base, Worklist, /* Idx */ -1, /* IsScaledReg */ true); } /// GenerateICmpZeroScales - For ICmpZero, check to see if we can scale up /// the comparison. For example, x == y -> x*c == y*c. void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base) { if (LU.Kind != LSRUse::ICmpZero) return; // Determine the integer type for the base formula. Type *IntTy = Base.getType(); if (!IntTy) return; if (SE.getTypeSizeInBits(IntTy) > 64) return; // Don't do this if there is more than one offset. if (LU.MinOffset != LU.MaxOffset) return; assert(!Base.BaseGV && "ICmpZero use is not legal!"); // Check each interesting stride. for (int64_t Factor : Factors) { // Check that the multiplication doesn't overflow. if (Base.BaseOffset == INT64_MIN && Factor == -1) continue; int64_t NewBaseOffset = (uint64_t)Base.BaseOffset * Factor; if (NewBaseOffset / Factor != Base.BaseOffset) continue; // If the offset will be truncated at this use, check that it is in bounds. if (!IntTy->isPointerTy() && !ConstantInt::isValueValidForType(IntTy, NewBaseOffset)) continue; // Check that multiplying with the use offset doesn't overflow. int64_t Offset = LU.MinOffset; if (Offset == INT64_MIN && Factor == -1) continue; Offset = (uint64_t)Offset * Factor; if (Offset / Factor != LU.MinOffset) continue; // If the offset will be truncated at this use, check that it is in bounds. if (!IntTy->isPointerTy() && !ConstantInt::isValueValidForType(IntTy, Offset)) continue; Formula F = Base; F.BaseOffset = NewBaseOffset; // Check that this scale is legal. if (!isLegalUse(TTI, Offset, Offset, LU.Kind, LU.AccessTy, F)) continue; // Compensate for the use having MinOffset built into it. F.BaseOffset = (uint64_t)F.BaseOffset + Offset - LU.MinOffset; const SCEV *FactorS = SE.getConstant(IntTy, Factor); // Check that multiplying with each base register doesn't overflow. for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i) { F.BaseRegs[i] = SE.getMulExpr(F.BaseRegs[i], FactorS); if (getExactSDiv(F.BaseRegs[i], FactorS, SE) != Base.BaseRegs[i]) goto next; } // Check that multiplying with the scaled register doesn't overflow. if (F.ScaledReg) { F.ScaledReg = SE.getMulExpr(F.ScaledReg, FactorS); if (getExactSDiv(F.ScaledReg, FactorS, SE) != Base.ScaledReg) continue; } // Check that multiplying with the unfolded offset doesn't overflow. 
if (F.UnfoldedOffset != 0) { if (F.UnfoldedOffset == INT64_MIN && Factor == -1) continue; F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset * Factor; if (F.UnfoldedOffset / Factor != Base.UnfoldedOffset) continue; // If the offset will be truncated, check that it is in bounds. if (!IntTy->isPointerTy() && !ConstantInt::isValueValidForType(IntTy, F.UnfoldedOffset)) continue; } // If we make it here and it's legal, add it. (void)InsertFormula(LU, LUIdx, F); next:; } } /// GenerateScales - Generate stride factor reuse formulae by making use of /// scaled-offset address modes, for example. void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) { // Determine the integer type for the base formula. Type *IntTy = Base.getType(); if (!IntTy) return; // If this Formula already has a scaled register, we can't add another one. // Try to unscale the formula to generate a better scale. if (Base.Scale != 0 && !Base.Unscale()) return; assert(Base.Scale == 0 && "Unscale did not did its job!"); // Check each interesting stride. for (int64_t Factor : Factors) { Base.Scale = Factor; Base.HasBaseReg = Base.BaseRegs.size() > 1; // Check whether this scale is going to be legal. if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, Base)) { // As a special-case, handle special out-of-loop Basic users specially. // TODO: Reconsider this special case. if (LU.Kind == LSRUse::Basic && isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LSRUse::Special, LU.AccessTy, Base) && LU.AllFixupsOutsideLoop) LU.Kind = LSRUse::Special; else continue; } // For an ICmpZero, negating a solitary base register won't lead to // new solutions. if (LU.Kind == LSRUse::ICmpZero && !Base.HasBaseReg && Base.BaseOffset == 0 && !Base.BaseGV) continue; // For each addrec base reg, apply the scale, if possible. for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Base.BaseRegs[i])) { const SCEV *FactorS = SE.getConstant(IntTy, Factor); if (FactorS->isZero()) continue; // Divide out the factor, ignoring high bits, since we'll be // scaling the value back up in the end. if (const SCEV *Quotient = getExactSDiv(AR, FactorS, SE, true)) { // TODO: This could be optimized to avoid all the copying. Formula F = Base; F.ScaledReg = Quotient; F.DeleteBaseReg(F.BaseRegs[i]); // The canonical representation of 1*reg is reg, which is already in // Base. In that case, do not try to insert the formula, it will be // rejected anyway. if (F.Scale == 1 && F.BaseRegs.empty()) continue; (void)InsertFormula(LU, LUIdx, F); } } } } /// GenerateTruncates - Generate reuse formulae from different IV types. void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) { // Don't bother truncating symbolic values. if (Base.BaseGV) return; // Determine the integer type for the base formula. Type *DstTy = Base.getType(); if (!DstTy) return; DstTy = SE.getEffectiveSCEVType(DstTy); for (Type *SrcTy : Types) { if (SrcTy != DstTy && TTI.isTruncateFree(SrcTy, DstTy)) { Formula F = Base; if (F.ScaledReg) F.ScaledReg = SE.getAnyExtendExpr(F.ScaledReg, SrcTy); for (const SCEV *&BaseReg : F.BaseRegs) BaseReg = SE.getAnyExtendExpr(BaseReg, SrcTy); // TODO: This assumes we've done basic processing on all uses and // have an idea what the register usage is. if (!F.hasRegsUsedByUsesOtherThan(LUIdx, RegUses)) continue; (void)InsertFormula(LU, LUIdx, F); } } } namespace { /// WorkItem - Helper class for GenerateCrossUseConstantOffsets. 
It's used to /// defer modifications so that the search phase doesn't have to worry about /// the data structures moving underneath it. struct WorkItem { size_t LUIdx; int64_t Imm; const SCEV *OrigReg; WorkItem(size_t LI, int64_t I, const SCEV *R) : LUIdx(LI), Imm(I), OrigReg(R) {} void print(raw_ostream &OS) const; void dump() const; }; } void WorkItem::print(raw_ostream &OS) const { OS << "in formulae referencing " << *OrigReg << " in use " << LUIdx << " , add offset " << Imm; } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void WorkItem::dump() const { print(errs()); errs() << '\n'; } #endif /// GenerateCrossUseConstantOffsets - Look for registers which are a constant /// distance apart and try to form reuse opportunities between them. void LSRInstance::GenerateCrossUseConstantOffsets() { // Group the registers by their value without any added constant offset. typedef std::map<int64_t, const SCEV *> ImmMapTy; DenseMap<const SCEV *, ImmMapTy> Map; DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap; SmallVector<const SCEV *, 8> Sequence; for (const SCEV *Use : RegUses) { const SCEV *Reg = Use; // Make a copy for ExtractImmediate to modify. int64_t Imm = ExtractImmediate(Reg, SE); auto Pair = Map.insert(std::make_pair(Reg, ImmMapTy())); if (Pair.second) Sequence.push_back(Reg); Pair.first->second.insert(std::make_pair(Imm, Use)); UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(Use); } // Now examine each set of registers with the same base value. Build up // a list of work to do and do the work in a separate step so that we're // not adding formulae and register counts while we're searching. SmallVector<WorkItem, 32> WorkItems; SmallSet<std::pair<size_t, int64_t>, 32> UniqueItems; for (const SCEV *Reg : Sequence) { const ImmMapTy &Imms = Map.find(Reg)->second; // It's not worthwhile looking for reuse if there's only one offset. if (Imms.size() == 1) continue; DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':'; for (const auto &Entry : Imms) dbgs() << ' ' << Entry.first; dbgs() << '\n'); // Examine each offset. for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end(); J != JE; ++J) { const SCEV *OrigReg = J->second; int64_t JImm = J->first; const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(OrigReg); if (!isa<SCEVConstant>(OrigReg) && UsedByIndicesMap[Reg].count() == 1) { DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg << '\n'); continue; } // Conservatively examine offsets between this orig reg a few selected // other orig regs. ImmMapTy::const_iterator OtherImms[] = { Imms.begin(), std::prev(Imms.end()), Imms.lower_bound((Imms.begin()->first + std::prev(Imms.end())->first) / 2) }; for (size_t i = 0, e = array_lengthof(OtherImms); i != e; ++i) { ImmMapTy::const_iterator M = OtherImms[i]; if (M == J || M == JE) continue; // Compute the difference between the two. int64_t Imm = (uint64_t)JImm - M->first; for (int LUIdx = UsedByIndices.find_first(); LUIdx != -1; LUIdx = UsedByIndices.find_next(LUIdx)) // Make a memo of this use, offset, and register tuple. if (UniqueItems.insert(std::make_pair(LUIdx, Imm)).second) WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg)); } } } Map.clear(); Sequence.clear(); UsedByIndicesMap.clear(); UniqueItems.clear(); // Now iterate through the worklist and add new formulae. 
for (const WorkItem &WI : WorkItems) { size_t LUIdx = WI.LUIdx; LSRUse &LU = Uses[LUIdx]; int64_t Imm = WI.Imm; const SCEV *OrigReg = WI.OrigReg; Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType()); const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm)); unsigned BitWidth = SE.getTypeSizeInBits(IntTy); // TODO: Use a more targeted data structure. for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) { Formula F = LU.Formulae[L]; // FIXME: The code for the scaled and unscaled registers looks // very similar but slightly different. Investigate if they // could be merged. That way, we would not have to unscale the // Formula. F.Unscale(); // Use the immediate in the scaled register. if (F.ScaledReg == OrigReg) { int64_t Offset = (uint64_t)F.BaseOffset + Imm * (uint64_t)F.Scale; // Don't create 50 + reg(-50). if (F.referencesReg(SE.getSCEV( ConstantInt::get(IntTy, -(uint64_t)Offset)))) continue; Formula NewF = F; NewF.BaseOffset = Offset; if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, NewF)) continue; NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg); // If the new scale is a constant in a register, and adding the constant // value to the immediate would produce a value closer to zero than the // immediate itself, then the formula isn't worthwhile. if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewF.ScaledReg)) if (C->getValue()->isNegative() != (NewF.BaseOffset < 0) && (C->getValue()->getValue().abs() * APInt(BitWidth, F.Scale)) .ule(std::abs(NewF.BaseOffset))) continue; // OK, looks good. NewF.Canonicalize(); (void)InsertFormula(LU, LUIdx, NewF); } else { // Use the immediate in a base register. for (size_t N = 0, NE = F.BaseRegs.size(); N != NE; ++N) { const SCEV *BaseReg = F.BaseRegs[N]; if (BaseReg != OrigReg) continue; Formula NewF = F; NewF.BaseOffset = (uint64_t)NewF.BaseOffset + Imm; if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, NewF)) { if (!TTI.isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm)) continue; NewF = F; NewF.UnfoldedOffset = (uint64_t)NewF.UnfoldedOffset + Imm; } NewF.BaseRegs[N] = SE.getAddExpr(NegImmS, BaseReg); // If the new formula has a constant in a register, and adding the // constant value to the immediate would produce a value closer to // zero than the immediate itself, then the formula isn't worthwhile. for (const SCEV *NewReg : NewF.BaseRegs) if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewReg)) if ((C->getValue()->getValue() + NewF.BaseOffset).abs().slt( std::abs(NewF.BaseOffset)) && (C->getValue()->getValue() + NewF.BaseOffset).countTrailingZeros() >= countTrailingZeros<uint64_t>(NewF.BaseOffset)) goto skip_formula; // Ok, looks good. NewF.Canonicalize(); (void)InsertFormula(LU, LUIdx, NewF); break; skip_formula:; } } } } } /// GenerateAllReuseFormulae - Generate formulae for each use. void LSRInstance::GenerateAllReuseFormulae() { // This is split into multiple loops so that hasRegsUsedByUsesOtherThan // queries are more precise. 
for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { LSRUse &LU = Uses[LUIdx]; for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) GenerateReassociations(LU, LUIdx, LU.Formulae[i]); for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) GenerateCombinations(LU, LUIdx, LU.Formulae[i]); } for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { LSRUse &LU = Uses[LUIdx]; for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) GenerateSymbolicOffsets(LU, LUIdx, LU.Formulae[i]); for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) GenerateConstantOffsets(LU, LUIdx, LU.Formulae[i]); for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) GenerateICmpZeroScales(LU, LUIdx, LU.Formulae[i]); for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) GenerateScales(LU, LUIdx, LU.Formulae[i]); } for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { LSRUse &LU = Uses[LUIdx]; for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) GenerateTruncates(LU, LUIdx, LU.Formulae[i]); } GenerateCrossUseConstantOffsets(); DEBUG(dbgs() << "\n" "After generating reuse formulae:\n"; print_uses(dbgs())); } /// If there are multiple formulae with the same set of registers used /// by other uses, pick the best one and delete the others. void LSRInstance::FilterOutUndesirableDedicatedRegisters() { DenseSet<const SCEV *> VisitedRegs; SmallPtrSet<const SCEV *, 16> Regs; SmallPtrSet<const SCEV *, 16> LoserRegs; #ifndef NDEBUG bool ChangedFormulae = false; #endif // Collect the best formula for each unique set of shared registers. This // is reset for each use. typedef DenseMap<SmallVector<const SCEV *, 4>, size_t, UniquifierDenseMapInfo> BestFormulaeTy; BestFormulaeTy BestFormulae; for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { LSRUse &LU = Uses[LUIdx]; DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs()); dbgs() << '\n'); bool Any = false; for (size_t FIdx = 0, NumForms = LU.Formulae.size(); FIdx != NumForms; ++FIdx) { Formula &F = LU.Formulae[FIdx]; // Some formulas are instant losers. For example, they may depend on // nonexistent AddRecs from other loops. These need to be filtered // immediately, otherwise heuristics could choose them over others leading // to an unsatisfactory solution. Passing LoserRegs into RateFormula here // avoids the need to recompute this information across formulae using the // same bad AddRec. Passing LoserRegs is also essential unless we remove // the corresponding bad register from the Regs set. Cost CostF; Regs.clear(); CostF.RateFormula(TTI, F, Regs, VisitedRegs, L, LU.Offsets, SE, DT, LU, &LoserRegs); if (CostF.isLoser()) { // During initial formula generation, undesirable formulae are generated // by uses within other loops that have some non-trivial address mode or // use the postinc form of the IV. LSR needs to provide these formulae // as the basis of rediscovering the desired formula that uses an AddRec // corresponding to the existing phi. Once all formulae have been // generated, these initial losers may be pruned. DEBUG(dbgs() << " Filtering loser "; F.print(dbgs()); dbgs() << "\n"); } else { SmallVector<const SCEV *, 4> Key; for (const SCEV *Reg : F.BaseRegs) { if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx)) Key.push_back(Reg); } if (F.ScaledReg && RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx)) Key.push_back(F.ScaledReg); // Unstable sort by host order ok, because this is only used for // uniquifying. 
std::sort(Key.begin(), Key.end()); std::pair<BestFormulaeTy::const_iterator, bool> P = BestFormulae.insert(std::make_pair(Key, FIdx)); if (P.second) continue; Formula &Best = LU.Formulae[P.first->second]; Cost CostBest; Regs.clear(); CostBest.RateFormula(TTI, Best, Regs, VisitedRegs, L, LU.Offsets, SE, DT, LU); if (CostF < CostBest) std::swap(F, Best); DEBUG(dbgs() << " Filtering out formula "; F.print(dbgs()); dbgs() << "\n" " in favor of formula "; Best.print(dbgs()); dbgs() << '\n'); } #ifndef NDEBUG ChangedFormulae = true; #endif LU.DeleteFormula(F); --FIdx; --NumForms; Any = true; } // Now that we've filtered out some formulae, recompute the Regs set. if (Any) LU.RecomputeRegs(LUIdx, RegUses); // Reset this to prepare for the next use. BestFormulae.clear(); } DEBUG(if (ChangedFormulae) { dbgs() << "\n" "After filtering out undesirable candidates:\n"; print_uses(dbgs()); }); } // This is a rough guess that seems to work fairly well. static const size_t ComplexityLimit = UINT16_MAX; /// EstimateSearchSpaceComplexity - Estimate the worst-case number of /// solutions the solver might have to consider. It almost never considers /// this many solutions because it prune the search space, but the pruning /// isn't always sufficient. size_t LSRInstance::EstimateSearchSpaceComplexity() const { size_t Power = 1; for (const LSRUse &LU : Uses) { size_t FSize = LU.Formulae.size(); if (FSize >= ComplexityLimit) { Power = ComplexityLimit; break; } Power *= FSize; if (Power >= ComplexityLimit) break; } return Power; } /// NarrowSearchSpaceByDetectingSupersets - When one formula uses a superset /// of the registers of another formula, it won't help reduce register /// pressure (though it may not necessarily hurt register pressure); remove /// it to simplify the system. void LSRInstance::NarrowSearchSpaceByDetectingSupersets() { if (EstimateSearchSpaceComplexity() >= ComplexityLimit) { DEBUG(dbgs() << "The search space is too complex.\n"); DEBUG(dbgs() << "Narrowing the search space by eliminating formulae " "which use a superset of registers used by other " "formulae.\n"); for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { LSRUse &LU = Uses[LUIdx]; bool Any = false; for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) { Formula &F = LU.Formulae[i]; // Look for a formula with a constant or GV in a register. If the use // also has a formula with that same value in an immediate field, // delete the one that uses a register. 
for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I) { if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*I)) { Formula NewF = F; NewF.BaseOffset += C->getValue()->getSExtValue(); NewF.BaseRegs.erase(NewF.BaseRegs.begin() + (I - F.BaseRegs.begin())); if (LU.HasFormulaWithSameRegs(NewF)) { DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n'); LU.DeleteFormula(F); --i; --e; Any = true; break; } } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(*I)) { if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) if (!F.BaseGV) { Formula NewF = F; NewF.BaseGV = GV; NewF.BaseRegs.erase(NewF.BaseRegs.begin() + (I - F.BaseRegs.begin())); if (LU.HasFormulaWithSameRegs(NewF)) { DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n'); LU.DeleteFormula(F); --i; --e; Any = true; break; } } } } } if (Any) LU.RecomputeRegs(LUIdx, RegUses); } DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs())); } } /// NarrowSearchSpaceByCollapsingUnrolledCode - When there are many registers /// for expressions like A, A+1, A+2, etc., allocate a single register for /// them. void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() { if (EstimateSearchSpaceComplexity() < ComplexityLimit) return; DEBUG(dbgs() << "The search space is too complex.\n" "Narrowing the search space by assuming that uses separated " "by a constant offset will use the same registers.\n"); // This is especially useful for unrolled loops. for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { LSRUse &LU = Uses[LUIdx]; for (const Formula &F : LU.Formulae) { if (F.BaseOffset == 0 || (F.Scale != 0 && F.Scale != 1)) continue; LSRUse *LUThatHas = FindUseWithSimilarFormula(F, LU); if (!LUThatHas) continue; if (!reconcileNewOffset(*LUThatHas, F.BaseOffset, /*HasBaseReg=*/ false, LU.Kind, LU.AccessTy)) continue; DEBUG(dbgs() << " Deleting use "; LU.print(dbgs()); dbgs() << '\n'); LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop; // Update the relocs to reference the new use. for (LSRFixup &Fixup : Fixups) { if (Fixup.LUIdx == LUIdx) { Fixup.LUIdx = LUThatHas - &Uses.front(); Fixup.Offset += F.BaseOffset; // Add the new offset to LUThatHas' offset list. if (LUThatHas->Offsets.back() != Fixup.Offset) { LUThatHas->Offsets.push_back(Fixup.Offset); if (Fixup.Offset > LUThatHas->MaxOffset) LUThatHas->MaxOffset = Fixup.Offset; if (Fixup.Offset < LUThatHas->MinOffset) LUThatHas->MinOffset = Fixup.Offset; } DEBUG(dbgs() << "New fixup has offset " << Fixup.Offset << '\n'); } if (Fixup.LUIdx == NumUses-1) Fixup.LUIdx = LUIdx; } // Delete formulae from the new use which are no longer legal. bool Any = false; for (size_t i = 0, e = LUThatHas->Formulae.size(); i != e; ++i) { Formula &F = LUThatHas->Formulae[i]; if (!isLegalUse(TTI, LUThatHas->MinOffset, LUThatHas->MaxOffset, LUThatHas->Kind, LUThatHas->AccessTy, F)) { DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n'); LUThatHas->DeleteFormula(F); --i; --e; Any = true; } } if (Any) LUThatHas->RecomputeRegs(LUThatHas - &Uses.front(), RegUses); // Delete the old use. DeleteUse(LU, LUIdx); --LUIdx; --NumUses; break; } } DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs())); } /// NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters - Call /// FilterOutUndesirableDedicatedRegisters again, if necessary, now that /// we've done more filtering, as it may be able to find more formulae to /// eliminate. 
void LSRInstance::NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(){ if (EstimateSearchSpaceComplexity() >= ComplexityLimit) { DEBUG(dbgs() << "The search space is too complex.\n"); DEBUG(dbgs() << "Narrowing the search space by re-filtering out " "undesirable dedicated registers.\n"); FilterOutUndesirableDedicatedRegisters(); DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs())); } } /// NarrowSearchSpaceByPickingWinnerRegs - Pick a register which seems likely /// to be profitable, and then in any use which has any reference to that /// register, delete all formulae which do not reference that register. void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() { // With all other options exhausted, loop until the system is simple // enough to handle. SmallPtrSet<const SCEV *, 4> Taken; while (EstimateSearchSpaceComplexity() >= ComplexityLimit) { // Ok, we have too many of formulae on our hands to conveniently handle. // Use a rough heuristic to thin out the list. DEBUG(dbgs() << "The search space is too complex.\n"); // Pick the register which is used by the most LSRUses, which is likely // to be a good reuse register candidate. const SCEV *Best = nullptr; unsigned BestNum = 0; for (const SCEV *Reg : RegUses) { if (Taken.count(Reg)) continue; if (!Best) Best = Reg; else { unsigned Count = RegUses.getUsedByIndices(Reg).count(); if (Count > BestNum) { Best = Reg; BestNum = Count; } } } DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best << " will yield profitable reuse.\n"); Taken.insert(Best); // In any use with formulae which references this register, delete formulae // which don't reference it. for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { LSRUse &LU = Uses[LUIdx]; if (!LU.Regs.count(Best)) continue; bool Any = false; for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) { Formula &F = LU.Formulae[i]; if (!F.referencesReg(Best)) { DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n'); LU.DeleteFormula(F); --e; --i; Any = true; assert(e != 0 && "Use has no formulae left! Is Regs inconsistent?"); continue; } } if (Any) LU.RecomputeRegs(LUIdx, RegUses); } DEBUG(dbgs() << "After pre-selection:\n"; print_uses(dbgs())); } } /// NarrowSearchSpaceUsingHeuristics - If there are an extraordinary number of /// formulae to choose from, use some rough heuristics to prune down the number /// of formulae. This keeps the main solver from taking an extraordinary amount /// of time in some worst-case scenarios. void LSRInstance::NarrowSearchSpaceUsingHeuristics() { NarrowSearchSpaceByDetectingSupersets(); NarrowSearchSpaceByCollapsingUnrolledCode(); NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(); NarrowSearchSpaceByPickingWinnerRegs(); } /// SolveRecurse - This is the recursive solver. void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution, Cost &SolutionCost, SmallVectorImpl<const Formula *> &Workspace, const Cost &CurCost, const SmallPtrSet<const SCEV *, 16> &CurRegs, DenseSet<const SCEV *> &VisitedRegs) const { // Some ideas: // - prune more: // - use more aggressive filtering // - sort the formula so that the most profitable solutions are found first // - sort the uses too // - search faster: // - don't compute a cost, and then compare. compare while computing a cost // and bail early. 
// - track register sets with SmallBitVector const LSRUse &LU = Uses[Workspace.size()]; // If this use references any register that's already a part of the // in-progress solution, consider it a requirement that a formula must // reference that register in order to be considered. This prunes out // unprofitable searching. SmallSetVector<const SCEV *, 4> ReqRegs; for (const SCEV *S : CurRegs) if (LU.Regs.count(S)) ReqRegs.insert(S); SmallPtrSet<const SCEV *, 16> NewRegs; Cost NewCost; for (const Formula &F : LU.Formulae) { // Ignore formulae which may not be ideal in terms of register reuse of // ReqRegs. The formula should use all required registers before // introducing new ones. int NumReqRegsToFind = std::min(F.getNumRegs(), ReqRegs.size()); for (const SCEV *Reg : ReqRegs) { if ((F.ScaledReg && F.ScaledReg == Reg) || std::find(F.BaseRegs.begin(), F.BaseRegs.end(), Reg) != F.BaseRegs.end()) { --NumReqRegsToFind; if (NumReqRegsToFind == 0) break; } } if (NumReqRegsToFind != 0) { // If none of the formulae satisfied the required registers, then we could // clear ReqRegs and try again. Currently, we simply give up in this case. continue; } // Evaluate the cost of the current formula. If it's already worse than // the current best, prune the search at that point. NewCost = CurCost; NewRegs = CurRegs; NewCost.RateFormula(TTI, F, NewRegs, VisitedRegs, L, LU.Offsets, SE, DT, LU); if (NewCost < SolutionCost) { Workspace.push_back(&F); if (Workspace.size() != Uses.size()) { SolveRecurse(Solution, SolutionCost, Workspace, NewCost, NewRegs, VisitedRegs); if (F.getNumRegs() == 1 && Workspace.size() == 1) VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]); } else { DEBUG(dbgs() << "New best at "; NewCost.print(dbgs()); dbgs() << ".\n Regs:"; for (const SCEV *S : NewRegs) dbgs() << ' ' << *S; dbgs() << '\n'); SolutionCost = NewCost; Solution = Workspace; } Workspace.pop_back(); } } } /// Solve - Choose one formula from each use. Return the results in the given /// Solution vector. void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const { SmallVector<const Formula *, 8> Workspace; Cost SolutionCost; SolutionCost.Lose(); Cost CurCost; SmallPtrSet<const SCEV *, 16> CurRegs; DenseSet<const SCEV *> VisitedRegs; Workspace.reserve(Uses.size()); // SolveRecurse does all the work. SolveRecurse(Solution, SolutionCost, Workspace, CurCost, CurRegs, VisitedRegs); if (Solution.empty()) { DEBUG(dbgs() << "\nNo Satisfactory Solution\n"); return; } // Ok, we've now made all our decisions. DEBUG(dbgs() << "\n" "The chosen solution requires "; SolutionCost.print(dbgs()); dbgs() << ":\n"; for (size_t i = 0, e = Uses.size(); i != e; ++i) { dbgs() << " "; Uses[i].print(dbgs()); dbgs() << "\n" " "; Solution[i]->print(dbgs()); dbgs() << '\n'; }); assert(Solution.size() == Uses.size() && "Malformed solution!"); } /// HoistInsertPosition - Helper for AdjustInsertPositionForExpand. Climb up /// the dominator tree far as we can go while still being dominated by the /// input positions. This helps canonicalize the insert position, which /// encourages sharing. BasicBlock::iterator LSRInstance::HoistInsertPosition(BasicBlock::iterator IP, const SmallVectorImpl<Instruction *> &Inputs) const { for (;;) { const Loop *IPLoop = LI.getLoopFor(IP->getParent()); unsigned IPLoopDepth = IPLoop ? 
IPLoop->getLoopDepth() : 0; BasicBlock *IDom; for (DomTreeNode *Rung = DT.getNode(IP->getParent()); ; ) { if (!Rung) return IP; Rung = Rung->getIDom(); if (!Rung) return IP; IDom = Rung->getBlock(); // Don't climb into a loop though. const Loop *IDomLoop = LI.getLoopFor(IDom); unsigned IDomDepth = IDomLoop ? IDomLoop->getLoopDepth() : 0; if (IDomDepth <= IPLoopDepth && (IDomDepth != IPLoopDepth || IDomLoop == IPLoop)) break; } bool AllDominate = true; Instruction *BetterPos = nullptr; Instruction *Tentative = IDom->getTerminator(); for (Instruction *Inst : Inputs) { if (Inst == Tentative || !DT.dominates(Inst, Tentative)) { AllDominate = false; break; } // Attempt to find an insert position in the middle of the block, // instead of at the end, so that it can be used for other expansions. if (IDom == Inst->getParent() && (!BetterPos || !DT.dominates(Inst, BetterPos))) BetterPos = std::next(BasicBlock::iterator(Inst)); } if (!AllDominate) break; if (BetterPos) IP = BetterPos; else IP = Tentative; } return IP; } /// AdjustInsertPositionForExpand - Determine an input position which will be /// dominated by the operands and which will dominate the result. BasicBlock::iterator LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator LowestIP, const LSRFixup &LF, const LSRUse &LU, SCEVExpander &Rewriter) const { // Collect some instructions which must be dominated by the // expanding replacement. These must be dominated by any operands that // will be required in the expansion. SmallVector<Instruction *, 4> Inputs; if (Instruction *I = dyn_cast<Instruction>(LF.OperandValToReplace)) Inputs.push_back(I); if (LU.Kind == LSRUse::ICmpZero) if (Instruction *I = dyn_cast<Instruction>(cast<ICmpInst>(LF.UserInst)->getOperand(1))) Inputs.push_back(I); if (LF.PostIncLoops.count(L)) { if (LF.isUseFullyOutsideLoop(L)) Inputs.push_back(L->getLoopLatch()->getTerminator()); else Inputs.push_back(IVIncInsertPos); } // The expansion must also be dominated by the increment positions of any // loops it for which it is using post-inc mode. for (const Loop *PIL : LF.PostIncLoops) { if (PIL == L) continue; // Be dominated by the loop exit. SmallVector<BasicBlock *, 4> ExitingBlocks; PIL->getExitingBlocks(ExitingBlocks); if (!ExitingBlocks.empty()) { BasicBlock *BB = ExitingBlocks[0]; for (unsigned i = 1, e = ExitingBlocks.size(); i != e; ++i) BB = DT.findNearestCommonDominator(BB, ExitingBlocks[i]); Inputs.push_back(BB->getTerminator()); } } assert(!isa<PHINode>(LowestIP) && !isa<LandingPadInst>(LowestIP) && !isa<DbgInfoIntrinsic>(LowestIP) && "Insertion point must be a normal instruction"); // Then, climb up the immediate dominator tree as far as we can go while // still being dominated by the input positions. BasicBlock::iterator IP = HoistInsertPosition(LowestIP, Inputs); // Don't insert instructions before PHI nodes. while (isa<PHINode>(IP)) ++IP; // Ignore landingpad instructions. while (isa<LandingPadInst>(IP)) ++IP; // Ignore debug intrinsics. while (isa<DbgInfoIntrinsic>(IP)) ++IP; // Set IP below instructions recently inserted by SCEVExpander. This keeps the // IP consistent across expansions and allows the previously inserted // instructions to be reused by subsequent expansion. while (Rewriter.isInsertedInstruction(IP) && IP != LowestIP) ++IP; return IP; } /// Expand - Emit instructions for the leading candidate expression for this /// LSRUse (this is called "expanding"). 
Value *LSRInstance::Expand(const LSRFixup &LF, const Formula &F, BasicBlock::iterator IP, SCEVExpander &Rewriter, SmallVectorImpl<WeakTrackingVH> &DeadInsts) const { const LSRUse &LU = Uses[LF.LUIdx]; if (LU.RigidFormula) return LF.OperandValToReplace; // Determine an input position which will be dominated by the operands and // which will dominate the result. IP = AdjustInsertPositionForExpand(IP, LF, LU, Rewriter); // Inform the Rewriter if we have a post-increment use, so that it can // perform an advantageous expansion. Rewriter.setPostInc(LF.PostIncLoops); // This is the type that the user actually needs. Type *OpTy = LF.OperandValToReplace->getType(); // This will be the type that we'll initially expand to. Type *Ty = F.getType(); if (!Ty) // No type known; just expand directly to the ultimate type. Ty = OpTy; else if (SE.getEffectiveSCEVType(Ty) == SE.getEffectiveSCEVType(OpTy)) // Expand directly to the ultimate type if it's the right size. Ty = OpTy; // This is the type to do integer arithmetic in. Type *IntTy = SE.getEffectiveSCEVType(Ty); // Build up a list of operands to add together to form the full base. SmallVector<const SCEV *, 8> Ops; // Expand the BaseRegs portion. for (const SCEV *Reg : F.BaseRegs) { assert(!Reg->isZero() && "Zero allocated in a base register!"); // If we're expanding for a post-inc user, make the post-inc adjustment. PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops); Reg = TransformForPostIncUse(Denormalize, Reg, LF.UserInst, LF.OperandValToReplace, Loops, SE, DT); Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, nullptr, IP))); } // Expand the ScaledReg portion. Value *ICmpScaledV = nullptr; if (F.Scale != 0) { const SCEV *ScaledS = F.ScaledReg; // If we're expanding for a post-inc user, make the post-inc adjustment. PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops); ScaledS = TransformForPostIncUse(Denormalize, ScaledS, LF.UserInst, LF.OperandValToReplace, Loops, SE, DT); if (LU.Kind == LSRUse::ICmpZero) { // Expand ScaleReg as if it was part of the base regs. if (F.Scale == 1) Ops.push_back( SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr, IP))); else { // An interesting way of "folding" with an icmp is to use a negated // scale, which we'll implement by inserting it into the other operand // of the icmp. assert(F.Scale == -1 && "The only scale supported by ICmpZero uses is -1!"); ICmpScaledV = Rewriter.expandCodeFor(ScaledS, nullptr, IP); } } else { // Otherwise just expand the scaled register and an explicit scale, // which is expected to be matched as part of the address. // Flush the operand list to suppress SCEVExpander hoisting address modes. // Unless the addressing mode will not be folded. if (!Ops.empty() && LU.Kind == LSRUse::Address && isAMCompletelyFolded(TTI, LU, F)) { Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP); Ops.clear(); Ops.push_back(SE.getUnknown(FullV)); } ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr, IP)); if (F.Scale != 1) ScaledS = SE.getMulExpr(ScaledS, SE.getConstant(ScaledS->getType(), F.Scale)); Ops.push_back(ScaledS); } } // Expand the GV portion. if (F.BaseGV) { // Flush the operand list to suppress SCEVExpander hoisting. if (!Ops.empty()) { Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP); Ops.clear(); Ops.push_back(SE.getUnknown(FullV)); } Ops.push_back(SE.getUnknown(F.BaseGV)); } // Flush the operand list to suppress SCEVExpander hoisting of both folded and // unfolded offsets. 
LSR assumes they both live next to their uses. if (!Ops.empty()) { Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP); Ops.clear(); Ops.push_back(SE.getUnknown(FullV)); } // Expand the immediate portion. int64_t Offset = (uint64_t)F.BaseOffset + LF.Offset; if (Offset != 0) { if (LU.Kind == LSRUse::ICmpZero) { // The other interesting way of "folding" with an ICmpZero is to use a // negated immediate. if (!ICmpScaledV) ICmpScaledV = ConstantInt::get(IntTy, -(uint64_t)Offset); else { Ops.push_back(SE.getUnknown(ICmpScaledV)); ICmpScaledV = ConstantInt::get(IntTy, Offset); } } else { // Just add the immediate values. These again are expected to be matched // as part of the address. Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy, Offset))); } } // Expand the unfolded offset portion. int64_t UnfoldedOffset = F.UnfoldedOffset; if (UnfoldedOffset != 0) { // Just add the immediate values. Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy, UnfoldedOffset))); } // Emit instructions summing all the operands. const SCEV *FullS = Ops.empty() ? SE.getConstant(IntTy, 0) : SE.getAddExpr(Ops); Value *FullV = Rewriter.expandCodeFor(FullS, Ty, IP); // We're done expanding now, so reset the rewriter. Rewriter.clearPostInc(); // An ICmpZero Formula represents an ICmp which we're handling as a // comparison against zero. Now that we've expanded an expression for that // form, update the ICmp's other operand. if (LU.Kind == LSRUse::ICmpZero) { ICmpInst *CI = cast<ICmpInst>(LF.UserInst); DeadInsts.emplace_back(CI->getOperand(1)); assert(!F.BaseGV && "ICmp does not support folding a global value and " "a scale at the same time!"); if (F.Scale == -1) { if (ICmpScaledV->getType() != OpTy) { Instruction *Cast = CastInst::Create(CastInst::getCastOpcode(ICmpScaledV, false, OpTy, false), ICmpScaledV, OpTy, "tmp", CI); ICmpScaledV = Cast; } CI->setOperand(1, ICmpScaledV); } else { // A scale of 1 means that the scale has been expanded as part of the // base regs. assert((F.Scale == 0 || F.Scale == 1) && "ICmp does not support folding a global value and " "a scale at the same time!"); Constant *C = ConstantInt::getSigned(SE.getEffectiveSCEVType(OpTy), -(uint64_t)Offset); if (C->getType() != OpTy) C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, OpTy, false), C, OpTy); CI->setOperand(1, C); } } return FullV; } /// RewriteForPHI - Helper for Rewrite. PHI nodes are special because the use /// of their operands effectively happens in their predecessor blocks, so the /// expression may need to be expanded in multiple places. void LSRInstance::RewriteForPHI(PHINode *PN, const LSRFixup &LF, const Formula &F, SCEVExpander &Rewriter, SmallVectorImpl<WeakTrackingVH> &DeadInsts, Pass *P) const { DenseMap<BasicBlock *, Value *> Inserted; for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) if (PN->getIncomingValue(i) == LF.OperandValToReplace) { BasicBlock *BB = PN->getIncomingBlock(i); // If this is a critical edge, split the edge so that we do not insert // the code on all predecessor/successor paths. We do this unless this // is the canonical backedge for this loop, which complicates post-inc // users. if (e != 1 && BB->getTerminator()->getNumSuccessors() > 1 && !isa<IndirectBrInst>(BB->getTerminator())) { BasicBlock *Parent = PN->getParent(); Loop *PNLoop = LI.getLoopFor(Parent); if (!PNLoop || Parent != PNLoop->getHeader()) { // Split the critical edge. 
BasicBlock *NewBB = nullptr; if (!Parent->isLandingPad()) { NewBB = SplitCriticalEdge(BB, Parent, CriticalEdgeSplittingOptions(&DT, &LI) .setMergeIdenticalEdges() .setDontDeleteUselessPHIs()); } else { SmallVector<BasicBlock*, 2> NewBBs; SplitLandingPadPredecessors(Parent, BB, "", "", NewBBs, /*AliasAnalysis*/ nullptr, &DT, &LI); NewBB = NewBBs[0]; } // If NewBB==NULL, then SplitCriticalEdge refused to split because all // phi predecessors are identical. The simple thing to do is skip // splitting in this case rather than complicate the API. if (NewBB) { // If PN is outside of the loop and BB is in the loop, we want to // move the block to be immediately before the PHI block, not // immediately after BB. if (L->contains(BB) && !L->contains(PN)) NewBB->moveBefore(PN->getParent()); // Splitting the edge can reduce the number of PHI entries we have. e = PN->getNumIncomingValues(); BB = NewBB; i = PN->getBasicBlockIndex(BB); } } } std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair = Inserted.insert(std::make_pair(BB, static_cast<Value *>(nullptr))); if (!Pair.second) PN->setIncomingValue(i, Pair.first->second); else { Value *FullV = Expand(LF, F, BB->getTerminator(), Rewriter, DeadInsts); // If this is reuse-by-noop-cast, insert the noop cast. Type *OpTy = LF.OperandValToReplace->getType(); if (FullV->getType() != OpTy) FullV = CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false), FullV, LF.OperandValToReplace->getType(), "tmp", BB->getTerminator()); PN->setIncomingValue(i, FullV); Pair.first->second = FullV; } } } /// Rewrite - Emit instructions for the leading candidate expression for this /// LSRUse (this is called "expanding"), and update the UserInst to reference /// the newly expanded value. void LSRInstance::Rewrite(const LSRFixup &LF, const Formula &F, SCEVExpander &Rewriter, SmallVectorImpl<WeakTrackingVH> &DeadInsts, Pass *P) const { // First, find an insertion point that dominates UserInst. For PHI nodes, // find the nearest block which dominates all the relevant uses. if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) { RewriteForPHI(PN, LF, F, Rewriter, DeadInsts, P); } else { Value *FullV = Expand(LF, F, LF.UserInst, Rewriter, DeadInsts); // If this is reuse-by-noop-cast, insert the noop cast. Type *OpTy = LF.OperandValToReplace->getType(); if (FullV->getType() != OpTy) { Instruction *Cast = CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false), FullV, OpTy, "tmp", LF.UserInst); FullV = Cast; } // Update the user. ICmpZero is handled specially here (for now) because // Expand may have updated one of the operands of the icmp already, and // its new value may happen to be equal to LF.OperandValToReplace, in // which case doing replaceUsesOfWith leads to replacing both operands // with the same value. TODO: Reorganize this. if (Uses[LF.LUIdx].Kind == LSRUse::ICmpZero) LF.UserInst->setOperand(0, FullV); else LF.UserInst->replaceUsesOfWith(LF.OperandValToReplace, FullV); } DeadInsts.emplace_back(LF.OperandValToReplace); } /// ImplementSolution - Rewrite all the fixup locations with new values, /// following the chosen solution. void LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution, Pass *P) { // Keep track of instructions we may have made dead, so that // we can remove them after we are done working. 
SmallVector<WeakTrackingVH, 16> DeadInsts; SCEVExpander Rewriter(SE, L->getHeader()->getModule()->getDataLayout(), "lsr"); #ifndef NDEBUG Rewriter.setDebugType(DEBUG_TYPE); #endif Rewriter.disableCanonicalMode(); Rewriter.enableLSRMode(); Rewriter.setIVIncInsertPos(L, IVIncInsertPos); // Mark phi nodes that terminate chains so the expander tries to reuse them. for (const IVChain &Chain : IVChainVec) { if (PHINode *PN = dyn_cast<PHINode>(Chain.tailUserInst())) Rewriter.setChainedPhi(PN); } // Expand the new value definitions and update the users. for (const LSRFixup &Fixup : Fixups) { Rewrite(Fixup, *Solution[Fixup.LUIdx], Rewriter, DeadInsts, P); Changed = true; } for (const IVChain &Chain : IVChainVec) { GenerateIVChain(Chain, Rewriter, DeadInsts); Changed = true; } // Clean up after ourselves. This must be done before deleting any // instructions. Rewriter.clear(); Changed |= DeleteTriviallyDeadInstructions(DeadInsts); } LSRInstance::LSRInstance(Loop *L, Pass *P) : IU(P->getAnalysis<IVUsers>()), SE(P->getAnalysis<ScalarEvolution>()), DT(P->getAnalysis<DominatorTreeWrapperPass>().getDomTree()), LI(P->getAnalysis<LoopInfoWrapperPass>().getLoopInfo()), TTI(P->getAnalysis<TargetTransformInfoWrapperPass>().getTTI( *L->getHeader()->getParent())), L(L), Changed(false), IVIncInsertPos(nullptr) { // If LoopSimplify form is not available, stay out of trouble. if (!L->isLoopSimplifyForm()) return; // If there's no interesting work to be done, bail early. if (IU.empty()) return; // If there's too much analysis to be done, bail early. We won't be able to // model the problem anyway. unsigned NumUsers = 0; for (const IVStrideUse &U : IU) { if (++NumUsers > MaxIVUsers) { (void)U; DEBUG(dbgs() << "LSR skipping loop, too many IV Users in " << U << "\n"); return; } } #ifndef NDEBUG // All dominating loops must have preheaders, or SCEVExpander may not be able // to materialize an AddRecExpr whose Start is an outer AddRecExpr. // // IVUsers analysis should only create users that are dominated by simple loop // headers. Since this loop should dominate all of its users, its user list // should be empty if this loop itself is not within a simple loop nest. for (DomTreeNode *Rung = DT.getNode(L->getLoopPreheader()); Rung; Rung = Rung->getIDom()) { BasicBlock *BB = Rung->getBlock(); const Loop *DomLoop = LI.getLoopFor(BB); if (DomLoop && DomLoop->getHeader() == BB) { assert(DomLoop->getLoopPreheader() && "LSR needs a simplified loop nest"); } } #endif // DEBUG DEBUG(dbgs() << "\nLSR on loop "; L->getHeader()->printAsOperand(dbgs(), /*PrintType=*/false); dbgs() << ":\n"); // First, perform some low-level loop optimizations. OptimizeShadowIV(); OptimizeLoopTermCond(); // If loop preparation eliminates all interesting IV users, bail. if (IU.empty()) return; // Skip nested loops until we can model them better with formulae. if (!L->empty()) { DEBUG(dbgs() << "LSR skipping outer loop " << *L << "\n"); return; } // Start collecting data and preparing for the solver. CollectChains(); CollectInterestingTypesAndFactors(); CollectFixupsAndInitialFormulae(); CollectLoopInvariantFixupsAndFormulae(); assert(!Uses.empty() && "IVUsers reported at least one use"); DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n"; print_uses(dbgs())); // Now use the reuse data to generate a bunch of interesting ways // to formulate the values needed for the uses. 
GenerateAllReuseFormulae(); FilterOutUndesirableDedicatedRegisters(); NarrowSearchSpaceUsingHeuristics(); SmallVector<const Formula *, 8> Solution; Solve(Solution); // Release memory that is no longer needed. Factors.clear(); Types.clear(); RegUses.clear(); if (Solution.empty()) return; #ifndef NDEBUG // Formulae should be legal. for (const LSRUse &LU : Uses) { for (const Formula &F : LU.Formulae) assert(isLegalUse(TTI, LU.MinOffset, LU.MaxOffset, LU.Kind, LU.AccessTy, F) && "Illegal formula generated!"); }; #endif // Now that we've decided what we want, make it so. ImplementSolution(Solution, P); } void LSRInstance::print_factors_and_types(raw_ostream &OS) const { if (Factors.empty() && Types.empty()) return; OS << "LSR has identified the following interesting factors and types: "; bool First = true; for (int64_t Factor : Factors) { if (!First) OS << ", "; First = false; OS << '*' << Factor; } for (Type *Ty : Types) { if (!First) OS << ", "; First = false; OS << '(' << *Ty << ')'; } OS << '\n'; } void LSRInstance::print_fixups(raw_ostream &OS) const { OS << "LSR is examining the following fixup sites:\n"; for (const LSRFixup &LF : Fixups) { dbgs() << " "; LF.print(OS); OS << '\n'; } } void LSRInstance::print_uses(raw_ostream &OS) const { OS << "LSR is examining the following uses:\n"; for (const LSRUse &LU : Uses) { dbgs() << " "; LU.print(OS); OS << '\n'; for (const Formula &F : LU.Formulae) { OS << " "; F.print(OS); OS << '\n'; } } } void LSRInstance::print(raw_ostream &OS) const { print_factors_and_types(OS); print_fixups(OS); print_uses(OS); } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void LSRInstance::dump() const { print(errs()); errs() << '\n'; } #endif namespace { class LoopStrengthReduce : public LoopPass { public: static char ID; // Pass ID, replacement for typeid LoopStrengthReduce(); private: bool runOnLoop(Loop *L, LPPassManager &LPM) override; void getAnalysisUsage(AnalysisUsage &AU) const override; }; } char LoopStrengthReduce::ID = 0; INITIALIZE_PASS_BEGIN(LoopStrengthReduce, "loop-reduce", "Loop Strength Reduction", false, false) INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) INITIALIZE_PASS_DEPENDENCY(IVUsers) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopSimplify) INITIALIZE_PASS_END(LoopStrengthReduce, "loop-reduce", "Loop Strength Reduction", false, false) Pass *llvm::createLoopStrengthReducePass() { return new LoopStrengthReduce(); } LoopStrengthReduce::LoopStrengthReduce() : LoopPass(ID) { initializeLoopStrengthReducePass(*PassRegistry::getPassRegistry()); } void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const { // We split critical edges, so we change the CFG. However, we do update // many analyses if they are around. AU.addPreservedID(LoopSimplifyID); AU.addRequired<LoopInfoWrapperPass>(); AU.addPreserved<LoopInfoWrapperPass>(); AU.addRequiredID(LoopSimplifyID); AU.addRequired<DominatorTreeWrapperPass>(); AU.addPreserved<DominatorTreeWrapperPass>(); AU.addRequired<ScalarEvolution>(); AU.addPreserved<ScalarEvolution>(); // Requiring LoopSimplify a second time here prevents IVUsers from running // twice, since LoopSimplify was invalidated by running ScalarEvolution. 
AU.addRequiredID(LoopSimplifyID); AU.addRequired<IVUsers>(); AU.addPreserved<IVUsers>(); AU.addRequired<TargetTransformInfoWrapperPass>(); } bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) { if (skipOptnoneFunction(L)) return false; bool Changed = false; // Run the main LSR transformation. #if 0 // HLSL Change - move LSRInstance to the heap to avoid >16K stack allocation here. Changed |= LSRInstance(L, this).getChanged(); #else { std::unique_ptr<LSRInstance> instance(new LSRInstance(L, this)); Changed |= instance->getChanged(); } #endif // Remove any extra phis created by processing inner loops. Changed |= DeleteDeadPHIs(L->getHeader()); if (EnablePhiElim && L->isLoopSimplifyForm()) { SmallVector<WeakTrackingVH, 16> DeadInsts; const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); SCEVExpander Rewriter(getAnalysis<ScalarEvolution>(), DL, "lsr"); #ifndef NDEBUG Rewriter.setDebugType(DEBUG_TYPE); #endif unsigned numFolded = Rewriter.replaceCongruentIVs( L, &getAnalysis<DominatorTreeWrapperPass>().getDomTree(), DeadInsts, &getAnalysis<TargetTransformInfoWrapperPass>().getTTI( *L->getHeader()->getParent())); if (numFolded) { Changed = true; DeleteTriviallyDeadInstructions(DeadInsts); DeleteDeadPHIs(L->getHeader()); } } return Changed; }
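//
// For illustration only (the pass itself works on LLVM IR and target
// addressing modes; the C form and the names below are hypothetical): the
// effect of strength reduction can be pictured on a simple loop. Given
//
//   for (int i = 0; i < n; ++i)
//     sum += a[4 * i];          // address rederived from i on every iteration
//
// the induction-variable arithmetic is rewritten so the address is advanced by
// a constant stride instead of being recomputed with a multiply:
//
//   for (int *p = a, *e = a + 4 * n; p != e; p += 4)
//     sum += *p;                // one pointer add per iteration, no multiply
//
// The pass is registered above under the name "loop-reduce", so it can be run
// on its own with, e.g., `opt -loop-reduce -S input.ll`.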
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
//===-- SeparateConstOffsetFromGEP.cpp - ------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Loop unrolling may create many similar GEPs for array accesses.
// e.g., a 2-level loop
//
// float a[32][32]; // global variable
//
// for (int i = 0; i < 2; ++i) {
//   for (int j = 0; j < 2; ++j) {
//     ...
//     ... = a[x + i][y + j];
//     ...
//   }
// }
//
// will probably be unrolled to:
//
// gep %a, 0, %x, %y; load
// gep %a, 0, %x, %y + 1; load
// gep %a, 0, %x + 1, %y; load
// gep %a, 0, %x + 1, %y + 1; load
//
// LLVM's GVN does not use partial redundancy elimination yet, and is thus
// unable to reuse (gep %a, 0, %x, %y). As a result, this missed optimization
// incurs a significant slowdown on targets with limited addressing modes. For
// instance, because the PTX target does not support the reg+reg addressing
// mode, the NVPTX backend emits PTX code that literally computes the pointer
// address of each GEP, wasting tons of registers. It emits the following PTX
// for the first load and similar PTX for the other loads.
//
// mov.u32         %r1, %x;
// mov.u32         %r2, %y;
// mul.wide.u32    %rl2, %r1, 128;
// mov.u64         %rl3, a;
// add.s64         %rl4, %rl3, %rl2;
// mul.wide.u32    %rl5, %r2, 4;
// add.s64         %rl6, %rl4, %rl5;
// ld.global.f32   %f1, [%rl6];
//
// To reduce register pressure, the optimization implemented in this file
// merges the common part of a group of GEPs, so we can compute each pointer
// address by adding a simple offset to the common part, saving many registers.
//
// It works by splitting each GEP into a variadic base and a constant offset.
// The variadic base can be computed once and reused by multiple GEPs, and the
// constant offsets can be nicely folded into the reg+immediate addressing mode
// (supported by most targets) without using any extra register.
//
// For instance, we transform the four GEPs and four loads in the above example
// into:
//
// base = gep a, 0, x, y
// load base
// load base + 1 * sizeof(float)
// load base + 32 * sizeof(float)
// load base + 33 * sizeof(float)
//
// Given the transformed IR, a backend that supports the reg+immediate
// addressing mode can easily fold the pointer arithmetic into the loads. For
// example, the NVPTX backend can easily fold the pointer arithmetic into the
// ld.global.f32 instructions, and the resulting PTX uses far fewer registers.
//
// mov.u32         %r1, %x;
// mov.u32         %r2, %y;
// mul.wide.u32    %rl2, %r1, 128;
// mov.u64         %rl3, a;
// add.s64         %rl4, %rl3, %rl2;
// mul.wide.u32    %rl5, %r2, 4;
// add.s64         %rl6, %rl4, %rl5;
// ld.global.f32   %f1, [%rl6]; // so far the same as the unoptimized PTX
// ld.global.f32   %f2, [%rl6+4]; // much better
// ld.global.f32   %f3, [%rl6+128]; // much better
// ld.global.f32   %f4, [%rl6+132]; // much better
//
// Another improvement enabled by the LowerGEP flag is to lower a GEP with
// multiple indices to either multiple GEPs with a single index or arithmetic
// operations (depending on whether the target uses alias analysis in codegen).
// Such a transformation can have the following benefits:
// (1) It can always extract constants in the indices of a structure type.
// (2) After such lowering, there are more optimization opportunities such as
//     CSE, LICM and CGP.
//
The following GEPs have multiple indices: // BB1: // %p = getelementptr [10 x %struct]* %ptr, i64 %i, i64 %j1, i32 3 // load %p // ... // BB2: // %p2 = getelementptr [10 x %struct]* %ptr, i64 %i, i64 %j1, i32 2 // load %p2 // ... // // We can not do CSE for to the common part related to index "i64 %i". Lowering // GEPs can achieve such goals. // If the target does not use alias analysis in codegen, this pass will // lower a GEP with multiple indices into arithmetic operations: // BB1: // %1 = ptrtoint [10 x %struct]* %ptr to i64 ; CSE opportunity // %2 = mul i64 %i, length_of_10xstruct ; CSE opportunity // %3 = add i64 %1, %2 ; CSE opportunity // %4 = mul i64 %j1, length_of_struct // %5 = add i64 %3, %4 // %6 = add i64 %3, struct_field_3 ; Constant offset // %p = inttoptr i64 %6 to i32* // load %p // ... // BB2: // %7 = ptrtoint [10 x %struct]* %ptr to i64 ; CSE opportunity // %8 = mul i64 %i, length_of_10xstruct ; CSE opportunity // %9 = add i64 %7, %8 ; CSE opportunity // %10 = mul i64 %j2, length_of_struct // %11 = add i64 %9, %10 // %12 = add i64 %11, struct_field_2 ; Constant offset // %p = inttoptr i64 %12 to i32* // load %p2 // ... // // If the target uses alias analysis in codegen, this pass will lower a GEP // with multiple indices into multiple GEPs with a single index: // BB1: // %1 = bitcast [10 x %struct]* %ptr to i8* ; CSE opportunity // %2 = mul i64 %i, length_of_10xstruct ; CSE opportunity // %3 = getelementptr i8* %1, i64 %2 ; CSE opportunity // %4 = mul i64 %j1, length_of_struct // %5 = getelementptr i8* %3, i64 %4 // %6 = getelementptr i8* %5, struct_field_3 ; Constant offset // %p = bitcast i8* %6 to i32* // load %p // ... // BB2: // %7 = bitcast [10 x %struct]* %ptr to i8* ; CSE opportunity // %8 = mul i64 %i, length_of_10xstruct ; CSE opportunity // %9 = getelementptr i8* %7, i64 %8 ; CSE opportunity // %10 = mul i64 %j2, length_of_struct // %11 = getelementptr i8* %9, i64 %10 // %12 = getelementptr i8* %11, struct_field_2 ; Constant offset // %p2 = bitcast i8* %12 to i32* // load %p2 // ... // // Lowering GEPs can also benefit other passes such as LICM and CGP. // LICM (Loop Invariant Code Motion) can not hoist/sink a GEP of multiple // indices if one of the index is variant. If we lower such GEP into invariant // parts and variant parts, LICM can hoist/sink those invariant parts. // CGP (CodeGen Prepare) tries to sink address calculations that match the // target's addressing modes. A GEP with multiple indices may not match and will // not be sunk. If we lower such GEP into smaller parts, CGP may sink some of // them. So we end up with a better addressing mode. 
// //===----------------------------------------------------------------------===// #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/IR/Operator.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetSubtargetInfo.h" #include "llvm/IR/IRBuilder.h" using namespace llvm; static cl::opt<bool> DisableSeparateConstOffsetFromGEP( "disable-separate-const-offset-from-gep", cl::init(false), cl::desc("Do not separate the constant offset from a GEP instruction"), cl::Hidden); // Setting this flag may emit false positives when the input module already // contains dead instructions. Therefore, we set it only in unit tests that are // free of dead code. static cl::opt<bool> VerifyNoDeadCode("reassociate-geps-verify-no-dead-code", cl::init(false), cl::desc("Verify this pass produces no dead code"), cl::Hidden); namespace { /// \brief A helper class for separating a constant offset from a GEP index. /// /// In real programs, a GEP index may be more complicated than a simple addition /// of something and a constant integer which can be trivially splitted. For /// example, to split ((a << 3) | 5) + b, we need to search deeper for the /// constant offset, so that we can separate the index to (a << 3) + b and 5. /// /// Therefore, this class looks into the expression that computes a given GEP /// index, and tries to find a constant integer that can be hoisted to the /// outermost level of the expression as an addition. Not every constant in an /// expression can jump out. e.g., we cannot transform (b * (a + 5)) to (b * a + /// 5); nor can we transform (3 * (a + 5)) to (3 * a + 5), however in this case, /// -instcombine probably already optimized (3 * (a + 5)) to (3 * a + 15). class ConstantOffsetExtractor { public: /// Extracts a constant offset from the given GEP index. It returns the /// new index representing the remainder (equal to the original index minus /// the constant offset), or nullptr if we cannot extract a constant offset. /// \p Idx The given GEP index /// \p GEP The given GEP /// \p UserChainTail Outputs the tail of UserChain so that we can /// garbage-collect unused instructions in UserChain. static Value *Extract(Value *Idx, GetElementPtrInst *GEP, User *&UserChainTail, const DominatorTree *DT); /// Looks for a constant offset from the given GEP index without extracting /// it. It returns the numeric value of the extracted constant offset (0 if /// failed). The meaning of the arguments are the same as Extract. static int64_t Find(Value *Idx, GetElementPtrInst *GEP, const DominatorTree *DT); private: ConstantOffsetExtractor(Instruction *InsertionPt, const DominatorTree *DT) : IP(InsertionPt), DL(InsertionPt->getModule()->getDataLayout()), DT(DT) { } /// Searches the expression that computes V for a non-zero constant C s.t. /// V can be reassociated into the form V' + C. If the searching is /// successful, returns C and update UserChain as a def-use chain from C to V; /// otherwise, UserChain is empty. 
/// /// \p V The given expression /// \p SignExtended Whether V will be sign-extended in the computation of the /// GEP index /// \p ZeroExtended Whether V will be zero-extended in the computation of the /// GEP index /// \p NonNegative Whether V is guaranteed to be non-negative. For example, /// an index of an inbounds GEP is guaranteed to be /// non-negative. Levaraging this, we can better split /// inbounds GEPs. APInt find(Value *V, bool SignExtended, bool ZeroExtended, bool NonNegative); /// A helper function to look into both operands of a binary operator. APInt findInEitherOperand(BinaryOperator *BO, bool SignExtended, bool ZeroExtended); /// After finding the constant offset C from the GEP index I, we build a new /// index I' s.t. I' + C = I. This function builds and returns the new /// index I' according to UserChain produced by function "find". /// /// The building conceptually takes two steps: /// 1) iteratively distribute s/zext towards the leaves of the expression tree /// that computes I /// 2) reassociate the expression tree to the form I' + C. /// /// For example, to extract the 5 from sext(a + (b + 5)), we first distribute /// sext to a, b and 5 so that we have /// sext(a) + (sext(b) + 5). /// Then, we reassociate it to /// (sext(a) + sext(b)) + 5. /// Given this form, we know I' is sext(a) + sext(b). Value *rebuildWithoutConstOffset(); /// After the first step of rebuilding the GEP index without the constant /// offset, distribute s/zext to the operands of all operators in UserChain. /// e.g., zext(sext(a + (b + 5)) (assuming no overflow) => /// zext(sext(a)) + (zext(sext(b)) + zext(sext(5))). /// /// The function also updates UserChain to point to new subexpressions after /// distributing s/zext. e.g., the old UserChain of the above example is /// 5 -> b + 5 -> a + (b + 5) -> sext(...) -> zext(sext(...)), /// and the new UserChain is /// zext(sext(5)) -> zext(sext(b)) + zext(sext(5)) -> /// zext(sext(a)) + (zext(sext(b)) + zext(sext(5)) /// /// \p ChainIndex The index to UserChain. ChainIndex is initially /// UserChain.size() - 1, and is decremented during /// the recursion. Value *distributeExtsAndCloneChain(unsigned ChainIndex); /// Reassociates the GEP index to the form I' + C and returns I'. Value *removeConstOffset(unsigned ChainIndex); /// A helper function to apply ExtInsts, a list of s/zext, to value V. /// e.g., if ExtInsts = [sext i32 to i64, zext i16 to i32], this function /// returns "sext i32 (zext i16 V to i32) to i64". Value *applyExts(Value *V); /// A helper function that returns whether we can trace into the operands /// of binary operator BO for a constant offset. /// /// \p SignExtended Whether BO is surrounded by sext /// \p ZeroExtended Whether BO is surrounded by zext /// \p NonNegative Whether BO is known to be non-negative, e.g., an in-bound /// array index. bool CanTraceInto(bool SignExtended, bool ZeroExtended, BinaryOperator *BO, bool NonNegative); /// The path from the constant offset to the old GEP index. e.g., if the GEP /// index is "a * b + (c + 5)". After running function find, UserChain[0] will /// be the constant 5, UserChain[1] will be the subexpression "c + 5", and /// UserChain[2] will be the entire expression "a * b + (c + 5)". /// /// This path helps to rebuild the new GEP index. SmallVector<User *, 8> UserChain; /// A data structure used in rebuildWithoutConstOffset. Contains all /// sext/zext instructions along UserChain. SmallVector<CastInst *, 16> ExtInsts; Instruction *IP; /// Insertion position of cloned instructions. 
const DataLayout &DL; const DominatorTree *DT; }; /// \brief A pass that tries to split every GEP in the function into a variadic /// base and a constant offset. It is a FunctionPass because searching for the /// constant offset may inspect other basic blocks. class SeparateConstOffsetFromGEP : public FunctionPass { public: static char ID; SeparateConstOffsetFromGEP(const TargetMachine *TM = nullptr, bool LowerGEP = false) : FunctionPass(ID), DL(nullptr), DT(nullptr), TM(TM), LowerGEP(LowerGEP) { initializeSeparateConstOffsetFromGEPPass(*PassRegistry::getPassRegistry()); } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<DominatorTreeWrapperPass>(); AU.addRequired<TargetTransformInfoWrapperPass>(); AU.setPreservesCFG(); } bool doInitialization(Module &M) override { DL = &M.getDataLayout(); return false; } bool runOnFunction(Function &F) override; private: /// Tries to split the given GEP into a variadic base and a constant offset, /// and returns true if the splitting succeeds. bool splitGEP(GetElementPtrInst *GEP); /// Lower a GEP with multiple indices into multiple GEPs with a single index. /// Function splitGEP already split the original GEP into a variadic part and /// a constant offset (i.e., AccumulativeByteOffset). This function lowers the /// variadic part into a set of GEPs with a single index and applies /// AccumulativeByteOffset to it. /// \p Variadic The variadic part of the original GEP. /// \p AccumulativeByteOffset The constant offset. void lowerToSingleIndexGEPs(GetElementPtrInst *Variadic, int64_t AccumulativeByteOffset); /// Lower a GEP with multiple indices into ptrtoint+arithmetics+inttoptr form. /// Function splitGEP already split the original GEP into a variadic part and /// a constant offset (i.e., AccumulativeByteOffset). This function lowers the /// variadic part into a set of arithmetic operations and applies /// AccumulativeByteOffset to it. /// \p Variadic The variadic part of the original GEP. /// \p AccumulativeByteOffset The constant offset. void lowerToArithmetics(GetElementPtrInst *Variadic, int64_t AccumulativeByteOffset); /// Finds the constant offset within each index and accumulates them. If /// LowerGEP is true, it finds in indices of both sequential and structure /// types, otherwise it only finds in sequential indices. The output /// NeedsExtraction indicates whether we successfully find a non-zero constant /// offset. int64_t accumulateByteOffset(GetElementPtrInst *GEP, bool &NeedsExtraction); /// Canonicalize array indices to pointer-size integers. This helps to /// simplify the logic of splitting a GEP. For example, if a + b is a /// pointer-size integer, we have /// gep base, a + b = gep (gep base, a), b /// However, this equality may not hold if the size of a + b is smaller than /// the pointer size, because LLVM conceptually sign-extends GEP indices to /// pointer size before computing the address /// (http://llvm.org/docs/LangRef.html#id181). /// /// This canonicalization is very likely already done in clang and /// instcombine. Therefore, the program will probably remain the same. /// /// Returns true if the module changes. /// /// Verified in @i32_add in split-gep.ll bool canonicalizeArrayIndicesToPointerSize(GetElementPtrInst *GEP); /// Verify F is free of dead code. void verifyNoDeadCode(Function &F); const DataLayout *DL; const DominatorTree *DT; const TargetMachine *TM; /// Whether to lower a GEP with multiple indices into arithmetic operations or /// multiple GEPs with a single index. 
bool LowerGEP; }; } // anonymous namespace char SeparateConstOffsetFromGEP::ID = 0; INITIALIZE_PASS_BEGIN( SeparateConstOffsetFromGEP, "separate-const-offset-from-gep", "Split GEPs to a variadic base and a constant offset for better CSE", false, false) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) INITIALIZE_PASS_END( SeparateConstOffsetFromGEP, "separate-const-offset-from-gep", "Split GEPs to a variadic base and a constant offset for better CSE", false, false) FunctionPass * llvm::createSeparateConstOffsetFromGEPPass(const TargetMachine *TM, bool LowerGEP) { return new SeparateConstOffsetFromGEP(TM, LowerGEP); } bool ConstantOffsetExtractor::CanTraceInto(bool SignExtended, bool ZeroExtended, BinaryOperator *BO, bool NonNegative) { // We only consider ADD, SUB and OR, because a non-zero constant found in // expressions composed of these operations can be easily hoisted as a // constant offset by reassociation. if (BO->getOpcode() != Instruction::Add && BO->getOpcode() != Instruction::Sub && BO->getOpcode() != Instruction::Or) { return false; } Value *LHS = BO->getOperand(0), *RHS = BO->getOperand(1); // Do not trace into "or" unless it is equivalent to "add". If LHS and RHS // don't have common bits, (LHS | RHS) is equivalent to (LHS + RHS). if (BO->getOpcode() == Instruction::Or && !haveNoCommonBitsSet(LHS, RHS, DL, nullptr, BO, DT)) return false; // In addition, tracing into BO requires that its surrounding s/zext (if // any) is distributable to both operands. // // Suppose BO = A op B. // SignExtended | ZeroExtended | Distributable? // --------------+--------------+---------------------------------- // 0 | 0 | true because no s/zext exists // 0 | 1 | zext(BO) == zext(A) op zext(B) // 1 | 0 | sext(BO) == sext(A) op sext(B) // 1 | 1 | zext(sext(BO)) == // | | zext(sext(A)) op zext(sext(B)) if (BO->getOpcode() == Instruction::Add && !ZeroExtended && NonNegative) { // If a + b >= 0 and (a >= 0 or b >= 0), then // sext(a + b) = sext(a) + sext(b) // even if the addition is not marked nsw. // // Leveraging this invarient, we can trace into an sext'ed inbound GEP // index if the constant offset is non-negative. // // Verified in @sext_add in split-gep.ll. if (ConstantInt *ConstLHS = dyn_cast<ConstantInt>(LHS)) { if (!ConstLHS->isNegative()) return true; } if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(RHS)) { if (!ConstRHS->isNegative()) return true; } } // sext (add/sub nsw A, B) == add/sub nsw (sext A), (sext B) // zext (add/sub nuw A, B) == add/sub nuw (zext A), (zext B) if (BO->getOpcode() == Instruction::Add || BO->getOpcode() == Instruction::Sub) { if (SignExtended && !BO->hasNoSignedWrap()) return false; if (ZeroExtended && !BO->hasNoUnsignedWrap()) return false; } return true; } APInt ConstantOffsetExtractor::findInEitherOperand(BinaryOperator *BO, bool SignExtended, bool ZeroExtended) { // BO being non-negative does not shed light on whether its operands are // non-negative. Clear the NonNegative flag here. APInt ConstantOffset = find(BO->getOperand(0), SignExtended, ZeroExtended, /* NonNegative */ false); // If we found a constant offset in the left operand, stop and return that. // This shortcut might cause us to miss opportunities of combining the // constant offsets in both operands, e.g., (a + 4) + (b + 5) => (a + b) + 9. // However, such cases are probably already handled by -instcombine, // given this pass runs after the standard optimizations. 
if (ConstantOffset != 0) return ConstantOffset; ConstantOffset = find(BO->getOperand(1), SignExtended, ZeroExtended, /* NonNegative */ false); // If U is a sub operator, negate the constant offset found in the right // operand. if (BO->getOpcode() == Instruction::Sub) ConstantOffset = -ConstantOffset; return ConstantOffset; } APInt ConstantOffsetExtractor::find(Value *V, bool SignExtended, bool ZeroExtended, bool NonNegative) { // TODO(jingyue): We could trace into integer/pointer casts, such as // inttoptr, ptrtoint, bitcast, and addrspacecast. We choose to handle only // integers because it gives good enough results for our benchmarks. unsigned BitWidth = cast<IntegerType>(V->getType())->getBitWidth(); // We cannot do much with Values that are not a User, such as an Argument. User *U = dyn_cast<User>(V); if (U == nullptr) return APInt(BitWidth, 0); APInt ConstantOffset(BitWidth, 0); if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) { // Hooray, we found it! ConstantOffset = CI->getValue(); } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V)) { // Trace into subexpressions for more hoisting opportunities. if (CanTraceInto(SignExtended, ZeroExtended, BO, NonNegative)) ConstantOffset = findInEitherOperand(BO, SignExtended, ZeroExtended); } else if (isa<SExtInst>(V)) { ConstantOffset = find(U->getOperand(0), /* SignExtended */ true, ZeroExtended, NonNegative).sext(BitWidth); } else if (isa<ZExtInst>(V)) { // As an optimization, we can clear the SignExtended flag because // sext(zext(a)) = zext(a). Verified in @sext_zext in split-gep.ll. // // Clear the NonNegative flag, because zext(a) >= 0 does not imply a >= 0. ConstantOffset = find(U->getOperand(0), /* SignExtended */ false, /* ZeroExtended */ true, /* NonNegative */ false).zext(BitWidth); } // If we found a non-zero constant offset, add it to the path for // rebuildWithoutConstOffset. Zero is a valid constant offset, but doesn't // help this optimization. if (ConstantOffset != 0) UserChain.push_back(U); return ConstantOffset; } Value *ConstantOffsetExtractor::applyExts(Value *V) { Value *Current = V; // ExtInsts is built in the use-def order. Therefore, we apply them to V // in the reversed order. for (auto I = ExtInsts.rbegin(), E = ExtInsts.rend(); I != E; ++I) { if (Constant *C = dyn_cast<Constant>(Current)) { // If Current is a constant, apply s/zext using ConstantExpr::getCast. // ConstantExpr::getCast emits a ConstantInt if C is a ConstantInt. Current = ConstantExpr::getCast((*I)->getOpcode(), C, (*I)->getType()); } else { Instruction *Ext = (*I)->clone(); Ext->setOperand(0, Current); Ext->insertBefore(IP); Current = Ext; } } return Current; } Value *ConstantOffsetExtractor::rebuildWithoutConstOffset() { distributeExtsAndCloneChain(UserChain.size() - 1); // Remove all nullptrs (used to be s/zext) from UserChain. unsigned NewSize = 0; for (auto I = UserChain.begin(), E = UserChain.end(); I != E; ++I) { if (*I != nullptr) { UserChain[NewSize] = *I; NewSize++; } } UserChain.resize(NewSize); return removeConstOffset(UserChain.size() - 1); } Value * ConstantOffsetExtractor::distributeExtsAndCloneChain(unsigned ChainIndex) { User *U = UserChain[ChainIndex]; if (ChainIndex == 0) { assert(isa<ConstantInt>(U)); // If U is a ConstantInt, applyExts will return a ConstantInt as well. 
return UserChain[ChainIndex] = cast<ConstantInt>(applyExts(U)); } if (CastInst *Cast = dyn_cast<CastInst>(U)) { assert((isa<SExtInst>(Cast) || isa<ZExtInst>(Cast)) && "We only traced into two types of CastInst: sext and zext"); ExtInsts.push_back(Cast); UserChain[ChainIndex] = nullptr; return distributeExtsAndCloneChain(ChainIndex - 1); } // Function find only trace into BinaryOperator and CastInst. BinaryOperator *BO = cast<BinaryOperator>(U); // OpNo = which operand of BO is UserChain[ChainIndex - 1] unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1); Value *TheOther = applyExts(BO->getOperand(1 - OpNo)); Value *NextInChain = distributeExtsAndCloneChain(ChainIndex - 1); BinaryOperator *NewBO = nullptr; if (OpNo == 0) { NewBO = BinaryOperator::Create(BO->getOpcode(), NextInChain, TheOther, BO->getName(), IP); } else { NewBO = BinaryOperator::Create(BO->getOpcode(), TheOther, NextInChain, BO->getName(), IP); } return UserChain[ChainIndex] = NewBO; } Value *ConstantOffsetExtractor::removeConstOffset(unsigned ChainIndex) { if (ChainIndex == 0) { assert(isa<ConstantInt>(UserChain[ChainIndex])); return ConstantInt::getNullValue(UserChain[ChainIndex]->getType()); } BinaryOperator *BO = cast<BinaryOperator>(UserChain[ChainIndex]); assert(BO->getNumUses() <= 1 && "distributeExtsAndCloneChain clones each BinaryOperator in " "UserChain, so no one should be used more than " "once"); unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1); assert(BO->getOperand(OpNo) == UserChain[ChainIndex - 1]); Value *NextInChain = removeConstOffset(ChainIndex - 1); Value *TheOther = BO->getOperand(1 - OpNo); // If NextInChain is 0 and not the LHS of a sub, we can simplify the // sub-expression to be just TheOther. if (ConstantInt *CI = dyn_cast<ConstantInt>(NextInChain)) { if (CI->isZero() && !(BO->getOpcode() == Instruction::Sub && OpNo == 0)) return TheOther; } BinaryOperator::BinaryOps NewOp = BO->getOpcode(); if (BO->getOpcode() == Instruction::Or) { // Rebuild "or" as "add", because "or" may be invalid for the new // epxression. // // For instance, given // a | (b + 5) where a and b + 5 have no common bits, // we can extract 5 as the constant offset. // // However, reusing the "or" in the new index would give us // (a | b) + 5 // which does not equal a | (b + 5). // // Replacing the "or" with "add" is fine, because // a | (b + 5) = a + (b + 5) = (a + b) + 5 NewOp = Instruction::Add; } BinaryOperator *NewBO; if (OpNo == 0) { NewBO = BinaryOperator::Create(NewOp, NextInChain, TheOther, "", IP); } else { NewBO = BinaryOperator::Create(NewOp, TheOther, NextInChain, "", IP); } NewBO->takeName(BO); return NewBO; } Value *ConstantOffsetExtractor::Extract(Value *Idx, GetElementPtrInst *GEP, User *&UserChainTail, const DominatorTree *DT) { ConstantOffsetExtractor Extractor(GEP, DT); // Find a non-zero constant offset first. APInt ConstantOffset = Extractor.find(Idx, /* SignExtended */ false, /* ZeroExtended */ false, GEP->isInBounds()); if (ConstantOffset == 0) { UserChainTail = nullptr; return nullptr; } // Separates the constant offset from the GEP index. Value *IdxWithoutConstOffset = Extractor.rebuildWithoutConstOffset(); UserChainTail = Extractor.UserChain.back(); return IdxWithoutConstOffset; } int64_t ConstantOffsetExtractor::Find(Value *Idx, GetElementPtrInst *GEP, const DominatorTree *DT) { // If Idx is an index of an inbound GEP, Idx is guaranteed to be non-negative. 
return ConstantOffsetExtractor(GEP, DT) .find(Idx, /* SignExtended */ false, /* ZeroExtended */ false, GEP->isInBounds()) .getSExtValue(); } bool SeparateConstOffsetFromGEP::canonicalizeArrayIndicesToPointerSize( GetElementPtrInst *GEP) { bool Changed = false; Type *IntPtrTy = DL->getIntPtrType(GEP->getType()); gep_type_iterator GTI = gep_type_begin(*GEP); for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end(); I != E; ++I, ++GTI) { // Skip struct member indices which must be i32. if (isa<SequentialType>(*GTI)) { if ((*I)->getType() != IntPtrTy) { *I = CastInst::CreateIntegerCast(*I, IntPtrTy, true, "idxprom", GEP); Changed = true; } } } return Changed; } int64_t SeparateConstOffsetFromGEP::accumulateByteOffset(GetElementPtrInst *GEP, bool &NeedsExtraction) { NeedsExtraction = false; int64_t AccumulativeByteOffset = 0; gep_type_iterator GTI = gep_type_begin(*GEP); for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) { if (isa<SequentialType>(*GTI)) { // Tries to extract a constant offset from this GEP index. int64_t ConstantOffset = ConstantOffsetExtractor::Find(GEP->getOperand(I), GEP, DT); if (ConstantOffset != 0) { NeedsExtraction = true; // A GEP may have multiple indices. We accumulate the extracted // constant offset to a byte offset, and later offset the remainder of // the original GEP with this byte offset. AccumulativeByteOffset += ConstantOffset * DL->getTypeAllocSize(GTI.getIndexedType()); } } else if (LowerGEP) { StructType *StTy = cast<StructType>(*GTI); uint64_t Field = cast<ConstantInt>(GEP->getOperand(I))->getZExtValue(); // Skip field 0 as the offset is always 0. if (Field != 0) { NeedsExtraction = true; AccumulativeByteOffset += DL->getStructLayout(StTy)->getElementOffset(Field); } } } return AccumulativeByteOffset; } void SeparateConstOffsetFromGEP::lowerToSingleIndexGEPs( GetElementPtrInst *Variadic, int64_t AccumulativeByteOffset) { IRBuilder<> Builder(Variadic); Type *IntPtrTy = DL->getIntPtrType(Variadic->getType()); Type *I8PtrTy = Builder.getInt8PtrTy(Variadic->getType()->getPointerAddressSpace()); Value *ResultPtr = Variadic->getOperand(0); if (ResultPtr->getType() != I8PtrTy) ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy); gep_type_iterator GTI = gep_type_begin(*Variadic); // Create an ugly GEP for each sequential index. We don't create GEPs for // structure indices, as they are accumulated in the constant offset index. for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) { if (isa<SequentialType>(*GTI)) { Value *Idx = Variadic->getOperand(I); // Skip zero indices. if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) if (CI->isZero()) continue; APInt ElementSize = APInt(IntPtrTy->getIntegerBitWidth(), DL->getTypeAllocSize(GTI.getIndexedType())); // Scale the index by element size. if (ElementSize != 1) { if (ElementSize.isPowerOf2()) { Idx = Builder.CreateShl( Idx, ConstantInt::get(IntPtrTy, ElementSize.logBase2())); } else { Idx = Builder.CreateMul(Idx, ConstantInt::get(IntPtrTy, ElementSize)); } } // Create an ugly GEP with a single index for each index. ResultPtr = Builder.CreateGEP(Builder.getInt8Ty(), ResultPtr, Idx, "uglygep"); } } // Create a GEP with the constant offset index. 
if (AccumulativeByteOffset != 0) { Value *Offset = ConstantInt::get(IntPtrTy, AccumulativeByteOffset); ResultPtr = Builder.CreateGEP(Builder.getInt8Ty(), ResultPtr, Offset, "uglygep"); } if (ResultPtr->getType() != Variadic->getType()) ResultPtr = Builder.CreateBitCast(ResultPtr, Variadic->getType()); Variadic->replaceAllUsesWith(ResultPtr); Variadic->eraseFromParent(); } void SeparateConstOffsetFromGEP::lowerToArithmetics(GetElementPtrInst *Variadic, int64_t AccumulativeByteOffset) { IRBuilder<> Builder(Variadic); Type *IntPtrTy = DL->getIntPtrType(Variadic->getType()); Value *ResultPtr = Builder.CreatePtrToInt(Variadic->getOperand(0), IntPtrTy); gep_type_iterator GTI = gep_type_begin(*Variadic); // Create ADD/SHL/MUL arithmetic operations for each sequential indices. We // don't create arithmetics for structure indices, as they are accumulated // in the constant offset index. for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) { if (isa<SequentialType>(*GTI)) { Value *Idx = Variadic->getOperand(I); // Skip zero indices. if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) if (CI->isZero()) continue; APInt ElementSize = APInt(IntPtrTy->getIntegerBitWidth(), DL->getTypeAllocSize(GTI.getIndexedType())); // Scale the index by element size. if (ElementSize != 1) { if (ElementSize.isPowerOf2()) { Idx = Builder.CreateShl( Idx, ConstantInt::get(IntPtrTy, ElementSize.logBase2())); } else { Idx = Builder.CreateMul(Idx, ConstantInt::get(IntPtrTy, ElementSize)); } } // Create an ADD for each index. ResultPtr = Builder.CreateAdd(ResultPtr, Idx); } } // Create an ADD for the constant offset index. if (AccumulativeByteOffset != 0) { ResultPtr = Builder.CreateAdd( ResultPtr, ConstantInt::get(IntPtrTy, AccumulativeByteOffset)); } ResultPtr = Builder.CreateIntToPtr(ResultPtr, Variadic->getType()); Variadic->replaceAllUsesWith(ResultPtr); Variadic->eraseFromParent(); } bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) { // Skip vector GEPs. if (GEP->getType()->isVectorTy()) return false; // The backend can already nicely handle the case where all indices are // constant. if (GEP->hasAllConstantIndices()) return false; bool Changed = canonicalizeArrayIndicesToPointerSize(GEP); bool NeedsExtraction; int64_t AccumulativeByteOffset = accumulateByteOffset(GEP, NeedsExtraction); if (!NeedsExtraction) return Changed; // If LowerGEP is disabled, before really splitting the GEP, check whether the // backend supports the addressing mode we are about to produce. If no, this // splitting probably won't be beneficial. // If LowerGEP is enabled, even the extracted constant offset can not match // the addressing mode, we can still do optimizations to other lowered parts // of variable indices. Therefore, we don't check for addressing modes in that // case. if (!LowerGEP) { TargetTransformInfo &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI( *GEP->getParent()->getParent()); unsigned AddrSpace = GEP->getPointerAddressSpace(); if (!TTI.isLegalAddressingMode(GEP->getType()->getElementType(), /*BaseGV=*/nullptr, AccumulativeByteOffset, /*HasBaseReg=*/true, /*Scale=*/0, AddrSpace)) { return Changed; } } // Remove the constant offset in each sequential index. The resultant GEP // computes the variadic base. // Notice that we don't remove struct field indices here. If LowerGEP is // disabled, a structure index is not accumulated and we still use the old // one. If LowerGEP is enabled, a structure index is accumulated in the // constant offset. 
LowerToSingleIndexGEPs or lowerToArithmetics will later // handle the constant offset and won't need a new structure index. gep_type_iterator GTI = gep_type_begin(*GEP); for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) { if (isa<SequentialType>(*GTI)) { // Splits this GEP index into a variadic part and a constant offset, and // uses the variadic part as the new index. Value *OldIdx = GEP->getOperand(I); User *UserChainTail; Value *NewIdx = ConstantOffsetExtractor::Extract(OldIdx, GEP, UserChainTail, DT); if (NewIdx != nullptr) { // Switches to the index with the constant offset removed. GEP->setOperand(I, NewIdx); // After switching to the new index, we can garbage-collect UserChain // and the old index if they are not used. RecursivelyDeleteTriviallyDeadInstructions(UserChainTail); RecursivelyDeleteTriviallyDeadInstructions(OldIdx); } } } // Clear the inbounds attribute because the new index may be off-bound. // e.g., // // b = add i64 a, 5 // addr = gep inbounds float* p, i64 b // // is transformed to: // // addr2 = gep float* p, i64 a // addr = gep float* addr2, i64 5 // // If a is -4, although the old index b is in bounds, the new index a is // off-bound. http://llvm.org/docs/LangRef.html#id181 says "if the // inbounds keyword is not present, the offsets are added to the base // address with silently-wrapping two's complement arithmetic". // Therefore, the final code will be a semantically equivalent. // // TODO(jingyue): do some range analysis to keep as many inbounds as // possible. GEPs with inbounds are more friendly to alias analysis. GEP->setIsInBounds(false); // Lowers a GEP to either GEPs with a single index or arithmetic operations. if (LowerGEP) { // As currently BasicAA does not analyze ptrtoint/inttoptr, do not lower to // arithmetic operations if the target uses alias analysis in codegen. if (TM && TM->getSubtargetImpl(*GEP->getParent()->getParent())->useAA()) lowerToSingleIndexGEPs(GEP, AccumulativeByteOffset); else lowerToArithmetics(GEP, AccumulativeByteOffset); return true; } // No need to create another GEP if the accumulative byte offset is 0. if (AccumulativeByteOffset == 0) return true; // Offsets the base with the accumulative byte offset. // // %gep ; the base // ... %gep ... // // => add the offset // // %gep2 ; clone of %gep // %new.gep = gep %gep2, <offset / sizeof(*%gep)> // %gep ; will be removed // ... %gep ... // // => replace all uses of %gep with %new.gep and remove %gep // // %gep2 ; clone of %gep // %new.gep = gep %gep2, <offset / sizeof(*%gep)> // ... %new.gep ... // // If AccumulativeByteOffset is not a multiple of sizeof(*%gep), we emit an // uglygep (http://llvm.org/docs/GetElementPtr.html#what-s-an-uglygep): // bitcast %gep2 to i8*, add the offset, and bitcast the result back to the // type of %gep. // // %gep2 ; clone of %gep // %0 = bitcast %gep2 to i8* // %uglygep = gep %0, <offset> // %new.gep = bitcast %uglygep to <type of %gep> // ... %new.gep ... Instruction *NewGEP = GEP->clone(); NewGEP->insertBefore(GEP); // Per ANSI C standard, signed / unsigned = unsigned and signed % unsigned = // unsigned.. Therefore, we cast ElementTypeSizeOfGEP to signed because it is // used with unsigned integers later. int64_t ElementTypeSizeOfGEP = static_cast<int64_t>( DL->getTypeAllocSize(GEP->getType()->getElementType())); Type *IntPtrTy = DL->getIntPtrType(GEP->getType()); if (AccumulativeByteOffset % ElementTypeSizeOfGEP == 0) { // Very likely. 
As long as %gep is natually aligned, the byte offset we // extracted should be a multiple of sizeof(*%gep). int64_t Index = AccumulativeByteOffset / ElementTypeSizeOfGEP; NewGEP = GetElementPtrInst::Create(GEP->getResultElementType(), NewGEP, ConstantInt::get(IntPtrTy, Index, true), GEP->getName(), GEP); } else { // Unlikely but possible. For example, // #pragma pack(1) // struct S { // int a[3]; // int64 b[8]; // }; // #pragma pack() // // Suppose the gep before extraction is &s[i + 1].b[j + 3]. After // extraction, it becomes &s[i].b[j] and AccumulativeByteOffset is // sizeof(S) + 3 * sizeof(int64) = 100, which is not a multiple of // sizeof(int64). // // Emit an uglygep in this case. Type *I8PtrTy = Type::getInt8PtrTy(GEP->getContext(), GEP->getPointerAddressSpace()); NewGEP = new BitCastInst(NewGEP, I8PtrTy, "", GEP); NewGEP = GetElementPtrInst::Create( Type::getInt8Ty(GEP->getContext()), NewGEP, ConstantInt::get(IntPtrTy, AccumulativeByteOffset, true), "uglygep", GEP); if (GEP->getType() != I8PtrTy) NewGEP = new BitCastInst(NewGEP, GEP->getType(), GEP->getName(), GEP); } GEP->replaceAllUsesWith(NewGEP); GEP->eraseFromParent(); return true; } bool SeparateConstOffsetFromGEP::runOnFunction(Function &F) { if (skipOptnoneFunction(F)) return false; if (DisableSeparateConstOffsetFromGEP) return false; DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); bool Changed = false; for (Function::iterator B = F.begin(), BE = F.end(); B != BE; ++B) { for (BasicBlock::iterator I = B->begin(), IE = B->end(); I != IE; ) { if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I++)) { Changed |= splitGEP(GEP); } // No need to split GEP ConstantExprs because all its indices are constant // already. } } if (VerifyNoDeadCode) verifyNoDeadCode(F); return Changed; } void SeparateConstOffsetFromGEP::verifyNoDeadCode(Function &F) { for (auto &B : F) { for (auto &I : B) { if (isInstructionTriviallyDead(&I)) { std::string ErrMessage; raw_string_ostream RSO(ErrMessage); RSO << "Dead instruction detected!\n" << I << "\n"; llvm_unreachable(RSO.str().c_str()); } } } }
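To make the base/offset split concrete for readers of this dump, here is a small standalone C++ sketch of the same idea at the source level. It is illustrative only: it uses no LLVM API, and the function names and the tiny main are made up for this example. It mirrors the a[32][32] example from the file's header comment: the "after" version computes one shared base and reaches the other elements through constant byte offsets, which is the shape the pass gives the IR so that a reg+immediate addressing mode can absorb the constants.

#include <cstddef>
#include <cstdio>

// "Before": each unrolled access recomputes the full address from x and y,
// corresponding to the four independent GEPs in the header-comment example.
static float sum4_naive(float (*a)[32], int x, int y) {
  return a[x][y] + a[x][y + 1] + a[x + 1][y] + a[x + 1][y + 1];
}

// "After": compute one shared base (&a[x][y]) once and address the other
// three elements as base + constant byte offset -- the same variadic-base /
// constant-offset split the pass performs on the GEPs.
static float sum4_rebased(float (*a)[32], int x, int y) {
  const char *Base = reinterpret_cast<const char *>(&a[x][y]);
  auto LoadAt = [Base](std::ptrdiff_t Bytes) {
    return *reinterpret_cast<const float *>(Base + Bytes);
  };
  return LoadAt(0) + LoadAt(1 * sizeof(float)) +
         LoadAt(32 * sizeof(float)) + LoadAt(33 * sizeof(float));
}

int main() {
  static float A[33][32];
  for (int i = 0; i < 33; ++i)
    for (int j = 0; j < 32; ++j)
      A[i][j] = static_cast<float>(i * 32 + j);
  // Both variants load the same four elements; only the addressing differs.
  std::printf("%g %g\n", sum4_naive(A, 3, 5), sum4_rebased(A, 3, 5));
  return 0;
}

The source-level rewrite is only an analogy: the pass itself operates on GEPs and leaves the choice of addressing mode to the backend, which is why splitGEP consults TTI::isLegalAddressingMode before splitting when LowerGEP is off.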
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/NaryReassociate.cpp
//===- NaryReassociate.cpp - Reassociate n-ary expressions ----------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass reassociates n-ary add expressions and eliminates the redundancy // exposed by the reassociation. // // A motivating example: // // void foo(int a, int b) { // bar(a + b); // bar((a + 2) + b); // } // // An ideal compiler should reassociate (a + 2) + b to (a + b) + 2 and simplify // the above code to // // int t = a + b; // bar(t); // bar(t + 2); // // However, the Reassociate pass is unable to do that because it processes each // instruction individually and believes (a + 2) + b is the best form according // to its rank system. // // To address this limitation, NaryReassociate reassociates an expression in a // form that reuses existing instructions. As a result, NaryReassociate can // reassociate (a + 2) + b in the example to (a + b) + 2 because it detects that // (a + b) is computed before. // // NaryReassociate works as follows. For every instruction in the form of (a + // b) + c, it checks whether a + c or b + c is already computed by a dominating // instruction. If so, it then reassociates (a + b) + c into (a + c) + b or (b + // c) + a and removes the redundancy accordingly. To efficiently look up whether // an expression is computed before, we store each instruction seen and its SCEV // into an SCEV-to-instruction map. // // Although the algorithm pattern-matches only ternary additions, it // automatically handles many >3-ary expressions by walking through the function // in the depth-first order. For example, given // // (a + c) + d // ((a + b) + c) + d // // NaryReassociate first rewrites (a + b) + c to (a + c) + b, and then rewrites // ((a + c) + b) + d into ((a + c) + d) + b. // // Finally, the above dominator-based algorithm may need to be run multiple // iterations before emitting optimal code. One source of this need is that we // only split an operand when it is used only once. The above algorithm can // eliminate an instruction and decrease the usage count of its operands. As a // result, an instruction that previously had multiple uses may become a // single-use instruction and thus eligible for split consideration. For // example, // // ac = a + c // ab = a + b // abc = ab + c // ab2 = ab + b // ab2c = ab2 + c // // In the first iteration, we cannot reassociate abc to ac+b because ab is used // twice. However, we can reassociate ab2c to abc+b in the first iteration. As a // result, ab2 becomes dead and ab will be used only once in the second // iteration. // // Limitations and TODO items: // // 1) We only considers n-ary adds for now. This should be extended and // generalized. 
// //===----------------------------------------------------------------------===// #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Module.h" #include "llvm/IR/PatternMatch.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/Local.h" using namespace llvm; using namespace PatternMatch; #define DEBUG_TYPE "nary-reassociate" namespace { class NaryReassociate : public FunctionPass { public: static char ID; NaryReassociate(): FunctionPass(ID) { initializeNaryReassociatePass(*PassRegistry::getPassRegistry()); } bool doInitialization(Module &M) override { DL = &M.getDataLayout(); return false; } bool runOnFunction(Function &F) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addPreserved<DominatorTreeWrapperPass>(); AU.addPreserved<ScalarEvolution>(); AU.addPreserved<TargetLibraryInfoWrapperPass>(); AU.addRequired<AssumptionCacheTracker>(); AU.addRequired<DominatorTreeWrapperPass>(); AU.addRequired<ScalarEvolution>(); AU.addRequired<TargetLibraryInfoWrapperPass>(); AU.addRequired<TargetTransformInfoWrapperPass>(); AU.setPreservesCFG(); } private: // Runs only one iteration of the dominator-based algorithm. See the header // comments for why we need multiple iterations. bool doOneIteration(Function &F); // Reassociates I for better CSE. Instruction *tryReassociate(Instruction *I); // Reassociate GEP for better CSE. Instruction *tryReassociateGEP(GetElementPtrInst *GEP); // Try splitting GEP at the I-th index and see whether either part can be // CSE'ed. This is a helper function for tryReassociateGEP. // // \p IndexedType The element type indexed by GEP's I-th index. This is // equivalent to // GEP->getIndexedType(GEP->getPointerOperand(), 0-th index, // ..., i-th index). GetElementPtrInst *tryReassociateGEPAtIndex(GetElementPtrInst *GEP, unsigned I, Type *IndexedType); // Given GEP's I-th index = LHS + RHS, see whether &Base[..][LHS][..] or // &Base[..][RHS][..] can be CSE'ed and rewrite GEP accordingly. GetElementPtrInst *tryReassociateGEPAtIndex(GetElementPtrInst *GEP, unsigned I, Value *LHS, Value *RHS, Type *IndexedType); // Reassociate Add for better CSE. Instruction *tryReassociateAdd(BinaryOperator *I); // A helper function for tryReassociateAdd. LHS and RHS are explicitly passed. Instruction *tryReassociateAdd(Value *LHS, Value *RHS, Instruction *I); // Rewrites I to LHS + RHS if LHS is computed already. Instruction *tryReassociatedAdd(const SCEV *LHS, Value *RHS, Instruction *I); // Returns the closest dominator of \c Dominatee that computes // \c CandidateExpr. Returns null if not found. Instruction *findClosestMatchingDominator(const SCEV *CandidateExpr, Instruction *Dominatee); // GetElementPtrInst implicitly sign-extends an index if the index is shorter // than the pointer size. This function returns whether Index is shorter than // GEP's pointer size, i.e., whether Index needs to be sign-extended in order // to be an index of GEP. bool requiresSignExtension(Value *Index, GetElementPtrInst *GEP); // Returns whether V is known to be non-negative at context \c Ctxt. bool isKnownNonNegative(Value *V, Instruction *Ctxt); // Returns whether AO may sign overflow at context \c Ctxt. It computes a // conservative result -- it answers true when not sure. 
bool maySignOverflow(AddOperator *AO, Instruction *Ctxt); AssumptionCache *AC; const DataLayout *DL; DominatorTree *DT; ScalarEvolution *SE; TargetLibraryInfo *TLI; TargetTransformInfo *TTI; // A lookup table quickly telling which instructions compute the given SCEV. // Note that there can be multiple instructions at different locations // computing to the same SCEV, so we map a SCEV to an instruction list. For // example, // // if (p1) // foo(a + b); // if (p2) // bar(a + b); DenseMap<const SCEV *, SmallVector<Instruction *, 2>> SeenExprs; }; } // anonymous namespace char NaryReassociate::ID = 0; INITIALIZE_PASS_BEGIN(NaryReassociate, "nary-reassociate", "Nary reassociation", false, false) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) INITIALIZE_PASS_END(NaryReassociate, "nary-reassociate", "Nary reassociation", false, false) FunctionPass *llvm::createNaryReassociatePass() { return new NaryReassociate(); } bool NaryReassociate::runOnFunction(Function &F) { if (skipOptnoneFunction(F)) return false; AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); SE = &getAnalysis<ScalarEvolution>(); TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); bool Changed = false, ChangedInThisIteration; do { ChangedInThisIteration = doOneIteration(F); Changed |= ChangedInThisIteration; } while (ChangedInThisIteration); return Changed; } // Whitelist the instruction types NaryReassociate handles for now. static bool isPotentiallyNaryReassociable(Instruction *I) { switch (I->getOpcode()) { case Instruction::Add: case Instruction::GetElementPtr: return true; default: return false; } } bool NaryReassociate::doOneIteration(Function &F) { bool Changed = false; SeenExprs.clear(); // Process the basic blocks in pre-order of the dominator tree. This order // ensures that all bases of a candidate are in Candidates when we process it. for (auto Node = GraphTraits<DominatorTree *>::nodes_begin(DT); Node != GraphTraits<DominatorTree *>::nodes_end(DT); ++Node) { BasicBlock *BB = Node->getBlock(); for (auto I = BB->begin(); I != BB->end(); ++I) { if (SE->isSCEVable(I->getType()) && isPotentiallyNaryReassociable(I)) { const SCEV *OldSCEV = SE->getSCEV(I); if (Instruction *NewI = tryReassociate(I)) { Changed = true; SE->forgetValue(I); I->replaceAllUsesWith(NewI); RecursivelyDeleteTriviallyDeadInstructions(I, TLI); I = NewI; } // Add the rewritten instruction to SeenExprs; the original instruction // is deleted. const SCEV *NewSCEV = SE->getSCEV(I); SeenExprs[NewSCEV].push_back(I); // Ideally, NewSCEV should equal OldSCEV because tryReassociate(I) // is equivalent to I. However, ScalarEvolution::getSCEV may // weaken nsw causing NewSCEV not to equal OldSCEV. For example, suppose // we reassociate // I = &a[sext(i +nsw j)] // assuming sizeof(a[0]) = 4 // to // NewI = &a[sext(i)] + sext(j). // // ScalarEvolution computes // getSCEV(I) = a + 4 * sext(i + j) // getSCEV(newI) = a + 4 * sext(i) + 4 * sext(j) // which are different SCEVs. // // To alleviate this issue of ScalarEvolution not always capturing // equivalence, we add I to SeenExprs[OldSCEV] as well so that we can // map both SCEV before and after tryReassociate(I) to I. 
// // This improvement is exercised in @reassociate_gep_nsw in nary-gep.ll. if (NewSCEV != OldSCEV) SeenExprs[OldSCEV].push_back(I); } } } return Changed; } Instruction *NaryReassociate::tryReassociate(Instruction *I) { switch (I->getOpcode()) { case Instruction::Add: return tryReassociateAdd(cast<BinaryOperator>(I)); case Instruction::GetElementPtr: return tryReassociateGEP(cast<GetElementPtrInst>(I)); default: llvm_unreachable("should be filtered out by isPotentiallyNaryReassociable"); } } // FIXME: extract this method into TTI->getGEPCost. static bool isGEPFoldable(GetElementPtrInst *GEP, const TargetTransformInfo *TTI, const DataLayout *DL) { GlobalVariable *BaseGV = nullptr; int64_t BaseOffset = 0; bool HasBaseReg = false; int64_t Scale = 0; if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getPointerOperand())) BaseGV = GV; else HasBaseReg = true; gep_type_iterator GTI = gep_type_begin(GEP); for (auto I = GEP->idx_begin(); I != GEP->idx_end(); ++I, ++GTI) { if (isa<SequentialType>(*GTI)) { int64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType()); if (ConstantInt *ConstIdx = dyn_cast<ConstantInt>(*I)) { BaseOffset += ConstIdx->getSExtValue() * ElementSize; } else { // Needs scale register. if (Scale != 0) { // No addressing mode takes two scale registers. return false; } Scale = ElementSize; } } else { StructType *STy = cast<StructType>(*GTI); uint64_t Field = cast<ConstantInt>(*I)->getZExtValue(); BaseOffset += DL->getStructLayout(STy)->getElementOffset(Field); } } unsigned AddrSpace = GEP->getPointerAddressSpace(); return TTI->isLegalAddressingMode(GEP->getType()->getElementType(), BaseGV, BaseOffset, HasBaseReg, Scale, AddrSpace); } Instruction *NaryReassociate::tryReassociateGEP(GetElementPtrInst *GEP) { // Not worth reassociating GEP if it is foldable. if (isGEPFoldable(GEP, TTI, DL)) return nullptr; gep_type_iterator GTI = gep_type_begin(*GEP); for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) { if (isa<SequentialType>(*GTI++)) { if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I - 1, *GTI)) { return NewGEP; } } } return nullptr; } bool NaryReassociate::requiresSignExtension(Value *Index, GetElementPtrInst *GEP) { unsigned PointerSizeInBits = DL->getPointerSizeInBits(GEP->getType()->getPointerAddressSpace()); return cast<IntegerType>(Index->getType())->getBitWidth() < PointerSizeInBits; } bool NaryReassociate::isKnownNonNegative(Value *V, Instruction *Ctxt) { bool NonNegative, Negative; // TODO: ComputeSignBits is expensive. Consider caching the results. ComputeSignBit(V, NonNegative, Negative, *DL, 0, AC, Ctxt, DT); return NonNegative; } bool NaryReassociate::maySignOverflow(AddOperator *AO, Instruction *Ctxt) { if (AO->hasNoSignedWrap()) return false; Value *LHS = AO->getOperand(0), *RHS = AO->getOperand(1); // If LHS or RHS has the same sign as the sum, AO doesn't sign overflow. // TODO: handle the negative case as well. if (isKnownNonNegative(AO, Ctxt) && (isKnownNonNegative(LHS, Ctxt) || isKnownNonNegative(RHS, Ctxt))) return false; return true; } GetElementPtrInst * NaryReassociate::tryReassociateGEPAtIndex(GetElementPtrInst *GEP, unsigned I, Type *IndexedType) { Value *IndexToSplit = GEP->getOperand(I + 1); if (SExtInst *SExt = dyn_cast<SExtInst>(IndexToSplit)) { IndexToSplit = SExt->getOperand(0); } else if (ZExtInst *ZExt = dyn_cast<ZExtInst>(IndexToSplit)) { // zext can be treated as sext if the source is non-negative. 
if (isKnownNonNegative(ZExt->getOperand(0), GEP)) IndexToSplit = ZExt->getOperand(0); } if (AddOperator *AO = dyn_cast<AddOperator>(IndexToSplit)) { // If the I-th index needs sext and the underlying add is not equipped with // nsw, we cannot split the add because // sext(LHS + RHS) != sext(LHS) + sext(RHS). if (requiresSignExtension(IndexToSplit, GEP) && maySignOverflow(AO, GEP)) return nullptr; Value *LHS = AO->getOperand(0), *RHS = AO->getOperand(1); // IndexToSplit = LHS + RHS. if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I, LHS, RHS, IndexedType)) return NewGEP; // Symmetrically, try IndexToSplit = RHS + LHS. if (LHS != RHS) { if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I, RHS, LHS, IndexedType)) return NewGEP; } } return nullptr; } GetElementPtrInst *NaryReassociate::tryReassociateGEPAtIndex( GetElementPtrInst *GEP, unsigned I, Value *LHS, Value *RHS, Type *IndexedType) { // Look for GEP's closest dominator that has the same SCEV as GEP except that // the I-th index is replaced with LHS. SmallVector<const SCEV *, 4> IndexExprs; for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index) IndexExprs.push_back(SE->getSCEV(*Index)); // Replace the I-th index with LHS. IndexExprs[I] = SE->getSCEV(LHS); if (isKnownNonNegative(LHS, GEP) && DL->getTypeSizeInBits(LHS->getType()) < DL->getTypeSizeInBits(GEP->getOperand(I)->getType())) { // Zero-extend LHS if it is non-negative. InstCombine canonicalizes sext to // zext if the source operand is proved non-negative. We should do that // consistently so that CandidateExpr more likely appears before. See // @reassociate_gep_assume for an example of this canonicalization. IndexExprs[I] = SE->getZeroExtendExpr(IndexExprs[I], GEP->getOperand(I)->getType()); } const SCEV *CandidateExpr = SE->getGEPExpr( GEP->getSourceElementType(), SE->getSCEV(GEP->getPointerOperand()), IndexExprs, GEP->isInBounds()); auto *Candidate = findClosestMatchingDominator(CandidateExpr, GEP); if (Candidate == nullptr) return nullptr; PointerType *TypeOfCandidate = dyn_cast<PointerType>(Candidate->getType()); // Pretty rare but theoretically possible when a numeric value happens to // share CandidateExpr. if (TypeOfCandidate == nullptr) return nullptr; // NewGEP = (char *)Candidate + RHS * sizeof(IndexedType) uint64_t IndexedSize = DL->getTypeAllocSize(IndexedType); Type *ElementType = TypeOfCandidate->getElementType(); uint64_t ElementSize = DL->getTypeAllocSize(ElementType); // Another less rare case: because I is not necessarily the last index of the // GEP, the size of the type at the I-th index (IndexedSize) is not // necessarily divisible by ElementSize. For example, // // #pragma pack(1) // struct S { // int a[3]; // int64 b[8]; // }; // #pragma pack() // // sizeof(S) = 100 is indivisible by sizeof(int64) = 8. // // TODO: bail out on this case for now. We could emit uglygep. 
if (IndexedSize % ElementSize != 0) return nullptr; // NewGEP = &Candidate[RHS * (sizeof(IndexedType) / sizeof(Candidate[0]))); IRBuilder<> Builder(GEP); Type *IntPtrTy = DL->getIntPtrType(TypeOfCandidate); if (RHS->getType() != IntPtrTy) RHS = Builder.CreateSExtOrTrunc(RHS, IntPtrTy); if (IndexedSize != ElementSize) { RHS = Builder.CreateMul( RHS, ConstantInt::get(IntPtrTy, IndexedSize / ElementSize)); } GetElementPtrInst *NewGEP = cast<GetElementPtrInst>(Builder.CreateGEP(Candidate, RHS)); NewGEP->setIsInBounds(GEP->isInBounds()); NewGEP->takeName(GEP); return NewGEP; } Instruction *NaryReassociate::tryReassociateAdd(BinaryOperator *I) { Value *LHS = I->getOperand(0), *RHS = I->getOperand(1); if (auto *NewI = tryReassociateAdd(LHS, RHS, I)) return NewI; if (auto *NewI = tryReassociateAdd(RHS, LHS, I)) return NewI; return nullptr; } Instruction *NaryReassociate::tryReassociateAdd(Value *LHS, Value *RHS, Instruction *I) { Value *A = nullptr, *B = nullptr; // To be conservative, we reassociate I only when it is the only user of A+B. if (LHS->hasOneUse() && match(LHS, m_Add(m_Value(A), m_Value(B)))) { // I = (A + B) + RHS // = (A + RHS) + B or (B + RHS) + A const SCEV *AExpr = SE->getSCEV(A), *BExpr = SE->getSCEV(B); const SCEV *RHSExpr = SE->getSCEV(RHS); if (BExpr != RHSExpr) { if (auto *NewI = tryReassociatedAdd(SE->getAddExpr(AExpr, RHSExpr), B, I)) return NewI; } if (AExpr != RHSExpr) { if (auto *NewI = tryReassociatedAdd(SE->getAddExpr(BExpr, RHSExpr), A, I)) return NewI; } } return nullptr; } Instruction *NaryReassociate::tryReassociatedAdd(const SCEV *LHSExpr, Value *RHS, Instruction *I) { auto Pos = SeenExprs.find(LHSExpr); // Bail out if LHSExpr is not previously seen. if (Pos == SeenExprs.end()) return nullptr; // Look for the closest dominator LHS of I that computes LHSExpr, and replace // I with LHS + RHS. auto *LHS = findClosestMatchingDominator(LHSExpr, I); if (LHS == nullptr) return nullptr; Instruction *NewI = BinaryOperator::CreateAdd(LHS, RHS, "", I); NewI->takeName(I); return NewI; } Instruction * NaryReassociate::findClosestMatchingDominator(const SCEV *CandidateExpr, Instruction *Dominatee) { auto Pos = SeenExprs.find(CandidateExpr); if (Pos == SeenExprs.end()) return nullptr; auto &Candidates = Pos->second; // Because we process the basic blocks in pre-order of the dominator tree, a // candidate that doesn't dominate the current instruction won't dominate any // future instruction either. Therefore, we pop it out of the stack. This // optimization makes the algorithm O(n). while (!Candidates.empty()) { Instruction *Candidate = Candidates.back(); if (DT->dominates(Candidate, Dominatee)) return Candidate; Candidates.pop_back(); } return nullptr; }
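The core of the algorithm above is the SeenExprs lookup: remember what a dominating instruction already computes, then rewrite a later (a + b) + c so it reuses that value. The following standalone C++ toy is a sketch of just that remember-then-reuse pattern under simplifying assumptions: there are no LLVM types, plain integer ids stand in for SCEVs and instructions, and every name in it is invented for the illustration. The commutative key normalization plays the role that SCEV canonicalization plays in the real pass.

#include <cstdio>
#include <map>
#include <utility>

// Operands are identified by small integer ids; the map plays the role of
// SeenExprs: "expression" -> value already computed at a dominating point.
using OperandId = int;
using SumKey = std::pair<OperandId, OperandId>;

static std::map<SumKey, int> SeenSums;

// Commutative normalization: a + b and b + a map to the same key.
static SumKey keyFor(OperandId A, OperandId B) {
  return A < B ? SumKey{A, B} : SumKey{B, A};
}

// Evaluate A + B and remember the result, like recording an instruction in
// SeenExprs under its SCEV.
static int addAndRemember(OperandId A, int ValA, OperandId B, int ValB) {
  int Sum = ValA + ValB;
  SeenSums[keyFor(A, B)] = Sum;
  return Sum;
}

// Models rewriting (ValA + C) + ValB: if A + B was computed before, reuse it
// and just add C; otherwise keep the original association.
static int reassociatedAdd(OperandId A, int ValA, int C, OperandId B,
                           int ValB) {
  auto It = SeenSums.find(keyFor(A, B));
  if (It != SeenSums.end())
    return It->second + C;    // reuse the dominating "A + B"
  return (ValA + C) + ValB;   // no dominating value; leave it alone
}

int main() {
  // Mirrors the motivating example: bar(a + b); bar((a + 2) + b);
  const OperandId A = 0, B = 1;
  int a = 40, b = 2;
  int T = addAndRemember(A, a, B, b);      // t = a + b
  int U = reassociatedAdd(A, a, 2, B, b);  // (a + 2) + b  ->  t + 2
  std::printf("%d %d\n", T, U);            // prints 42 44
  return 0;
}

The real pass makes the same decision with SCEVs plus dominance: a cached candidate is only reused if it dominates the instruction being rewritten (findClosestMatchingDominator), a check the toy's single straight-line main sidesteps.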
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/LoopUnswitch.cpp
//===-- LoopUnswitch.cpp - Hoist loop-invariant conditionals in loop ------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass transforms loops that contain branches on loop-invariant conditions // to have multiple loops. For example, it turns the left into the right code: // // for (...) if (lic) // A for (...) // if (lic) A; B; C // B else // C for (...) // A; C // // This can increase the size of the code exponentially (doubling it every time // a loop is unswitched) so we only unswitch if the resultant code will be // smaller than a threshold. // // This pass expects LICM to be run before it to hoist invariant conditions out // of the loop, to make the unswitching opportunity obvious. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/CodeMetrics.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/LoopPass.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/IR/MDBuilder.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Cloning.h" #include "llvm/Transforms/Utils/Local.h" #include <algorithm> #include <map> #include <set> using namespace llvm; #define DEBUG_TYPE "loop-unswitch" STATISTIC(NumBranches, "Number of branches unswitched"); STATISTIC(NumSwitches, "Number of switches unswitched"); STATISTIC(NumSelects , "Number of selects unswitched"); STATISTIC(NumTrivial , "Number of unswitches that are trivial"); STATISTIC(NumSimplify, "Number of simplifications of unswitched code"); STATISTIC(TotalInsts, "Total number of instructions analyzed"); // The specific value of 100 here was chosen based only on intuition and a // few specific examples. #if 0 // HLSL Change Starts - option pending static cl::opt<unsigned> Threshold("loop-unswitch-threshold", cl::desc("Max loop size to unswitch"), cl::init(100), cl::Hidden); #else static const unsigned Threshold = 100; #endif namespace { class LUAnalysisCache { typedef DenseMap<const SwitchInst*, SmallPtrSet<const Value *, 8> > UnswitchedValsMap; typedef UnswitchedValsMap::iterator UnswitchedValsIt; struct LoopProperties { unsigned CanBeUnswitchedCount; unsigned WasUnswitchedCount; unsigned SizeEstimation; UnswitchedValsMap UnswitchedVals; }; // Here we use std::map instead of DenseMap, since we need to keep valid // LoopProperties pointer for current loop for better performance. typedef std::map<const Loop*, LoopProperties> LoopPropsMap; typedef LoopPropsMap::iterator LoopPropsMapIt; LoopPropsMap LoopsProperties; UnswitchedValsMap *CurLoopInstructions; LoopProperties *CurrentLoopProperties; // A loop unswitching with an estimated cost above this threshold // is not performed. 
MaxSize is turned into unswitching quota for // the current loop, and reduced correspondingly, though note that // the quota is returned by releaseMemory() when the loop has been // processed, so that MaxSize will return to its previous // value. So in most cases MaxSize will equal the Threshold flag // when a new loop is processed. An exception to that is that // MaxSize will have a smaller value while processing nested loops // that were introduced due to loop unswitching of an outer loop. // // FIXME: The way that MaxSize works is subtle and depends on the // pass manager processing loops and calling releaseMemory() in a // specific order. It would be good to find a more straightforward // way of doing what MaxSize does. unsigned MaxSize; public: LUAnalysisCache() : CurLoopInstructions(nullptr), CurrentLoopProperties(nullptr), MaxSize(Threshold) {} // Analyze loop. Check its size, calculate is it possible to unswitch // it. Returns true if we can unswitch this loop. bool countLoop(const Loop *L, const TargetTransformInfo &TTI, AssumptionCache *AC); // Clean all data related to given loop. void forgetLoop(const Loop *L); // Mark case value as unswitched. // Since SI instruction can be partly unswitched, in order to avoid // extra unswitching in cloned loops keep track all unswitched values. void setUnswitched(const SwitchInst *SI, const Value *V); // Check was this case value unswitched before or not. bool isUnswitched(const SwitchInst *SI, const Value *V); // Returns true if another unswitching could be done within the cost // threshold. bool CostAllowsUnswitching(); // Clone all loop-unswitch related loop properties. // Redistribute unswitching quotas. // Note, that new loop data is stored inside the VMap. void cloneData(const Loop *NewLoop, const Loop *OldLoop, const ValueToValueMapTy &VMap); }; class LoopUnswitch : public LoopPass { LoopInfo *LI; // Loop information LPPassManager *LPM; AssumptionCache *AC; // LoopProcessWorklist - Used to check if second loop needs processing // after RewriteLoopBodyWithConditionConstant rewrites first loop. std::vector<Loop*> LoopProcessWorklist; LUAnalysisCache BranchesInfo; bool OptimizeForSize; bool redoLoop; Loop *currentLoop; DominatorTree *DT; BasicBlock *loopHeader; BasicBlock *loopPreheader; // LoopBlocks contains all of the basic blocks of the loop, including the // preheader of the loop, the body of the loop, and the exit blocks of the // loop, in that order. std::vector<BasicBlock*> LoopBlocks; // NewBlocks contained cloned copy of basic blocks from LoopBlocks. std::vector<BasicBlock*> NewBlocks; public: static char ID; // Pass ID, replacement for typeid explicit LoopUnswitch(bool Os = false) : LoopPass(ID), OptimizeForSize(Os), redoLoop(false), currentLoop(nullptr), DT(nullptr), loopHeader(nullptr), loopPreheader(nullptr) { initializeLoopUnswitchPass(*PassRegistry::getPassRegistry()); } bool runOnLoop(Loop *L, LPPassManager &LPM) override; bool processCurrentLoop(); /// This transformation requires natural loop information & requires that /// loop preheaders be inserted into the CFG. 
/// void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<AssumptionCacheTracker>(); AU.addRequiredID(LoopSimplifyID); AU.addPreservedID(LoopSimplifyID); AU.addRequired<LoopInfoWrapperPass>(); AU.addPreserved<LoopInfoWrapperPass>(); AU.addRequiredID(LCSSAID); AU.addPreservedID(LCSSAID); AU.addPreserved<DominatorTreeWrapperPass>(); AU.addPreserved<ScalarEvolution>(); AU.addRequired<TargetTransformInfoWrapperPass>(); } private: void releaseMemory() override { BranchesInfo.forgetLoop(currentLoop); } void initLoopData() { loopHeader = currentLoop->getHeader(); loopPreheader = currentLoop->getLoopPreheader(); } /// Split all of the edges from inside the loop to their exit blocks. /// Update the appropriate Phi nodes as we do so. void SplitExitEdges(Loop *L, const SmallVectorImpl<BasicBlock *> &ExitBlocks); bool UnswitchIfProfitable(Value *LoopCond, Constant *Val, TerminatorInst *TI = nullptr); void UnswitchTrivialCondition(Loop *L, Value *Cond, Constant *Val, BasicBlock *ExitBlock, TerminatorInst *TI); void UnswitchNontrivialCondition(Value *LIC, Constant *OnVal, Loop *L, TerminatorInst *TI); void RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC, Constant *Val, bool isEqual); void EmitPreheaderBranchOnCondition(Value *LIC, Constant *Val, BasicBlock *TrueDest, BasicBlock *FalseDest, Instruction *InsertPt, TerminatorInst *TI); void SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L); bool IsTrivialUnswitchCondition(Value *Cond, Constant **Val = nullptr, BasicBlock **LoopExit = nullptr); }; } // Analyze loop. Check its size, calculate is it possible to unswitch // it. Returns true if we can unswitch this loop. bool LUAnalysisCache::countLoop(const Loop *L, const TargetTransformInfo &TTI, AssumptionCache *AC) { LoopPropsMapIt PropsIt; bool Inserted; std::tie(PropsIt, Inserted) = LoopsProperties.insert(std::make_pair(L, LoopProperties())); LoopProperties &Props = PropsIt->second; if (Inserted) { // New loop. // Limit the number of instructions to avoid causing significant code // expansion, and the number of basic blocks, to avoid loops with // large numbers of branches which cause loop unswitching to go crazy. // This is a very ad-hoc heuristic. SmallPtrSet<const Value *, 32> EphValues; CodeMetrics::collectEphemeralValues(L, AC, EphValues); // FIXME: This is overly conservative because it does not take into // consideration code simplification opportunities and code that can // be shared by the resultant unswitched loops. CodeMetrics Metrics; for (Loop::block_iterator I = L->block_begin(), E = L->block_end(); I != E; ++I) Metrics.analyzeBasicBlock(*I, TTI, EphValues); Props.SizeEstimation = Metrics.NumInsts; Props.CanBeUnswitchedCount = MaxSize / (Props.SizeEstimation); Props.WasUnswitchedCount = 0; MaxSize -= Props.SizeEstimation * Props.CanBeUnswitchedCount; if (Metrics.notDuplicatable) { DEBUG(dbgs() << "NOT unswitching loop %" << L->getHeader()->getName() << ", contents cannot be " << "duplicated!\n"); return false; } } // Be careful. This links are good only before new loop addition. CurrentLoopProperties = &Props; CurLoopInstructions = &Props.UnswitchedVals; return true; } // Clean all data related to given loop. 
void LUAnalysisCache::forgetLoop(const Loop *L) {

  LoopPropsMapIt LIt = LoopsProperties.find(L);
  if (LIt != LoopsProperties.end()) {
    LoopProperties &Props = LIt->second;
    MaxSize += (Props.CanBeUnswitchedCount + Props.WasUnswitchedCount) *
               Props.SizeEstimation;
    LoopsProperties.erase(LIt);
  }

  CurrentLoopProperties = nullptr;
  CurLoopInstructions = nullptr;
}

// Mark case value as unswitched.
// Since a SwitchInst can be partially unswitched, keep track of all
// unswitched values in order to avoid extra unswitching in cloned loops.
void LUAnalysisCache::setUnswitched(const SwitchInst *SI, const Value *V) {
  (*CurLoopInstructions)[SI].insert(V);
}

// Check whether this case value has been unswitched before.
bool LUAnalysisCache::isUnswitched(const SwitchInst *SI, const Value *V) {
  return (*CurLoopInstructions)[SI].count(V);
}

bool LUAnalysisCache::CostAllowsUnswitching() {
  return CurrentLoopProperties->CanBeUnswitchedCount > 0;
}

// Clone all loop-unswitch related loop properties.
// Redistribute unswitching quotas.
// Note that the new loop's data is stored inside the VMap.
void LUAnalysisCache::cloneData(const Loop *NewLoop, const Loop *OldLoop,
                                const ValueToValueMapTy &VMap) {

  LoopProperties &NewLoopProps = LoopsProperties[NewLoop];
  LoopProperties &OldLoopProps = *CurrentLoopProperties;
  UnswitchedValsMap &Insts = OldLoopProps.UnswitchedVals;

  // Reallocate "can-be-unswitched quota"
  --OldLoopProps.CanBeUnswitchedCount;
  ++OldLoopProps.WasUnswitchedCount;
  NewLoopProps.WasUnswitchedCount = 0;
  unsigned Quota = OldLoopProps.CanBeUnswitchedCount;
  NewLoopProps.CanBeUnswitchedCount = Quota / 2;
  OldLoopProps.CanBeUnswitchedCount = Quota - Quota / 2;

  NewLoopProps.SizeEstimation = OldLoopProps.SizeEstimation;

  // Clone unswitched values info:
  // for the new loop's switches, clone the info about values that were
  // already unswitched and have redundant successors.
  for (UnswitchedValsIt I = Insts.begin(); I != Insts.end(); ++I) {
    const SwitchInst *OldInst = I->first;
    Value *NewI = VMap.lookup(OldInst);
    const SwitchInst *NewInst = cast_or_null<SwitchInst>(NewI);
    assert(NewInst && "All instructions that are in SrcBB must be in VMap.");
    NewLoopProps.UnswitchedVals[NewInst] =
        OldLoopProps.UnswitchedVals[OldInst];
  }
}

char LoopUnswitch::ID = 0;
INITIALIZE_PASS_BEGIN(LoopUnswitch, "loop-unswitch", "Unswitch loops",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_END(LoopUnswitch, "loop-unswitch", "Unswitch loops",
                    false, false)

Pass *llvm::createLoopUnswitchPass(bool Os) {
  return new LoopUnswitch(Os);
}

/// FindLIVLoopCondition - Cond is a condition that occurs in L. If it is
/// invariant in the loop, or has an invariant piece, return the invariant.
/// Otherwise, return null.
static Value *FindLIVLoopCondition(Value *Cond, Loop *L, bool &Changed) {

  // We are analyzing a new instruction; increment the scanned-instructions
  // counter.
  ++TotalInsts;

  // We can never unswitch on vector conditions.
  if (Cond->getType()->isVectorTy())
    return nullptr;

  // Constants should be folded, not unswitched on!
  if (isa<Constant>(Cond))
    return nullptr;

  // TODO: Handle: br (VARIANT|INVARIANT).

  // Hoist simple values out.
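// Illustrative example, not from the original source: for a loop whose
// branch condition is
//   %c = and i1 %varying, %invariant
// the %varying operand changes every iteration, but %invariant is defined
// outside the loop; this routine returns %invariant so the caller can
// unswitch on just that piece.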
if (L->makeLoopInvariant(Cond, Changed)) return Cond; if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Cond)) if (BO->getOpcode() == Instruction::And || BO->getOpcode() == Instruction::Or) { // If either the left or right side is invariant, we can unswitch on this, // which will cause the branch to go away in one loop and the condition to // simplify in the other one. if (Value *LHS = FindLIVLoopCondition(BO->getOperand(0), L, Changed)) return LHS; if (Value *RHS = FindLIVLoopCondition(BO->getOperand(1), L, Changed)) return RHS; } return nullptr; } bool LoopUnswitch::runOnLoop(Loop *L, LPPassManager &LPM_Ref) { if (skipOptnoneFunction(L)) return false; AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache( *L->getHeader()->getParent()); LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); LPM = &LPM_Ref; DominatorTreeWrapperPass *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>(); DT = DTWP ? &DTWP->getDomTree() : nullptr; currentLoop = L; Function *F = currentLoop->getHeader()->getParent(); bool Changed = false; do { assert(currentLoop->isLCSSAForm(*DT)); redoLoop = false; Changed |= processCurrentLoop(); } while(redoLoop); if (Changed) { // FIXME: Reconstruct dom info, because it is not preserved properly. if (DT) DT->recalculate(*F); } return Changed; } /// processCurrentLoop - Do actual work and unswitch loop if possible /// and profitable. bool LoopUnswitch::processCurrentLoop() { bool Changed = false; initLoopData(); // If LoopSimplify was unable to form a preheader, don't do any unswitching. if (!loopPreheader) return false; // Loops with indirectbr cannot be cloned. if (!currentLoop->isSafeToClone()) return false; // Without dedicated exits, splitting the exit edge may fail. if (!currentLoop->hasDedicatedExits()) return false; LLVMContext &Context = loopHeader->getContext(); // Probably we reach the quota of branches for this loop. If so // stop unswitching. if (!BranchesInfo.countLoop( currentLoop, getAnalysis<TargetTransformInfoWrapperPass>().getTTI( *currentLoop->getHeader()->getParent()), AC)) return false; // Loop over all of the basic blocks in the loop. If we find an interior // block that is branching on a loop-invariant condition, we can unswitch this // loop. for (Loop::block_iterator I = currentLoop->block_begin(), E = currentLoop->block_end(); I != E; ++I) { TerminatorInst *TI = (*I)->getTerminator(); if (BranchInst *BI = dyn_cast<BranchInst>(TI)) { // If this isn't branching on an invariant condition, we can't unswitch // it. if (BI->isConditional()) { // See if this, or some part of it, is loop invariant. If so, we can // unswitch on it if we desire. Value *LoopCond = FindLIVLoopCondition(BI->getCondition(), currentLoop, Changed); if (LoopCond && UnswitchIfProfitable(LoopCond, ConstantInt::getTrue(Context), TI)) { ++NumBranches; return true; } } } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) { Value *LoopCond = FindLIVLoopCondition(SI->getCondition(), currentLoop, Changed); unsigned NumCases = SI->getNumCases(); if (LoopCond && NumCases) { // Find a value to unswitch on: // FIXME: this should chose the most expensive case! // FIXME: scan for a case with a non-critical edge? Constant *UnswitchVal = nullptr; // Do not process same value again and again. // At this point we have some cases already unswitched and // some not yet unswitched. Let's find the first not yet unswitched one. 
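// Illustrative example, not from the original source: for
//   switch (liv) { case 0: ...; case 1: ...; case 2: ...; }
// a single pass unswitches one case value at a time; each chosen value is
// recorded via BranchesInfo.setUnswitched() so later visits to the original
// and cloned loops pick a case that has not been handled yet.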
for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); i != e; ++i) { Constant *UnswitchValCandidate = i.getCaseValue(); if (!BranchesInfo.isUnswitched(SI, UnswitchValCandidate)) { UnswitchVal = UnswitchValCandidate; break; } } if (!UnswitchVal) continue; if (UnswitchIfProfitable(LoopCond, UnswitchVal)) { ++NumSwitches; return true; } } } // Scan the instructions to check for unswitchable values. for (BasicBlock::iterator BBI = (*I)->begin(), E = (*I)->end(); BBI != E; ++BBI) if (SelectInst *SI = dyn_cast<SelectInst>(BBI)) { Value *LoopCond = FindLIVLoopCondition(SI->getCondition(), currentLoop, Changed); if (LoopCond && UnswitchIfProfitable(LoopCond, ConstantInt::getTrue(Context))) { ++NumSelects; return true; } } } return Changed; } /// isTrivialLoopExitBlock - Check to see if all paths from BB exit the /// loop with no side effects (including infinite loops). /// /// If true, we return true and set ExitBB to the block we /// exit through. /// static bool isTrivialLoopExitBlockHelper(Loop *L, BasicBlock *BB, BasicBlock *&ExitBB, std::set<BasicBlock*> &Visited) { if (!Visited.insert(BB).second) { // Already visited. Without more analysis, this could indicate an infinite // loop. return false; } if (!L->contains(BB)) { // Otherwise, this is a loop exit, this is fine so long as this is the // first exit. if (ExitBB) return false; ExitBB = BB; return true; } // Otherwise, this is an unvisited intra-loop node. Check all successors. for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI) { // Check to see if the successor is a trivial loop exit. if (!isTrivialLoopExitBlockHelper(L, *SI, ExitBB, Visited)) return false; } // Okay, everything after this looks good, check to make sure that this block // doesn't include any side effects. for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) if (I->mayHaveSideEffects()) return false; return true; } /// isTrivialLoopExitBlock - Return true if the specified block unconditionally /// leads to an exit from the specified loop, and has no side-effects in the /// process. If so, return the block that is exited to, otherwise return null. static BasicBlock *isTrivialLoopExitBlock(Loop *L, BasicBlock *BB) { std::set<BasicBlock*> Visited; Visited.insert(L->getHeader()); // Branches to header make infinite loops. BasicBlock *ExitBB = nullptr; if (isTrivialLoopExitBlockHelper(L, BB, ExitBB, Visited)) return ExitBB; return nullptr; } /// IsTrivialUnswitchCondition - Check to see if this unswitch condition is /// trivial: that is, that the condition controls whether or not the loop does /// anything at all. If this is a trivial condition, unswitching produces no /// code duplications (equivalently, it produces a simpler loop and a new empty /// loop, which gets deleted). /// /// If this is a trivial condition, return true, otherwise return false. When /// returning true, this sets Cond and Val to the condition that controls the /// trivial condition: when Cond dynamically equals Val, the loop is known to /// exit. Finally, this sets LoopExit to the BB that the loop exits to when /// Cond == Val. 
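/// Illustrative example, not from the original source: in
///   for (...) { if (Cond) break; ...body... }
/// where the header's conditional branch on Cond leads straight to a
/// side-effect-free exit block, the condition is trivial: unswitching just
/// moves the test in front of the loop and duplicates no code.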
/// bool LoopUnswitch::IsTrivialUnswitchCondition(Value *Cond, Constant **Val, BasicBlock **LoopExit) { BasicBlock *Header = currentLoop->getHeader(); TerminatorInst *HeaderTerm = Header->getTerminator(); LLVMContext &Context = Header->getContext(); BasicBlock *LoopExitBB = nullptr; if (BranchInst *BI = dyn_cast<BranchInst>(HeaderTerm)) { // If the header block doesn't end with a conditional branch on Cond, we // can't handle it. if (!BI->isConditional() || BI->getCondition() != Cond) return false; // Check to see if a successor of the branch is guaranteed to // exit through a unique exit block without having any // side-effects. If so, determine the value of Cond that causes it to do // this. if ((LoopExitBB = isTrivialLoopExitBlock(currentLoop, BI->getSuccessor(0)))) { if (Val) *Val = ConstantInt::getTrue(Context); } else if ((LoopExitBB = isTrivialLoopExitBlock(currentLoop, BI->getSuccessor(1)))) { if (Val) *Val = ConstantInt::getFalse(Context); } } else if (SwitchInst *SI = dyn_cast<SwitchInst>(HeaderTerm)) { // If this isn't a switch on Cond, we can't handle it. if (SI->getCondition() != Cond) return false; // Check to see if a successor of the switch is guaranteed to go to the // latch block or exit through a one exit block without having any // side-effects. If so, determine the value of Cond that causes it to do // this. // Note that we can't trivially unswitch on the default case or // on already unswitched cases. for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); i != e; ++i) { BasicBlock *LoopExitCandidate; if ((LoopExitCandidate = isTrivialLoopExitBlock(currentLoop, i.getCaseSuccessor()))) { // Okay, we found a trivial case, remember the value that is trivial. ConstantInt *CaseVal = i.getCaseValue(); // Check that it was not unswitched before, since already unswitched // trivial vals are looks trivial too. if (BranchesInfo.isUnswitched(SI, CaseVal)) continue; LoopExitBB = LoopExitCandidate; if (Val) *Val = CaseVal; break; } } } // If we didn't find a single unique LoopExit block, or if the loop exit block // contains phi nodes, this isn't trivial. if (!LoopExitBB || isa<PHINode>(LoopExitBB->begin())) return false; // Can't handle this. if (LoopExit) *LoopExit = LoopExitBB; // We already know that nothing uses any scalar values defined inside of this // loop. As such, we just have to check to see if this loop will execute any // side-effecting instructions (e.g. stores, calls, volatile loads) in the // part of the loop that the code *would* execute. We already checked the // tail, check the header now. for (BasicBlock::iterator I = Header->begin(), E = Header->end(); I != E; ++I) if (I->mayHaveSideEffects()) return false; return true; } /// UnswitchIfProfitable - We have found that we can unswitch currentLoop when /// LoopCond == Val to simplify the loop. If we decide that this is profitable, /// unswitch the loop, reprocess the pieces, then return true. bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val, TerminatorInst *TI) { Function *F = loopHeader->getParent(); Constant *CondVal = nullptr; BasicBlock *ExitBlock = nullptr; if (IsTrivialUnswitchCondition(LoopCond, &CondVal, &ExitBlock)) { // If the condition is trivial, always unswitch. There is no code growth // for this case. UnswitchTrivialCondition(currentLoop, LoopCond, CondVal, ExitBlock, TI); return true; } // Check to see if it would be profitable to unswitch current loop. 
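// Illustrative arithmetic, not from the original source: with the default
// Threshold of 100, a loop whose CodeMetrics size is 25 instructions starts
// with CanBeUnswitchedCount = 100 / 25 = 4 in countLoop(); each non-trivial
// unswitch consumes part of that quota (and splits the remainder with the
// clone in cloneData()), so CostAllowsUnswitching() eventually returns false.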
if (!BranchesInfo.CostAllowsUnswitching()) { DEBUG(dbgs() << "NOT unswitching loop %" << currentLoop->getHeader()->getName() << " at non-trivial condition '" << *Val << "' == " << *LoopCond << "\n" << ". Cost too high.\n"); return false; } // Do not do non-trivial unswitch while optimizing for size. if (OptimizeForSize || F->hasFnAttribute(Attribute::OptimizeForSize)) return false; UnswitchNontrivialCondition(LoopCond, Val, currentLoop, TI); return true; } /// CloneLoop - Recursively clone the specified loop and all of its children, /// mapping the blocks with the specified map. static Loop *CloneLoop(Loop *L, Loop *PL, ValueToValueMapTy &VM, LoopInfo *LI, LPPassManager *LPM) { Loop *New = new Loop(); LPM->insertLoop(New, PL); // Add all of the blocks in L to the new loop. for (Loop::block_iterator I = L->block_begin(), E = L->block_end(); I != E; ++I) if (LI->getLoopFor(*I) == L) New->addBasicBlockToLoop(cast<BasicBlock>(VM[*I]), *LI); // Add all of the subloops to the new loop. for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I) CloneLoop(*I, New, VM, LI, LPM); return New; } static void copyMetadata(Instruction *DstInst, const Instruction *SrcInst, bool Swapped) { if (!SrcInst || !SrcInst->hasMetadata()) return; SmallVector<std::pair<unsigned, MDNode *>, 4> MDs; SrcInst->getAllMetadata(MDs); for (auto &MD : MDs) { switch (MD.first) { default: break; case LLVMContext::MD_prof: if (Swapped && MD.second->getNumOperands() == 3 && isa<MDString>(MD.second->getOperand(0))) { MDString *MDName = cast<MDString>(MD.second->getOperand(0)); if (MDName->getString() == "branch_weights") { auto *ValT = cast_or_null<ConstantAsMetadata>( MD.second->getOperand(1))->getValue(); auto *ValF = cast_or_null<ConstantAsMetadata>( MD.second->getOperand(2))->getValue(); assert(ValT && ValF && "Invalid Operands of branch_weights"); auto NewMD = MDBuilder(DstInst->getParent()->getContext()) .createBranchWeights(cast<ConstantInt>(ValF)->getZExtValue(), cast<ConstantInt>(ValT)->getZExtValue()); MD.second = NewMD; } } LLVM_FALLTHROUGH; // HLSL Change case LLVMContext::MD_dbg: DstInst->setMetadata(MD.first, MD.second); } } } /// EmitPreheaderBranchOnCondition - Emit a conditional branch on two values /// if LIC == Val, branch to TrueDst, otherwise branch to FalseDest. Insert the /// code immediately before InsertPt. void LoopUnswitch::EmitPreheaderBranchOnCondition(Value *LIC, Constant *Val, BasicBlock *TrueDest, BasicBlock *FalseDest, Instruction *InsertPt, TerminatorInst *TI) { // Insert a conditional branch on LIC to the two preheaders. The original // code is the true version and the new code is the false version. Value *BranchVal = LIC; bool Swapped = false; if (!isa<ConstantInt>(Val) || Val->getType() != Type::getInt1Ty(LIC->getContext())) BranchVal = new ICmpInst(InsertPt, ICmpInst::ICMP_EQ, LIC, Val); else if (Val != ConstantInt::getTrue(Val->getContext())) { // We want to enter the new loop when the condition is true. std::swap(TrueDest, FalseDest); Swapped = true; } // Insert the new branch. BranchInst *BI = BranchInst::Create(TrueDest, FalseDest, BranchVal, InsertPt); copyMetadata(BI, TI, Swapped); // If either edge is critical, split it. This helps preserve LoopSimplify // form for enclosing loops. 
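// Illustrative note, not from the original source: an edge of the branch BI
// built above is critical when BI has two successors and the destination
// block already has other predecessors; splitting inserts a dedicated block
// on that edge so the enclosing loops keep their preheader and dedicated
// exit guarantees.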
auto Options = CriticalEdgeSplittingOptions(DT, LI).setPreserveLCSSA(); SplitCriticalEdge(BI, 0, Options); SplitCriticalEdge(BI, 1, Options); } /// UnswitchTrivialCondition - Given a loop that has a trivial unswitchable /// condition in it (a cond branch from its header block to its latch block, /// where the path through the loop that doesn't execute its body has no /// side-effects), unswitch it. This doesn't involve any code duplication, just /// moving the conditional branch outside of the loop and updating loop info. void LoopUnswitch::UnswitchTrivialCondition(Loop *L, Value *Cond, Constant *Val, BasicBlock *ExitBlock, TerminatorInst *TI) { DEBUG(dbgs() << "loop-unswitch: Trivial-Unswitch loop %" << loopHeader->getName() << " [" << L->getBlocks().size() << " blocks] in Function " << L->getHeader()->getParent()->getName() << " on cond: " << *Val << " == " << *Cond << "\n"); // First step, split the preheader, so that we know that there is a safe place // to insert the conditional branch. We will change loopPreheader to have a // conditional branch on Cond. BasicBlock *NewPH = SplitEdge(loopPreheader, loopHeader, DT, LI); // Now that we have a place to insert the conditional branch, create a place // to branch to: this is the exit block out of the loop that we should // short-circuit to. // Split this block now, so that the loop maintains its exit block, and so // that the jump from the preheader can execute the contents of the exit block // without actually branching to it (the exit block should be dominated by the // loop header, not the preheader). assert(!L->contains(ExitBlock) && "Exit block is in the loop?"); BasicBlock *NewExit = SplitBlock(ExitBlock, ExitBlock->begin(), DT, LI); // Okay, now we have a position to branch from and a position to branch to, // insert the new conditional branch. EmitPreheaderBranchOnCondition(Cond, Val, NewExit, NewPH, loopPreheader->getTerminator(), TI); LPM->deleteSimpleAnalysisValue(loopPreheader->getTerminator(), L); loopPreheader->getTerminator()->eraseFromParent(); // We need to reprocess this loop, it could be unswitched again. redoLoop = true; // Now that we know that the loop is never entered when this condition is a // particular value, rewrite the loop with this info. We know that this will // at least eliminate the old branch. RewriteLoopBodyWithConditionConstant(L, Cond, Val, false); ++NumTrivial; } /// SplitExitEdges - Split all of the edges from inside the loop to their exit /// blocks. Update the appropriate Phi nodes as we do so. void LoopUnswitch::SplitExitEdges(Loop *L, const SmallVectorImpl<BasicBlock *> &ExitBlocks){ for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) { BasicBlock *ExitBlock = ExitBlocks[i]; SmallVector<BasicBlock *, 4> Preds(pred_begin(ExitBlock), pred_end(ExitBlock)); // Although SplitBlockPredecessors doesn't preserve loop-simplify in // general, if we call it on all predecessors of all exits then it does. SplitBlockPredecessors(ExitBlock, Preds, ".us-lcssa", /*AliasAnalysis*/ nullptr, DT, LI, /*PreserveLCSSA*/ true); } } /// UnswitchNontrivialCondition - We determined that the loop is profitable /// to unswitch when LIC equal Val. Split it into loop versions and test the /// condition outside of either loop. Return the loops created as Out1/Out2. 
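/// Illustrative sketch, not from the original source, of the resulting CFG:
///
///   preheader:
///     br (LIC == Val), label %version.A, label %version.B
///
/// where one version is the original loop and the other is the ".us" clone
/// produced below; each copy is then simplified under the value of LIC that
/// is known on its side of the branch.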
void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val, Loop *L, TerminatorInst *TI) { Function *F = loopHeader->getParent(); DEBUG(dbgs() << "loop-unswitch: Unswitching loop %" << loopHeader->getName() << " [" << L->getBlocks().size() << " blocks] in Function " << F->getName() << " when '" << *Val << "' == " << *LIC << "\n"); if (ScalarEvolution *SE = getAnalysisIfAvailable<ScalarEvolution>()) SE->forgetLoop(L); LoopBlocks.clear(); NewBlocks.clear(); // First step, split the preheader and exit blocks, and add these blocks to // the LoopBlocks list. BasicBlock *NewPreheader = SplitEdge(loopPreheader, loopHeader, DT, LI); LoopBlocks.push_back(NewPreheader); // We want the loop to come after the preheader, but before the exit blocks. LoopBlocks.insert(LoopBlocks.end(), L->block_begin(), L->block_end()); SmallVector<BasicBlock*, 8> ExitBlocks; L->getUniqueExitBlocks(ExitBlocks); // Split all of the edges from inside the loop to their exit blocks. Update // the appropriate Phi nodes as we do so. SplitExitEdges(L, ExitBlocks); // The exit blocks may have been changed due to edge splitting, recompute. ExitBlocks.clear(); L->getUniqueExitBlocks(ExitBlocks); // Add exit blocks to the loop blocks. LoopBlocks.insert(LoopBlocks.end(), ExitBlocks.begin(), ExitBlocks.end()); // Next step, clone all of the basic blocks that make up the loop (including // the loop preheader and exit blocks), keeping track of the mapping between // the instructions and blocks. NewBlocks.reserve(LoopBlocks.size()); ValueToValueMapTy VMap; for (unsigned i = 0, e = LoopBlocks.size(); i != e; ++i) { BasicBlock *NewBB = CloneBasicBlock(LoopBlocks[i], VMap, ".us", F); NewBlocks.push_back(NewBB); VMap[LoopBlocks[i]] = NewBB; // Keep the BB mapping. LPM->cloneBasicBlockSimpleAnalysis(LoopBlocks[i], NewBB, L); } // Splice the newly inserted blocks into the function right before the // original preheader. F->getBasicBlockList().splice(NewPreheader, F->getBasicBlockList(), NewBlocks[0], F->end()); // FIXME: We could register any cloned assumptions instead of clearing the // whole function's cache. AC->clear(); // Now we create the new Loop object for the versioned loop. Loop *NewLoop = CloneLoop(L, L->getParentLoop(), VMap, LI, LPM); // Recalculate unswitching quota, inherit simplified switches info for NewBB, // Probably clone more loop-unswitch related loop properties. BranchesInfo.cloneData(NewLoop, L, VMap); Loop *ParentLoop = L->getParentLoop(); if (ParentLoop) { // Make sure to add the cloned preheader and exit blocks to the parent loop // as well. ParentLoop->addBasicBlockToLoop(NewBlocks[0], *LI); } for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) { BasicBlock *NewExit = cast<BasicBlock>(VMap[ExitBlocks[i]]); // The new exit block should be in the same loop as the old one. if (Loop *ExitBBLoop = LI->getLoopFor(ExitBlocks[i])) ExitBBLoop->addBasicBlockToLoop(NewExit, *LI); assert(NewExit->getTerminator()->getNumSuccessors() == 1 && "Exit block should have been split to have one successor!"); BasicBlock *ExitSucc = NewExit->getTerminator()->getSuccessor(0); // If the successor of the exit block had PHI nodes, add an entry for // NewExit. 
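// Illustrative example, not from the original source: if ExitSucc starts
// with
//   %x = phi i32 [ %v, %exit.old ]
// the loop below extends it to
//   %x = phi i32 [ %v, %exit.old ], [ %v.us, %exit.old.us ]
// where %v.us is the VMap translation of %v when %v was defined inside the
// cloned region.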
for (BasicBlock::iterator I = ExitSucc->begin(); PHINode *PN = dyn_cast<PHINode>(I); ++I) { Value *V = PN->getIncomingValueForBlock(ExitBlocks[i]); ValueToValueMapTy::iterator It = VMap.find(V); if (It != VMap.end()) V = It->second; PN->addIncoming(V, NewExit); } if (LandingPadInst *LPad = NewExit->getLandingPadInst()) { PHINode *PN = PHINode::Create(LPad->getType(), 0, "", ExitSucc->getFirstInsertionPt()); for (pred_iterator I = pred_begin(ExitSucc), E = pred_end(ExitSucc); I != E; ++I) { BasicBlock *BB = *I; LandingPadInst *LPI = BB->getLandingPadInst(); LPI->replaceAllUsesWith(PN); PN->addIncoming(LPI, BB); } } } // Rewrite the code to refer to itself. for (unsigned i = 0, e = NewBlocks.size(); i != e; ++i) for (BasicBlock::iterator I = NewBlocks[i]->begin(), E = NewBlocks[i]->end(); I != E; ++I) RemapInstruction(I, VMap,RF_NoModuleLevelChanges|RF_IgnoreMissingEntries); // Rewrite the original preheader to select between versions of the loop. BranchInst *OldBR = cast<BranchInst>(loopPreheader->getTerminator()); assert(OldBR->isUnconditional() && OldBR->getSuccessor(0) == LoopBlocks[0] && "Preheader splitting did not work correctly!"); // Emit the new branch that selects between the two versions of this loop. EmitPreheaderBranchOnCondition(LIC, Val, NewBlocks[0], LoopBlocks[0], OldBR, TI); LPM->deleteSimpleAnalysisValue(OldBR, L); OldBR->eraseFromParent(); LoopProcessWorklist.push_back(NewLoop); redoLoop = true; // Keep a WeakTrackingVH holding onto LIC. If the first call to // RewriteLoopBody deletes the instruction (for example by simplifying a PHI // that feeds into the condition that we're unswitching on), we don't rewrite // the second iteration. WeakTrackingVH LICHandle(LIC); // Now we rewrite the original code to know that the condition is true and the // new code to know that the condition is false. RewriteLoopBodyWithConditionConstant(L, LIC, Val, false); // It's possible that simplifying one loop could cause the other to be // changed to another value or a constant. If its a constant, don't simplify // it. if (!LoopProcessWorklist.empty() && LoopProcessWorklist.back() == NewLoop && LICHandle && !isa<Constant>(LICHandle)) RewriteLoopBodyWithConditionConstant(NewLoop, LICHandle, Val, true); } /// RemoveFromWorklist - Remove all instances of I from the worklist vector /// specified. static void RemoveFromWorklist(Instruction *I, std::vector<Instruction*> &Worklist) { Worklist.erase(std::remove(Worklist.begin(), Worklist.end(), I), Worklist.end()); } /// ReplaceUsesOfWith - When we find that I really equals V, remove I from the /// program, replacing all uses with V and update the worklist. static void ReplaceUsesOfWith(Instruction *I, Value *V, std::vector<Instruction*> &Worklist, Loop *L, LPPassManager *LPM) { DEBUG(dbgs() << "Replace with '" << *V << "': " << *I); // Add uses to the worklist, which may be dead now. for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) if (Instruction *Use = dyn_cast<Instruction>(I->getOperand(i))) Worklist.push_back(Use); // Add users to the worklist which may be simplified now. for (User *U : I->users()) Worklist.push_back(cast<Instruction>(U)); LPM->deleteSimpleAnalysisValue(I, L); RemoveFromWorklist(I, Worklist); I->replaceAllUsesWith(V); I->eraseFromParent(); ++NumSimplify; } // RewriteLoopBodyWithConditionConstant - We know either that the value LIC has // the value specified by Val in the specified loop, or we know it does NOT have // that value. Rewrite any uses of LIC or of properties correlated to it. 
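// Illustrative example, not from the original source: in the loop version
// that is only entered when LIC == true, an instruction such as
//   %r = select i1 %lic, i32 %a, i32 %b
// can be rewritten to use %a directly, and conditional branches on %lic now
// test a constant that later cleanup can fold away; SimplifyCode() then
// removes the resulting dead code.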
void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC, Constant *Val, bool IsEqual) { assert(!isa<Constant>(LIC) && "Why are we unswitching on a constant?"); // FIXME: Support correlated properties, like: // for (...) // if (li1 < li2) // ... // if (li1 > li2) // ... // FOLD boolean conditions (X|LIC), (X&LIC). Fold conditional branches, // selects, switches. std::vector<Instruction*> Worklist; LLVMContext &Context = Val->getContext(); // If we know that LIC == Val, or that LIC == NotVal, just replace uses of LIC // in the loop with the appropriate one directly. if (IsEqual || (isa<ConstantInt>(Val) && Val->getType()->isIntegerTy(1))) { Value *Replacement; if (IsEqual) Replacement = Val; else Replacement = ConstantInt::get(Type::getInt1Ty(Val->getContext()), !cast<ConstantInt>(Val)->getZExtValue()); for (User *U : LIC->users()) { Instruction *UI = dyn_cast<Instruction>(U); if (!UI || !L->contains(UI)) continue; Worklist.push_back(UI); } for (std::vector<Instruction*>::iterator UI = Worklist.begin(), UE = Worklist.end(); UI != UE; ++UI) (*UI)->replaceUsesOfWith(LIC, Replacement); SimplifyCode(Worklist, L); return; } // Otherwise, we don't know the precise value of LIC, but we do know that it // is certainly NOT "Val". As such, simplify any uses in the loop that we // can. This case occurs when we unswitch switch statements. for (User *U : LIC->users()) { Instruction *UI = dyn_cast<Instruction>(U); if (!UI || !L->contains(UI)) continue; Worklist.push_back(UI); // TODO: We could do other simplifications, for example, turning // 'icmp eq LIC, Val' -> false. // If we know that LIC is not Val, use this info to simplify code. SwitchInst *SI = dyn_cast<SwitchInst>(UI); if (!SI || !isa<ConstantInt>(Val)) continue; SwitchInst::CaseIt DeadCase = SI->findCaseValue(cast<ConstantInt>(Val)); // Default case is live for multiple values. if (DeadCase == SI->case_default()) continue; // Found a dead case value. Don't remove PHI nodes in the // successor if they become single-entry, those PHI nodes may // be in the Users list. BasicBlock *Switch = SI->getParent(); BasicBlock *SISucc = DeadCase.getCaseSuccessor(); BasicBlock *Latch = L->getLoopLatch(); BranchesInfo.setUnswitched(SI, Val); if (!SI->findCaseDest(SISucc)) continue; // Edge is critical. // If the DeadCase successor dominates the loop latch, then the // transformation isn't safe since it will delete the sole predecessor edge // to the latch. if (Latch && DT->dominates(SISucc, Latch)) continue; // FIXME: This is a hack. We need to keep the successor around // and hooked up so as to preserve the loop structure, because // trying to update it is complicated. So instead we preserve the // loop structure and put the block on a dead code path. SplitEdge(Switch, SISucc, DT, LI); // Compute the successors instead of relying on the return value // of SplitEdge, since it may have split the switch successor // after PHI nodes. BasicBlock *NewSISucc = DeadCase.getCaseSuccessor(); BasicBlock *OldSISucc = *succ_begin(NewSISucc); // Create an "unreachable" destination. BasicBlock *Abort = BasicBlock::Create(Context, "us-unreachable", Switch->getParent(), OldSISucc); new UnreachableInst(Context, Abort); // Force the new case destination to branch to the "unreachable" // block while maintaining a (dead) CFG edge to the old block. NewSISucc->getTerminator()->eraseFromParent(); BranchInst::Create(Abort, OldSISucc, ConstantInt::getTrue(Context), NewSISucc); // Release the PHI operands for this edge. 
for (BasicBlock::iterator II = NewSISucc->begin(); PHINode *PN = dyn_cast<PHINode>(II); ++II) PN->setIncomingValue(PN->getBasicBlockIndex(Switch), UndefValue::get(PN->getType())); // Tell the domtree about the new block. We don't fully update the // domtree here -- instead we force it to do a full recomputation // after the pass is complete -- but we do need to inform it of // new blocks. if (DT) DT->addNewBlock(Abort, NewSISucc); } SimplifyCode(Worklist, L); } /// SimplifyCode - Okay, now that we have simplified some instructions in the /// loop, walk over it and constant prop, dce, and fold control flow where /// possible. Note that this is effectively a very simple loop-structure-aware /// optimizer. During processing of this loop, L could very well be deleted, so /// it must not be used. /// /// FIXME: When the loop optimizer is more mature, separate this out to a new /// pass. /// void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) { const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); while (!Worklist.empty()) { Instruction *I = Worklist.back(); Worklist.pop_back(); // Simple DCE. if (isInstructionTriviallyDead(I)) { DEBUG(dbgs() << "Remove dead instruction '" << *I); // Add uses to the worklist, which may be dead now. for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) if (Instruction *Use = dyn_cast<Instruction>(I->getOperand(i))) Worklist.push_back(Use); LPM->deleteSimpleAnalysisValue(I, L); RemoveFromWorklist(I, Worklist); I->eraseFromParent(); ++NumSimplify; continue; } // See if instruction simplification can hack this up. This is common for // things like "select false, X, Y" after unswitching made the condition be // 'false'. TODO: update the domtree properly so we can pass it here. if (Value *V = SimplifyInstruction(I, DL)) if (LI->replacementPreservesLCSSAForm(I, V)) { ReplaceUsesOfWith(I, V, Worklist, L, LPM); continue; } // Special case hacks that appear commonly in unswitched code. if (BranchInst *BI = dyn_cast<BranchInst>(I)) { if (BI->isUnconditional()) { // If BI's parent is the only pred of the successor, fold the two blocks // together. BasicBlock *Pred = BI->getParent(); BasicBlock *Succ = BI->getSuccessor(0); BasicBlock *SinglePred = Succ->getSinglePredecessor(); if (!SinglePred) continue; // Nothing to do. assert(SinglePred == Pred && "CFG broken"); DEBUG(dbgs() << "Merging blocks: " << Pred->getName() << " <- " << Succ->getName() << "\n"); // Resolve any single entry PHI nodes in Succ. while (PHINode *PN = dyn_cast<PHINode>(Succ->begin())) ReplaceUsesOfWith(PN, PN->getIncomingValue(0), Worklist, L, LPM); // If Succ has any successors with PHI nodes, update them to have // entries coming from Pred instead of Succ. Succ->replaceAllUsesWith(Pred); // Move all of the successor contents from Succ to Pred. Pred->getInstList().splice(BI, Succ->getInstList(), Succ->begin(), Succ->end()); LPM->deleteSimpleAnalysisValue(BI, L); BI->eraseFromParent(); RemoveFromWorklist(BI, Worklist); // Remove Succ from the loop tree. LI->removeBlock(Succ); LPM->deleteSimpleAnalysisValue(Succ, L); Succ->eraseFromParent(); ++NumSimplify; continue; } continue; } } }
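The pass above rewrites LLVM IR, but the effect is easiest to see at the source level. The sketch below is illustrative only and uses invented names (sumOriginal, sumUnswitched); it is not part of LLVM or of this pass. It simply shows the duplication-versus-branch trade-off that the Threshold and quota logic above is guarding against.

// Source-level sketch of loop unswitching (illustrative, invented names).
#include <cassert>
#include <cstdio>
#include <vector>

// Original form: the loop re-tests the loop-invariant flag on every
// iteration.
static int sumOriginal(const std::vector<int> &a, bool addBias) {
  int s = 0;
  for (int v : a) {
    if (addBias) // loop-invariant condition
      s += v + 1;
    else
      s += v;
  }
  return s;
}

// Unswitched form: the invariant test is hoisted out and the loop body is
// duplicated, one copy per value of the condition.
static int sumUnswitched(const std::vector<int> &a, bool addBias) {
  int s = 0;
  if (addBias) {
    for (int v : a)
      s += v + 1;
  } else {
    for (int v : a)
      s += v;
  }
  return s;
}

int main() {
  std::vector<int> a = {1, 2, 3, 4};
  assert(sumOriginal(a, true) == sumUnswitched(a, true));
  assert(sumOriginal(a, false) == sumUnswitched(a, false));
  std::printf("both variants agree\n");
  return 0;
}

Compiled with any C++11 compiler, both variants return the same result; the unswitched form pays code size for removing the per-iteration test, which is exactly the growth the unswitching quota bounds.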
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/DxilLoopUnroll.cpp
//===- DxilLoopUnroll.cpp - Special Unroll for Constant Values ------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===// // // Special loop unroll routine for creating mandatory constant values and // loops that have exits. // // Overview of algorithm: // // 1. Identify a set of blocks to unroll. // // LLVM's concept of loop excludes exit blocks, which are blocks that no // longer have a path to the loop latch. However, some exit blocks in HLSL // also need to be unrolled. For example: // // [unroll] // for (uint i = 0; i < 4; i++) // { // if (...) // { // // This block here is an exit block, since it's. // // guaranteed to exit the loop. // ... // a[i] = ...; // Indexing requires unroll. // return; // } // } // // // 2. Create LCSSA based on the new loop boundary. // // See LCSSA.cpp for more details. It creates trivial PHI nodes for any // outgoing values of the loop at the exit blocks, so when the loop body // gets cloned, the outgoing values can be added to those PHI nodes easily. // // We are using a modified LCSSA routine here because we are including some // of the original exit blocks in the unroll. // // // 3. Unroll the loop until we succeed. // // Unlike LLVM, we do not try to find a loop count before unrolling. // Instead, we unroll to find a constant terminal condition. Give up when we // fail to do so. // // //===----------------------------------------------------------------------===// #include "llvm/ADT/SetVector.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/LoopPass.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/ScalarEvolutionExpressions.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/DiagnosticInfo.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/LegacyPassManager.h" #include "llvm/IR/Module.h" #include "llvm/IR/PredIteratorCache.h" #include "llvm/IR/Verifier.h" #include "llvm/Pass.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/Cloning.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/LoopUtils.h" #include "llvm/Transforms/Utils/PromoteMemToReg.h" #include "llvm/Transforms/Utils/SSAUpdater.h" #include "llvm/Transforms/Utils/UnrollLoop.h" #include "dxc/DXIL/DxilOperations.h" #include "dxc/DXIL/DxilUtil.h" #include "dxc/HLSL/HLModule.h" #include "llvm/Analysis/DxilValueCache.h" #include "llvm/Analysis/ValueTracking.h" #include "DxilRemoveUnstructuredLoopExits.h" using namespace llvm; using namespace hlsl; namespace { struct ClonedIteration { SmallVector<BasicBlock *, 16> Body; BasicBlock *Latch = nullptr; BasicBlock *Header = nullptr; ValueToValueMapTy VarMap; SetVector<BasicBlock *> Extended; // Blocks that are included in the clone // that are not in the core loop body. 
ClonedIteration() {} }; class DxilLoopUnroll : public LoopPass { public: static char ID; std::set<Loop *> LoopsThatFailed; unsigned MaxIterationAttempt = 0; bool OnlyWarnOnFail = false; bool StructurizeLoopExits = false; DxilLoopUnroll(unsigned MaxIterationAttempt = 1024, bool OnlyWarnOnFail = false, bool StructurizeLoopExits = false) : LoopPass(ID), MaxIterationAttempt(MaxIterationAttempt), OnlyWarnOnFail(OnlyWarnOnFail), StructurizeLoopExits(StructurizeLoopExits) { initializeDxilLoopUnrollPass(*PassRegistry::getPassRegistry()); } StringRef getPassName() const override { return "Dxil Loop Unroll"; } bool runOnLoop(Loop *L, LPPassManager &LPM) override; bool doFinalization() override; bool IsLoopSafeToClone(Loop *L); void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<LoopInfoWrapperPass>(); AU.addRequired<AssumptionCacheTracker>(); AU.addRequired<DominatorTreeWrapperPass>(); AU.addPreserved<DominatorTreeWrapperPass>(); AU.addRequired<ScalarEvolution>(); AU.addRequired<DxilValueCache>(); AU.addRequiredID(&LCSSAID); AU.addRequiredID(LoopSimplifyID); } // Function overrides that resolve options when used for DxOpt void applyOptions(PassOptions O) override { GetPassOptionUnsigned(O, "MaxIterationAttempt", &MaxIterationAttempt, false); GetPassOptionBool(O, "OnlyWarnOnFail", &OnlyWarnOnFail, false); GetPassOptionBool(O, "StructurizeLoopExits", &StructurizeLoopExits, false); } void dumpConfig(raw_ostream &OS) override { LoopPass::dumpConfig(OS); OS << ",MaxIterationAttempt=" << MaxIterationAttempt; OS << ",OnlyWarnOnFail=" << OnlyWarnOnFail; OS << ",StructurizeLoopExits=" << StructurizeLoopExits; } void RecursivelyRemoveLoopOnSuccess(LPPassManager &LPM, Loop *L); void RecursivelyRecreateSubLoopForIteration(LPPassManager &LPM, LoopInfo *LI, Loop *OuterL, Loop *L, ClonedIteration &Iter, unsigned Depth = 0); }; // Copied over from LoopUnroll.cpp - RemapInstruction() static inline void DxilLoopUnrollRemapInstruction(Instruction *I, ValueToValueMapTy &VMap) { for (unsigned op = 0, E = I->getNumOperands(); op != E; ++op) { Value *Op = I->getOperand(op); ValueToValueMapTy::iterator It = VMap.find(Op); if (It != VMap.end()) I->setOperand(op, It->second); } if (PHINode *PN = dyn_cast<PHINode>(I)) { for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { ValueToValueMapTy::iterator It = VMap.find(PN->getIncomingBlock(i)); if (It != VMap.end()) PN->setIncomingBlock(i, cast<BasicBlock>(It->second)); } } } char DxilLoopUnroll::ID; static void FailLoopUnroll(bool WarnOnly, Function *F, DebugLoc DL, const Twine &Message) { LLVMContext &Ctx = F->getContext(); DiagnosticSeverity severity = DiagnosticSeverity::DS_Error; if (WarnOnly) severity = DiagnosticSeverity::DS_Warning; Ctx.diagnose(DiagnosticInfoDxil(F, DL.get(), Message, severity)); } static bool GetConstantI1(Value *V, bool *Val = nullptr) { if (ConstantInt *C = dyn_cast<ConstantInt>(V)) { if (V->getType()->isIntegerTy(1)) { if (Val) *Val = (bool)C->getLimitedValue(); return true; } } return false; } static bool IsMarkedFullUnroll(Loop *L) { if (MDNode *LoopID = L->getLoopID()) return GetUnrollMetadata(LoopID, "llvm.loop.unroll.full"); return false; } static bool IsMarkedUnrollCount(Loop *L, int *OutCount) { if (MDNode *LoopID = L->getLoopID()) { if (MDNode *MD = GetUnrollMetadata(LoopID, "llvm.loop.unroll.count")) { assert(MD->getNumOperands() == 2 && "Unroll count hint metadata should have two operands."); ConstantInt *Val = mdconst::extract<ConstantInt>(MD->getOperand(1)); int Count = Val->getZExtValue(); *OutCount = 
Count; return true; } } return false; } static bool HasSuccessorsInLoop(BasicBlock *BB, Loop *L) { for (BasicBlock *Succ : successors(BB)) { if (L->contains(Succ)) { return true; } } return false; } static void DetachFromSuccessors(BasicBlock *BB) { SmallVector<BasicBlock *, 16> Successors(succ_begin(BB), succ_end(BB)); for (BasicBlock *Succ : Successors) { Succ->removePredecessor(BB); } } /// Return true if the specified block is in the list. static bool isExitBlock(BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &ExitBlocks) { for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) if (ExitBlocks[i] == BB) return true; return false; } // Copied and modified from LCSSA.cpp static bool processInstruction(SetVector<BasicBlock *> &Body, Loop &L, Instruction &Inst, DominatorTree &DT, // HLSL Change const SmallVectorImpl<BasicBlock *> &ExitBlocks, PredIteratorCache &PredCache, LoopInfo *LI) { SmallVector<Use *, 16> UsesToRewrite; BasicBlock *InstBB = Inst.getParent(); for (Use &U : Inst.uses()) { Instruction *User = cast<Instruction>(U.getUser()); BasicBlock *UserBB = User->getParent(); if (PHINode *PN = dyn_cast<PHINode>(User)) UserBB = PN->getIncomingBlock(U); if (InstBB != UserBB && /*!L.contains(UserBB)*/ !Body.count(UserBB)) // HLSL Change UsesToRewrite.push_back(&U); } // If there are no uses outside the loop, exit with no change. if (UsesToRewrite.empty()) return false; #if 0 // HLSL Change ++NumLCSSA; // We are applying the transformation #endif // HLSL Change // Invoke instructions are special in that their result value is not available // along their unwind edge. The code below tests to see whether DomBB // dominates // the value, so adjust DomBB to the normal destination block, which is // effectively where the value is first usable. BasicBlock *DomBB = Inst.getParent(); if (InvokeInst *Inv = dyn_cast<InvokeInst>(&Inst)) DomBB = Inv->getNormalDest(); DomTreeNode *DomNode = DT.getNode(DomBB); SmallVector<PHINode *, 16> AddedPHIs; SmallVector<PHINode *, 8> PostProcessPHIs; SSAUpdater SSAUpdate; SSAUpdate.Initialize(Inst.getType(), Inst.getName()); // Insert the LCSSA phi's into all of the exit blocks dominated by the // value, and add them to the Phi's map. for (SmallVectorImpl<BasicBlock *>::const_iterator BBI = ExitBlocks.begin(), BBE = ExitBlocks.end(); BBI != BBE; ++BBI) { BasicBlock *ExitBB = *BBI; if (!DT.dominates(DomNode, DT.getNode(ExitBB))) continue; // If we already inserted something for this BB, don't reprocess it. if (SSAUpdate.HasValueForBlock(ExitBB)) continue; PHINode *PN = PHINode::Create(Inst.getType(), PredCache.size(ExitBB), Inst.getName() + ".lcssa", ExitBB->begin()); // Add inputs from inside the loop for this PHI. for (BasicBlock *Pred : PredCache.get(ExitBB)) { PN->addIncoming(&Inst, Pred); // If the exit block has a predecessor not within the loop, arrange for // the incoming value use corresponding to that predecessor to be // rewritten in terms of a different LCSSA PHI. if (/*!L.contains(Pred)*/ !Body.count(Pred)) // HLSL Change UsesToRewrite.push_back(&PN->getOperandUse( PN->getOperandNumForIncomingValue(PN->getNumIncomingValues() - 1))); } AddedPHIs.push_back(PN); // Remember that this phi makes the value alive in this block. SSAUpdate.AddAvailableValue(ExitBB, PN); // LoopSimplify might fail to simplify some loops (e.g. when indirect // branches are involved). In such situations, it might happen that an exit // for Loop L1 is the header of a disjoint Loop L2. 
Thus, when we create // PHIs in such an exit block, we are also inserting PHIs into L2's header. // This could break LCSSA form for L2 because these inserted PHIs can also // have uses outside of L2. Remember all PHIs in such situation as to // revisit than later on. FIXME: Remove this if indirectbr support into // LoopSimplify gets improved. if (auto *OtherLoop = LI->getLoopFor(ExitBB)) if (!L.contains(OtherLoop)) PostProcessPHIs.push_back(PN); } // Rewrite all uses outside the loop in terms of the new PHIs we just // inserted. for (unsigned i = 0, e = UsesToRewrite.size(); i != e; ++i) { // If this use is in an exit block, rewrite to use the newly inserted PHI. // This is required for correctness because SSAUpdate doesn't handle uses in // the same block. It assumes the PHI we inserted is at the end of the // block. Instruction *User = cast<Instruction>(UsesToRewrite[i]->getUser()); BasicBlock *UserBB = User->getParent(); if (PHINode *PN = dyn_cast<PHINode>(User)) UserBB = PN->getIncomingBlock(*UsesToRewrite[i]); if (isa<PHINode>(UserBB->begin()) && isExitBlock(UserBB, ExitBlocks)) { // Tell the VHs that the uses changed. This updates SCEV's caches. if (UsesToRewrite[i]->get()->hasValueHandle()) ValueHandleBase::ValueIsRAUWd(*UsesToRewrite[i], UserBB->begin()); UsesToRewrite[i]->set(UserBB->begin()); continue; } // Otherwise, do full PHI insertion. SSAUpdate.RewriteUse(*UsesToRewrite[i]); } // Post process PHI instructions that were inserted into another disjoint loop // and update their exits properly. for (auto *I : PostProcessPHIs) { if (I->use_empty()) continue; BasicBlock *PHIBB = I->getParent(); Loop *OtherLoop = LI->getLoopFor(PHIBB); SmallVector<BasicBlock *, 8> EBs; OtherLoop->getExitBlocks(EBs); if (EBs.empty()) continue; // Recurse and re-process each PHI instruction. FIXME: we should really // convert this entire thing to a worklist approach where we process a // vector of instructions... SetVector<BasicBlock *> OtherLoopBody( OtherLoop->block_begin(), OtherLoop->block_end()); // HLSL Change processInstruction(OtherLoopBody, *OtherLoop, *I, DT, EBs, PredCache, LI); } // Remove PHI nodes that did not have any uses rewritten. for (unsigned i = 0, e = AddedPHIs.size(); i != e; ++i) { if (AddedPHIs[i]->use_empty()) AddedPHIs[i]->eraseFromParent(); } return true; } // Copied from LCSSA.cpp static bool blockDominatesAnExit(BasicBlock *BB, DominatorTree &DT, const SmallVectorImpl<BasicBlock *> &ExitBlocks) { DomTreeNode *DomNode = DT.getNode(BB); for (BasicBlock *Exit : ExitBlocks) if (DT.dominates(DomNode, DT.getNode(Exit))) return true; return false; } // Copied from LCSSA.cpp // // We need to recreate the LCSSA form since our loop boundary is potentially // different from the canonical one. static bool CreateLCSSA(SetVector<BasicBlock *> &Body, const SmallVectorImpl<BasicBlock *> &ExitBlocks, Loop *L, DominatorTree &DT, LoopInfo *LI) { PredIteratorCache PredCache; bool Changed = false; // Look at all the instructions in the loop, checking to see if they have uses // outside the loop. If so, rewrite those uses. for (SetVector<BasicBlock *>::iterator BBI = Body.begin(), BBE = Body.end(); BBI != BBE; ++BBI) { BasicBlock *BB = *BBI; // For large loops, avoid use-scanning by using dominance information: In // particular, if a block does not dominate any of the loop exits, then none // of the values defined in the block could be used outside the loop. 
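// Illustrative example, not from the original source: the point of this scan
// is that a value %v defined inside the new loop boundary and used beyond it
// gets an explicit merge point, e.g.
//   %v.lcssa = phi i32 [ %v, %latch ]
// in the exit block; the outside use is rewritten to %v.lcssa, and each
// cloned iteration later only has to add another incoming value to that PHI.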
if (!blockDominatesAnExit(BB, DT, ExitBlocks)) continue; for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) { // Reject two common cases fast: instructions with no uses (like stores) // and instructions with one use that is in the same block as this. if (I->use_empty() || (I->hasOneUse() && I->user_back()->getParent() == BB && !isa<PHINode>(I->user_back()))) continue; Changed |= processInstruction(Body, *L, *I, DT, ExitBlocks, PredCache, LI); } } return Changed; } static Value *GetGEPPtrOrigin(GEPOperator *GEP) { Value *Ptr = GEP->getPointerOperand(); while (Ptr) { if (AllocaInst *AI = dyn_cast<AllocaInst>(Ptr)) { return AI; } else if (GEPOperator *NewGEP = dyn_cast<GEPOperator>(Ptr)) { Ptr = NewGEP->getPointerOperand(); } else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) { return GV; } else { break; } } return nullptr; } // Find all blocks in the loop with instructions that // would require an unroll to be correct. // // For example: // for (int i = 0; i < 10; i++) { // gep i // } // static void FindProblemBlocks(BasicBlock *Header, const SmallVectorImpl<BasicBlock *> &BlocksInLoop, std::unordered_set<BasicBlock *> &ProblemBlocks, SetVector<AllocaInst *> &ProblemAllocas) { SmallVector<Instruction *, 16> WorkList; std::unordered_set<BasicBlock *> BlocksInLoopSet(BlocksInLoop.begin(), BlocksInLoop.end()); std::unordered_set<Instruction *> InstructionsSeen; for (Instruction &I : *Header) { PHINode *PN = dyn_cast<PHINode>(&I); if (!PN) break; WorkList.push_back(PN); InstructionsSeen.insert(PN); } while (WorkList.size()) { Instruction *I = WorkList.pop_back_val(); if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) { Type *EltType = GEP->getType()->getPointerElementType(); // NOTE: This is a very convservative in the following conditions: // - constant global resource arrays with external linkage (these can be // dynamically accessed) // - global resource arrays or alloca resource arrays, as long as all // writes come from the same original resource definition (which can // also be an array). // // We may want to make this more precise in the future if it becomes a // problem. // if (hlsl::dxilutil::IsHLSLObjectType(EltType)) { if (Value *Ptr = GetGEPPtrOrigin(cast<GEPOperator>(GEP))) { if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) { if (!GV->isExternalLinkage(llvm::GlobalValue::ExternalLinkage)) ProblemBlocks.insert(GEP->getParent()); } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Ptr)) { ProblemAllocas.insert(AI); ProblemBlocks.insert(GEP->getParent()); } } continue; // Stop Propagating } } for (User *U : I->users()) { if (Instruction *UserI = dyn_cast<Instruction>(U)) { if (!InstructionsSeen.count(UserI) && BlocksInLoopSet.count(UserI->getParent())) { InstructionsSeen.insert(UserI); WorkList.push_back(UserI); } } } } } // Helper function for getting GEP's const index value inline static int64_t GetGEPIndex(GEPOperator *GEP, unsigned idx) { return cast<ConstantInt>(GEP->getOperand(idx + 1))->getSExtValue(); } // Replace allocas with all constant indices with scalar allocas, then promote // them to values where possible (mem2reg). // // Before loop unroll, we did not have constant indices for arrays and SROA was // unable to break them into scalars. Now that unroll has potentially given // them constant values, we need to turn them into scalars. // // if "AllowOOBIndex" is true, it turns any out of bound index into 0. // Otherwise it emits an error and fails compilation. 
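// Illustrative example, not from the original source: after full unrolling,
//   %arr = alloca [4 x float]
// that is only addressed through GEPs with constant indices 0..3 is replaced
// by four scalar float allocas, which PromoteMemToReg() then turns into SSA
// values; a constant index outside 0..3 is either clamped to 0 when
// AllowOOBIndex is set, or reported as "Array access out of bound.".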
// template <typename IteratorT> static bool BreakUpArrayAllocas(bool AllowOOBIndex, IteratorT ItBegin, IteratorT ItEnd, DominatorTree *DT, AssumptionCache *AC, DxilValueCache *DVC) { bool Success = true; SmallVector<AllocaInst *, 8> WorkList(ItBegin, ItEnd); SmallVector<GEPOperator *, 16> GEPs; while (WorkList.size()) { AllocaInst *AI = WorkList.pop_back_val(); Type *AllocaType = AI->getAllocatedType(); // Only deal with array allocas. if (!AllocaType->isArrayTy()) continue; unsigned ArraySize = AI->getAllocatedType()->getArrayNumElements(); Type *ElementType = AllocaType->getArrayElementType(); if (!ArraySize) continue; GEPs.clear(); // Re-use array for (User *U : AI->users()) { if (GEPOperator *GEP = dyn_cast<GEPOperator>(U)) { // Try to set all GEP operands to constant if (!GEP->hasAllConstantIndices() && isa<GetElementPtrInst>(GEP)) { for (unsigned i = 0; i < GEP->getNumIndices(); i++) { Value *IndexOp = GEP->getOperand(i + 1); if (isa<Constant>(IndexOp)) continue; if (Constant *C = DVC->GetConstValue(IndexOp)) GEP->setOperand(i + 1, C); } } if (!GEP->hasAllConstantIndices() || GEP->getNumIndices() < 2 || GetGEPIndex(GEP, 0) != 0) { GEPs.clear(); break; } else { GEPs.push_back(GEP); } continue; } // Ignore uses that are only used by lifetime intrinsics. if (isa<BitCastInst>(U) && onlyUsedByLifetimeMarkers(U)) continue; // We've found something that prevents us from safely replacing this // alloca. GEPs.clear(); break; } if (!GEPs.size()) continue; SmallVector<AllocaInst *, 8> ScalarAllocas; ScalarAllocas.resize(ArraySize); IRBuilder<> B(AI); for (GEPOperator *GEP : GEPs) { int64_t idx = GetGEPIndex(GEP, 1); GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(GEP); if (idx < 0 || idx >= ArraySize) { if (AllowOOBIndex) idx = 0; else { Success = false; if (GEPInst) hlsl::dxilutil::EmitErrorOnInstruction( GEPInst, "Array access out of bound."); continue; } } AllocaInst *ScalarAlloca = ScalarAllocas[idx]; if (!ScalarAlloca) { ScalarAlloca = B.CreateAlloca(ElementType); ScalarAllocas[idx] = ScalarAlloca; if (ElementType->isArrayTy()) { WorkList.push_back(ScalarAlloca); } } Value *NewPointer = nullptr; if (ElementType->isArrayTy()) { SmallVector<Value *, 2> Indices; Indices.push_back(B.getInt32(0)); for (unsigned i = 2; i < GEP->getNumIndices(); i++) { Indices.push_back(GEP->getOperand(i + 1)); } NewPointer = B.CreateGEP(ScalarAlloca, Indices); } else { NewPointer = ScalarAlloca; } // TODO: Inherit lifetimes start/end locations from AI if available. GEP->replaceAllUsesWith(NewPointer); } if (!ElementType->isArrayTy()) { ScalarAllocas.erase( std::remove(ScalarAllocas.begin(), ScalarAllocas.end(), nullptr), ScalarAllocas.end()); PromoteMemToReg(ScalarAllocas, *DT, nullptr, AC); } } return Success; } void DxilLoopUnroll::RecursivelyRemoveLoopOnSuccess(LPPassManager &LPM, Loop *L) { // Copy the sub loops into a separate list because // the original list may change. SmallVector<Loop *, 4> SubLoops(L->getSubLoops().begin(), L->getSubLoops().end()); // Must remove all child loops first. for (Loop *SubL : SubLoops) { RecursivelyRemoveLoopOnSuccess(LPM, SubL); } // Remove any loops/subloops that failed because we are about to // delete them. This will not prevent them from being retried because // they would have been recreated for each cloned iteration. LoopsThatFailed.erase(L); // Loop is done and about to be deleted, remove it from queue. LPM.deleteLoopFromQueue(L); } // Mostly copied from Loop::isSafeToClone, but making exception // for dx.op.barrier. 
// bool DxilLoopUnroll::IsLoopSafeToClone(Loop *L) { // Return false if any loop blocks contain indirectbrs, or there are any calls // to noduplicate functions. for (Loop::block_iterator I = L->block_begin(), E = L->block_end(); I != E; ++I) { if (isa<IndirectBrInst>((*I)->getTerminator())) return false; if (const InvokeInst *II = dyn_cast<InvokeInst>((*I)->getTerminator())) if (II->cannotDuplicate()) return false; for (BasicBlock::iterator BI = (*I)->begin(), BE = (*I)->end(); BI != BE; ++BI) { if (const CallInst *CI = dyn_cast<CallInst>(BI)) { if (CI->cannotDuplicate() && !hlsl::OP::IsDxilOpFuncCallInst(CI, hlsl::OP::OpCode::Barrier)) { return false; } } } } return true; } void DxilLoopUnroll::RecursivelyRecreateSubLoopForIteration( LPPassManager &LPM, LoopInfo *LI, Loop *OuterL, Loop *L, ClonedIteration &Iter, unsigned Depth) { Loop *NewL = new Loop(); // Insert it to queue in a depth first way, otherwise `insertLoopIntoQueue` // inserts adds parent first. LPM.insertLoopIntoQueue(NewL); if (OuterL) { OuterL->addChildLoop(NewL); } else { LI->addTopLevelLoop(NewL); } // First add all the blocks. It's important that we first add them here first // (Instead of letting the recursive call do the job), since it's important // that the loop header is added FIRST. for (auto it = L->block_begin(), end = L->block_end(); it != end; it++) { BasicBlock *OriginalBB = *it; BasicBlock *NewBB = cast<BasicBlock>(Iter.VarMap[OriginalBB]); // Manually call addBlockEntry instead of addBasicBlockToLoop because // addBasicBlockToLoop also checks and sets the BB -> Loop mapping. NewL->addBlockEntry(NewBB); LI->changeLoopFor(NewBB, NewL); // Now check if the block has been added to outer loops already. This is // only necessary for the first depth of this call. if (Depth == 0) { Loop *OuterL_it = OuterL; while (OuterL_it) { OuterL_it->addBlockEntry(NewBB); OuterL_it = OuterL_it->getParentLoop(); } } } // Construct any sub-loops that exist. The BB -> Loop mapping in LI will be // rewritten to the sub-loop as needed. for (Loop *SubL : L->getSubLoops()) { RecursivelyRecreateSubLoopForIteration(LPM, LI, NewL, SubL, Iter, Depth + 1); } } static void RemapDebugInsts(BasicBlock *ClonedBB, ValueToValueMapTy &VarMap, SetVector<BasicBlock *> &ClonedFrom) { LLVMContext &Ctx = ClonedBB->getContext(); for (Instruction &I : *ClonedBB) { DbgValueInst *DV = dyn_cast<DbgValueInst>(&I); if (!DV) continue; Instruction *ValI = dyn_cast_or_null<Instruction>(DV->getValue()); if (!ValI) continue; // If this instruction is in the original cloned set, remap the debug insts if (ClonedFrom.count(ValI->getParent())) { auto it = VarMap.find(ValI); if (it != VarMap.end()) { DV->setArgOperand( 0, MetadataAsValue::get(Ctx, ValueAsMetadata::get(it->second))); } } } } bool DxilLoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) { DebugLoc LoopLoc = L->getStartLoc(); // Debug location for the start of the loop. Function *F = L->getHeader()->getParent(); ScalarEvolution *SE = &getAnalysis<ScalarEvolution>(); DxilValueCache *DVC = &getAnalysis<DxilValueCache>(); bool HasExplicitLoopCount = false; int ExplicitUnrollCountSigned = 0; // If the loop is not marked as [unroll], don't do anything. if (IsMarkedUnrollCount(L, &ExplicitUnrollCountSigned)) { HasExplicitLoopCount = true; } else if (!IsMarkedFullUnroll(L)) { return false; } unsigned ExplicitUnrollCount = 0; if (HasExplicitLoopCount) { if (ExplicitUnrollCountSigned < 1) { FailLoopUnroll(false, F, LoopLoc, "Could not unroll loop. 
Invalid unroll count."); return false; } ExplicitUnrollCount = (unsigned)ExplicitUnrollCountSigned; } if (!IsLoopSafeToClone(L)) return false; unsigned TripCount = 0; BasicBlock *ExitingBlock = L->getLoopLatch(); if (!ExitingBlock || !L->isLoopExiting(ExitingBlock)) ExitingBlock = L->getExitingBlock(); if (ExitingBlock) { TripCount = SE->getSmallConstantTripCount(L, ExitingBlock); } // Analysis passes DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); AssumptionCache *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(*F); LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); const bool HasDebugInfo = llvm::hasDebugInfo(*F->getParent()); Loop *OuterL = L->getParentLoop(); BasicBlock *Latch = L->getLoopLatch(); BasicBlock *Header = L->getHeader(); BasicBlock *Predecessor = L->getLoopPredecessor(); const DataLayout &DL = F->getParent()->getDataLayout(); // Quit if we don't have a single latch block or predecessor if (!Latch || !Predecessor) { return false; } // If the loop exit condition is not in the latch, then the loop is not // rotated. Give up. if (!cast<BranchInst>(Latch->getTerminator())->isConditional()) { return false; } SmallVector<BasicBlock *, 16> ExitBlocks; L->getExitBlocks(ExitBlocks); std::unordered_set<BasicBlock *> ExitBlockSet(ExitBlocks.begin(), ExitBlocks.end()); SmallVector<BasicBlock *, 16> BlocksInLoop; // Set of blocks including both body and exits BlocksInLoop.append(L->getBlocks().begin(), L->getBlocks().end()); BlocksInLoop.append(ExitBlocks.begin(), ExitBlocks.end()); // Heuristically find blocks that likely need to be unrolled SetVector<AllocaInst *> ProblemAllocas; std::unordered_set<BasicBlock *> ProblemBlocks; FindProblemBlocks(L->getHeader(), BlocksInLoop, ProblemBlocks, ProblemAllocas); if (StructurizeLoopExits && hlsl::RemoveUnstructuredLoopExits( L, LI, DT, /* exclude */ &ProblemBlocks)) { // Recompute the loop if we managed to simplify the exit blocks Latch = L->getLoopLatch(); ExitBlocks.clear(); L->getExitBlocks(ExitBlocks); ExitBlockSet = std::unordered_set<BasicBlock *>(ExitBlocks.begin(), ExitBlocks.end()); BlocksInLoop.clear(); BlocksInLoop.append(L->getBlocks().begin(), L->getBlocks().end()); BlocksInLoop.append(ExitBlocks.begin(), ExitBlocks.end()); } // Keep track of the PHI nodes at the header. SmallVector<PHINode *, 16> PHIs; for (auto it = Header->begin(); it != Header->end(); it++) { if (PHINode *PN = dyn_cast<PHINode>(it)) { PHIs.push_back(PN); } else { break; } } // Quick simplification of PHINode incoming values for (PHINode *PN : PHIs) { for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) { Value *OldIncomingV = PN->getIncomingValue(i); if (Instruction *IncomingI = dyn_cast<Instruction>(OldIncomingV)) { if (Value *NewIncomingV = llvm::SimplifyInstruction(IncomingI, DL)) { PN->setIncomingValue(i, NewIncomingV); } } } } SetVector<BasicBlock *> ToBeCloned; // List of blocks that will be cloned. for (BasicBlock *BB : L->getBlocks()) // Include the body right away ToBeCloned.insert(BB); // Find the exit blocks that also need to be included // in the unroll. SmallVector<BasicBlock *, 8> NewExits; // New set of exit blocks as boundaries for LCSSA SmallVector<BasicBlock *, 8> FakeExits; // Set of blocks created to allow cloning original exit blocks. for (BasicBlock *BB : ExitBlocks) { bool CloneThisExitBlock = ProblemBlocks.count(BB); if (CloneThisExitBlock) { ToBeCloned.insert(BB); // If we are cloning this basic block, we must create a new exit // block for inserting LCSSA PHI nodes. 
BasicBlock *FakeExit = BasicBlock::Create(BB->getContext(), "loop.exit.new"); F->getBasicBlockList().insert(BB, FakeExit); TerminatorInst *OldTerm = BB->getTerminator(); OldTerm->removeFromParent(); FakeExit->getInstList().push_back(OldTerm); BranchInst::Create(FakeExit, BB); for (BasicBlock *Succ : successors(FakeExit)) { for (Instruction &I : *Succ) { if (PHINode *PN = dyn_cast<PHINode>(&I)) { for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) { if (PN->getIncomingBlock(i) == BB) PN->setIncomingBlock(i, FakeExit); } } } } NewExits.push_back(FakeExit); FakeExits.push_back(FakeExit); // Update Dom tree with new exit if (!DT->getNode(FakeExit)) DT->addNewBlock(FakeExit, BB); } else { // If we are not including this exit block in the unroll, // use it for LCSSA as normal. NewExits.push_back(BB); } } // Simplify the PHI nodes that have single incoming value. The original LCSSA // form (if exists) does not necessarily work for our unroll because we may be // unrolling from a different boundary. for (BasicBlock *BB : BlocksInLoop) hlsl::dxilutil::SimplifyTrivialPHIs(BB); // Re-establish LCSSA form to get ready for unrolling. CreateLCSSA(ToBeCloned, NewExits, L, *DT, LI); SmallVector<std::unique_ptr<ClonedIteration>, 16> Iterations; // List of cloned iterations bool Succeeded = false; unsigned MaxAttempt = this->MaxIterationAttempt; // If we were able to figure out the definitive trip count, // just unroll that many times. if (TripCount != 0) { MaxAttempt = TripCount; } else if (HasExplicitLoopCount) { MaxAttempt = ExplicitUnrollCount; } for (unsigned IterationI = 0; IterationI < MaxAttempt; IterationI++) { ClonedIteration *PrevIteration = nullptr; if (Iterations.size()) PrevIteration = Iterations.back().get(); Iterations.push_back(llvm::make_unique<ClonedIteration>()); ClonedIteration &CurIteration = *Iterations.back().get(); // Clone the blocks. for (BasicBlock *BB : ToBeCloned) { BasicBlock *ClonedBB = llvm::CloneBasicBlock(BB, CurIteration.VarMap); CurIteration.VarMap[BB] = ClonedBB; ClonedBB->insertInto(F, Header); CurIteration.Body.push_back(ClonedBB); if (ExitBlockSet.count(BB)) CurIteration.Extended.insert(ClonedBB); // Identify the special blocks. if (BB == Latch) { CurIteration.Latch = ClonedBB; } if (BB == Header) { CurIteration.Header = ClonedBB; } } // Remap the debug instructions if (HasDebugInfo) { for (BasicBlock *BB : CurIteration.Body) { RemapDebugInsts(BB, CurIteration.VarMap, ToBeCloned); } } for (BasicBlock *BB : ToBeCloned) { BasicBlock *ClonedBB = cast<BasicBlock>(CurIteration.VarMap[BB]); // If branching to outside of the loop, need to update the // phi nodes there to include new values. for (BasicBlock *Succ : successors(ClonedBB)) { if (ToBeCloned.count(Succ)) continue; for (Instruction &I : *Succ) { PHINode *PN = dyn_cast<PHINode>(&I); if (!PN) break; // Find the incoming value for this new block. If there is an entry // for this block in the map, then it was defined in the loop, use it. // Otherwise it came from outside the loop. Value *OldIncoming = PN->getIncomingValueForBlock(BB); Value *NewIncoming = OldIncoming; ValueToValueMapTy::iterator Itor = CurIteration.VarMap.find(OldIncoming); if (Itor != CurIteration.VarMap.end()) NewIncoming = Itor->second; PN->addIncoming(NewIncoming, ClonedBB); } } } // Remap the instructions inside of cloned blocks. 
for (BasicBlock *BB : CurIteration.Body) { for (Instruction &I : *BB) { DxilLoopUnrollRemapInstruction(&I, CurIteration.VarMap); } } // If this is the first block if (!PrevIteration) { // Replace the phi nodes in the clone block with the values coming // from outside of the loop for (PHINode *PN : PHIs) { PHINode *ClonedPN = cast<PHINode>(CurIteration.VarMap[PN]); Value *ReplacementVal = ClonedPN->getIncomingValueForBlock(Predecessor); ClonedPN->replaceAllUsesWith(ReplacementVal); ClonedPN->eraseFromParent(); CurIteration.VarMap[PN] = ReplacementVal; } } else { // Replace the phi nodes with the value defined INSIDE the previous // iteration. for (PHINode *PN : PHIs) { PHINode *ClonedPN = cast<PHINode>(CurIteration.VarMap[PN]); Value *ReplacementVal = PN->getIncomingValueForBlock(Latch); auto itRep = PrevIteration->VarMap.find(ReplacementVal); if (itRep != PrevIteration->VarMap.end()) ReplacementVal = itRep->second; ClonedPN->replaceAllUsesWith(ReplacementVal); ClonedPN->eraseFromParent(); CurIteration.VarMap[PN] = ReplacementVal; } // Make the latch of the previous iteration branch to the header // of this new iteration. if (BranchInst *BI = dyn_cast<BranchInst>(PrevIteration->Latch->getTerminator())) { for (unsigned i = 0; i < BI->getNumSuccessors(); i++) { if (BI->getSuccessor(i) == PrevIteration->Header) { BI->setSuccessor(i, CurIteration.Header); break; } } } } // Check exit condition to see if we fully unrolled the loop if (BranchInst *BI = dyn_cast<BranchInst>(CurIteration.Latch->getTerminator())) { bool Cond = false; Value *ConstantCond = BI->getCondition(); if (Value *C = DVC->GetValue(ConstantCond)) ConstantCond = C; if (GetConstantI1(ConstantCond, &Cond)) { if (BI->getSuccessor(Cond ? 1 : 0) == CurIteration.Header) { Succeeded = true; break; } } } // We've reached the N defined in [unroll(N)] if ((HasExplicitLoopCount && IterationI + 1 >= ExplicitUnrollCount) || (TripCount != 0 && IterationI + 1 >= TripCount)) { Succeeded = true; BranchInst *BI = cast<BranchInst>(CurIteration.Latch->getTerminator()); BasicBlock *ExitBlock = nullptr; for (unsigned i = 0; i < BI->getNumSuccessors(); i++) { BasicBlock *Succ = BI->getSuccessor(i); if (Succ != CurIteration.Header) { ExitBlock = Succ; break; } } BranchInst *NewBI = BranchInst::Create(ExitBlock, BI); BI->replaceAllUsesWith(NewBI); BI->eraseFromParent(); break; } } if (Succeeded) { // Now that we successfully unrolled the loop L, if there were any sub loops // in L, we have to recreate all the sub-loops for each iteration of L that // we cloned. for (std::unique_ptr<ClonedIteration> &IterPtr : Iterations) { for (Loop *SubL : L->getSubLoops()) RecursivelyRecreateSubLoopForIteration(LPM, LI, OuterL, SubL, *IterPtr); } // We are going to be cleaning them up later. Maker sure // they're in entry block so deleting loop blocks don't // kill them too. for (AllocaInst *AI : ProblemAllocas) DXASSERT_LOCALVAR(AI, AI->getParent() == &F->getEntryBlock(), "Alloca is not in entry block."); ClonedIteration &FirstIteration = *Iterations.front().get(); // Make the predecessor branch to the first new header. 
{ BranchInst *BI = cast<BranchInst>(Predecessor->getTerminator()); for (unsigned i = 0, NumSucc = BI->getNumSuccessors(); i < NumSucc; i++) { if (BI->getSuccessor(i) == Header) { BI->setSuccessor(i, FirstIteration.Header); } } } if (OuterL) { // Core body blocks need to be added to outer loop for (size_t i = 0; i < Iterations.size(); i++) { ClonedIteration &Iteration = *Iterations[i].get(); for (BasicBlock *BB : Iteration.Body) { if (!Iteration.Extended.count(BB) && !OuterL->contains(BB)) { OuterL->addBasicBlockToLoop(BB, *LI); } } } // Our newly created exit blocks may need to be added to outer loop for (BasicBlock *BB : FakeExits) { if (HasSuccessorsInLoop(BB, OuterL)) OuterL->addBasicBlockToLoop(BB, *LI); } // Cloned exit blocks may need to be added to outer loop for (size_t i = 0; i < Iterations.size(); i++) { ClonedIteration &Iteration = *Iterations[i].get(); for (BasicBlock *BB : Iteration.Extended) { if (HasSuccessorsInLoop(BB, OuterL)) OuterL->addBasicBlockToLoop(BB, *LI); } } } SE->forgetLoop(L); // Remove the original blocks that we've cloned from all loops. for (BasicBlock *BB : ToBeCloned) LI->removeBlock(BB); // Remove loop and all child loops from queue. RecursivelyRemoveLoopOnSuccess(LPM, L); // Remove dead blocks. for (BasicBlock *BB : ToBeCloned) DetachFromSuccessors(BB); for (BasicBlock *BB : ToBeCloned) BB->dropAllReferences(); for (BasicBlock *BB : ToBeCloned) BB->eraseFromParent(); // Blocks need to be removed from DomTree. There's no easy way // to remove them in the right order, so just make DomTree // recalculate. DT->recalculate(*F); if (OuterL) { // This process may have created multiple back edges for the // parent loop. Simplify to keep it well-formed. simplifyLoop(OuterL, DT, LI, this, nullptr, nullptr, AC); } // Now that we potentially turned some GEP indices into constants, // try to clean up their allocas. if (!BreakUpArrayAllocas(OnlyWarnOnFail /* allow oob index */, ProblemAllocas.begin(), ProblemAllocas.end(), DT, AC, DVC)) { FailLoopUnroll(false, F, LoopLoc, "Could not unroll loop due to out of bound array access."); } DVC->ResetUnknowns(); return true; } // If we were unsuccessful in unrolling the loop else { // Mark loop as failed. LoopsThatFailed.insert(L); // Remove all the cloned blocks for (std::unique_ptr<ClonedIteration> &Ptr : Iterations) { ClonedIteration &Iteration = *Ptr.get(); for (BasicBlock *BB : Iteration.Body) DetachFromSuccessors(BB); } for (std::unique_ptr<ClonedIteration> &Ptr : Iterations) { ClonedIteration &Iteration = *Ptr.get(); for (BasicBlock *BB : Iteration.Body) BB->dropAllReferences(); } for (std::unique_ptr<ClonedIteration> &Ptr : Iterations) { ClonedIteration &Iteration = *Ptr.get(); for (BasicBlock *BB : Iteration.Body) BB->eraseFromParent(); } return false; } } bool DxilLoopUnroll::doFinalization() { const char *Msg = "Could not unroll loop. Loop bound could not be deduced at compile time. " "Use [unroll(n)] to give an explicit count."; if (LoopsThatFailed.size()) { for (Loop *L : LoopsThatFailed) { Function *F = L->getHeader()->getParent(); DebugLoc LoopLoc = L->getStartLoc(); // Debug location for the start of the loop. if (OnlyWarnOnFail) { FailLoopUnroll(true /*warn only*/, F, LoopLoc, Msg); } else { FailLoopUnroll(false /*warn only*/, F, LoopLoc, Twine(Msg) + Twine(" Use '-HV 2016' to treat this as warning.")); } } // This pass instance can be reused. Clear this so it doesn't blow up on the // subsequent runs. 
LoopsThatFailed.clear(); } return false; } } // namespace Pass *llvm::createDxilLoopUnrollPass(unsigned MaxIterationAttempt, bool OnlyWarnOnFail, bool StructurizeLoopExits) { return new DxilLoopUnroll(MaxIterationAttempt, OnlyWarnOnFail, StructurizeLoopExits); } INITIALIZE_PASS_BEGIN(DxilLoopUnroll, "dxil-loop-unroll", "Dxil Unroll loops", false, false) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) INITIALIZE_PASS_DEPENDENCY(LoopSimplify) INITIALIZE_PASS_DEPENDENCY(DxilValueCache) INITIALIZE_PASS_END(DxilLoopUnroll, "dxil-loop-unroll", "Dxil Unroll loops", false, false)
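// --- Illustrative sketch (not part of the original DxilLoopUnroll.cpp) ---
// The comments above FindProblemBlocks/BreakUpArrayAllocas describe why this
// pass forces unrolling: loop-varying indices into local resource arrays only
// become SROA/mem2reg-friendly once every index is a constant. The snippet
// below restates that transformation as ordinary C++ for clarity; the struct
// and function names are invented for the example and are not DXC APIs.

struct Resource { int handle; }; // stand-in for an HLSL object type

int sumHandlesBefore(Resource tex[3]) {
  int acc = 0;
  for (int i = 0; i < 3; ++i)   // the [unroll] loop in the HLSL source
    acc += tex[i].handle;       // loop-varying index: a "problem" GEP
  return acc;
}

int sumHandlesAfter(Resource tex[3]) {
  int acc = 0;
  acc += tex[0].handle;         // after full unrolling every index is a
  acc += tex[1].handle;         // constant, so BreakUpArrayAllocas can split
  acc += tex[2].handle;         // the array alloca into scalars and promote
  return acc;                   // them with mem2reg
}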
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/DCE.cpp
//===- DCE.cpp - Code to perform dead code elimination --------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements dead inst elimination and dead code elimination. // // Dead Inst Elimination performs a single pass over the function removing // instructions that are obviously dead. Dead Code Elimination is similar, but // it rechecks instructions that were used by removed instructions to see if // they are newly dead. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/IR/InstIterator.h" #include "llvm/IR/Instruction.h" #include "llvm/Pass.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Transforms/Utils/Local.h" using namespace llvm; #define DEBUG_TYPE "dce" STATISTIC(DIEEliminated, "Number of insts removed by DIE pass"); STATISTIC(DCEEliminated, "Number of insts removed"); namespace { //===--------------------------------------------------------------------===// // DeadInstElimination pass implementation // struct DeadInstElimination : public BasicBlockPass { static char ID; // Pass identification, replacement for typeid DeadInstElimination() : BasicBlockPass(ID) { initializeDeadInstEliminationPass(*PassRegistry::getPassRegistry()); } bool runOnBasicBlock(BasicBlock &BB) override { if (skipOptnoneFunction(BB)) return false; auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); TargetLibraryInfo *TLI = TLIP ? &TLIP->getTLI() : nullptr; bool Changed = false; for (BasicBlock::iterator DI = BB.begin(); DI != BB.end(); ) { Instruction *Inst = DI++; if (isInstructionTriviallyDead(Inst, TLI)) { Inst->eraseFromParent(); Changed = true; ++DIEEliminated; } } return Changed; } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesCFG(); } }; } char DeadInstElimination::ID = 0; INITIALIZE_PASS(DeadInstElimination, "die", "Dead Instruction Elimination", false, false) Pass *llvm::createDeadInstEliminationPass() { return new DeadInstElimination(); } namespace { //===--------------------------------------------------------------------===// // DeadCodeElimination pass implementation // struct DCE : public FunctionPass { static char ID; // Pass identification, replacement for typeid DCE() : FunctionPass(ID) { initializeDCEPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function &F) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesCFG(); } }; } char DCE::ID = 0; INITIALIZE_PASS(DCE, "dce", "Dead Code Elimination", false, false) bool DCE::runOnFunction(Function &F) { if (skipOptnoneFunction(F)) return false; auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); TargetLibraryInfo *TLI = TLIP ? &TLIP->getTLI() : nullptr; // Start out with all of the instructions in the worklist... SmallSetVector<Instruction*, 16> WorkList; for (inst_iterator i = inst_begin(F), e = inst_end(F); i != e; ++i) WorkList.insert(&*i); // Loop over the worklist finding instructions that are dead. If they are // dead make them drop all of their uses, making other instructions // potentially dead, and work until the worklist is empty. 
// bool MadeChange = false; while (!WorkList.empty()) { Instruction *I = WorkList.pop_back_val(); if (isInstructionTriviallyDead(I, TLI)) { // If the instruction is dead. // Loop over all of the values that the instruction uses, if there are // instructions being used, add them to the worklist, because they might // go dead after this one is removed. // for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) if (Instruction *Used = dyn_cast<Instruction>(*OI)) WorkList.insert(Used); // Remove the instruction. I->eraseFromParent(); MadeChange = true; ++DCEEliminated; } } return MadeChange; } FunctionPass *llvm::createDeadCodeEliminationPass() { return new DCE(); }
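// --- Illustrative sketch (not part of the original DCE.cpp) ---
// A minimal standalone driver showing the worklist behaviour described above:
// deleting %dead2 (which has no uses) makes %dead trivially dead as well, and
// the worklist re-checks it. The IR string and the "dce-example" name are made
// up for the example; the pass factory and the parsing/printing APIs are the
// stock LLVM ones used throughout this tree. Assumes linking against the usual
// LLVM libraries.

#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"

int main() {
  llvm::LLVMContext Ctx;
  llvm::SMDiagnostic Err;
  std::unique_ptr<llvm::Module> M = llvm::parseAssemblyString(
      "define i32 @f(i32 %x) {\n"
      "  %dead = add i32 %x, 1\n"     // only used by %dead2
      "  %dead2 = mul i32 %dead, 2\n" // has no uses at all
      "  ret i32 %x\n"
      "}\n",
      Err, Ctx);
  if (!M) {
    Err.print("dce-example", llvm::errs());
    return 1;
  }
  llvm::legacy::PassManager PM;
  PM.add(llvm::createDeadCodeEliminationPass());
  PM.run(*M);
  M->print(llvm::outs(), nullptr); // @f now contains only 'ret i32 %x'
  return 0;
}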
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/LoopRotation.cpp
//===- LoopRotation.cpp - Loop Rotation Pass ------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements Loop Rotation Pass. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/CodeMetrics.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/LoopPass.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/CFG.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/SSAUpdater.h" #include "llvm/Transforms/Utils/ValueMapper.h" using namespace llvm; #define DEBUG_TYPE "loop-rotate" #if 0 // HLSL Change Starts - option pending static cl::opt<unsigned> DefaultRotationThreshold("rotation-max-header-size", cl::init(16), cl::Hidden, cl::desc("The default maximum header size for automatic loop rotation")); #else static const unsigned DefaultRotationThreshold = 16; #endif // HLSL Change Ends STATISTIC(NumRotated, "Number of loops rotated"); namespace { class LoopRotate : public LoopPass { public: static char ID; // Pass ID, replacement for typeid LoopRotate(int SpecifiedMaxHeaderSize = -1) : LoopPass(ID) { initializeLoopRotatePass(*PassRegistry::getPassRegistry()); if (SpecifiedMaxHeaderSize == -1) MaxHeaderSize = DefaultRotationThreshold; else MaxHeaderSize = unsigned(SpecifiedMaxHeaderSize); } // LCSSA form makes instruction renaming easier. void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<AssumptionCacheTracker>(); AU.addPreserved<DominatorTreeWrapperPass>(); AU.addRequired<LoopInfoWrapperPass>(); AU.addPreserved<LoopInfoWrapperPass>(); AU.addRequiredID(LoopSimplifyID); AU.addPreservedID(LoopSimplifyID); AU.addRequiredID(LCSSAID); AU.addPreservedID(LCSSAID); AU.addPreserved<ScalarEvolution>(); AU.addRequired<TargetTransformInfoWrapperPass>(); } bool runOnLoop(Loop *L, LPPassManager &LPM) override; bool simplifyLoopLatch(Loop *L); bool rotateLoop(Loop *L, bool SimplifiedLatch); private: unsigned MaxHeaderSize; LoopInfo *LI; const TargetTransformInfo *TTI; AssumptionCache *AC; DominatorTree *DT; }; } char LoopRotate::ID = 0; INITIALIZE_PASS_BEGIN(LoopRotate, "loop-rotate", "Rotate Loops", false, false) INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopSimplify) INITIALIZE_PASS_DEPENDENCY(LCSSA) INITIALIZE_PASS_END(LoopRotate, "loop-rotate", "Rotate Loops", false, false) Pass *llvm::createLoopRotatePass(int MaxHeaderSize) { return new LoopRotate(MaxHeaderSize); } /// Rotate Loop L as many times as possible. Return true if /// the loop is rotated at least once. bool LoopRotate::runOnLoop(Loop *L, LPPassManager &LPM) { if (skipOptnoneFunction(L)) return false; // Save the loop metadata. 
MDNode *LoopMD = L->getLoopID(); Function &F = *L->getHeader()->getParent(); LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>(); DT = DTWP ? &DTWP->getDomTree() : nullptr; // Simplify the loop latch before attempting to rotate the header // upward. Rotation may not be needed if the loop tail can be folded into the // loop exit. bool SimplifiedLatch = simplifyLoopLatch(L); // One loop can be rotated multiple times. bool MadeChange = false; while (rotateLoop(L, SimplifiedLatch)) { MadeChange = true; SimplifiedLatch = false; } // Restore the loop metadata. // NB! We presume LoopRotation DOESN'T ADD its own metadata. if ((MadeChange || SimplifiedLatch) && LoopMD) L->setLoopID(LoopMD); return MadeChange; } /// RewriteUsesOfClonedInstructions - We just cloned the instructions from the /// old header into the preheader. If there were uses of the values produced by /// these instruction that were outside of the loop, we have to insert PHI nodes /// to merge the two values. Do this now. static void RewriteUsesOfClonedInstructions(BasicBlock *OrigHeader, BasicBlock *OrigPreheader, ValueToValueMapTy &ValueMap) { // Remove PHI node entries that are no longer live. BasicBlock::iterator I, E = OrigHeader->end(); for (I = OrigHeader->begin(); PHINode *PN = dyn_cast<PHINode>(I); ++I) PN->removeIncomingValue(PN->getBasicBlockIndex(OrigPreheader)); // Now fix up users of the instructions in OrigHeader, inserting PHI nodes // as necessary. SSAUpdater SSA; for (I = OrigHeader->begin(); I != E; ++I) { Value *OrigHeaderVal = I; // If there are no uses of the value (e.g. because it returns void), there // is nothing to rewrite. if (OrigHeaderVal->use_empty()) continue; Value *OrigPreHeaderVal = ValueMap[OrigHeaderVal]; // The value now exits in two versions: the initial value in the preheader // and the loop "next" value in the original header. SSA.Initialize(OrigHeaderVal->getType(), OrigHeaderVal->getName()); SSA.AddAvailableValue(OrigHeader, OrigHeaderVal); SSA.AddAvailableValue(OrigPreheader, OrigPreHeaderVal); // Visit each use of the OrigHeader instruction. for (Value::use_iterator UI = OrigHeaderVal->use_begin(), UE = OrigHeaderVal->use_end(); UI != UE; ) { // Grab the use before incrementing the iterator. Use &U = *UI; // Increment the iterator before removing the use from the list. ++UI; // SSAUpdater can't handle a non-PHI use in the same block as an // earlier def. We can easily handle those cases manually. Instruction *UserInst = cast<Instruction>(U.getUser()); if (!isa<PHINode>(UserInst)) { BasicBlock *UserBB = UserInst->getParent(); // The original users in the OrigHeader are already using the // original definitions. if (UserBB == OrigHeader) continue; // Users in the OrigPreHeader need to use the value to which the // original definitions are mapped. if (UserBB == OrigPreheader) { U = OrigPreHeaderVal; continue; } } // Anything else can be handled by SSAUpdater. SSA.RewriteUse(U); } } } /// Determine whether the instructions in this range may be safely and cheaply /// speculated. This is not an important enough situation to develop complex /// heuristics. We handle a single arithmetic instruction along with any type /// conversions. 
static bool shouldSpeculateInstrs(BasicBlock::iterator Begin, BasicBlock::iterator End, Loop *L) { bool seenIncrement = false; bool MultiExitLoop = false; if (!L->getExitingBlock()) MultiExitLoop = true; for (BasicBlock::iterator I = Begin; I != End; ++I) { if (!isSafeToSpeculativelyExecute(I)) return false; if (isa<DbgInfoIntrinsic>(I)) continue; switch (I->getOpcode()) { default: return false; case Instruction::GetElementPtr: // GEPs are cheap if all indices are constant. if (!cast<GEPOperator>(I)->hasAllConstantIndices()) return false; // fall-thru to increment case LLVM_FALLTHROUGH; // HLSL Change case Instruction::Add: case Instruction::Sub: case Instruction::And: case Instruction::Or: case Instruction::Xor: case Instruction::Shl: case Instruction::LShr: case Instruction::AShr: { Value *IVOpnd = !isa<Constant>(I->getOperand(0)) ? I->getOperand(0) : !isa<Constant>(I->getOperand(1)) ? I->getOperand(1) : nullptr; if (!IVOpnd) return false; // If increment operand is used outside of the loop, this speculation // could cause extra live range interference. if (MultiExitLoop) { for (User *UseI : IVOpnd->users()) { auto *UserInst = cast<Instruction>(UseI); if (!L->contains(UserInst)) return false; } } if (seenIncrement) return false; seenIncrement = true; break; } case Instruction::Trunc: case Instruction::ZExt: case Instruction::SExt: // ignore type conversions break; } } return true; } /// Fold the loop tail into the loop exit by speculating the loop tail /// instructions. Typically, this is a single post-increment. In the case of a /// simple 2-block loop, hoisting the increment can be much better than /// duplicating the entire loop header. In the case of loops with early exits, /// rotation will not work anyway, but simplifyLoopLatch will put the loop in /// canonical form so downstream passes can handle it. /// /// I don't believe this invalidates SCEV. bool LoopRotate::simplifyLoopLatch(Loop *L) { BasicBlock *Latch = L->getLoopLatch(); if (!Latch || Latch->hasAddressTaken()) return false; BranchInst *Jmp = dyn_cast<BranchInst>(Latch->getTerminator()); if (!Jmp || !Jmp->isUnconditional()) return false; BasicBlock *LastExit = Latch->getSinglePredecessor(); if (!LastExit || !L->isLoopExiting(LastExit)) return false; BranchInst *BI = dyn_cast<BranchInst>(LastExit->getTerminator()); if (!BI) return false; if (!shouldSpeculateInstrs(Latch->begin(), Jmp, L)) return false; DEBUG(dbgs() << "Folding loop latch " << Latch->getName() << " into " << LastExit->getName() << "\n"); // Hoist the instructions from Latch into LastExit. LastExit->getInstList().splice(BI, Latch->getInstList(), Latch->begin(), Jmp); unsigned FallThruPath = BI->getSuccessor(0) == Latch ? 0 : 1; BasicBlock *Header = Jmp->getSuccessor(0); assert(Header == L->getHeader() && "expected a backward branch"); // Remove Latch from the CFG so that LastExit becomes the new Latch. BI->setSuccessor(FallThruPath, Header); Latch->replaceSuccessorsPhiUsesWith(LastExit); Jmp->eraseFromParent(); // Nuke the Latch block. assert(Latch->empty() && "unable to evacuate Latch"); LI->removeBlock(Latch); if (DT) DT->eraseNode(Latch); Latch->eraseFromParent(); return true; } /// Rotate loop LP. Return true if the loop is rotated. /// /// \param SimplifiedLatch is true if the latch was just folded into the final /// loop exit. In this case we may want to rotate even though the new latch is /// now an exiting branch. This rotation would have happened had the latch not /// been simplified. 
However, if SimplifiedLatch is false, then we avoid /// rotating loops in which the latch exits to avoid excessive or endless /// rotation. LoopRotate should be repeatable and converge to a canonical /// form. This property is satisfied because simplifying the loop latch can only /// happen once across multiple invocations of the LoopRotate pass. bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) { // If the loop has only one block then there is not much to rotate. if (L->getBlocks().size() == 1) return false; BasicBlock *OrigHeader = L->getHeader(); BasicBlock *OrigLatch = L->getLoopLatch(); BranchInst *BI = dyn_cast<BranchInst>(OrigHeader->getTerminator()); if (!BI || BI->isUnconditional()) return false; // If the loop header is not one of the loop exiting blocks then // either this loop is already rotated or it is not // suitable for loop rotation transformations. if (!L->isLoopExiting(OrigHeader)) return false; // If the loop latch already contains a branch that leaves the loop then the // loop is already rotated. if (!OrigLatch) return false; // Rotate if either the loop latch does *not* exit the loop, or if the loop // latch was just simplified. if (L->isLoopExiting(OrigLatch) && !SimplifiedLatch) return false; // Check size of original header and reject loop if it is very big or we can't // duplicate blocks inside it. { SmallPtrSet<const Value *, 32> EphValues; CodeMetrics::collectEphemeralValues(L, AC, EphValues); CodeMetrics Metrics; Metrics.analyzeBasicBlock(OrigHeader, *TTI, EphValues); if (Metrics.notDuplicatable) { DEBUG(dbgs() << "LoopRotation: NOT rotating - contains non-duplicatable" << " instructions: "; L->dump()); return false; } if (Metrics.NumInsts > MaxHeaderSize) return false; } // Now, this loop is suitable for rotation. BasicBlock *OrigPreheader = L->getLoopPreheader(); // If the loop could not be converted to canonical form, it must have an // indirectbr in it, just give up. if (!OrigPreheader) return false; // Anything ScalarEvolution may know about this loop or the PHI nodes // in its header will soon be invalidated. if (ScalarEvolution *SE = getAnalysisIfAvailable<ScalarEvolution>()) SE->forgetLoop(L); DEBUG(dbgs() << "LoopRotation: rotating "; L->dump()); // Find new Loop header. NewHeader is a Header's one and only successor // that is inside loop. Header's other successor is outside the // loop. Otherwise loop is not suitable for rotation. BasicBlock *Exit = BI->getSuccessor(0); BasicBlock *NewHeader = BI->getSuccessor(1); if (L->contains(Exit)) std::swap(Exit, NewHeader); assert(NewHeader && "Unable to determine new loop header"); assert(L->contains(NewHeader) && !L->contains(Exit) && "Unable to determine loop header and exit blocks"); // This code assumes that the new header has exactly one predecessor. // Remove any single-entry PHI nodes in it. assert(NewHeader->getSinglePredecessor() && "New header doesn't have one pred!"); FoldSingleEntryPHINodes(NewHeader); // Begin by walking OrigHeader and populating ValueMap with an entry for // each Instruction. BasicBlock::iterator I = OrigHeader->begin(), E = OrigHeader->end(); ValueToValueMapTy ValueMap; // For PHI nodes, the value available in OldPreHeader is just the // incoming value from OldPreHeader. 
for (; PHINode *PN = dyn_cast<PHINode>(I); ++I) ValueMap[PN] = PN->getIncomingValueForBlock(OrigPreheader); const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); // For the rest of the instructions, either hoist to the OrigPreheader if // possible or create a clone in the OldPreHeader if not. TerminatorInst *LoopEntryBranch = OrigPreheader->getTerminator(); while (I != E) { Instruction *Inst = I++; // If the instruction's operands are invariant and it doesn't read or write // memory, then it is safe to hoist. Doing this doesn't change the order of // execution in the preheader, but does prevent the instruction from // executing in each iteration of the loop. This means it is safe to hoist // something that might trap, but isn't safe to hoist something that reads // memory (without proving that the loop doesn't write). if (L->hasLoopInvariantOperands(Inst) && !Inst->mayReadFromMemory() && !Inst->mayWriteToMemory() && !isa<TerminatorInst>(Inst) && !isa<DbgInfoIntrinsic>(Inst) && !isa<AllocaInst>(Inst)) { Inst->moveBefore(LoopEntryBranch); continue; } // Otherwise, create a duplicate of the instruction. Instruction *C = Inst->clone(); // Eagerly remap the operands of the instruction. RemapInstruction(C, ValueMap, RF_NoModuleLevelChanges|RF_IgnoreMissingEntries); // With the operands remapped, see if the instruction constant folds or is // otherwise simplifyable. This commonly occurs because the entry from PHI // nodes allows icmps and other instructions to fold. // FIXME: Provide TLI, DT, AC to SimplifyInstruction. Value *V = SimplifyInstruction(C, DL); if (V && LI->replacementPreservesLCSSAForm(C, V)) { // If so, then delete the temporary instruction and stick the folded value // in the map. delete C; ValueMap[Inst] = V; } else { // Otherwise, stick the new instruction into the new block! C->setName(Inst->getName()); C->insertBefore(LoopEntryBranch); ValueMap[Inst] = C; } } // Along with all the other instructions, we just cloned OrigHeader's // terminator into OrigPreHeader. Fix up the PHI nodes in each of OrigHeader's // successors by duplicating their incoming values for OrigHeader. TerminatorInst *TI = OrigHeader->getTerminator(); for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) for (BasicBlock::iterator BI = TI->getSuccessor(i)->begin(); PHINode *PN = dyn_cast<PHINode>(BI); ++BI) PN->addIncoming(PN->getIncomingValueForBlock(OrigHeader), OrigPreheader); // Now that OrigPreHeader has a clone of OrigHeader's terminator, remove // OrigPreHeader's old terminator (the original branch into the loop), and // remove the corresponding incoming values from the PHI nodes in OrigHeader. LoopEntryBranch->eraseFromParent(); // If there were any uses of instructions in the duplicated block outside the // loop, update them, inserting PHI nodes as required RewriteUsesOfClonedInstructions(OrigHeader, OrigPreheader, ValueMap); // NewHeader is now the header of the loop. L->moveToHeader(NewHeader); assert(L->getHeader() == NewHeader && "Latch block is our new header"); // At this point, we've finished our major CFG changes. As part of cloning // the loop into the preheader we've simplified instructions and the // duplicated conditional branch may now be branching on a constant. If it is // branching on a constant and if that constant means that we enter the loop, // then we fold away the cond branch to an uncond branch. This simplifies the // loop in cases important for nested loops, and it also means we don't have // to split as many edges. 
BranchInst *PHBI = cast<BranchInst>(OrigPreheader->getTerminator()); assert(PHBI->isConditional() && "Should be clone of BI condbr!"); if (!isa<ConstantInt>(PHBI->getCondition()) || PHBI->getSuccessor(cast<ConstantInt>(PHBI->getCondition())->isZero()) != NewHeader) { // The conditional branch can't be folded, handle the general case. // Update DominatorTree to reflect the CFG change we just made. Then split // edges as necessary to preserve LoopSimplify form. if (DT) { // Everything that was dominated by the old loop header is now dominated // by the original loop preheader. Conceptually the header was merged // into the preheader, even though we reuse the actual block as a new // loop latch. DomTreeNode *OrigHeaderNode = DT->getNode(OrigHeader); SmallVector<DomTreeNode *, 8> HeaderChildren(OrigHeaderNode->begin(), OrigHeaderNode->end()); DomTreeNode *OrigPreheaderNode = DT->getNode(OrigPreheader); for (unsigned I = 0, E = HeaderChildren.size(); I != E; ++I) DT->changeImmediateDominator(HeaderChildren[I], OrigPreheaderNode); assert(DT->getNode(Exit)->getIDom() == OrigPreheaderNode); assert(DT->getNode(NewHeader)->getIDom() == OrigPreheaderNode); // Update OrigHeader to be dominated by the new header block. DT->changeImmediateDominator(OrigHeader, OrigLatch); } // Right now OrigPreHeader has two successors, NewHeader and ExitBlock, and // thus is not a preheader anymore. // Split the edge to form a real preheader. BasicBlock *NewPH = SplitCriticalEdge( OrigPreheader, NewHeader, CriticalEdgeSplittingOptions(DT, LI).setPreserveLCSSA()); NewPH->setName(NewHeader->getName() + ".lr.ph"); // Preserve canonical loop form, which means that 'Exit' should have only // one predecessor. Note that Exit could be an exit block for multiple // nested loops, causing both of the edges to now be critical and need to // be split. SmallVector<BasicBlock *, 4> ExitPreds(pred_begin(Exit), pred_end(Exit)); bool SplitLatchEdge = false; for (SmallVectorImpl<BasicBlock *>::iterator PI = ExitPreds.begin(), PE = ExitPreds.end(); PI != PE; ++PI) { // We only need to split loop exit edges. Loop *PredLoop = LI->getLoopFor(*PI); if (!PredLoop || PredLoop->contains(Exit)) continue; if (isa<IndirectBrInst>((*PI)->getTerminator())) continue; SplitLatchEdge |= L->getLoopLatch() == *PI; BasicBlock *ExitSplit = SplitCriticalEdge( *PI, Exit, CriticalEdgeSplittingOptions(DT, LI).setPreserveLCSSA()); ExitSplit->moveBefore(Exit); } assert(SplitLatchEdge && "Despite splitting all preds, failed to split latch exit?"); } else { // We can fold the conditional branch in the preheader, this makes things // simpler. The first step is to remove the extra edge to the Exit block. Exit->removePredecessor(OrigPreheader, true /*preserve LCSSA*/); BranchInst *NewBI = BranchInst::Create(NewHeader, PHBI); NewBI->setDebugLoc(PHBI->getDebugLoc()); PHBI->eraseFromParent(); // With our CFG finalized, update DomTree if it is available. if (DT) { // Update OrigHeader to be dominated by the new header block. DT->changeImmediateDominator(NewHeader, OrigPreheader); DT->changeImmediateDominator(OrigHeader, OrigLatch); // Brute force incremental dominator tree update. Call // findNearestCommonDominator on all CFG predecessors of each child of the // original header. 
DomTreeNode *OrigHeaderNode = DT->getNode(OrigHeader); SmallVector<DomTreeNode *, 8> HeaderChildren(OrigHeaderNode->begin(), OrigHeaderNode->end()); bool Changed; do { Changed = false; for (unsigned I = 0, E = HeaderChildren.size(); I != E; ++I) { DomTreeNode *Node = HeaderChildren[I]; BasicBlock *BB = Node->getBlock(); pred_iterator PI = pred_begin(BB); BasicBlock *NearestDom = *PI; for (pred_iterator PE = pred_end(BB); PI != PE; ++PI) NearestDom = DT->findNearestCommonDominator(NearestDom, *PI); // Remember if this changes the DomTree. if (Node->getIDom()->getBlock() != NearestDom) { DT->changeImmediateDominator(BB, NearestDom); Changed = true; } } // If the dominator changed, this may have an effect on other // predecessors, continue until we reach a fixpoint. } while (Changed); } } assert(L->getLoopPreheader() && "Invalid loop preheader after loop rotation"); assert(L->getLoopLatch() && "Invalid loop latch after loop rotation"); // Now that the CFG and DomTree are in a consistent state again, try to merge // the OrigHeader block into OrigLatch. This will succeed if they are // connected by an unconditional branch. This is just a cleanup so the // emitted code isn't too gross in this common case. MergeBlockIntoPredecessor(OrigHeader, DT, LI); DEBUG(dbgs() << "LoopRotation: into "; L->dump()); ++NumRotated; return true; }
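// --- Illustrative sketch (not part of the original LoopRotation.cpp) ---
// The source-level shape of the rotation performed above, with invented
// names. Before rotation the header both tests the condition and leads into
// the body; after rotation the test is cloned into a guard in the preheader
// and the latch carries the exit test, giving the do-while form that later
// passes (LICM, unrolling, etc.) handle best.

int sumBeforeRotation(const int *a, int n) {
  int s = 0;
  for (int i = 0; i < n; ++i) // header: test i < n, then fall into the body
    s += a[i];
  return s;
}

int sumAfterRotation(const int *a, int n) {
  int s = 0;
  if (0 < n) {                // guard: the header test cloned into the preheader
    int i = 0;
    do {
      s += a[i];
      ++i;
    } while (i < n);          // latch is now the (single) exiting block
  }
  return s;
}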
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/Float2Int.cpp
//===- Float2Int.cpp - Demote floating point ops to work on integers ------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the Float2Int pass, which aims to demote floating // point operations to work on integers, where that is losslessly possible. // //===----------------------------------------------------------------------===// #define DEBUG_TYPE "float2int" #include "llvm/ADT/APInt.h" #include "llvm/ADT/APSInt.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/EquivalenceClasses.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/SmallVector.h" #include "llvm/IR/ConstantRange.h" #include "llvm/IR/Constants.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/InstIterator.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar.h" #include <deque> #include <functional> // For std::function using namespace llvm; // The algorithm is simple. Start at instructions that convert from the // float to the int domain: fptoui, fptosi and fcmp. Walk up the def-use // graph, using an equivalence datastructure to unify graphs that interfere. // // Mappable instructions are those with an integer corrollary that, given // integer domain inputs, produce an integer output; fadd, for example. // // If a non-mappable instruction is seen, this entire def-use graph is marked // as non-transformable. If we see an instruction that converts from the // integer domain to FP domain (uitofp,sitofp), we terminate our walk. #if 0 // HLSL Change Starts - option pending /// The largest integer type worth dealing with. static cl::opt<unsigned> MaxIntegerBW("float2int-max-integer-bw", cl::init(64), cl::Hidden, cl::desc("Max integer bitwidth to consider in float2int" "(default=64)")); #else static const unsigned MaxIntegerBW = 64; #endif // HLSL Change Ends namespace { struct Float2Int : public FunctionPass { static char ID; // Pass identification, replacement for typeid Float2Int() : FunctionPass(ID) { initializeFloat2IntPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function &F) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesCFG(); } void findRoots(Function &F, SmallPtrSet<Instruction*,8> &Roots); ConstantRange seen(Instruction *I, ConstantRange R); ConstantRange badRange(); ConstantRange unknownRange(); ConstantRange validateRange(ConstantRange R); void walkBackwards(const SmallPtrSetImpl<Instruction*> &Roots); void walkForwards(); bool validateAndTransform(); Value *convert(Instruction *I, Type *ToTy); void cleanup(); MapVector<Instruction*, ConstantRange > SeenInsts; SmallPtrSet<Instruction*,8> Roots; EquivalenceClasses<Instruction*> ECs; MapVector<Instruction*, Value*> ConvertedInsts; LLVMContext *Ctx; }; } char Float2Int::ID = 0; INITIALIZE_PASS(Float2Int, "float2int", "Float to int", false, false) // Given a FCmp predicate, return a matching ICmp predicate if one // exists, otherwise return BAD_ICMP_PREDICATE. 
static CmpInst::Predicate mapFCmpPred(CmpInst::Predicate P) { switch (P) { case CmpInst::FCMP_OEQ: case CmpInst::FCMP_UEQ: return CmpInst::ICMP_EQ; case CmpInst::FCMP_OGT: case CmpInst::FCMP_UGT: return CmpInst::ICMP_SGT; case CmpInst::FCMP_OGE: case CmpInst::FCMP_UGE: return CmpInst::ICMP_SGE; case CmpInst::FCMP_OLT: case CmpInst::FCMP_ULT: return CmpInst::ICMP_SLT; case CmpInst::FCMP_OLE: case CmpInst::FCMP_ULE: return CmpInst::ICMP_SLE; case CmpInst::FCMP_ONE: case CmpInst::FCMP_UNE: return CmpInst::ICMP_NE; default: return CmpInst::BAD_ICMP_PREDICATE; } } // Given a floating point binary operator, return the matching // integer version. static Instruction::BinaryOps mapBinOpcode(unsigned Opcode) { switch (Opcode) { default: llvm_unreachable("Unhandled opcode!"); case Instruction::FAdd: return Instruction::Add; case Instruction::FSub: return Instruction::Sub; case Instruction::FMul: return Instruction::Mul; } } // Find the roots - instructions that convert from the FP domain to // integer domain. void Float2Int::findRoots(Function &F, SmallPtrSet<Instruction*,8> &Roots) { for (auto &I : inst_range(F)) { switch (I.getOpcode()) { default: break; case Instruction::FPToUI: case Instruction::FPToSI: Roots.insert(&I); break; case Instruction::FCmp: if (mapFCmpPred(cast<CmpInst>(&I)->getPredicate()) != CmpInst::BAD_ICMP_PREDICATE) Roots.insert(&I); break; } } } // Helper - mark I as having been traversed, having range R. ConstantRange Float2Int::seen(Instruction *I, ConstantRange R) { DEBUG(dbgs() << "F2I: " << *I << ":" << R << "\n"); if (SeenInsts.find(I) != SeenInsts.end()) SeenInsts.find(I)->second = R; else SeenInsts.insert(std::make_pair(I, R)); return R; } // Helper - get a range representing a poison value. ConstantRange Float2Int::badRange() { return ConstantRange(MaxIntegerBW + 1, true); } ConstantRange Float2Int::unknownRange() { return ConstantRange(MaxIntegerBW + 1, false); } ConstantRange Float2Int::validateRange(ConstantRange R) { if (R.getBitWidth() > MaxIntegerBW + 1) return badRange(); return R; } // The most obvious way to structure the search is a depth-first, eager // search from each root. However, that require direct recursion and so // can only handle small instruction sequences. Instead, we split the search // up into two phases: // - walkBackwards: A breadth-first walk of the use-def graph starting from // the roots. Populate "SeenInsts" with interesting // instructions and poison values if they're obvious and // cheap to compute. Calculate the equivalance set structure // while we're here too. // - walkForwards: Iterate over SeenInsts in reverse order, so we visit // defs before their uses. Calculate the real range info. // Breadth-first walk of the use-def graph; determine the set of nodes // we care about and eagerly determine if some of them are poisonous. void Float2Int::walkBackwards(const SmallPtrSetImpl<Instruction*> &Roots) { std::deque<Instruction*> Worklist(Roots.begin(), Roots.end()); while (!Worklist.empty()) { Instruction *I = Worklist.back(); Worklist.pop_back(); if (SeenInsts.find(I) != SeenInsts.end()) // Seen already. continue; switch (I->getOpcode()) { // FIXME: Handle select and phi nodes. default: // Path terminated uncleanly. seen(I, badRange()); break; case Instruction::UIToFP: { // Path terminated cleanly. 
unsigned BW = I->getOperand(0)->getType()->getPrimitiveSizeInBits(); APInt Min = APInt::getMinValue(BW).zextOrSelf(MaxIntegerBW+1); APInt Max = APInt::getMaxValue(BW).zextOrSelf(MaxIntegerBW+1); seen(I, validateRange(ConstantRange(Min, Max))); continue; } case Instruction::SIToFP: { // Path terminated cleanly. unsigned BW = I->getOperand(0)->getType()->getPrimitiveSizeInBits(); APInt SMin = APInt::getSignedMinValue(BW).sextOrSelf(MaxIntegerBW+1); APInt SMax = APInt::getSignedMaxValue(BW).sextOrSelf(MaxIntegerBW+1); seen(I, validateRange(ConstantRange(SMin, SMax))); continue; } case Instruction::FAdd: case Instruction::FSub: case Instruction::FMul: case Instruction::FPToUI: case Instruction::FPToSI: case Instruction::FCmp: seen(I, unknownRange()); break; } for (Value *O : I->operands()) { if (Instruction *OI = dyn_cast<Instruction>(O)) { // Unify def-use chains if they interfere. ECs.unionSets(I, OI); if (SeenInsts.find(I)->second != badRange()) Worklist.push_back(OI); } else if (!isa<ConstantFP>(O)) { // Not an instruction or ConstantFP? we can't do anything. seen(I, badRange()); } } } } // Walk forwards down the list of seen instructions, so we visit defs before // uses. void Float2Int::walkForwards() { for (auto It = SeenInsts.rbegin(), E = SeenInsts.rend(); It != E; ++It) { if (It->second != unknownRange()) continue; Instruction *I = It->first; std::function<ConstantRange(ArrayRef<ConstantRange>)> Op; switch (I->getOpcode()) { // FIXME: Handle select and phi nodes. default: case Instruction::UIToFP: case Instruction::SIToFP: llvm_unreachable("Should have been handled in walkForwards!"); case Instruction::FAdd: Op = [](ArrayRef<ConstantRange> Ops) { assert(Ops.size() == 2 && "FAdd is a binary operator!"); return Ops[0].add(Ops[1]); }; break; case Instruction::FSub: Op = [](ArrayRef<ConstantRange> Ops) { assert(Ops.size() == 2 && "FSub is a binary operator!"); return Ops[0].sub(Ops[1]); }; break; case Instruction::FMul: Op = [](ArrayRef<ConstantRange> Ops) { assert(Ops.size() == 2 && "FMul is a binary operator!"); return Ops[0].multiply(Ops[1]); }; break; // // Root-only instructions - we'll only see these if they're the // first node in a walk. // case Instruction::FPToUI: case Instruction::FPToSI: Op = [](ArrayRef<ConstantRange> Ops) { assert(Ops.size() == 1 && "FPTo[US]I is a unary operator!"); return Ops[0]; }; break; case Instruction::FCmp: Op = [](ArrayRef<ConstantRange> Ops) { assert(Ops.size() == 2 && "FCmp is a binary operator!"); return Ops[0].unionWith(Ops[1]); }; break; } bool Abort = false; SmallVector<ConstantRange,4> OpRanges; for (Value *O : I->operands()) { if (Instruction *OI = dyn_cast<Instruction>(O)) { assert(SeenInsts.find(OI) != SeenInsts.end() && "def not seen before use!"); OpRanges.push_back(SeenInsts.find(OI)->second); } else if (ConstantFP *CF = dyn_cast<ConstantFP>(O)) { // Work out if the floating point number can be losslessly represented // as an integer. // APFloat::convertToInteger(&Exact) purports to do what we want, but // the exactness can be too precise. For example, negative zero can // never be exactly converted to an integer. // // Instead, we ask APFloat to round itself to an integral value - this // preserves sign-of-zero - then compare the result with the original. // APFloat F = CF->getValueAPF(); // First, weed out obviously incorrect values. Non-finite numbers // can't be represented and neither can negative zero, unless // we're in fast math mode. 
if (!F.isFinite() || (F.isZero() && F.isNegative() && isa<FPMathOperator>(I) && !I->hasNoSignedZeros())) { seen(I, badRange()); Abort = true; break; } APFloat NewF = F; auto Res = NewF.roundToIntegral(APFloat::rmNearestTiesToEven); if (Res != APFloat::opOK || NewF.compare(F) != APFloat::cmpEqual) { seen(I, badRange()); Abort = true; break; } // OK, it's representable. Now get it. APSInt Int(MaxIntegerBW+1, false); bool Exact; CF->getValueAPF().convertToInteger(Int, APFloat::rmNearestTiesToEven, &Exact); OpRanges.push_back(ConstantRange(Int)); } else { llvm_unreachable("Should have already marked this as badRange!"); } } // Reduce the operands' ranges to a single range and return. if (!Abort) seen(I, Op(OpRanges)); } } // If there is a valid transform to be done, do it. bool Float2Int::validateAndTransform() { bool MadeChange = false; // Iterate over every disjoint partition of the def-use graph. for (auto It = ECs.begin(), E = ECs.end(); It != E; ++It) { ConstantRange R(MaxIntegerBW + 1, false); bool Fail = false; Type *ConvertedToTy = nullptr; // For every member of the partition, union all the ranges together. for (auto MI = ECs.member_begin(It), ME = ECs.member_end(); MI != ME; ++MI) { Instruction *I = *MI; auto SeenI = SeenInsts.find(I); if (SeenI == SeenInsts.end()) continue; R = R.unionWith(SeenI->second); // We need to ensure I has no users that have not been seen. // If it does, transformation would be illegal. // // Don't count the roots, as they terminate the graphs. if (Roots.count(I) == 0) { // Set the type of the conversion while we're here. if (!ConvertedToTy) ConvertedToTy = I->getType(); for (User *U : I->users()) { Instruction *UI = dyn_cast<Instruction>(U); if (!UI || SeenInsts.find(UI) == SeenInsts.end()) { DEBUG(dbgs() << "F2I: Failing because of " << *U << "\n"); Fail = true; break; } } } if (Fail) break; } // If the set was empty, or we failed, or the range is poisonous, // bail out. if (ECs.member_begin(It) == ECs.member_end() || Fail || R.isFullSet() || R.isSignWrappedSet()) continue; assert(ConvertedToTy && "Must have set the convertedtoty by this point!"); // The number of bits required is the maximum of the upper and // lower limits, plus one so it can be signed. unsigned MinBW = std::max(R.getLower().getMinSignedBits(), R.getUpper().getMinSignedBits()) + 1; DEBUG(dbgs() << "F2I: MinBitwidth=" << MinBW << ", R: " << R << "\n"); // If we've run off the realms of the exactly representable integers, // the floating point result will differ from an integer approximation. // Do we need more bits than are in the mantissa of the type we converted // to? semanticsPrecision returns the number of mantissa bits plus one // for the sign bit. unsigned MaxRepresentableBits = APFloat::semanticsPrecision(ConvertedToTy->getFltSemantics()) - 1; if (MinBW > MaxRepresentableBits) { DEBUG(dbgs() << "F2I: Value not guaranteed to be representable!\n"); continue; } if (MinBW > 64) { DEBUG(dbgs() << "F2I: Value requires more than 64 bits to represent!\n"); continue; } // OK, R is known to be representable. Now pick a type for it. // FIXME: Pick the smallest legal type that will fit. Type *Ty = (MinBW > 32) ? Type::getInt64Ty(*Ctx) : Type::getInt32Ty(*Ctx); for (auto MI = ECs.member_begin(It), ME = ECs.member_end(); MI != ME; ++MI) convert(*MI, Ty); MadeChange = true; } return MadeChange; } Value *Float2Int::convert(Instruction *I, Type *ToTy) { if (ConvertedInsts.find(I) != ConvertedInsts.end()) // Already converted this instruction. 
return ConvertedInsts[I]; SmallVector<Value*,4> NewOperands; for (Value *V : I->operands()) { // Don't recurse if we're an instruction that terminates the path. if (I->getOpcode() == Instruction::UIToFP || I->getOpcode() == Instruction::SIToFP) { NewOperands.push_back(V); } else if (Instruction *VI = dyn_cast<Instruction>(V)) { NewOperands.push_back(convert(VI, ToTy)); } else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) { APSInt Val(ToTy->getPrimitiveSizeInBits(), /*IsUnsigned=*/false); bool Exact; CF->getValueAPF().convertToInteger(Val, APFloat::rmNearestTiesToEven, &Exact); NewOperands.push_back(ConstantInt::get(ToTy, Val)); } else { llvm_unreachable("Unhandled operand type?"); } } // Now create a new instruction. IRBuilder<> IRB(I); Value *NewV = nullptr; switch (I->getOpcode()) { default: llvm_unreachable("Unhandled instruction!"); case Instruction::FPToUI: NewV = IRB.CreateZExtOrTrunc(NewOperands[0], I->getType()); break; case Instruction::FPToSI: NewV = IRB.CreateSExtOrTrunc(NewOperands[0], I->getType()); break; case Instruction::FCmp: { CmpInst::Predicate P = mapFCmpPred(cast<CmpInst>(I)->getPredicate()); assert(P != CmpInst::BAD_ICMP_PREDICATE && "Unhandled predicate!"); NewV = IRB.CreateICmp(P, NewOperands[0], NewOperands[1], I->getName()); break; } case Instruction::UIToFP: NewV = IRB.CreateZExtOrTrunc(NewOperands[0], ToTy); break; case Instruction::SIToFP: NewV = IRB.CreateSExtOrTrunc(NewOperands[0], ToTy); break; case Instruction::FAdd: case Instruction::FSub: case Instruction::FMul: NewV = IRB.CreateBinOp(mapBinOpcode(I->getOpcode()), NewOperands[0], NewOperands[1], I->getName()); break; } // If we're a root instruction, RAUW. if (Roots.count(I)) I->replaceAllUsesWith(NewV); ConvertedInsts[I] = NewV; return NewV; } // Perform dead code elimination on the instructions we just modified. void Float2Int::cleanup() { for (auto I = ConvertedInsts.rbegin(), E = ConvertedInsts.rend(); I != E; ++I) I->first->eraseFromParent(); } bool Float2Int::runOnFunction(Function &F) { if (skipOptnoneFunction(F)) return false; DEBUG(dbgs() << "F2I: Looking at function " << F.getName() << "\n"); // Clear out all state. ECs = EquivalenceClasses<Instruction*>(); SeenInsts.clear(); ConvertedInsts.clear(); Roots.clear(); Ctx = &F.getParent()->getContext(); findRoots(F, Roots); walkBackwards(Roots); walkForwards(); bool Modified = validateAndTransform(); if (Modified) cleanup(); return Modified; } FunctionPass *llvm::createFloat2IntPass() { return new Float2Int(); }
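A minimal, hypothetical sketch (not part of the file above) of how a client could schedule Float2Int through the legacy pass manager; the wrapper name runFloat2IntOn is invented, and only createFloat2IntPass() comes from the pass source itself.

// Illustrative only: run Float2Int over a module with the legacy pass manager.
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Scalar.h"

static void runFloat2IntOn(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  PM.add(llvm::createFloat2IntPass()); // demotes float arithmetic on small integer ranges to integer ops
  PM.run(M);                           // visits every function in M
}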
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/LoopDeletion.cpp
//===- LoopDeletion.cpp - Dead Loop Deletion Pass ---------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the Dead Loop Deletion Pass. This pass is responsible // for eliminating loops with non-infinite computable trip counts that have no // side effects or volatile instructions, and do not contribute to the // computation of the function's return value. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/LoopPass.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/IR/Dominators.h" using namespace llvm; #define DEBUG_TYPE "loop-delete" STATISTIC(NumDeleted, "Number of loops deleted"); namespace { class LoopDeletion : public LoopPass { public: static char ID; // Pass ID, replacement for typeid LoopDeletion() : LoopPass(ID) { initializeLoopDeletionPass(*PassRegistry::getPassRegistry()); } // Possibly eliminate loop L if it is dead. bool runOnLoop(Loop *L, LPPassManager &LPM) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<DominatorTreeWrapperPass>(); AU.addRequired<LoopInfoWrapperPass>(); AU.addRequired<ScalarEvolution>(); AU.addRequiredID(LoopSimplifyID); AU.addRequiredID(LCSSAID); AU.addPreserved<ScalarEvolution>(); AU.addPreserved<DominatorTreeWrapperPass>(); AU.addPreserved<LoopInfoWrapperPass>(); AU.addPreservedID(LoopSimplifyID); AU.addPreservedID(LCSSAID); } private: bool isLoopDead(Loop *L, SmallVectorImpl<BasicBlock *> &exitingBlocks, SmallVectorImpl<BasicBlock *> &exitBlocks, bool &Changed, BasicBlock *Preheader); }; } char LoopDeletion::ID = 0; INITIALIZE_PASS_BEGIN(LoopDeletion, "loop-deletion", "Delete dead loops", false, false) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) INITIALIZE_PASS_DEPENDENCY(LoopSimplify) INITIALIZE_PASS_DEPENDENCY(LCSSA) INITIALIZE_PASS_END(LoopDeletion, "loop-deletion", "Delete dead loops", false, false) Pass *llvm::createLoopDeletionPass() { return new LoopDeletion(); } /// isLoopDead - Determined if a loop is dead. This assumes that we've already /// checked for unique exit and exiting blocks, and that the code is in LCSSA /// form. bool LoopDeletion::isLoopDead(Loop *L, SmallVectorImpl<BasicBlock *> &exitingBlocks, SmallVectorImpl<BasicBlock *> &exitBlocks, bool &Changed, BasicBlock *Preheader) { BasicBlock *exitBlock = exitBlocks[0]; // Make sure that all PHI entries coming from the loop are loop invariant. // Because the code is in LCSSA form, any values used outside of the loop // must pass through a PHI in the exit block, meaning that this check is // sufficient to guarantee that no loop-variant values are used outside // of the loop. BasicBlock::iterator BI = exitBlock->begin(); while (PHINode *P = dyn_cast<PHINode>(BI)) { Value *incoming = P->getIncomingValueForBlock(exitingBlocks[0]); // Make sure all exiting blocks produce the same incoming value for the exit // block. If there are different incoming values for different exiting // blocks, then it is impossible to statically determine which value should // be used. 
for (unsigned i = 1, e = exitingBlocks.size(); i < e; ++i) { if (incoming != P->getIncomingValueForBlock(exitingBlocks[i])) return false; } if (Instruction *I = dyn_cast<Instruction>(incoming)) if (!L->makeLoopInvariant(I, Changed, Preheader->getTerminator())) return false; ++BI; } // Make sure that no instructions in the block have potential side-effects. // This includes instructions that could write to memory, and loads that are // marked volatile. This could be made more aggressive by using aliasing // information to identify readonly and readnone calls. for (Loop::block_iterator LI = L->block_begin(), LE = L->block_end(); LI != LE; ++LI) { for (BasicBlock::iterator BI = (*LI)->begin(), BE = (*LI)->end(); BI != BE; ++BI) { if (BI->mayHaveSideEffects()) return false; } } return true; } /// runOnLoop - Remove dead loops, by which we mean loops that do not impact the /// observable behavior of the program other than finite running time. Note /// we do ensure that this never remove a loop that might be infinite, as doing /// so could change the halting/non-halting nature of a program. /// NOTE: This entire process relies pretty heavily on LoopSimplify and LCSSA /// in order to make various safety checks work. bool LoopDeletion::runOnLoop(Loop *L, LPPassManager &LPM) { if (skipOptnoneFunction(L)) return false; // We can only remove the loop if there is a preheader that we can // branch from after removing it. BasicBlock *preheader = L->getLoopPreheader(); if (!preheader) return false; // If LoopSimplify form is not available, stay out of trouble. if (!L->hasDedicatedExits()) return false; // We can't remove loops that contain subloops. If the subloops were dead, // they would already have been removed in earlier executions of this pass. if (L->begin() != L->end()) return false; SmallVector<BasicBlock*, 4> exitingBlocks; L->getExitingBlocks(exitingBlocks); SmallVector<BasicBlock*, 4> exitBlocks; L->getUniqueExitBlocks(exitBlocks); // We require that the loop only have a single exit block. Otherwise, we'd // be in the situation of needing to be able to solve statically which exit // block will be branched to, or trying to preserve the branching logic in // a loop invariant manner. if (exitBlocks.size() != 1) return false; // Finally, we have to check that the loop really is dead. bool Changed = false; if (!isLoopDead(L, exitingBlocks, exitBlocks, Changed, preheader)) return Changed; // Don't remove loops for which we can't solve the trip count. // They could be infinite, in which case we'd be changing program behavior. ScalarEvolution &SE = getAnalysis<ScalarEvolution>(); // HLSL Change begin - remove loops even cannot solve the trip count. //const SCEV *S = SE.getMaxBackedgeTakenCount(L); ////if (isa<SCEVCouldNotCompute>(S)) // return Changed; // HLSL Change end. // Now that we know the removal is safe, remove the loop by changing the // branch from the preheader to go to the single exit block. BasicBlock *exitBlock = exitBlocks[0]; // Because we're deleting a large chunk of code at once, the sequence in which // we remove things is very important to avoid invalidation issues. Don't // mess with this unless you have good reason and know what you're doing. // Tell ScalarEvolution that the loop is deleted. Do this before // deleting the loop so that ScalarEvolution can look at the loop // to determine what it needs to clean up. SE.forgetLoop(L); // Connect the preheader directly to the exit block. 
TerminatorInst *TI = preheader->getTerminator(); TI->replaceUsesOfWith(L->getHeader(), exitBlock); // Rewrite phis in the exit block to get their inputs from // the preheader instead of the exiting block. BasicBlock::iterator BI = exitBlock->begin(); while (PHINode *P = dyn_cast<PHINode>(BI)) { // HLSL Change begin - apply https://reviews.llvm.org/D34516 // Set the zero'th element of Phi to be from the preheader and remove all // other incoming values. Given the loop has dedicated exits, all other // incoming values must be from the exiting blocks. int PredIndex = 0; P->setIncomingBlock(PredIndex, preheader); // Removes all incoming values from all other exiting blocks (including // duplicate values from an exiting block). // Nuke all entries except the zero'th entry which is the preheader entry. // NOTE! We need to remove Incoming Values in the reverse order as done // below, to keep the indices valid for deletion (removeIncomingValues // updates getNumIncomingValues and shifts all values down into the operand // being deleted). for (unsigned i = 0, e = P->getNumIncomingValues() - 1; i != e; ++i) P->removeIncomingValue(e - i, false); assert((P->getNumIncomingValues() == 1 && P->getIncomingBlock(PredIndex) == preheader) && "Should have exactly one value and that's from the preheader!"); ++BI; // HLSL Change end } // Update the dominator tree and remove the instructions and blocks that will // be deleted from the reference counting scheme. DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); SmallVector<DomTreeNode*, 8> ChildNodes; for (Loop::block_iterator LI = L->block_begin(), LE = L->block_end(); LI != LE; ++LI) { // Move all of the block's children to be children of the preheader, which // allows us to remove the domtree entry for the block. ChildNodes.insert(ChildNodes.begin(), DT[*LI]->begin(), DT[*LI]->end()); for (SmallVectorImpl<DomTreeNode *>::iterator DI = ChildNodes.begin(), DE = ChildNodes.end(); DI != DE; ++DI) { DT.changeImmediateDominator(*DI, DT[preheader]); } ChildNodes.clear(); DT.eraseNode(*LI); // Remove the block from the reference counting scheme, so that we can // delete it freely later. (*LI)->dropAllReferences(); } // Erase the instructions and the blocks without having to worry // about ordering because we already dropped the references. // NOTE: This iteration is safe because erasing the block does not remove its // entry from the loop's block list. We do that in the next section. for (Loop::block_iterator LI = L->block_begin(), LE = L->block_end(); LI != LE; ++LI) (*LI)->eraseFromParent(); // Finally, the blocks from loopinfo. This has to happen late because // otherwise our loop iterators won't work. LoopInfo &loopInfo = getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); SmallPtrSet<BasicBlock*, 8> blocks; blocks.insert(L->block_begin(), L->block_end()); for (BasicBlock *BB : blocks) loopInfo.removeBlock(BB); // The last step is to inform the loop pass manager that we've // eliminated this loop. LPM.deleteLoopFromQueue(L); Changed = true; ++NumDeleted; return Changed; }
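As a side note, the structural early-outs in runOnLoop above can be read as one standalone predicate. The helper below is a hypothetical restatement (the name hasDeletableShape is invented) using only the public Loop API; it is not part of the pass.

// Illustrative only: the shape checks LoopDeletion performs before asking
// whether the loop body is actually dead.
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"

static bool hasDeletableShape(llvm::Loop *L) {
  if (!L->getLoopPreheader())   // need a preheader to branch from afterwards
    return false;
  if (!L->hasDedicatedExits())  // relies on LoopSimplify form
    return false;
  if (L->begin() != L->end())   // reject loops that still contain subloops
    return false;
  llvm::SmallVector<llvm::BasicBlock *, 4> ExitBlocks;
  L->getUniqueExitBlocks(ExitBlocks);
  return ExitBlocks.size() == 1; // exactly one unique exit block
}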
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/BDCE.cpp
//===---- BDCE.cpp - Bit-tracking dead code elimination -------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the Bit-Tracking Dead Code Elimination pass. Some // instructions (shifts, some ands, ors, etc.) kill some of their input bits. // We track these dead bits and remove instructions that compute only these // dead bits. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DepthFirstIterator.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/CFG.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/InstIterator.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/IR/Operator.h" #include "llvm/Pass.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; #define DEBUG_TYPE "bdce" STATISTIC(NumRemoved, "Number of instructions removed (unused)"); STATISTIC(NumSimplified, "Number of instructions trivialized (dead bits)"); namespace { struct BDCE : public FunctionPass { static char ID; // Pass identification, replacement for typeid BDCE() : FunctionPass(ID) { initializeBDCEPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function& F) override; void getAnalysisUsage(AnalysisUsage& AU) const override { AU.setPreservesCFG(); AU.addRequired<AssumptionCacheTracker>(); AU.addRequired<DominatorTreeWrapperPass>(); } void determineLiveOperandBits(const Instruction *UserI, const Instruction *I, unsigned OperandNo, const APInt &AOut, APInt &AB, APInt &KnownZero, APInt &KnownOne, APInt &KnownZero2, APInt &KnownOne2); AssumptionCache *AC; DominatorTree *DT; }; } char BDCE::ID = 0; INITIALIZE_PASS_BEGIN(BDCE, "bdce", "Bit-Tracking Dead Code Elimination", false, false) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_END(BDCE, "bdce", "Bit-Tracking Dead Code Elimination", false, false) static bool isAlwaysLive(Instruction *I) { return isa<TerminatorInst>(I) || isa<DbgInfoIntrinsic>(I) || isa<LandingPadInst>(I) || I->mayHaveSideEffects(); } void BDCE::determineLiveOperandBits(const Instruction *UserI, const Instruction *I, unsigned OperandNo, const APInt &AOut, APInt &AB, APInt &KnownZero, APInt &KnownOne, APInt &KnownZero2, APInt &KnownOne2) { unsigned BitWidth = AB.getBitWidth(); // We're called once per operand, but for some instructions, we need to // compute known bits of both operands in order to determine the live bits of // either (when both operands are instructions themselves). We don't, // however, want to do this twice, so we cache the result in APInts that live // in the caller. For the two-relevant-operands case, both operand values are // provided here. 
auto ComputeKnownBits = [&](unsigned BitWidth, const Value *V1, const Value *V2) { const DataLayout &DL = I->getModule()->getDataLayout(); KnownZero = APInt(BitWidth, 0); KnownOne = APInt(BitWidth, 0); computeKnownBits(const_cast<Value *>(V1), KnownZero, KnownOne, DL, 0, AC, UserI, DT); if (V2) { KnownZero2 = APInt(BitWidth, 0); KnownOne2 = APInt(BitWidth, 0); computeKnownBits(const_cast<Value *>(V2), KnownZero2, KnownOne2, DL, 0, AC, UserI, DT); } }; switch (UserI->getOpcode()) { default: break; case Instruction::Call: case Instruction::Invoke: if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(UserI)) switch (II->getIntrinsicID()) { default: break; case Intrinsic::bswap: // The alive bits of the input are the swapped alive bits of // the output. AB = AOut.byteSwap(); break; case Intrinsic::ctlz: if (OperandNo == 0) { // We need some output bits, so we need all bits of the // input to the left of, and including, the leftmost bit // known to be one. ComputeKnownBits(BitWidth, I, nullptr); AB = APInt::getHighBitsSet(BitWidth, std::min(BitWidth, KnownOne.countLeadingZeros()+1)); } break; case Intrinsic::cttz: if (OperandNo == 0) { // We need some output bits, so we need all bits of the // input to the right of, and including, the rightmost bit // known to be one. ComputeKnownBits(BitWidth, I, nullptr); AB = APInt::getLowBitsSet(BitWidth, std::min(BitWidth, KnownOne.countTrailingZeros()+1)); } break; } break; case Instruction::Add: case Instruction::Sub: // Find the highest live output bit. We don't need any more input // bits than that (adds, and thus subtracts, ripple only to the // left). AB = APInt::getLowBitsSet(BitWidth, AOut.getActiveBits()); break; case Instruction::Shl: if (OperandNo == 0) if (ConstantInt *CI = dyn_cast<ConstantInt>(UserI->getOperand(1))) { uint64_t ShiftAmt = CI->getLimitedValue(BitWidth-1); AB = AOut.lshr(ShiftAmt); // If the shift is nuw/nsw, then the high bits are not dead // (because we've promised that they *must* be zero). const ShlOperator *S = cast<ShlOperator>(UserI); if (S->hasNoSignedWrap()) AB |= APInt::getHighBitsSet(BitWidth, ShiftAmt+1); else if (S->hasNoUnsignedWrap()) AB |= APInt::getHighBitsSet(BitWidth, ShiftAmt); } break; case Instruction::LShr: if (OperandNo == 0) if (ConstantInt *CI = dyn_cast<ConstantInt>(UserI->getOperand(1))) { uint64_t ShiftAmt = CI->getLimitedValue(BitWidth-1); AB = AOut.shl(ShiftAmt); // If the shift is exact, then the low bits are not dead // (they must be zero). if (cast<LShrOperator>(UserI)->isExact()) AB |= APInt::getLowBitsSet(BitWidth, ShiftAmt); } break; case Instruction::AShr: if (OperandNo == 0) if (ConstantInt *CI = dyn_cast<ConstantInt>(UserI->getOperand(1))) { uint64_t ShiftAmt = CI->getLimitedValue(BitWidth-1); AB = AOut.shl(ShiftAmt); // Because the high input bit is replicated into the // high-order bits of the result, if we need any of those // bits, then we must keep the highest input bit. if ((AOut & APInt::getHighBitsSet(BitWidth, ShiftAmt)) .getBoolValue()) AB.setBit(BitWidth-1); // If the shift is exact, then the low bits are not dead // (they must be zero). if (cast<AShrOperator>(UserI)->isExact()) AB |= APInt::getLowBitsSet(BitWidth, ShiftAmt); } break; case Instruction::And: AB = AOut; // For bits that are known zero, the corresponding bits in the // other operand are dead (unless they're both zero, in which // case they can't both be dead, so just mark the LHS bits as // dead). 
if (OperandNo == 0) { ComputeKnownBits(BitWidth, I, UserI->getOperand(1)); AB &= ~KnownZero2; } else { if (!isa<Instruction>(UserI->getOperand(0))) ComputeKnownBits(BitWidth, UserI->getOperand(0), I); AB &= ~(KnownZero & ~KnownZero2); } break; case Instruction::Or: AB = AOut; // For bits that are known one, the corresponding bits in the // other operand are dead (unless they're both one, in which // case they can't both be dead, so just mark the LHS bits as // dead). if (OperandNo == 0) { ComputeKnownBits(BitWidth, I, UserI->getOperand(1)); AB &= ~KnownOne2; } else { if (!isa<Instruction>(UserI->getOperand(0))) ComputeKnownBits(BitWidth, UserI->getOperand(0), I); AB &= ~(KnownOne & ~KnownOne2); } break; case Instruction::Xor: case Instruction::PHI: AB = AOut; break; case Instruction::Trunc: AB = AOut.zext(BitWidth); break; case Instruction::ZExt: AB = AOut.trunc(BitWidth); break; case Instruction::SExt: AB = AOut.trunc(BitWidth); // Because the high input bit is replicated into the // high-order bits of the result, if we need any of those // bits, then we must keep the highest input bit. if ((AOut & APInt::getHighBitsSet(AOut.getBitWidth(), AOut.getBitWidth() - BitWidth)) .getBoolValue()) AB.setBit(BitWidth-1); break; case Instruction::Select: if (OperandNo != 0) AB = AOut; break; } } bool BDCE::runOnFunction(Function& F) { if (skipOptnoneFunction(F)) return false; AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); DenseMap<Instruction *, APInt> AliveBits; SmallVector<Instruction*, 128> Worklist; // The set of visited instructions (non-integer-typed only). SmallPtrSet<Instruction*, 128> Visited; // Collect the set of "root" instructions that are known live. for (Instruction &I : inst_range(F)) { if (!isAlwaysLive(&I)) continue; DEBUG(dbgs() << "BDCE: Root: " << I << "\n"); // For integer-valued instructions, set up an initial empty set of alive // bits and add the instruction to the work list. For other instructions // add their operands to the work list (for integer values operands, mark // all bits as live). if (IntegerType *IT = dyn_cast<IntegerType>(I.getType())) { if (!AliveBits.count(&I)) { AliveBits[&I] = APInt(IT->getBitWidth(), 0); Worklist.push_back(&I); } continue; } // Non-integer-typed instructions... for (Use &OI : I.operands()) { if (Instruction *J = dyn_cast<Instruction>(OI)) { if (IntegerType *IT = dyn_cast<IntegerType>(J->getType())) AliveBits[J] = APInt::getAllOnesValue(IT->getBitWidth()); Worklist.push_back(J); } } // To save memory, we don't add I to the Visited set here. Instead, we // check isAlwaysLive on every instruction when searching for dead // instructions later (we need to check isAlwaysLive for the // integer-typed instructions anyway). } // Propagate liveness backwards to operands. while (!Worklist.empty()) { Instruction *UserI = Worklist.pop_back_val(); DEBUG(dbgs() << "BDCE: Visiting: " << *UserI); APInt AOut; if (UserI->getType()->isIntegerTy()) { AOut = AliveBits[UserI]; DEBUG(dbgs() << " Alive Out: " << AOut); } DEBUG(dbgs() << "\n"); if (!UserI->getType()->isIntegerTy()) Visited.insert(UserI); APInt KnownZero, KnownOne, KnownZero2, KnownOne2; // Compute the set of alive bits for each operand. These are anded into the // existing set, if any, and if that changes the set of alive bits, the // operand is added to the work-list. 
for (Use &OI : UserI->operands()) { if (Instruction *I = dyn_cast<Instruction>(OI)) { if (IntegerType *IT = dyn_cast<IntegerType>(I->getType())) { unsigned BitWidth = IT->getBitWidth(); APInt AB = APInt::getAllOnesValue(BitWidth); if (UserI->getType()->isIntegerTy() && !AOut && !isAlwaysLive(UserI)) { AB = APInt(BitWidth, 0); } else { // If all bits of the output are dead, then all bits of the input // Bits of each operand that are used to compute alive bits of the // output are alive, all others are dead. determineLiveOperandBits(UserI, I, OI.getOperandNo(), AOut, AB, KnownZero, KnownOne, KnownZero2, KnownOne2); } // If we've added to the set of alive bits (or the operand has not // been previously visited), then re-queue the operand to be visited // again. APInt ABPrev(BitWidth, 0); auto ABI = AliveBits.find(I); if (ABI != AliveBits.end()) ABPrev = ABI->second; APInt ABNew = AB | ABPrev; if (ABNew != ABPrev || ABI == AliveBits.end()) { AliveBits[I] = std::move(ABNew); Worklist.push_back(I); } } else if (!Visited.count(I)) { Worklist.push_back(I); } } } } bool Changed = false; // The inverse of the live set is the dead set. These are those instructions // which have no side effects and do not influence the control flow or return // value of the function, and may therefore be deleted safely. // NOTE: We reuse the Worklist vector here for memory efficiency. for (Instruction &I : inst_range(F)) { // For live instructions that have all dead bits, first make them dead by // replacing all uses with something else. Then, if they don't need to // remain live (because they have side effects, etc.) we can remove them. if (I.getType()->isIntegerTy()) { auto ABI = AliveBits.find(&I); if (ABI != AliveBits.end()) { if (ABI->second.getBoolValue()) continue; DEBUG(dbgs() << "BDCE: Trivializing: " << I << " (all bits dead)\n"); // FIXME: In theory we could substitute undef here instead of zero. // This should be reconsidered once we settle on the semantics of // undef, poison, etc. Value *Zero = ConstantInt::get(I.getType(), 0); ++NumSimplified; I.replaceAllUsesWith(Zero); Changed = true; } } else if (Visited.count(&I)) { continue; } if (isAlwaysLive(&I)) continue; Worklist.push_back(&I); I.dropAllReferences(); Changed = true; } for (Instruction *&I : Worklist) { ++NumRemoved; I->eraseFromParent(); } return Changed; } FunctionPass *llvm::createBitTrackingDCEPass() { return new BDCE(); }
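The Shl rule in determineLiveOperandBits is easiest to see on bare APInts. The hypothetical helper below (name invented) mirrors it, ignoring the nuw/nsw refinement: bit i of the operand feeds bit i+ShiftAmt of the result, so the live operand bits are just the live result bits shifted back down.

// Illustrative only: live-bit transfer for "shl %x, C" with a constant C.
#include "llvm/ADT/APInt.h"

static llvm::APInt liveShlOperandBits(const llvm::APInt &AliveOut,
                                      unsigned ShiftAmt) {
  // Example: if only bits {2,3} of the result are alive and C == 2, then only
  // bits {0,1} of the shifted operand can influence them.
  return AliveOut.lshr(ShiftAmt);
}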
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/DxilRemoveDeadBlocks.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilRemoveDeadBlocks.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Pass to use value tracker to remove dead blocks. // // // /////////////////////////////////////////////////////////////////////////////// #include "llvm/ADT/SetVector.h" #include "llvm/Analysis/DxilValueCache.h" #include "llvm/IR/CFG.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/Function.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/IR/Operator.h" #include "llvm/Pass.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar.h" #include "dxc/DXIL/DxilMetadataHelper.h" #include "dxc/DXIL/DxilModule.h" #include "dxc/DXIL/DxilOperations.h" #include "dxc/DXIL/DxilUtil.h" #include "dxc/HLSL/DxilNoops.h" #include <unordered_set> using namespace llvm; using namespace hlsl; // Removes BB from PHI nodes in SuccBB, deleting the PHI nodes if empty. static void RemoveIncomingValueFrom(BasicBlock *SuccBB, BasicBlock *BB) { for (auto inst_it = SuccBB->begin(); inst_it != SuccBB->end();) { Instruction *I = &*(inst_it++); if (PHINode *PN = dyn_cast<PHINode>(I)) PN->removeIncomingValue(BB, true); else break; } } struct DeadBlockDeleter { bool Run(Function &F, DxilValueCache *DVC); private: std::unordered_set<BasicBlock *> Seen; std::vector<BasicBlock *> WorkList; void Add(BasicBlock *BB) { if (!Seen.count(BB)) { WorkList.push_back(BB); Seen.insert(BB); } } }; bool DeadBlockDeleter::Run(Function &F, DxilValueCache *DVC) { Seen.clear(); WorkList.clear(); bool Changed = false; Add(&F.getEntryBlock()); // Go through blocks while (WorkList.size()) { BasicBlock *BB = WorkList.back(); WorkList.pop_back(); if (BranchInst *Br = dyn_cast<BranchInst>(BB->getTerminator())) { if (Br->isUnconditional()) { BasicBlock *Succ = Br->getSuccessor(0); Add(Succ); } else { if (ConstantInt *C = DVC->GetConstInt(Br->getCondition())) { bool IsTrue = C->getLimitedValue() != 0; BasicBlock *Succ = Br->getSuccessor(IsTrue ? 0 : 1); BasicBlock *NotSucc = Br->getSuccessor(!IsTrue ? 0 : 1); Add(Succ); // Rewrite conditional branch as unconditional branch if // we don't have structural information that needs it to // be alive. if (!Br->getMetadata(hlsl::DXIL::kDxBreakMDName)) { BranchInst *NewBr = BranchInst::Create(Succ, BB); hlsl::DxilMDHelper::CopyMetadata(*NewBr, *Br); RemoveIncomingValueFrom(NotSucc, BB); Br->eraseFromParent(); Br = nullptr; Changed = true; } } else { Add(Br->getSuccessor(0)); Add(Br->getSuccessor(1)); } } } else if (SwitchInst *Switch = dyn_cast<SwitchInst>(BB->getTerminator())) { Value *Cond = Switch->getCondition(); BasicBlock *Succ = nullptr; // If the condition to Switch is constant, replace Switch with a branch // to the current case successor. if (ConstantInt *ConstCond = DVC->GetConstInt(Cond)) { Succ = hlsl::dxilutil::GetSwitchSuccessorForCond(Switch, ConstCond); } if (Succ) { Add(Succ); // Create branch from BB to Succ that will replace Switch. // This adds BB to preds of Succ. BranchInst *NewBr = BranchInst::Create(Succ, BB); hlsl::DxilMDHelper::CopyMetadata(*NewBr, *Switch); // For any successors we're not going to, remove incoming block BB from // PHI nodes in those successors. 
unsigned numSucc = 0; for (unsigned i = 0; i < Switch->getNumSuccessors(); i++) { BasicBlock *NotSucc = Switch->getSuccessor(i); if (NotSucc != Succ) RemoveIncomingValueFrom(NotSucc, BB); else ++numSucc; } // We're replacing Switch with a single unconditional branch. If Switch // has N cases with the same Succ, we need to remove N-1 incoming values // of BB from the PHI nodes in Succ. This ensures that the preds of Succ // match the ones in its PHIs. for (unsigned i = 1; i < numSucc; i++) { RemoveIncomingValueFrom(Succ, BB); } // Finally, erase Switch, which will remove BB as pred from all // successors. Switch->eraseFromParent(); Switch = nullptr; Changed = true; } else { for (unsigned i = 0; i < Switch->getNumSuccessors(); i++) { Add(Switch->getSuccessor(i)); } } } } if (Seen.size() == F.size()) return Changed; std::vector<BasicBlock *> DeadBlocks; // Reconnect edges and everything for (auto it = F.begin(); it != F.end();) { BasicBlock *BB = &*(it++); if (Seen.count(BB)) continue; DeadBlocks.push_back(BB); // Make predecessors branch somewhere else and fix the phi nodes for (auto pred_it = pred_begin(BB); pred_it != pred_end(BB);) { BasicBlock *PredBB = *(pred_it++); if (!Seen.count(PredBB)) // Don't bother fixing it if it's gonna get // deleted anyway continue; TerminatorInst *TI = PredBB->getTerminator(); if (!TI) continue; BranchInst *Br = dyn_cast<BranchInst>(TI); if (!Br || Br->isUnconditional()) continue; BasicBlock *Other = Br->getSuccessor(0) == BB ? Br->getSuccessor(1) : Br->getSuccessor(0); BranchInst *NewBr = BranchInst::Create(Other, Br); hlsl::DxilMDHelper::CopyMetadata(*NewBr, *Br); Br->eraseFromParent(); } // Fix phi nodes in successors for (auto succ_it = succ_begin(BB); succ_it != succ_end(BB); succ_it++) { BasicBlock *SuccBB = *succ_it; if (!Seen.count(SuccBB)) continue; // Don't bother fixing it if it's gonna get deleted anyway RemoveIncomingValueFrom(SuccBB, BB); } // Erase all instructions in block while (BB->size()) { Instruction *I = &BB->back(); if (!I->getType()->isVoidTy()) I->replaceAllUsesWith(UndefValue::get(I->getType())); I->eraseFromParent(); } } for (BasicBlock *BB : DeadBlocks) { BB->eraseFromParent(); } DVC->ResetUnknowns(); return true; } static bool DeleteDeadBlocks(Function &F, DxilValueCache *DVC) { DeadBlockDeleter Deleter; bool Changed = false; constexpr unsigned MaxIteration = 10; for (unsigned i = 0; i < MaxIteration; i++) { bool LocalChanged = Deleter.Run(F, DVC); Changed |= LocalChanged; if (!LocalChanged) break; } return Changed; } static bool IsDxBreak(Instruction *I) { CallInst *CI = dyn_cast<CallInst>(I); if (!CI) return false; Function *CalledFunction = CI->getCalledFunction(); return CalledFunction && CalledFunction->getName() == hlsl::DXIL::kDxBreakFuncName; } static bool IsIsHelperLane(Instruction *I) { return hlsl::OP::IsDxilOpFuncCallInst(I, hlsl::DXIL::OpCode::IsHelperLane); } static bool ShouldNotReplaceValue(Value *V) { Instruction *I = dyn_cast<Instruction>(V); return I && (IsDxBreak(I) || IsIsHelperLane(I)); } namespace { struct ValueDeleter { std::unordered_set<Value *> Seen; std::vector<Value *> WorkList; void Add(Value *V) { if (!Seen.count(V)) { Seen.insert(V); WorkList.push_back(V); } } bool Run(Function &F, DxilValueCache *DVC) { for (BasicBlock &BB : F) { for (Instruction &I : BB) { if (I.mayHaveSideEffects()) Add(&I); else if (I.isTerminator()) Add(&I); } } while (WorkList.size()) { Value *V = WorkList.back(); WorkList.pop_back(); Instruction *I = dyn_cast<Instruction>(V); if (I) { for (unsigned i = 0; i < 
I->getNumOperands(); i++) { Value *op = I->getOperand(i); if (Instruction *OpI = dyn_cast<Instruction>(op)) { // If this operand could be reduced to a constant, stop adding all // its operands. Otherwise, we could unintentionally hold on to // dead instructions like: // // %my_actually_dead_inst = ... // %my_const = fmul 0.0, %my_actually_dead_inst // // %my_actually_dead_inst should be deleted with the rest of the // non-contributing instructions. if (Constant *C = DVC->GetConstValue(OpI)) I->setOperand(i, C); else Add(OpI); } } } } // Go through all dbg.value and see if we can replace their value with // constant. As we go and delete all non-contributing values, we want to // preserve as much debug info as possible. if (llvm::hasDebugInfo(*F.getParent())) { Function *DbgValueF = F.getParent()->getFunction(Intrinsic::getName(Intrinsic::dbg_value)); if (DbgValueF) { LLVMContext &Ctx = F.getContext(); for (User *U : DbgValueF->users()) { DbgValueInst *DbgVal = cast<DbgValueInst>(U); Value *Val = DbgVal->getValue(); if (Val) { if (Constant *C = DVC->GetConstValue(DbgVal->getValue())) { DbgVal->setArgOperand( 0, MetadataAsValue::get(Ctx, ValueAsMetadata::get(C))); } } } } } bool Changed = false; for (auto bb_it = F.getBasicBlockList().rbegin(), bb_end = F.getBasicBlockList().rend(); bb_it != bb_end; bb_it++) { BasicBlock *BB = &*bb_it; for (auto it = BB->begin(), end = BB->end(); it != end;) { Instruction *I = &*(it++); if (isa<DbgInfoIntrinsic>(I)) continue; if (IsDxBreak(I)) continue; if (!Seen.count(I)) { if (!I->user_empty()) I->replaceAllUsesWith(UndefValue::get(I->getType())); I->eraseFromParent(); Changed = true; } } } return Changed; } // Run }; } // namespace // Iteratively and aggressively delete instructions that don't // contribute to the shader's output. // // Find all things that could impact the program's output, including: // - terminator insts (branches, switches, returns) // - anything with side effect // Recursively find all the instructions they reference // Delete all the rest // // Also replace any values that the value cache determined can be // replaced by a constant, with the exception of a few intrinsics that // we expect to see in the output. 
// static bool DeleteNonContributingValues(Function &F, DxilValueCache *DVC) { ValueDeleter Deleter; DVC->ResetAll(); DVC->SetShouldSkipCallback(ShouldNotReplaceValue); bool Changed = Deleter.Run(F, DVC); DVC->SetShouldSkipCallback(nullptr); return Changed; } static void EnsureDxilModule(Module *M) { if (M->HasDxilModule()) return; for (Function &F : *M) { if (OP::IsDxilOpFunc(&F)) { M->GetOrCreateDxilModule(); break; } } } namespace { struct DxilRemoveDeadBlocks : public FunctionPass { static char ID; DxilRemoveDeadBlocks() : FunctionPass(ID) { initializeDxilRemoveDeadBlocksPass(*PassRegistry::getPassRegistry()); } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<DxilValueCache>(); } bool runOnFunction(Function &F) override { DxilValueCache *DVC = &getAnalysis<DxilValueCache>(); EnsureDxilModule(F.getParent()); // Ensure dxil module is available for DVC bool Changed = false; Changed |= hlsl::dxilutil::DeleteDeadAllocas(F); Changed |= DeleteDeadBlocks(F, DVC); Changed |= DeleteNonContributingValues(F, DVC); return Changed; } }; } // namespace char DxilRemoveDeadBlocks::ID; Pass *llvm::createDxilRemoveDeadBlocksPass() { return new DxilRemoveDeadBlocks(); } INITIALIZE_PASS_BEGIN(DxilRemoveDeadBlocks, "dxil-remove-dead-blocks", "DXIL Remove Dead Blocks", false, false) INITIALIZE_PASS_DEPENDENCY(DxilValueCache) INITIALIZE_PASS_END(DxilRemoveDeadBlocks, "dxil-remove-dead-blocks", "DXIL Remove Dead Blocks", false, false)
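The switch folding above hinges on mapping a constant condition to a single successor. A plausible stand-in for hlsl::dxilutil::GetSwitchSuccessorForCond, written against core SwitchInst API only, is sketched below; this is an illustration, not the actual helper's implementation.

// Illustrative only: pick the successor a switch would take for a known
// constant condition; findCaseValue falls back to the default destination
// when no case matches.
#include "llvm/IR/Instructions.h"

static llvm::BasicBlock *successorForConstCond(llvm::SwitchInst *Switch,
                                               llvm::ConstantInt *Cond) {
  return Switch->findCaseValue(Cond).getCaseSuccessor();
}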
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/LoadCombine.cpp
//===- LoadCombine.cpp - Combine Adjacent Loads ---------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// \file /// This transformation combines adjacent loads. /// //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/AliasSetTracker.h" #include "llvm/Analysis/TargetFolder.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Support/Debug.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; #define DEBUG_TYPE "load-combine" STATISTIC(NumLoadsAnalyzed, "Number of loads analyzed for combining"); STATISTIC(NumLoadsCombined, "Number of loads combined"); namespace { struct PointerOffsetPair { Value *Pointer; uint64_t Offset; }; struct LoadPOPPair { LoadPOPPair() = default; LoadPOPPair(LoadInst *L, PointerOffsetPair P, unsigned O) : Load(L), POP(P), InsertOrder(O) {} LoadInst *Load; PointerOffsetPair POP; /// \brief The new load needs to be created before the first load in IR order. unsigned InsertOrder; }; class LoadCombine : public BasicBlockPass { LLVMContext *C; AliasAnalysis *AA; public: LoadCombine() : BasicBlockPass(ID), C(nullptr), AA(nullptr) { initializeSROAPass(*PassRegistry::getPassRegistry()); } using llvm::Pass::doInitialization; bool doInitialization(Function &) override; bool runOnBasicBlock(BasicBlock &BB) override; void getAnalysisUsage(AnalysisUsage &AU) const override; StringRef getPassName() const override { return "LoadCombine"; } static char ID; typedef IRBuilder<true, TargetFolder> BuilderTy; private: BuilderTy *Builder; PointerOffsetPair getPointerOffsetPair(LoadInst &); bool combineLoads(DenseMap<const Value *, SmallVector<LoadPOPPair, 8>> &); bool aggregateLoads(SmallVectorImpl<LoadPOPPair> &); bool combineLoads(SmallVectorImpl<LoadPOPPair> &); }; } bool LoadCombine::doInitialization(Function &F) { DEBUG(dbgs() << "LoadCombine function: " << F.getName() << "\n"); C = &F.getContext(); return true; } PointerOffsetPair LoadCombine::getPointerOffsetPair(LoadInst &LI) { PointerOffsetPair POP; POP.Pointer = LI.getPointerOperand(); POP.Offset = 0; while (isa<BitCastInst>(POP.Pointer) || isa<GetElementPtrInst>(POP.Pointer)) { if (auto *GEP = dyn_cast<GetElementPtrInst>(POP.Pointer)) { auto &DL = LI.getModule()->getDataLayout(); unsigned BitWidth = DL.getPointerTypeSizeInBits(GEP->getType()); APInt Offset(BitWidth, 0); if (GEP->accumulateConstantOffset(DL, Offset)) POP.Offset += Offset.getZExtValue(); else // Can't handle GEPs with variable indices. 
return POP; POP.Pointer = GEP->getPointerOperand(); } else if (auto *BC = dyn_cast<BitCastInst>(POP.Pointer)) POP.Pointer = BC->getOperand(0); } return POP; } bool LoadCombine::combineLoads( DenseMap<const Value *, SmallVector<LoadPOPPair, 8>> &LoadMap) { bool Combined = false; for (auto &Loads : LoadMap) { if (Loads.second.size() < 2) continue; std::sort(Loads.second.begin(), Loads.second.end(), [](const LoadPOPPair &A, const LoadPOPPair &B) { return A.POP.Offset < B.POP.Offset; }); if (aggregateLoads(Loads.second)) Combined = true; } return Combined; } /// \brief Try to aggregate loads from a sorted list of loads to be combined. /// /// It is guaranteed that no writes occur between any of the loads. All loads /// have the same base pointer. There are at least two loads. bool LoadCombine::aggregateLoads(SmallVectorImpl<LoadPOPPair> &Loads) { assert(Loads.size() >= 2 && "Insufficient loads!"); LoadInst *BaseLoad = nullptr; SmallVector<LoadPOPPair, 8> AggregateLoads; bool Combined = false; uint64_t PrevOffset = -1ull; uint64_t PrevSize = 0; for (auto &L : Loads) { if (PrevOffset == -1ull) { BaseLoad = L.Load; PrevOffset = L.POP.Offset; PrevSize = L.Load->getModule()->getDataLayout().getTypeStoreSize( L.Load->getType()); AggregateLoads.push_back(L); continue; } if (L.Load->getAlignment() > BaseLoad->getAlignment()) continue; if (L.POP.Offset > PrevOffset + PrevSize) { // No other load will be combinable if (combineLoads(AggregateLoads)) Combined = true; AggregateLoads.clear(); PrevOffset = -1; continue; } if (L.POP.Offset != PrevOffset + PrevSize) // This load is offset less than the size of the last load. // FIXME: We may want to handle this case. continue; PrevOffset = L.POP.Offset; PrevSize = L.Load->getModule()->getDataLayout().getTypeStoreSize( L.Load->getType()); AggregateLoads.push_back(L); } if (combineLoads(AggregateLoads)) Combined = true; return Combined; } /// \brief Given a list of combinable load. Combine the maximum number of them. bool LoadCombine::combineLoads(SmallVectorImpl<LoadPOPPair> &Loads) { // Remove loads from the end while the size is not a power of 2. unsigned TotalSize = 0; for (const auto &L : Loads) TotalSize += L.Load->getType()->getPrimitiveSizeInBits(); while (TotalSize != 0 && !isPowerOf2_32(TotalSize)) TotalSize -= Loads.pop_back_val().Load->getType()->getPrimitiveSizeInBits(); if (Loads.size() < 2) return false; DEBUG({ dbgs() << "***** Combining Loads ******\n"; for (const auto &L : Loads) { dbgs() << L.POP.Offset << ": " << *L.Load << "\n"; } }); // Find first load. This is where we put the new load. 
LoadPOPPair FirstLP; FirstLP.InsertOrder = -1u; for (const auto &L : Loads) if (L.InsertOrder < FirstLP.InsertOrder) FirstLP = L; unsigned AddressSpace = FirstLP.POP.Pointer->getType()->getPointerAddressSpace(); Builder->SetInsertPoint(FirstLP.Load); Value *Ptr = Builder->CreateConstGEP1_64( Builder->CreatePointerCast(Loads[0].POP.Pointer, Builder->getInt8PtrTy(AddressSpace)), Loads[0].POP.Offset); LoadInst *NewLoad = new LoadInst( Builder->CreatePointerCast( Ptr, PointerType::get(IntegerType::get(Ptr->getContext(), TotalSize), Ptr->getType()->getPointerAddressSpace())), Twine(Loads[0].Load->getName()) + ".combined", false, Loads[0].Load->getAlignment(), FirstLP.Load); for (const auto &L : Loads) { Builder->SetInsertPoint(L.Load); Value *V = Builder->CreateExtractInteger( L.Load->getModule()->getDataLayout(), NewLoad, cast<IntegerType>(L.Load->getType()), L.POP.Offset - Loads[0].POP.Offset, "combine.extract"); L.Load->replaceAllUsesWith(V); } NumLoadsCombined = NumLoadsCombined + Loads.size(); return true; } bool LoadCombine::runOnBasicBlock(BasicBlock &BB) { if (skipOptnoneFunction(BB)) return false; AA = &getAnalysis<AliasAnalysis>(); IRBuilder<true, TargetFolder> TheBuilder( BB.getContext(), TargetFolder(BB.getModule()->getDataLayout())); Builder = &TheBuilder; DenseMap<const Value *, SmallVector<LoadPOPPair, 8>> LoadMap; AliasSetTracker AST(*AA); bool Combined = false; unsigned Index = 0; for (auto &I : BB) { if (I.mayThrow() || (I.mayWriteToMemory() && AST.containsUnknown(&I))) { if (combineLoads(LoadMap)) Combined = true; LoadMap.clear(); AST.clear(); continue; } LoadInst *LI = dyn_cast<LoadInst>(&I); if (!LI) continue; ++NumLoadsAnalyzed; if (!LI->isSimple() || !LI->getType()->isIntegerTy()) continue; auto POP = getPointerOffsetPair(*LI); if (!POP.Pointer) continue; LoadMap[POP.Pointer].push_back(LoadPOPPair(LI, POP, Index++)); AST.add(LI); } if (combineLoads(LoadMap)) Combined = true; return Combined; } void LoadCombine::getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesCFG(); AU.addRequired<AliasAnalysis>(); AU.addPreserved<AliasAnalysis>(); } char LoadCombine::ID = 0; BasicBlockPass *llvm::createLoadCombinePass() { return new LoadCombine(); } INITIALIZE_PASS_BEGIN(LoadCombine, "load-combine", "Combine Adjacent Loads", false, false) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_END(LoadCombine, "load-combine", "Combine Adjacent Loads", false, false)
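The pointer/offset bookkeeping in getPointerOffsetPair boils down to accumulateConstantOffset. A hypothetical standalone version for a single GEP (names invented) looks like this; it only restates the calls the pass itself makes while walking back to the base pointer.

// Illustrative only: compute the constant byte offset of one GEP, or report
// failure if any index is non-constant.
#include "llvm/ADT/APInt.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"

static bool constantByteOffset(llvm::GetElementPtrInst *GEP,
                               const llvm::DataLayout &DL, uint64_t &Offset) {
  unsigned BitWidth = DL.getPointerTypeSizeInBits(GEP->getType());
  llvm::APInt Acc(BitWidth, 0);
  if (!GEP->accumulateConstantOffset(DL, Acc))
    return false; // a variable index makes the offset unknown
  Offset = Acc.getZExtValue();
  return true;
}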
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/Scalarizer.cpp
//===--- Scalarizer.cpp - Scalarize vector operations ---------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass converts vector operations into scalar operations, in order // to expose optimization opportunities on the individual scalar operations. // It is mainly intended for targets that do not have vector units, but it // may also be useful for revectorizing code to different vector widths. // //===----------------------------------------------------------------------===// #include "llvm/ADT/STLExtras.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/InstVisitor.h" #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/IR/DebugInfo.h" // HLSL Change -debug info in scalarizer. #include "llvm/IR/DIBuilder.h" // HLSL Change -debug info in scalarizer. using namespace llvm; #define DEBUG_TYPE "scalarizer" namespace { // Used to store the scattered form of a vector. typedef SmallVector<Value *, 8> ValueVector; // Used to map a vector Value to its scattered form. We use std::map // because we want iterators to persist across insertion and because the // values are relatively large. typedef std::map<Value *, ValueVector> ScatterMap; // Lists Instructions that have been replaced with scalar implementations, // along with a pointer to their scattered forms. typedef SmallVector<std::pair<Instruction *, ValueVector *>, 16> GatherList; // Provides a very limited vector-like interface for lazily accessing one // component of a scattered vector or vector pointer. class Scatterer { public: bool AllowFolding = false; // HLSL Change Scatterer() {} // Scatter V into Size components. If new instructions are needed, // insert them before BBI in BB. If Cache is nonnull, use it to cache // the results. #if 0 // HLSL Change Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v, ValueVector *cachePtr = nullptr); #else // HLSL Change Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v, bool AllowFolding, ValueVector *cachePtr = nullptr); #endif // HLSL Change // Return component I, creating a new Value for it if necessary. Value *operator[](unsigned I); // Return the number of components. unsigned size() const { return Size; } private: BasicBlock *BB; BasicBlock::iterator BBI; Value *V; ValueVector *CachePtr; PointerType *PtrTy; ValueVector Tmp; unsigned Size; }; // FCmpSpliiter(FCI)(Builder, X, Y, Name) uses Builder to create an FCmp // called Name that compares X and Y in the same way as FCI. struct FCmpSplitter { FCmpSplitter(FCmpInst &fci) : FCI(fci) {} Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1, const Twine &Name) const { Value *Cmp = Builder.CreateFCmp(FCI.getPredicate(), Op0, Op1, Name); // HLSL Change Begins -Transfer FPMath flag. if (Instruction *FPMath = dyn_cast<Instruction>(Cmp)) { FPMath->copyFastMathFlags(FCI.getFastMathFlags()); } // HLSL Change Ends return Cmp; } FCmpInst &FCI; }; // ICmpSpliiter(ICI)(Builder, X, Y, Name) uses Builder to create an ICmp // called Name that compares X and Y in the same way as ICI. 
struct ICmpSplitter { ICmpSplitter(ICmpInst &ici) : ICI(ici) {} Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1, const Twine &Name) const { return Builder.CreateICmp(ICI.getPredicate(), Op0, Op1, Name); } ICmpInst &ICI; }; // BinarySpliiter(BO)(Builder, X, Y, Name) uses Builder to create // a binary operator like BO called Name with operands X and Y. struct BinarySplitter { BinarySplitter(BinaryOperator &bo) : BO(bo) {} Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1, const Twine &Name) const { Value *BinOp = Builder.CreateBinOp(BO.getOpcode(), Op0, Op1, Name); // HLSL Change Begins -Transfer FPMath flag. if (isa<FPMathOperator>(&BO)) { if (Instruction *FPMath = dyn_cast<Instruction>(BinOp)) { FPMath->copyFastMathFlags(BO.getFastMathFlags()); } } // HLSL Change Ends return BinOp; } BinaryOperator &BO; }; // Information about a load or store that we're scalarizing. struct VectorLayout { VectorLayout() : VecTy(nullptr), ElemTy(nullptr), VecAlign(0), ElemSize(0) {} // Return the alignment of element I. uint64_t getElemAlign(unsigned I) { return MinAlign(VecAlign, I * ElemSize); } // The type of the vector. VectorType *VecTy; // The type of each element. Type *ElemTy; // The alignment of the vector. uint64_t VecAlign; // The size of each element. uint64_t ElemSize; }; class Scalarizer : public FunctionPass, public InstVisitor<Scalarizer, bool> { public: static char ID; // HLSL Change Begin bool AllowFolding = false; Scalarizer(bool AllowFolding) : FunctionPass(ID), AllowFolding(AllowFolding) { initializeScalarizerPass(*PassRegistry::getPassRegistry()); } // HLSL Change End Scalarizer() : FunctionPass(ID) { initializeScalarizerPass(*PassRegistry::getPassRegistry()); } bool doInitialization(Module &M) override; bool runOnFunction(Function &F) override; // InstVisitor methods. They return true if the instruction was scalarized, // false if nothing changed. bool visitInstruction(Instruction &) { return false; } bool visitSelectInst(SelectInst &SI); bool visitICmpInst(ICmpInst &); bool visitFCmpInst(FCmpInst &); bool visitBinaryOperator(BinaryOperator &); bool visitGetElementPtrInst(GetElementPtrInst &); bool visitCastInst(CastInst &); bool visitBitCastInst(BitCastInst &); bool visitShuffleVectorInst(ShuffleVectorInst &); bool visitPHINode(PHINode &); bool visitLoadInst(LoadInst &); bool visitStoreInst(StoreInst &); static void registerOptions() { // This is disabled by default because having separate loads and stores // makes it more likely that the -combiner-alias-analysis limits will be // reached. 
OptionRegistry::registerOption<bool, Scalarizer, &Scalarizer::ScalarizeLoadStore>( "scalarize-load-store", "Allow the scalarizer pass to scalarize loads and store", false); } private: Scatterer scatter(Instruction *, Value *); void gather(Instruction *, const ValueVector &); bool canTransferMetadata(unsigned Kind); void transferMetadata(Instruction *, const ValueVector &); bool getVectorLayout(Type *, unsigned, VectorLayout &, const DataLayout &); bool finish(); template<typename T> bool splitBinary(Instruction &, const T &); ScatterMap Scattered; GatherList Gathered; unsigned ParallelLoopAccessMDKind; bool ScalarizeLoadStore; }; char Scalarizer::ID = 0; } // end anonymous namespace INITIALIZE_PASS_WITH_OPTIONS(Scalarizer, "scalarizer", "Scalarize vector operations", false, false) #if 0 // HLSL Change Scatterer::Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v, ValueVector *cachePtr) : BB(bb), BBI(bbi), V(v), CachePtr(cachePtr) { #else // HLSL Change Scatterer::Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v, bool AllowFolding, ValueVector *cachePtr) : AllowFolding(AllowFolding), BB(bb), BBI(bbi), V(v), CachePtr(cachePtr) { #endif // HLSL Change Type *Ty = V->getType(); PtrTy = dyn_cast<PointerType>(Ty); if (PtrTy) Ty = PtrTy->getElementType(); Size = Ty->getVectorNumElements(); if (!CachePtr) Tmp.resize(Size, nullptr); else if (CachePtr->empty()) CachePtr->resize(Size, nullptr); else assert(Size == CachePtr->size() && "Inconsistent vector sizes"); } // Return component I, creating a new Value for it if necessary. Value *Scatterer::operator[](unsigned I) { ValueVector &CV = (CachePtr ? *CachePtr : Tmp); // Try to reuse a previous value. if (CV[I]) return CV[I]; IRBuilder<> Builder(BB, BBI); Builder.AllowFolding = AllowFolding; // HLSL Change if (PtrTy) { if (!CV[0]) { Type *Ty = PointerType::get(PtrTy->getElementType()->getVectorElementType(), PtrTy->getAddressSpace()); CV[0] = Builder.CreateBitCast(V, Ty, V->getName() + ".i0"); } if (I != 0) CV[I] = Builder.CreateConstGEP1_32(nullptr, CV[0], I, V->getName() + ".i" + Twine(I)); } else { // Search through a chain of InsertElementInsts looking for element I. // Record other elements in the cache. The new V is still suitable // for all uncached indices. for (;;) { InsertElementInst *Insert = dyn_cast<InsertElementInst>(V); if (!Insert) break; ConstantInt *Idx = dyn_cast<ConstantInt>(Insert->getOperand(2)); if (!Idx) break; unsigned J = Idx->getZExtValue(); V = Insert->getOperand(0); if (I == J) { CV[J] = Insert->getOperand(1); return CV[J]; } else if (!CV[J]) { // Only cache the first entry we find for each index we're not actively // searching for. This prevents us from going too far up the chain and // caching incorrect entries. 
CV[J] = Insert->getOperand(1); } } CV[I] = Builder.CreateExtractElement(V, Builder.getInt32(I), V->getName() + ".i" + Twine(I)); } return CV[I]; } bool Scalarizer::doInitialization(Module &M) { ParallelLoopAccessMDKind = M.getContext().getMDKindID("llvm.mem.parallel_loop_access"); ScalarizeLoadStore = M.getContext().getOption<bool, Scalarizer, &Scalarizer::ScalarizeLoadStore>(); return false; } bool Scalarizer::runOnFunction(Function &F) { for (Function::iterator BBI = F.begin(), BBE = F.end(); BBI != BBE; ++BBI) { BasicBlock *BB = BBI; for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) { Instruction *I = II; bool Done = visit(I); ++II; if (Done && I->getType()->isVoidTy()) I->eraseFromParent(); } } return finish(); } // Return a scattered form of V that can be accessed by Point. V must be a // vector or a pointer to a vector. Scatterer Scalarizer::scatter(Instruction *Point, Value *V) { if (Argument *VArg = dyn_cast<Argument>(V)) { // Put the scattered form of arguments in the entry block, // so that it can be used everywhere. Function *F = VArg->getParent(); BasicBlock *BB = &F->getEntryBlock(); // HLSL Change - Begin // return Scatterer(BB, BB->begin(), V, &Scattered[V]); auto InsertPoint = BB->begin(); while (InsertPoint != BB->end() && isa<DbgInfoIntrinsic>(InsertPoint)) InsertPoint++; return Scatterer(BB, InsertPoint, V, AllowFolding, &Scattered[V]); // HLSL Change - End } if (Instruction *VOp = dyn_cast<Instruction>(V)) { // Put the scattered form of an instruction directly after the // instruction. BasicBlock *BB = VOp->getParent(); #if 0 // HLSL Change return Scatterer(BB, std::next(BasicBlock::iterator(VOp)), V, &Scattered[V]); #else // HLSL Change return Scatterer(BB, std::next(BasicBlock::iterator(VOp)), V, AllowFolding, &Scattered[V]); #endif // HLSL Change } // HLSL Change - Begin // Allow constant folding for Constant cases, so we don't // put an instruction before a PHI node. if (Constant *C = dyn_cast<Constant>(V)) { if (isa<PHINode>(Point)) { return Scatterer(Point->getParent(), Point, V, /* allowFolding */ true, &Scattered[V]); } } // HLSL Change - End // In the fallback case, just put the scattered before Point and // keep the result local to Point. // return Scatterer(Point->getParent(), Point, V); // HLSL Change return Scatterer(Point->getParent(), Point, V, AllowFolding); } // Replace Op with the gathered form of the components in CV. Defer the // deletion of Op and creation of the gathered form to the end of the pass, // so that we can avoid creating the gathered form if all uses of Op are // replaced with uses of CV. void Scalarizer::gather(Instruction *Op, const ValueVector &CV) { // Since we're not deleting Op yet, stub out its operands, so that it // doesn't make anything live unnecessarily. for (unsigned I = 0, E = Op->getNumOperands(); I != E; ++I) Op->setOperand(I, UndefValue::get(Op->getOperand(I)->getType())); transferMetadata(Op, CV); // If we already have a scattered form of Op (created from ExtractElements // of Op itself), replace them with the new form. ValueVector &SV = Scattered[Op]; if (!SV.empty()) { for (unsigned I = 0, E = SV.size(); I != E; ++I) { Instruction *Old = dyn_cast_or_null<Instruction>(SV[I]); // HLSL Change Begin - skip unused scatter elt. if (!Old) continue; // HLSL Change End. 
CV[I]->takeName(Old); Old->replaceAllUsesWith(CV[I]); Old->eraseFromParent(); } } SV = CV; Gathered.push_back(GatherList::value_type(Op, &SV)); } // Return true if it is safe to transfer the given metadata tag from // vector to scalar instructions. bool Scalarizer::canTransferMetadata(unsigned Tag) { return (Tag == LLVMContext::MD_tbaa || Tag == LLVMContext::MD_fpmath || Tag == LLVMContext::MD_tbaa_struct || Tag == LLVMContext::MD_invariant_load || Tag == LLVMContext::MD_alias_scope || Tag == LLVMContext::MD_noalias || Tag == ParallelLoopAccessMDKind); } // Transfer metadata from Op to the instructions in CV if it is known // to be safe to do so. void Scalarizer::transferMetadata(Instruction *Op, const ValueVector &CV) { SmallVector<std::pair<unsigned, MDNode *>, 4> MDs; Op->getAllMetadataOtherThanDebugLoc(MDs); for (unsigned I = 0, E = CV.size(); I != E; ++I) { if (Instruction *New = dyn_cast<Instruction>(CV[I])) { for (SmallVectorImpl<std::pair<unsigned, MDNode *>>::iterator MI = MDs.begin(), ME = MDs.end(); MI != ME; ++MI) if (canTransferMetadata(MI->first)) New->setMetadata(MI->first, MI->second); //New->setDebugLoc(Op->getDebugLoc()); // HLSL Change } } } // Try to fill in Layout from Ty, returning true on success. Alignment is // the alignment of the vector, or 0 if the ABI default should be used. bool Scalarizer::getVectorLayout(Type *Ty, unsigned Alignment, VectorLayout &Layout, const DataLayout &DL) { // Make sure we're dealing with a vector. Layout.VecTy = dyn_cast<VectorType>(Ty); if (!Layout.VecTy) return false; // Check that we're dealing with full-byte elements. Layout.ElemTy = Layout.VecTy->getElementType(); if (DL.getTypeSizeInBits(Layout.ElemTy) != DL.getTypeStoreSizeInBits(Layout.ElemTy)) return false; if (Alignment) Layout.VecAlign = Alignment; else Layout.VecAlign = DL.getABITypeAlignment(Layout.VecTy); Layout.ElemSize = DL.getTypeStoreSize(Layout.ElemTy); return true; } // Scalarize two-operand instruction I, using Split(Builder, X, Y, Name) // to create an instruction like I with operands X and Y and name Name. 
template<typename Splitter> bool Scalarizer::splitBinary(Instruction &I, const Splitter &Split) { VectorType *VT = dyn_cast<VectorType>(I.getType()); if (!VT) return false; unsigned NumElems = VT->getNumElements(); IRBuilder<> Builder(I.getParent(), &I); Builder.AllowFolding = AllowFolding; // HLSL Change Scatterer Op0 = scatter(&I, I.getOperand(0)); Scatterer Op1 = scatter(&I, I.getOperand(1)); assert(Op0.size() == NumElems && "Mismatched binary operation"); assert(Op1.size() == NumElems && "Mismatched binary operation"); ValueVector Res; Res.resize(NumElems); for (unsigned Elem = 0; Elem < NumElems; ++Elem) Res[Elem] = Split(Builder, Op0[Elem], Op1[Elem], I.getName() + ".i" + Twine(Elem)); gather(&I, Res); return true; } bool Scalarizer::visitSelectInst(SelectInst &SI) { VectorType *VT = dyn_cast<VectorType>(SI.getType()); if (!VT) return false; unsigned NumElems = VT->getNumElements(); IRBuilder<> Builder(SI.getParent(), &SI); Builder.AllowFolding = this->AllowFolding; // HLSL Change Scatterer Op1 = scatter(&SI, SI.getOperand(1)); Scatterer Op2 = scatter(&SI, SI.getOperand(2)); assert(Op1.size() == NumElems && "Mismatched select"); assert(Op2.size() == NumElems && "Mismatched select"); ValueVector Res; Res.resize(NumElems); if (SI.getOperand(0)->getType()->isVectorTy()) { Scatterer Op0 = scatter(&SI, SI.getOperand(0)); assert(Op0.size() == NumElems && "Mismatched select"); for (unsigned I = 0; I < NumElems; ++I) Res[I] = Builder.CreateSelect(Op0[I], Op1[I], Op2[I], SI.getName() + ".i" + Twine(I)); } else { Value *Op0 = SI.getOperand(0); for (unsigned I = 0; I < NumElems; ++I) Res[I] = Builder.CreateSelect(Op0, Op1[I], Op2[I], SI.getName() + ".i" + Twine(I)); } gather(&SI, Res); return true; } bool Scalarizer::visitICmpInst(ICmpInst &ICI) { return splitBinary(ICI, ICmpSplitter(ICI)); } bool Scalarizer::visitFCmpInst(FCmpInst &FCI) { return splitBinary(FCI, FCmpSplitter(FCI)); } bool Scalarizer::visitBinaryOperator(BinaryOperator &BO) { return splitBinary(BO, BinarySplitter(BO)); } bool Scalarizer::visitGetElementPtrInst(GetElementPtrInst &GEPI) { VectorType *VT = dyn_cast<VectorType>(GEPI.getType()); if (!VT) return false; IRBuilder<> Builder(GEPI.getParent(), &GEPI); Builder.AllowFolding = this->AllowFolding; // HLSL Change unsigned NumElems = VT->getNumElements(); unsigned NumIndices = GEPI.getNumIndices(); Scatterer Base = scatter(&GEPI, GEPI.getOperand(0)); SmallVector<Scatterer, 8> Ops; Ops.resize(NumIndices); for (unsigned I = 0; I < NumIndices; ++I) Ops[I] = scatter(&GEPI, GEPI.getOperand(I + 1)); ValueVector Res; Res.resize(NumElems); for (unsigned I = 0; I < NumElems; ++I) { SmallVector<Value *, 8> Indices; Indices.resize(NumIndices); for (unsigned J = 0; J < NumIndices; ++J) Indices[J] = Ops[J][I]; Res[I] = Builder.CreateGEP(GEPI.getSourceElementType(), Base[I], Indices, GEPI.getName() + ".i" + Twine(I)); if (GEPI.isInBounds()) if (GetElementPtrInst *NewGEPI = dyn_cast<GetElementPtrInst>(Res[I])) NewGEPI->setIsInBounds(); } gather(&GEPI, Res); return true; } bool Scalarizer::visitCastInst(CastInst &CI) { VectorType *VT = dyn_cast<VectorType>(CI.getDestTy()); if (!VT) return false; unsigned NumElems = VT->getNumElements(); IRBuilder<> Builder(CI.getParent(), &CI); Builder.AllowFolding = this->AllowFolding; // HLSL Change Scatterer Op0 = scatter(&CI, CI.getOperand(0)); assert(Op0.size() == NumElems && "Mismatched cast"); ValueVector Res; Res.resize(NumElems); for (unsigned I = 0; I < NumElems; ++I) Res[I] = Builder.CreateCast(CI.getOpcode(), Op0[I], VT->getElementType(), 
CI.getName() + ".i" + Twine(I)); gather(&CI, Res); return true; } bool Scalarizer::visitBitCastInst(BitCastInst &BCI) { VectorType *DstVT = dyn_cast<VectorType>(BCI.getDestTy()); VectorType *SrcVT = dyn_cast<VectorType>(BCI.getSrcTy()); if (!DstVT || !SrcVT) return false; unsigned DstNumElems = DstVT->getNumElements(); unsigned SrcNumElems = SrcVT->getNumElements(); IRBuilder<> Builder(BCI.getParent(), &BCI); Builder.AllowFolding = this->AllowFolding; // HLSL Change Scatterer Op0 = scatter(&BCI, BCI.getOperand(0)); ValueVector Res; Res.resize(DstNumElems); if (DstNumElems == SrcNumElems) { for (unsigned I = 0; I < DstNumElems; ++I) Res[I] = Builder.CreateBitCast(Op0[I], DstVT->getElementType(), BCI.getName() + ".i" + Twine(I)); } else if (DstNumElems > SrcNumElems) { // <M x t1> -> <N*M x t2>. Convert each t1 to <N x t2> and copy the // individual elements to the destination. unsigned FanOut = DstNumElems / SrcNumElems; Type *MidTy = VectorType::get(DstVT->getElementType(), FanOut); unsigned ResI = 0; for (unsigned Op0I = 0; Op0I < SrcNumElems; ++Op0I) { Value *V = Op0[Op0I]; Instruction *VI; // Look through any existing bitcasts before converting to <N x t2>. // In the best case, the resulting conversion might be a no-op. while ((VI = dyn_cast<Instruction>(V)) && VI->getOpcode() == Instruction::BitCast) V = VI->getOperand(0); V = Builder.CreateBitCast(V, MidTy, V->getName() + ".cast"); Scatterer Mid = scatter(&BCI, V); for (unsigned MidI = 0; MidI < FanOut; ++MidI) Res[ResI++] = Mid[MidI]; } } else { // <N*M x t1> -> <M x t2>. Convert each group of <N x t1> into a t2. unsigned FanIn = SrcNumElems / DstNumElems; Type *MidTy = VectorType::get(SrcVT->getElementType(), FanIn); unsigned Op0I = 0; for (unsigned ResI = 0; ResI < DstNumElems; ++ResI) { Value *V = UndefValue::get(MidTy); for (unsigned MidI = 0; MidI < FanIn; ++MidI) V = Builder.CreateInsertElement(V, Op0[Op0I++], Builder.getInt32(MidI), BCI.getName() + ".i" + Twine(ResI) + ".upto" + Twine(MidI)); Res[ResI] = Builder.CreateBitCast(V, DstVT->getElementType(), BCI.getName() + ".i" + Twine(ResI)); } } gather(&BCI, Res); return true; } bool Scalarizer::visitShuffleVectorInst(ShuffleVectorInst &SVI) { VectorType *VT = dyn_cast<VectorType>(SVI.getType()); if (!VT) return false; unsigned NumElems = VT->getNumElements(); Scatterer Op0 = scatter(&SVI, SVI.getOperand(0)); Scatterer Op1 = scatter(&SVI, SVI.getOperand(1)); ValueVector Res; Res.resize(NumElems); for (unsigned I = 0; I < NumElems; ++I) { int Selector = SVI.getMaskValue(I); if (Selector < 0) Res[I] = UndefValue::get(VT->getElementType()); else if (unsigned(Selector) < Op0.size()) Res[I] = Op0[Selector]; else Res[I] = Op1[Selector - Op0.size()]; // HLSL Change Begins: (fix bug in upstream llvm) if (ExtractElementInst *EA = dyn_cast<ExtractElementInst>(Res[I])) { // Clone extractelement here, since it is associated with another inst. // Otherwise it will be added to our Gather, and after the incoming // instruction is processed, it will be replaced without updating our // Gather entry. This dead instruction will be accessed by finish(), // causing assert or crash. 
Res[I] = IRBuilder<>(&SVI).Insert(EA->clone()); } // HLSL Change Ends } gather(&SVI, Res); return true; } bool Scalarizer::visitPHINode(PHINode &PHI) { VectorType *VT = dyn_cast<VectorType>(PHI.getType()); if (!VT) return false; unsigned NumElems = VT->getNumElements(); IRBuilder<> Builder(PHI.getParent(), &PHI); Builder.AllowFolding = this->AllowFolding; // HLSL Change ValueVector Res; Res.resize(NumElems); unsigned NumOps = PHI.getNumOperands(); for (unsigned I = 0; I < NumElems; ++I) Res[I] = Builder.CreatePHI(VT->getElementType(), NumOps, PHI.getName() + ".i" + Twine(I)); for (unsigned I = 0; I < NumOps; ++I) { Scatterer Op = scatter(&PHI, PHI.getIncomingValue(I)); BasicBlock *IncomingBlock = PHI.getIncomingBlock(I); for (unsigned J = 0; J < NumElems; ++J) cast<PHINode>(Res[J])->addIncoming(Op[J], IncomingBlock); } gather(&PHI, Res); return true; } bool Scalarizer::visitLoadInst(LoadInst &LI) { if (!ScalarizeLoadStore) return false; if (!LI.isSimple()) return false; VectorLayout Layout; if (!getVectorLayout(LI.getType(), LI.getAlignment(), Layout, LI.getModule()->getDataLayout())) return false; unsigned NumElems = Layout.VecTy->getNumElements(); IRBuilder<> Builder(LI.getParent(), &LI); Builder.AllowFolding = this->AllowFolding; // HLSL Change Scatterer Ptr = scatter(&LI, LI.getPointerOperand()); ValueVector Res; Res.resize(NumElems); for (unsigned I = 0; I < NumElems; ++I) Res[I] = Builder.CreateAlignedLoad(Ptr[I], Layout.getElemAlign(I), LI.getName() + ".i" + Twine(I)); gather(&LI, Res); return true; } bool Scalarizer::visitStoreInst(StoreInst &SI) { if (!ScalarizeLoadStore) return false; if (!SI.isSimple()) return false; VectorLayout Layout; Value *FullValue = SI.getValueOperand(); if (!getVectorLayout(FullValue->getType(), SI.getAlignment(), Layout, SI.getModule()->getDataLayout())) return false; unsigned NumElems = Layout.VecTy->getNumElements(); IRBuilder<> Builder(SI.getParent(), &SI); Builder.AllowFolding = this->AllowFolding; // HLSL Change Scatterer Ptr = scatter(&SI, SI.getPointerOperand()); Scatterer Val = scatter(&SI, FullValue); ValueVector Stores; Stores.resize(NumElems); for (unsigned I = 0; I < NumElems; ++I) { unsigned Align = Layout.getElemAlign(I); Stores[I] = Builder.CreateAlignedStore(Val[I], Ptr[I], Align); } transferMetadata(&SI, Stores); return true; } // Delete the instructions that we scalarized. If a full vector result // is still needed, recreate it using InsertElements. bool Scalarizer::finish() { if (Gathered.empty()) return false; // HLSL Change Begins. Module &M = *Gathered.front().first->getModule(); LLVMContext &Ctx = M.getContext(); const DataLayout &DL = M.getDataLayout(); bool HasDbgInfo = hasDebugInfo(M); // Map from an extract element inst to a Value which replaced it. DenseMap<Instruction *, Value*> EltMap; // HLSL Change Ends. for (GatherList::iterator GMI = Gathered.begin(), GME = Gathered.end(); GMI != GME; ++GMI) { Instruction *Op = GMI->first; ValueVector &CV = *GMI->second; // HLSL Change Begin - debug info in scalarizer. 
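// For every llvm.dbg.value that referred to the scalarized vector, emit one
// llvm.dbg.value per scalar element, each carrying a bit-piece expression
// that selects that element's bits within the source variable.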
if (HasDbgInfo) { if (auto *L = LocalAsMetadata::getIfExists(Op)) { if (auto *DINode = MetadataAsValue::getIfExists(Ctx, L)) { Type *Ty = Op->getType(); unsigned Count = Ty->getVectorNumElements(); Type *EltTy = Ty->getVectorElementType(); unsigned EltSizeInBits = DL.getTypeSizeInBits(EltTy); for (User *U : DINode->users()) { if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U)) { DIBuilder DIB(M, /*AllowUnresolved*/ false); auto *VarInfo = DVI->getVariable(); DebugLoc DbgLoc = DVI->getDebugLoc(); unsigned OffsetInBits = 0; if (DVI->getExpression()->isBitPiece()) OffsetInBits = DVI->getExpression()->getBitPieceOffset(); for (unsigned I = 0; I < Count; ++I) { DIExpression *EltExpr = DIB.createBitPieceExpression(OffsetInBits, EltSizeInBits); OffsetInBits += EltSizeInBits; DIB.insertDbgValueIntrinsic(CV[I], 0, VarInfo, EltExpr, DbgLoc, DVI); } } } } } } // HLSL Change End. if (!Op->use_empty()) { // HLSL Change Begins. // Remove the extract element users if possible. for (User *UI : Op->users()) { if (ExtractElementInst *EEI = dyn_cast<ExtractElementInst>(UI)) { Value *Idx = EEI->getIndexOperand(); if (!isa<ConstantInt>(Idx)) continue; unsigned immIdx = cast<ConstantInt>(Idx)->getLimitedValue(); if (immIdx >= CV.size()) continue; Value *Elt = CV[immIdx]; // Try to find a map for Elt,if it's in EltMap. while (Instruction *EltI = dyn_cast<Instruction>(Elt)) { if (EltMap.count(EltI)) { Elt = EltMap[EltI]; } else break; } EEI->replaceAllUsesWith(Elt); EltMap[EEI] = Elt; } } if (Op->use_empty()) { Op->eraseFromParent(); continue; } // HLSL Change Ends. // The value is still needed, so recreate it using a series of // InsertElements. Type *Ty = Op->getType(); Value *Res = UndefValue::get(Ty); BasicBlock *BB = Op->getParent(); unsigned Count = Ty->getVectorNumElements(); IRBuilder<> Builder(BB, Op); Builder.AllowFolding = this->AllowFolding; // HLSL Change if (isa<PHINode>(Op)) Builder.SetInsertPoint(BB, BB->getFirstInsertionPt()); for (unsigned I = 0; I < Count; ++I) Res = Builder.CreateInsertElement(Res, CV[I], Builder.getInt32(I), Op->getName() + ".upto" + Twine(I)); Res->takeName(Op); Op->replaceAllUsesWith(Res); } Op->eraseFromParent(); } // HLSL Change Begins. for (auto It: EltMap) { Instruction *I = It.first; if (I->user_empty()) I->eraseFromParent(); } // HLSL Change Ends. Gathered.clear(); Scattered.clear(); return true; } // HLSL Change Begin FunctionPass *llvm::createScalarizerPass(bool AllowFolding) { Scalarizer *pass = new Scalarizer(AllowFolding); return pass; } // HLSL Change End FunctionPass *llvm::createScalarizerPass() { return new Scalarizer(); }
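
// --- Illustrative usage sketch (not part of the original pass) -------------
// A minimal example, under the LLVM 3.7-era legacy pass manager API used by
// this codebase, of how the pass built by the factory functions above might
// be scheduled over a module. RunScalarizerExample is a hypothetical name
// used only for illustration; the block is compiled out.
#if 0
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Scalar.h"

static void RunScalarizerExample(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  // The bool overload is the HLSL addition above; passing true lets the
  // IRBuilder constant-fold scalarized operations as they are created.
  PM.add(llvm::createScalarizerPass(/*AllowFolding*/ true));
  PM.run(M);
}
#endif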
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/ScalarReplAggregatesHLSL.cpp
//===- ScalarReplAggregatesHLSL.cpp - Scalar Replacement of Aggregates ----===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===// // // Based on ScalarReplAggregates.cpp. The difference is HLSL version will keep // array so it can break up all structure. // //===----------------------------------------------------------------------===// #include "dxc/DXIL/DxilConstants.h" #include "dxc/DXIL/DxilModule.h" #include "dxc/DXIL/DxilOperations.h" #include "dxc/DXIL/DxilTypeSystem.h" #include "dxc/DXIL/DxilUtil.h" #include "dxc/HLSL/HLLowerUDT.h" #include "dxc/HLSL/HLMatrixLowerHelper.h" #include "dxc/HLSL/HLMatrixType.h" #include "dxc/HLSL/HLModule.h" #include "dxc/HLSL/HLOperations.h" #include "dxc/HLSL/HLUtil.h" #include "dxc/HlslIntrinsicOp.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/Loads.h" #include "llvm/Analysis/PostDominators.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DIBuilder.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" #include "llvm/IR/GetElementPtrTypeIterator.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/InstIterator.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/IR/Operator.h" #include "llvm/Pass.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/PromoteMemToReg.h" #include "llvm/Transforms/Utils/SSAUpdater.h" #include <deque> #include <queue> #include <unordered_map> #include <unordered_set> using namespace llvm; using namespace hlsl; #define DEBUG_TYPE "scalarreplhlsl" STATISTIC(NumReplaced, "Number of allocas broken up"); namespace { class SROA_Helper { public: // Split V into AllocaInsts with Builder and save the new AllocaInsts into // Elts. Then do SROA on V. static bool DoScalarReplacement(Value *V, std::vector<Value *> &Elts, Type *&BrokenUpTy, uint64_t &NumInstances, IRBuilder<> &Builder, bool bFlatVector, bool hasPrecise, DxilTypeSystem &typeSys, const DataLayout &DL, SmallVector<Value *, 32> &DeadInsts, DominatorTree *DT); static bool DoScalarReplacement(GlobalVariable *GV, std::vector<Value *> &Elts, IRBuilder<> &Builder, bool bFlatVector, bool hasPrecise, DxilTypeSystem &typeSys, const DataLayout &DL, SmallVector<Value *, 32> &DeadInsts, DominatorTree *DT); static unsigned GetEltAlign(unsigned ValueAlign, const DataLayout &DL, Type *EltTy, unsigned Offset); // Lower memcpy related to V. 
static bool LowerMemcpy(Value *V, DxilFieldAnnotation *annotation, DxilTypeSystem &typeSys, const DataLayout &DL, DominatorTree *DT, bool bAllowReplace); static void MarkEmptyStructUsers(Value *V, SmallVector<Value *, 32> &DeadInsts); static bool IsEmptyStructType(Type *Ty, DxilTypeSystem &typeSys); private: SROA_Helper(Value *V, ArrayRef<Value *> Elts, SmallVector<Value *, 32> &DeadInsts, DxilTypeSystem &ts, const DataLayout &dl, DominatorTree *dt) : OldVal(V), NewElts(Elts), DeadInsts(DeadInsts), typeSys(ts), DL(dl), DT(dt) {} void RewriteForScalarRepl(Value *V, IRBuilder<> &Builder); private: // Must be a pointer type val. Value *OldVal; // Flattened elements for OldVal. ArrayRef<Value *> NewElts; SmallVector<Value *, 32> &DeadInsts; DxilTypeSystem &typeSys; const DataLayout &DL; DominatorTree *DT; void RewriteForConstExpr(ConstantExpr *user, IRBuilder<> &Builder); void RewriteForGEP(GEPOperator *GEP, IRBuilder<> &Builder); void RewriteForAddrSpaceCast(Value *user, IRBuilder<> &Builder); void RewriteForLoad(LoadInst *loadInst); void RewriteForStore(StoreInst *storeInst); void RewriteMemIntrin(MemIntrinsic *MI, Value *OldV); void RewriteCall(CallInst *CI); void RewriteBitCast(BitCastInst *BCI); void RewriteCallArg(CallInst *CI, unsigned ArgIdx, bool bIn, bool bOut); }; } // namespace static unsigned getNestedLevelInStruct(const Type *ty) { unsigned lvl = 0; while (ty->isStructTy()) { if (ty->getStructNumElements() != 1) break; ty = ty->getStructElementType(0); lvl++; } return lvl; } // After SROA'ing a given value into a series of elements, // creates the debug info for the storage of the individual elements. static void addDebugInfoForElements(Value *ParentVal, Type *BrokenUpTy, uint64_t NumInstances, ArrayRef<Value *> Elems, const DataLayout &DatLayout, DIBuilder *DbgBuilder) { // Extract the data we need from the parent value, // depending on whether it is an alloca, argument or global variable. if (isa<GlobalVariable>(ParentVal)) { llvm_unreachable( "Not implemented: sroa debug info propagation for global vars."); } else { Type *ParentTy = nullptr; if (AllocaInst *ParentAlloca = dyn_cast<AllocaInst>(ParentVal)) ParentTy = ParentAlloca->getAllocatedType(); else ParentTy = cast<Argument>(ParentVal)->getType(); SmallVector<DbgDeclareInst *, 4> Declares; llvm::FindAllocaDbgDeclare(ParentVal, Declares); for (DbgDeclareInst *ParentDbgDeclare : Declares) { unsigned ParentBitPieceOffset = 0; DIVariable *ParentDbgVariable = nullptr; DIExpression *ParentDbgExpr = nullptr; DILocation *ParentDbgLocation = nullptr; Instruction *DbgDeclareInsertPt = nullptr; std::vector<DxilDIArrayDim> DIArrayDims; // Get the bit piece offset if ((ParentDbgExpr = ParentDbgDeclare->getExpression())) { if (ParentDbgExpr->isBitPiece()) { ParentBitPieceOffset = ParentDbgExpr->getBitPieceOffset(); } } ParentDbgVariable = ParentDbgDeclare->getVariable(); ParentDbgLocation = ParentDbgDeclare->getDebugLoc(); DbgDeclareInsertPt = ParentDbgDeclare; // Read the extra layout metadata, if any unsigned ParentBitPieceOffsetFromMD = 0; if (DxilMDHelper::GetVariableDebugLayout( ParentDbgDeclare, ParentBitPieceOffsetFromMD, DIArrayDims)) { // The offset is redundant for local variables and only necessary for // global variables. 
DXASSERT(ParentBitPieceOffsetFromMD == ParentBitPieceOffset, "Bit piece offset mismatch between llvm.dbg.declare and DXIL " "metadata."); } // If the type that was broken up is nested in arrays, // then each element will also be an array, // but the continuity between successive elements of the original // aggregate will have been broken, such that we must store the stride to // rebuild it. For example: [2 x {i32, float}] => [2 x i32], [2 x float], // each with stride 64 bits if (NumInstances > 1 && Elems.size() > 1) { // Existing dimensions already account for part of the stride uint64_t NewDimNumElements = NumInstances; for (const DxilDIArrayDim &ArrayDim : DIArrayDims) { DXASSERT(NewDimNumElements % ArrayDim.NumElements == 0, "Debug array stride is inconsistent with the number of " "elements."); NewDimNumElements /= ArrayDim.NumElements; } // Add a stride dimension DxilDIArrayDim NewDIArrayDim = {}; NewDIArrayDim.StrideInBits = (unsigned)DatLayout.getTypeAllocSizeInBits(BrokenUpTy); NewDIArrayDim.NumElements = (unsigned)NewDimNumElements; DIArrayDims.emplace_back(NewDIArrayDim); } else { DIArrayDims.clear(); } // Create the debug info for each element for (unsigned ElemIdx = 0; ElemIdx < Elems.size(); ++ElemIdx) { // Figure out the offset of the element in the broken up type unsigned ElemBitPieceOffset = ParentBitPieceOffset; if (StructType *ParentStructTy = dyn_cast<StructType>(BrokenUpTy)) { DXASSERT_NOMSG(Elems.size() == ParentStructTy->getNumElements()); ElemBitPieceOffset += (unsigned)DatLayout.getStructLayout(ParentStructTy) ->getElementOffsetInBits(ElemIdx); } else if (VectorType *ParentVecTy = dyn_cast<VectorType>(BrokenUpTy)) { DXASSERT_NOMSG(Elems.size() == ParentVecTy->getNumElements()); ElemBitPieceOffset += (unsigned)DatLayout.getTypeStoreSizeInBits( ParentVecTy->getElementType()) * ElemIdx; } else if (ArrayType *ParentArrayTy = dyn_cast<ArrayType>(BrokenUpTy)) { DXASSERT_NOMSG(Elems.size() == ParentArrayTy->getNumElements()); ElemBitPieceOffset += (unsigned)DatLayout.getTypeStoreSizeInBits( ParentArrayTy->getElementType()) * ElemIdx; } // The bit_piece can only represent the leading contiguous bytes. // If strides are involved, we'll need additional metadata. Type *ElemTy = Elems[ElemIdx]->getType()->getPointerElementType(); unsigned ElemBitPieceSize = (unsigned)DatLayout.getTypeStoreSizeInBits(ElemTy); for (const DxilDIArrayDim &ArrayDim : DIArrayDims) ElemBitPieceSize /= ArrayDim.NumElements; if (AllocaInst *ElemAlloca = dyn_cast<AllocaInst>(Elems[ElemIdx])) { // Local variables get an @llvm.dbg.declare plus optional metadata for // layout stride information. DIExpression *ElemDbgExpr = nullptr; if (ElemBitPieceOffset == 0 && DatLayout.getTypeAllocSizeInBits(ParentTy) == ElemBitPieceSize) { ElemDbgExpr = DbgBuilder->createExpression(); } else { ElemDbgExpr = DbgBuilder->createBitPieceExpression( ElemBitPieceOffset, ElemBitPieceSize); } DXASSERT_NOMSG(DbgBuilder != nullptr); DbgDeclareInst *EltDDI = cast<DbgDeclareInst>(DbgBuilder->insertDeclare( ElemAlloca, cast<DILocalVariable>(ParentDbgVariable), ElemDbgExpr, ParentDbgLocation, DbgDeclareInsertPt)); if (!DIArrayDims.empty()) DxilMDHelper::SetVariableDebugLayout(EltDDI, ElemBitPieceOffset, DIArrayDims); } else { llvm_unreachable("Non-AllocaInst SROA'd elements."); } } } } } /// Returns first GEP index that indexes a struct member, or 0 otherwise. /// Ignores initial ptr index. 
static unsigned FindFirstStructMemberIdxInGEP(GEPOperator *GEP) { StructType *ST = dyn_cast<StructType>( GEP->getPointerOperandType()->getPointerElementType()); int index = 1; for (auto it = gep_type_begin(GEP), E = gep_type_end(GEP); it != E; ++it, ++index) { if (ST) { DXASSERT(!HLMatrixType::isa(ST) && !dxilutil::IsHLSLObjectType(ST), "otherwise, indexing into hlsl object"); return index; } ST = dyn_cast<StructType>(it->getPointerElementType()); } return 0; } /// Return true when ptr should not be SROA'd or copied, but used directly /// by a function in its lowered form. Also collect uses for translation. /// What is meant by directly here: /// Possibly accessed through GEP array index or address space cast, but /// not under another struct member (always allow SROA of outer struct). typedef SmallMapVector<CallInst *, unsigned, 4> FunctionUseMap; static unsigned IsPtrUsedByLoweredFn(Value *V, FunctionUseMap &CollectedUses) { bool bFound = false; for (Use &U : V->uses()) { User *user = U.getUser(); if (CallInst *CI = dyn_cast<CallInst>(user)) { unsigned foundIdx = (unsigned)-1; Function *F = CI->getCalledFunction(); Type *Ty = V->getType(); if (F->isDeclaration() && !F->isIntrinsic() && Ty->isPointerTy()) { HLOpcodeGroup group = hlsl::GetHLOpcodeGroupByName(F); if (group == HLOpcodeGroup::HLIntrinsic) { unsigned opIdx = U.getOperandNo(); switch ((IntrinsicOp)hlsl::GetHLOpcode(CI)) { // TODO: Lower these as well, along with function parameter types // case IntrinsicOp::IOP_TraceRay: // if (opIdx != HLOperandIndex::kTraceRayPayLoadOpIdx) // continue; // break; // case IntrinsicOp::IOP_ReportHit: // if (opIdx != HLOperandIndex::kReportIntersectionAttributeOpIdx) // continue; // break; // case IntrinsicOp::IOP_CallShader: // if (opIdx != HLOperandIndex::kCallShaderPayloadOpIdx) // continue; // break; case IntrinsicOp::IOP_DispatchMesh: if (opIdx != HLOperandIndex::kDispatchMeshOpPayload) continue; break; default: continue; } foundIdx = opIdx; // TODO: Lower these as well, along with function parameter types //} else if (group == HLOpcodeGroup::NotHL) { // foundIdx = U.getOperandNo(); } } if (foundIdx != (unsigned)-1) { bFound = true; auto insRes = CollectedUses.insert(std::make_pair(CI, foundIdx)); DXASSERT_LOCALVAR(insRes, insRes.second, "otherwise, multiple uses in single call"); } } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(user)) { // Not what we are looking for if GEP result is not [array of] struct. // If use is under struct member, we can still SROA the outer struct. 
if (!dxilutil::StripArrayTypes(GEP->getType()->getPointerElementType()) ->isStructTy() || FindFirstStructMemberIdxInGEP(GEP)) continue; if (IsPtrUsedByLoweredFn(user, CollectedUses)) bFound = true; } else if (isa<AddrSpaceCastInst>(user)) { if (IsPtrUsedByLoweredFn(user, CollectedUses)) bFound = true; } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(user)) { unsigned opcode = CE->getOpcode(); if (opcode == Instruction::AddrSpaceCast) if (IsPtrUsedByLoweredFn(user, CollectedUses)) bFound = true; } } return bFound; } /// Rewrite call to natively use an argument with addrspace cast/bitcast static CallInst *RewriteIntrinsicCallForCastedArg(CallInst *CI, unsigned argIdx) { Function *F = CI->getCalledFunction(); HLOpcodeGroup group = GetHLOpcodeGroupByName(F); DXASSERT_NOMSG(group == HLOpcodeGroup::HLIntrinsic); unsigned opcode = GetHLOpcode(CI); SmallVector<Type *, 8> newArgTypes(CI->getFunctionType()->param_begin(), CI->getFunctionType()->param_end()); SmallVector<Value *, 8> newArgs(CI->arg_operands()); Value *newArg = CI->getOperand(argIdx)->stripPointerCasts(); newArgTypes[argIdx] = newArg->getType(); newArgs[argIdx] = newArg; FunctionType *newFuncTy = FunctionType::get(CI->getType(), newArgTypes, false); Function *newF = GetOrCreateHLFunction(*F->getParent(), newFuncTy, group, opcode, F->getAttributes().getFnAttributes()); IRBuilder<> Builder(CI); return Builder.CreateCall(newF, newArgs); } /// Translate pointer for cases where intrinsics use UDT pointers directly /// Return existing or new ptr if needs preserving, /// otherwise nullptr to proceed with existing checks and SROA. static Value *TranslatePtrIfUsedByLoweredFn(Value *Ptr, DxilTypeSystem &TypeSys) { if (!Ptr->getType()->isPointerTy()) return nullptr; Type *Ty = Ptr->getType()->getPointerElementType(); SmallVector<unsigned, 4> outerToInnerLengths; Ty = dxilutil::StripArrayTypes(Ty, &outerToInnerLengths); if (!Ty->isStructTy()) return nullptr; if (HLMatrixType::isa(Ty) || dxilutil::IsHLSLObjectType(Ty)) return nullptr; unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace(); FunctionUseMap FunctionUses; if (!IsPtrUsedByLoweredFn(Ptr, FunctionUses)) return nullptr; // Translate vectors to arrays in type, but don't SROA Type *NewTy = GetLoweredUDT(cast<StructType>(Ty), &TypeSys); // No work to do here, but prevent SROA. if (Ty == NewTy && AddrSpace != DXIL::kTGSMAddrSpace) return Ptr; // If type changed, replace value, otherwise casting may still // require a rewrite of the calls. Value *NewPtr = Ptr; if (Ty != NewTy) { NewTy = dxilutil::WrapInArrayTypes(NewTy, outerToInnerLengths); if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) { Module &M = *GV->getParent(); // Rewrite init expression for arrays instead of vectors Constant *Init = GV->hasInitializer() ? 
GV->getInitializer() : UndefValue::get(Ptr->getType()); Constant *NewInit = TranslateInitForLoweredUDT(Init, NewTy, &TypeSys); // Replace with new GV, and rewrite vector load/store users GlobalVariable *NewGV = new GlobalVariable( M, NewTy, GV->isConstant(), GV->getLinkage(), NewInit, GV->getName(), /*InsertBefore*/ GV, GV->getThreadLocalMode(), AddrSpace); NewPtr = NewGV; } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Ptr)) { IRBuilder<> Builder(AI); AllocaInst *NewAI = Builder.CreateAlloca(NewTy, nullptr, AI->getName()); NewPtr = NewAI; } else { DXASSERT(false, "Ptr must be global or alloca"); } // This will rewrite vector load/store users // and insert bitcasts for CallInst users ReplaceUsesForLoweredUDT(Ptr, NewPtr); } // Rewrite the HLIntrinsic calls for (auto it : FunctionUses) { CallInst *CI = it.first; HLOpcodeGroup group = GetHLOpcodeGroupByName(CI->getCalledFunction()); if (group == HLOpcodeGroup::NotHL) continue; CallInst *newCI = RewriteIntrinsicCallForCastedArg(CI, it.second); CI->replaceAllUsesWith(newCI); CI->eraseFromParent(); } return NewPtr; } /// isHomogeneousAggregate - Check if type T is a struct or array containing /// elements of the same type (which is always true for arrays). If so, /// return true with NumElts and EltTy set to the number of elements and the /// element type, respectively. static bool isHomogeneousAggregate(Type *T, unsigned &NumElts, Type *&EltTy) { if (ArrayType *AT = dyn_cast<ArrayType>(T)) { NumElts = AT->getNumElements(); EltTy = (NumElts == 0 ? nullptr : AT->getElementType()); return true; } if (StructType *ST = dyn_cast<StructType>(T)) { NumElts = ST->getNumContainedTypes(); EltTy = (NumElts == 0 ? nullptr : ST->getContainedType(0)); for (unsigned n = 1; n < NumElts; ++n) { if (ST->getContainedType(n) != EltTy) return false; } return true; } return false; } /// isCompatibleAggregate - Check if T1 and T2 are either the same type or are /// "homogeneous" aggregates with the same element type and number of elements. static bool isCompatibleAggregate(Type *T1, Type *T2) { if (T1 == T2) return true; unsigned NumElts1, NumElts2; Type *EltTy1, *EltTy2; if (isHomogeneousAggregate(T1, NumElts1, EltTy1) && isHomogeneousAggregate(T2, NumElts2, EltTy2) && NumElts1 == NumElts2 && EltTy1 == EltTy2) return true; return false; } /// LoadVectorArray - Load vector array like [2 x <4 x float>] from /// arrays like 4 [2 x float] or struct array like /// [2 x { <4 x float>, < 4 x uint> }] /// from arrays like [ 2 x <4 x float> ], [ 2 x <4 x uint> ]. 
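/// For example, to load a [2 x <4 x float>] from four [2 x float] element
/// arrays NewElts[0..3], lane c of the vector at array index i is loaded
/// through a GEP of NewElts[c] at index i, and the result is rebuilt with
/// insertelement/insertvalue.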
static Value *LoadVectorOrStructArray(ArrayType *AT, ArrayRef<Value *> NewElts, SmallVector<Value *, 8> &idxList, IRBuilder<> &Builder) { Type *EltTy = AT->getElementType(); Value *retVal = llvm::UndefValue::get(AT); Type *i32Ty = Type::getInt32Ty(EltTy->getContext()); uint32_t arraySize = AT->getNumElements(); for (uint32_t i = 0; i < arraySize; i++) { Constant *idx = ConstantInt::get(i32Ty, i); idxList.emplace_back(idx); if (ArrayType *EltAT = dyn_cast<ArrayType>(EltTy)) { Value *EltVal = LoadVectorOrStructArray(EltAT, NewElts, idxList, Builder); retVal = Builder.CreateInsertValue(retVal, EltVal, i); } else { assert((EltTy->isVectorTy() || EltTy->isStructTy()) && "must be a vector or struct type"); bool isVectorTy = EltTy->isVectorTy(); Value *retVec = llvm::UndefValue::get(EltTy); if (isVectorTy) { for (uint32_t c = 0; c < EltTy->getVectorNumElements(); c++) { Value *GEP = Builder.CreateInBoundsGEP(NewElts[c], idxList); Value *elt = Builder.CreateLoad(GEP); retVec = Builder.CreateInsertElement(retVec, elt, c); } } else { for (uint32_t c = 0; c < EltTy->getStructNumElements(); c++) { Value *GEP = Builder.CreateInBoundsGEP(NewElts[c], idxList); Value *elt = Builder.CreateLoad(GEP); retVec = Builder.CreateInsertValue(retVec, elt, c); } } retVal = Builder.CreateInsertValue(retVal, retVec, i); } idxList.pop_back(); } return retVal; } /// LoadVectorArray - Store vector array like [2 x <4 x float>] to /// arrays like 4 [2 x float] or struct array like /// [2 x { <4 x float>, < 4 x uint> }] /// from arrays like [ 2 x <4 x float> ], [ 2 x <4 x uint> ]. static void StoreVectorOrStructArray(ArrayType *AT, Value *val, ArrayRef<Value *> NewElts, SmallVector<Value *, 8> &idxList, IRBuilder<> &Builder) { Type *EltTy = AT->getElementType(); Type *i32Ty = Type::getInt32Ty(EltTy->getContext()); uint32_t arraySize = AT->getNumElements(); for (uint32_t i = 0; i < arraySize; i++) { Value *elt = Builder.CreateExtractValue(val, i); Constant *idx = ConstantInt::get(i32Ty, i); idxList.emplace_back(idx); if (ArrayType *EltAT = dyn_cast<ArrayType>(EltTy)) { StoreVectorOrStructArray(EltAT, elt, NewElts, idxList, Builder); } else { assert((EltTy->isVectorTy() || EltTy->isStructTy()) && "must be a vector or struct type"); bool isVectorTy = EltTy->isVectorTy(); if (isVectorTy) { for (uint32_t c = 0; c < EltTy->getVectorNumElements(); c++) { Value *component = Builder.CreateExtractElement(elt, c); Value *GEP = Builder.CreateInBoundsGEP(NewElts[c], idxList); Builder.CreateStore(component, GEP); } } else { for (uint32_t c = 0; c < EltTy->getStructNumElements(); c++) { Value *field = Builder.CreateExtractValue(elt, c); Value *GEP = Builder.CreateInBoundsGEP(NewElts[c], idxList); Builder.CreateStore(field, GEP); } } } idxList.pop_back(); } } namespace { // Simple struct to split memcpy into ld/st struct MemcpySplitter { llvm::LLVMContext &m_context; DxilTypeSystem &m_typeSys; public: MemcpySplitter(llvm::LLVMContext &context, DxilTypeSystem &typeSys) : m_context(context), m_typeSys(typeSys) {} void Split(llvm::Function &F); static void PatchMemCpyWithZeroIdxGEP(Module &M); static void PatchMemCpyWithZeroIdxGEP(MemCpyInst *MI, const DataLayout &DL); static void SplitMemCpy(MemCpyInst *MI, const DataLayout &DL, DxilFieldAnnotation *fieldAnnotation, DxilTypeSystem &typeSys, const bool bEltMemCpy = true); }; // Copy data from srcPtr to destPtr. 
void SimplePtrCopy(Value *DestPtr, Value *SrcPtr, llvm::SmallVector<llvm::Value *, 16> &idxList, IRBuilder<> &Builder) { if (idxList.size() > 1) { DestPtr = Builder.CreateInBoundsGEP(DestPtr, idxList); SrcPtr = Builder.CreateInBoundsGEP(SrcPtr, idxList); } llvm::LoadInst *ld = Builder.CreateLoad(SrcPtr); Builder.CreateStore(ld, DestPtr); } // Copy srcVal to destPtr. void SimpleValCopy(Value *DestPtr, Value *SrcVal, llvm::SmallVector<llvm::Value *, 16> &idxList, IRBuilder<> &Builder) { Value *DestGEP = Builder.CreateInBoundsGEP(DestPtr, idxList); Value *Val = SrcVal; // Skip beginning pointer type. for (unsigned i = 1; i < idxList.size(); i++) { ConstantInt *idx = cast<ConstantInt>(idxList[i]); Type *Ty = Val->getType(); if (Ty->isAggregateType()) { Val = Builder.CreateExtractValue(Val, idx->getLimitedValue()); } } Builder.CreateStore(Val, DestGEP); } void SimpleCopy(Value *Dest, Value *Src, llvm::SmallVector<llvm::Value *, 16> &idxList, IRBuilder<> &Builder) { if (Src->getType()->isPointerTy()) SimplePtrCopy(Dest, Src, idxList, Builder); else SimpleValCopy(Dest, Src, idxList, Builder); } Value *CreateMergedGEP(Value *Ptr, SmallVector<Value *, 16> &idxList, IRBuilder<> &Builder) { if (GEPOperator *GEPPtr = dyn_cast<GEPOperator>(Ptr)) { SmallVector<Value *, 2> IdxList(GEPPtr->idx_begin(), GEPPtr->idx_end()); // skip idxLIst.begin() because it is included in GEPPtr idx. IdxList.append(idxList.begin() + 1, idxList.end()); return Builder.CreateInBoundsGEP(GEPPtr->getPointerOperand(), IdxList); } else { return Builder.CreateInBoundsGEP(Ptr, idxList); } } void EltMemCpy(Type *Ty, Value *Dest, Value *Src, SmallVector<Value *, 16> &idxList, IRBuilder<> &Builder, const DataLayout &DL) { Value *DestGEP = CreateMergedGEP(Dest, idxList, Builder); Value *SrcGEP = CreateMergedGEP(Src, idxList, Builder); unsigned size = DL.getTypeAllocSize(Ty); Builder.CreateMemCpy(DestGEP, SrcGEP, size, /* Align */ 1); } bool IsMemCpyTy(Type *Ty, DxilTypeSystem &typeSys) { if (!Ty->isAggregateType()) return false; if (HLMatrixType::isa(Ty)) return false; if (dxilutil::IsHLSLObjectType(Ty)) return false; if (StructType *ST = dyn_cast<StructType>(Ty)) { DxilStructAnnotation *STA = typeSys.GetStructAnnotation(ST); DXASSERT(STA, "require annotation here"); if (STA->IsEmptyStruct()) return false; // Skip 1 element struct which the element is basic type. // Because create memcpy will create gep on the struct, memcpy the basic // type only. if (ST->getNumElements() == 1) return IsMemCpyTy(ST->getElementType(0), typeSys); } return true; } // Split copy into ld/st. void SplitCpy(Type *Ty, Value *Dest, Value *Src, SmallVector<Value *, 16> &idxList, IRBuilder<> &Builder, const DataLayout &DL, DxilTypeSystem &typeSys, const DxilFieldAnnotation *fieldAnnotation, const bool bEltMemCpy = true) { if (PointerType *PT = dyn_cast<PointerType>(Ty)) { Constant *idx = Constant::getIntegerValue( IntegerType::get(Ty->getContext(), 32), APInt(32, 0)); idxList.emplace_back(idx); SplitCpy(PT->getElementType(), Dest, Src, idxList, Builder, DL, typeSys, fieldAnnotation, bEltMemCpy); idxList.pop_back(); } else if (HLMatrixType::isa(Ty)) { // If no fieldAnnotation, use row major as default. // Only load then store immediately should be fine. 
bool bRowMajor = true; if (fieldAnnotation) { DXASSERT(fieldAnnotation->HasMatrixAnnotation(), "must has matrix annotation"); bRowMajor = fieldAnnotation->GetMatrixAnnotation().Orientation == MatrixOrientation::RowMajor; } Module *M = Builder.GetInsertPoint()->getModule(); Value *DestMatPtr; Value *SrcMatPtr; if (idxList.size() == 1 && idxList[0] == ConstantInt::get(IntegerType::get(Ty->getContext(), 32), APInt(32, 0))) { // Avoid creating GEP(0) DestMatPtr = Dest; SrcMatPtr = Src; } else { DestMatPtr = Builder.CreateInBoundsGEP(Dest, idxList); SrcMatPtr = Builder.CreateInBoundsGEP(Src, idxList); } HLMatLoadStoreOpcode loadOp = bRowMajor ? HLMatLoadStoreOpcode::RowMatLoad : HLMatLoadStoreOpcode::ColMatLoad; HLMatLoadStoreOpcode storeOp = bRowMajor ? HLMatLoadStoreOpcode::RowMatStore : HLMatLoadStoreOpcode::ColMatStore; Value *Load = HLModule::EmitHLOperationCall( Builder, HLOpcodeGroup::HLMatLoadStore, static_cast<unsigned>(loadOp), Ty, {SrcMatPtr}, *M); HLModule::EmitHLOperationCall(Builder, HLOpcodeGroup::HLMatLoadStore, static_cast<unsigned>(storeOp), Ty, {DestMatPtr, Load}, *M); } else if (StructType *ST = dyn_cast<StructType>(Ty)) { if (dxilutil::IsHLSLObjectType(ST)) { // Avoid split HLSL object. SimpleCopy(Dest, Src, idxList, Builder); return; } // Built-in structs have no type annotation DxilStructAnnotation *STA = typeSys.GetStructAnnotation(ST); if (STA && STA->IsEmptyStruct()) return; for (uint32_t i = 0; i < ST->getNumElements(); i++) { llvm::Type *ET = ST->getElementType(i); Constant *idx = llvm::Constant::getIntegerValue( IntegerType::get(Ty->getContext(), 32), APInt(32, i)); idxList.emplace_back(idx); if (bEltMemCpy && IsMemCpyTy(ET, typeSys)) { EltMemCpy(ET, Dest, Src, idxList, Builder, DL); } else { DxilFieldAnnotation *EltAnnotation = STA ? &STA->GetFieldAnnotation(i) : nullptr; SplitCpy(ET, Dest, Src, idxList, Builder, DL, typeSys, EltAnnotation, bEltMemCpy); } idxList.pop_back(); } } else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) { Type *ET = AT->getElementType(); for (uint32_t i = 0; i < AT->getNumElements(); i++) { Constant *idx = Constant::getIntegerValue( IntegerType::get(Ty->getContext(), 32), APInt(32, i)); idxList.emplace_back(idx); if (bEltMemCpy && IsMemCpyTy(ET, typeSys)) { EltMemCpy(ET, Dest, Src, idxList, Builder, DL); } else { SplitCpy(ET, Dest, Src, idxList, Builder, DL, typeSys, fieldAnnotation, bEltMemCpy); } idxList.pop_back(); } } else { SimpleCopy(Dest, Src, idxList, Builder); } } // Given a pointer to a value, produces a list of pointers to // all scalar elements of that value and their field annotations, at any nesting // level. 
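// For example, given %p of type { float, [2 x float] }*, this produces two
// pointers, one to the float member and one to the whole [2 x float] member
// (array members are not split element-by-element), each paired with its
// field annotation.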
void SplitPtr( Value *Ptr, // The root value pointer SmallVectorImpl<Value *> &IdxList, // GEP indices stack during recursion Type *Ty, // Type at the current GEP indirection level const DxilFieldAnnotation &Annotation, // Annotation at the current GEP indirection level SmallVectorImpl<Value *> &EltPtrList, // Accumulates pointers to each element found SmallVectorImpl<const DxilFieldAnnotation *> &EltAnnotationList, // Accumulates field annotations for each element // found DxilTypeSystem &TypeSys, IRBuilder<> &Builder) { if (PointerType *PT = dyn_cast<PointerType>(Ty)) { Constant *idx = Constant::getIntegerValue( IntegerType::get(Ty->getContext(), 32), APInt(32, 0)); IdxList.emplace_back(idx); SplitPtr(Ptr, IdxList, PT->getElementType(), Annotation, EltPtrList, EltAnnotationList, TypeSys, Builder); IdxList.pop_back(); return; } if (StructType *ST = dyn_cast<StructType>(Ty)) { if (!HLMatrixType::isa(Ty) && !dxilutil::IsHLSLObjectType(ST)) { const DxilStructAnnotation *SA = TypeSys.GetStructAnnotation(ST); for (uint32_t i = 0; i < ST->getNumElements(); i++) { llvm::Type *EltTy = ST->getElementType(i); Constant *idx = llvm::Constant::getIntegerValue( IntegerType::get(Ty->getContext(), 32), APInt(32, i)); IdxList.emplace_back(idx); SplitPtr(Ptr, IdxList, EltTy, SA->GetFieldAnnotation(i), EltPtrList, EltAnnotationList, TypeSys, Builder); IdxList.pop_back(); } return; } } if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) { if (AT->getArrayNumElements() == 0) { // Skip cases like [0 x %struct], nothing to copy return; } Type *ElTy = AT->getElementType(); SmallVector<ArrayType *, 4> nestArrayTys; nestArrayTys.emplace_back(AT); // support multi level of array while (ElTy->isArrayTy()) { ArrayType *ElAT = cast<ArrayType>(ElTy); nestArrayTys.emplace_back(ElAT); ElTy = ElAT->getElementType(); } if (ElTy->isStructTy() && !HLMatrixType::isa(ElTy)) { DXASSERT(0, "Not support array of struct when split pointers."); return; } } // Return a pointer to the current element and its annotation Value *GEP = Builder.CreateInBoundsGEP(Ptr, IdxList); EltPtrList.emplace_back(GEP); EltAnnotationList.emplace_back(&Annotation); } // Support case when bitcast (gep ptr, 0,0) is transformed into bitcast ptr. unsigned MatchSizeByCheckElementType(Type *Ty, const DataLayout &DL, unsigned size, unsigned level) { unsigned ptrSize = DL.getTypeAllocSize(Ty); // Size match, return current level. if (ptrSize == size) { // Do not go deeper for matrix or object. if (HLMatrixType::isa(Ty) || dxilutil::IsHLSLObjectType(Ty)) return level; // For struct, go deeper if size not change. // This will leave memcpy to deeper level when flatten. if (StructType *ST = dyn_cast<StructType>(Ty)) { if (ST->getNumElements() == 1) { return MatchSizeByCheckElementType(ST->getElementType(0), DL, size, level + 1); } } // Don't do this for array. // Array will be flattened as struct of array. return level; } // Add ZeroIdx cannot make ptrSize bigger. if (ptrSize < size) return 0; // ptrSize > size. // Try to use element type to make size match. 
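// For example, a 4-byte copy whose pointer has type { float, i32 }* (8 bytes
// total) descends into the first element and matches the float at level 1.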
if (StructType *ST = dyn_cast<StructType>(Ty)) { return MatchSizeByCheckElementType(ST->getElementType(0), DL, size, level + 1); } else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) { return MatchSizeByCheckElementType(AT->getElementType(), DL, size, level + 1); } else { return 0; } } void PatchZeroIdxGEP(Value *Ptr, Value *RawPtr, MemCpyInst *MI, unsigned level, IRBuilder<> &Builder) { Value *zeroIdx = Builder.getInt32(0); Value *GEP = nullptr; if (GEPOperator *GEPPtr = dyn_cast<GEPOperator>(Ptr)) { SmallVector<Value *, 2> IdxList(GEPPtr->idx_begin(), GEPPtr->idx_end()); // level not + 1 because it is included in GEPPtr idx. IdxList.append(level, zeroIdx); GEP = Builder.CreateInBoundsGEP(GEPPtr->getPointerOperand(), IdxList); } else { SmallVector<Value *, 2> IdxList(level + 1, zeroIdx); GEP = Builder.CreateInBoundsGEP(Ptr, IdxList); } // Use BitCastInst::Create to prevent idxList from being optimized. CastInst *Cast = BitCastInst::Create(Instruction::BitCast, GEP, RawPtr->getType()); Builder.Insert(Cast); MI->replaceUsesOfWith(RawPtr, Cast); // Remove RawPtr if possible. if (RawPtr->user_empty()) { if (Instruction *I = dyn_cast<Instruction>(RawPtr)) { I->eraseFromParent(); } } } void MemcpySplitter::PatchMemCpyWithZeroIdxGEP(MemCpyInst *MI, const DataLayout &DL) { Value *Dest = MI->getRawDest(); Value *Src = MI->getRawSource(); // Only remove one level bitcast generated from inline. if (BitCastOperator *BC = dyn_cast<BitCastOperator>(Dest)) Dest = BC->getOperand(0); if (BitCastOperator *BC = dyn_cast<BitCastOperator>(Src)) Src = BC->getOperand(0); IRBuilder<> Builder(MI); ConstantInt *zero = Builder.getInt32(0); Type *DestTy = Dest->getType()->getPointerElementType(); Type *SrcTy = Src->getType()->getPointerElementType(); // Support case when bitcast (gep ptr, 0,0) is transformed into // bitcast ptr. // Also replace (gep ptr, 0) with ptr. ConstantInt *Length = cast<ConstantInt>(MI->getLength()); unsigned size = Length->getLimitedValue(); if (unsigned level = MatchSizeByCheckElementType(DestTy, DL, size, 0)) { PatchZeroIdxGEP(Dest, MI->getRawDest(), MI, level, Builder); } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(Dest)) { if (GEP->getNumIndices() == 1) { Value *idx = *GEP->idx_begin(); if (idx == zero) { GEP->replaceAllUsesWith(GEP->getPointerOperand()); } } } if (unsigned level = MatchSizeByCheckElementType(SrcTy, DL, size, 0)) { PatchZeroIdxGEP(Src, MI->getRawSource(), MI, level, Builder); } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(Src)) { if (GEP->getNumIndices() == 1) { Value *idx = *GEP->idx_begin(); if (idx == zero) { GEP->replaceAllUsesWith(GEP->getPointerOperand()); } } } } void MemcpySplitter::PatchMemCpyWithZeroIdxGEP(Module &M) { const DataLayout &DL = M.getDataLayout(); for (Function &F : M.functions()) { for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) { for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) { // Avoid invalidating the iterator. Instruction *I = BI++; if (MemCpyInst *MI = dyn_cast<MemCpyInst>(I)) { PatchMemCpyWithZeroIdxGEP(MI, DL); } } } } } void DeleteMemcpy(MemCpyInst *MI) { Value *Op0 = MI->getOperand(0); Value *Op1 = MI->getOperand(1); // delete memcpy MI->eraseFromParent(); if (Instruction *op0 = dyn_cast<Instruction>(Op0)) { if (op0->user_empty()) op0->eraseFromParent(); } if (Op0 != Op1) { if (Instruction *op1 = dyn_cast<Instruction>(Op1)) { if (op1->user_empty()) op1->eraseFromParent(); } } } // If user is function call, return param annotation to get matrix major. 
DxilFieldAnnotation *FindAnnotationFromMatUser(Value *Mat, DxilTypeSystem &typeSys) { for (User *U : Mat->users()) { if (CallInst *CI = dyn_cast<CallInst>(U)) { Function *F = CI->getCalledFunction(); if (DxilFunctionAnnotation *Anno = typeSys.GetFunctionAnnotation(F)) { for (unsigned i = 0; i < CI->getNumArgOperands(); i++) { if (CI->getArgOperand(i) == Mat) { return &Anno->GetParameterAnnotation(i); } } } } } return nullptr; } namespace { bool isCBVec4ArrayToScalarArray(Type *TyV, Value *Src, Type *TySrc, const DataLayout &DL) { Value *SrcPtr = Src; while (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(SrcPtr)) { SrcPtr = GEP->getPointerOperand(); } CallInst *CI = dyn_cast<CallInst>(SrcPtr); if (!CI) return false; Function *F = CI->getCalledFunction(); if (hlsl::GetHLOpcodeGroupByName(F) != HLOpcodeGroup::HLSubscript) return false; if (hlsl::GetHLOpcode(CI) != (unsigned)HLSubscriptOpcode::CBufferSubscript) return false; ArrayType *AT = dyn_cast<ArrayType>(TySrc); if (!AT) return false; VectorType *VT = dyn_cast<VectorType>(AT->getElementType()); if (!VT) return false; if (DL.getTypeSizeInBits(VT) != 128) return false; ArrayType *DstAT = dyn_cast<ArrayType>(TyV); if (!DstAT) return false; if (VT->getElementType() != DstAT->getElementType()) return false; unsigned sizeInBits = DL.getTypeSizeInBits(VT->getElementType()); if (sizeInBits < 32) return false; return true; } bool trySplitCBVec4ArrayToScalarArray(Value *Dest, Type *TyV, Value *Src, Type *TySrc, const DataLayout &DL, IRBuilder<> &B) { if (!isCBVec4ArrayToScalarArray(TyV, Src, TySrc, DL)) return false; ArrayType *AT = cast<ArrayType>(TyV); Type *EltTy = AT->getElementType(); unsigned sizeInBits = DL.getTypeSizeInBits(EltTy); unsigned vecSize = 4; if (sizeInBits == 64) vecSize = 2; unsigned arraySize = AT->getNumElements(); unsigned vecArraySize = arraySize / vecSize; Value *zeroIdx = B.getInt32(0); for (unsigned a = 0; a < vecArraySize; a++) { Value *SrcGEP = B.CreateGEP(Src, {zeroIdx, B.getInt32(a)}); Value *Ld = B.CreateLoad(SrcGEP); for (unsigned v = 0; v < vecSize; v++) { Value *Elt = B.CreateExtractElement(Ld, v); Value *DestGEP = B.CreateGEP(Dest, {zeroIdx, B.getInt32(a * vecSize + v)}); B.CreateStore(Elt, DestGEP); } } return true; } } // namespace void MemcpySplitter::SplitMemCpy(MemCpyInst *MI, const DataLayout &DL, DxilFieldAnnotation *fieldAnnotation, DxilTypeSystem &typeSys, const bool bEltMemCpy) { Value *Dest = MI->getRawDest(); Value *Src = MI->getRawSource(); // Only remove one level bitcast generated from inline. if (BitCastOperator *BC = dyn_cast<BitCastOperator>(Dest)) Dest = BC->getOperand(0); if (BitCastOperator *BC = dyn_cast<BitCastOperator>(Src)) Src = BC->getOperand(0); if (Dest == Src) { // delete self copy. DeleteMemcpy(MI); return; } IRBuilder<> Builder(MI); Type *DestTy = Dest->getType()->getPointerElementType(); Type *SrcTy = Src->getType()->getPointerElementType(); // Allow copy between different address space. if (DestTy != SrcTy) { if (trySplitCBVec4ArrayToScalarArray(Dest, DestTy, Src, SrcTy, DL, Builder)) { // delete memcpy DeleteMemcpy(MI); } return; } // Try to find fieldAnnotation from user of Dest/Src. if (!fieldAnnotation) { Type *EltTy = dxilutil::GetArrayEltTy(DestTy); if (HLMatrixType::isa(EltTy)) { fieldAnnotation = FindAnnotationFromMatUser(Dest, typeSys); } } llvm::SmallVector<llvm::Value *, 16> idxList; // split // Matrix is treated as scalar type, will not use memcpy. // So use nullptr for fieldAnnotation should be safe here. 
SplitCpy(Dest->getType(), Dest, Src, idxList, Builder, DL, typeSys, fieldAnnotation, bEltMemCpy); // delete memcpy DeleteMemcpy(MI); } void MemcpySplitter::Split(llvm::Function &F) { const DataLayout &DL = F.getParent()->getDataLayout(); SmallVector<Function *, 2> memcpys; for (Function &Fn : F.getParent()->functions()) { if (Fn.getIntrinsicID() == Intrinsic::memcpy) { memcpys.emplace_back(&Fn); } } for (Function *memcpy : memcpys) { for (auto U = memcpy->user_begin(); U != memcpy->user_end();) { MemCpyInst *MI = cast<MemCpyInst>(*(U++)); if (MI->getParent()->getParent() != &F) continue; // Matrix is treated as scalar type, will not use memcpy. // So use nullptr for fieldAnnotation should be safe here. SplitMemCpy(MI, DL, /*fieldAnnotation*/ nullptr, m_typeSys, /*bEltMemCpy*/ false); } } } } // namespace namespace { /// DeleteDeadInstructions - Erase instructions on the DeadInstrs list, /// recursively including all their operands that become trivially dead. void DeleteDeadInstructions(SmallVector<Value *, 32> &DeadInsts) { while (!DeadInsts.empty()) { Instruction *I = cast<Instruction>(DeadInsts.pop_back_val()); for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) if (Instruction *U = dyn_cast<Instruction>(*OI)) { // Zero out the operand and see if it becomes trivially dead. // (But, don't add allocas to the dead instruction list -- they are // already on the worklist and will be deleted separately.) *OI = nullptr; if (isInstructionTriviallyDead(U) && !isa<AllocaInst>(U)) DeadInsts.push_back(U); } I->eraseFromParent(); } } // markPrecise - To save the precise attribute on alloca inst which might be // removed by promote, mark precise attribute with function call on alloca inst // stores. bool markPrecise(Function &F) { bool Changed = false; BasicBlock &BB = F.getEntryBlock(); for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I) if (AllocaInst *A = dyn_cast<AllocaInst>(I)) { // TODO: Only do this on basic types. if (HLModule::HasPreciseAttributeWithMetadata(A)) { HLModule::MarkPreciseAttributeOnPtrWithFunctionCall(A, *(F.getParent())); Changed = true; } } return Changed; } bool Cleanup(Function &F, DxilTypeSystem &typeSys) { // change rest memcpy into ld/st. MemcpySplitter splitter(F.getContext(), typeSys); splitter.Split(F); return markPrecise(F); } } // namespace namespace { /// ShouldAttemptScalarRepl - Decide if an alloca is a good candidate for /// SROA. It must be a struct or array type with a small number of elements. bool ShouldAttemptScalarRepl(AllocaInst *AI) { Type *T = AI->getAllocatedType(); // promote every struct. if (dyn_cast<StructType>(T)) return true; // promote every array. if (dyn_cast<ArrayType>(T)) return true; return false; } /// AllocaInfo - When analyzing uses of an alloca instruction, this captures /// information about the uses. All these fields are initialized to false /// and set to true when something is learned. struct AllocaInfo { /// The alloca to promote. AllocaInst *AI; /// CheckedPHIs - This is a set of verified PHI nodes, to prevent infinite /// looping and avoid redundant work. SmallPtrSet<PHINode *, 8> CheckedPHIs; /// isUnsafe - This is set to true if the alloca cannot be SROA'd. bool isUnsafe : 1; /// isMemCpySrc - This is true if this aggregate is memcpy'd from. bool isMemCpySrc : 1; /// isMemCpyDst - This is true if this aggregate is memcpy'd into. 
bool isMemCpyDst : 1; /// hasSubelementAccess - This is true if a subelement of the alloca is /// ever accessed, or false if the alloca is only accessed with mem /// intrinsics or load/store that only access the entire alloca at once. bool hasSubelementAccess : 1; /// hasALoadOrStore - This is true if there are any loads or stores to it. /// The alloca may just be accessed with memcpy, for example, which would /// not set this. bool hasALoadOrStore : 1; /// hasArrayIndexing - This is true if there are any dynamic array /// indexing to it. bool hasArrayIndexing : 1; /// hasVectorIndexing - This is true if there are any dynamic vector /// indexing to it. bool hasVectorIndexing : 1; explicit AllocaInfo(AllocaInst *ai) : AI(ai), isUnsafe(false), isMemCpySrc(false), isMemCpyDst(false), hasSubelementAccess(false), hasALoadOrStore(false), hasArrayIndexing(false), hasVectorIndexing(false) {} }; /// TypeHasComponent - Return true if T has a component type with the /// specified offset and size. If Size is zero, do not check the size. bool TypeHasComponent(Type *T, uint64_t Offset, uint64_t Size, const DataLayout &DL) { Type *EltTy; uint64_t EltSize; if (StructType *ST = dyn_cast<StructType>(T)) { const StructLayout *Layout = DL.getStructLayout(ST); unsigned EltIdx = Layout->getElementContainingOffset(Offset); EltTy = ST->getContainedType(EltIdx); EltSize = DL.getTypeAllocSize(EltTy); Offset -= Layout->getElementOffset(EltIdx); } else if (ArrayType *AT = dyn_cast<ArrayType>(T)) { EltTy = AT->getElementType(); EltSize = DL.getTypeAllocSize(EltTy); if (Offset >= AT->getNumElements() * EltSize) return false; Offset %= EltSize; } else if (VectorType *VT = dyn_cast<VectorType>(T)) { EltTy = VT->getElementType(); EltSize = DL.getTypeAllocSize(EltTy); if (Offset >= VT->getNumElements() * EltSize) return false; Offset %= EltSize; } else { return false; } if (Offset == 0 && (Size == 0 || EltSize == Size)) return true; // Check if the component spans multiple elements. if (Offset + Size > EltSize) return false; return TypeHasComponent(EltTy, Offset, Size, DL); } void MarkUnsafe(AllocaInfo &I, Instruction *User) { I.isUnsafe = true; DEBUG(dbgs() << " Transformation preventing inst: " << *User << '\n'); } /// isSafeGEP - Check if a GEP instruction can be handled for scalar /// replacement. It is safe when all the indices are constant, in-bounds /// references, and when the resulting offset corresponds to an element within /// the alloca type. The results are flagged in the Info parameter. Upon /// return, Offset is adjusted as specified by the GEP indices. void isSafeGEP(GetElementPtrInst *GEPI, uint64_t &Offset, AllocaInfo &Info) { gep_type_iterator GEPIt = gep_type_begin(GEPI), E = gep_type_end(GEPI); if (GEPIt == E) return; bool NonConstant = false; unsigned NonConstantIdxSize = 0; // Compute the offset due to this GEP and check if the alloca has a // component element at that offset. SmallVector<Value *, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end()); auto indicesIt = Indices.begin(); // Walk through the GEP type indices, checking the types that this indexes // into. uint32_t arraySize = 0; bool isArrayIndexing = false; for (; GEPIt != E; ++GEPIt) { Type *Ty = *GEPIt; if (Ty->isStructTy() && !HLMatrixType::isa(Ty)) { // Don't go inside struct when mark hasArrayIndexing and // hasVectorIndexing. The following level won't affect scalar repl on the // struct. 
break; } if (GEPIt->isArrayTy()) { arraySize = GEPIt->getArrayNumElements(); isArrayIndexing = true; } if (GEPIt->isVectorTy()) { arraySize = GEPIt->getVectorNumElements(); isArrayIndexing = false; } // Allow dynamic indexing ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPIt.getOperand()); if (!IdxVal) { // for dynamic index, use array size - 1 to check the offset *indicesIt = Constant::getIntegerValue( Type::getInt32Ty(GEPI->getContext()), APInt(32, arraySize - 1)); if (isArrayIndexing) Info.hasArrayIndexing = true; else Info.hasVectorIndexing = true; NonConstant = true; } indicesIt++; } // Continue iterate only for the NonConstant. for (; GEPIt != E; ++GEPIt) { Type *Ty = *GEPIt; if (Ty->isArrayTy()) { arraySize = GEPIt->getArrayNumElements(); } if (Ty->isVectorTy()) { arraySize = GEPIt->getVectorNumElements(); } // Allow dynamic indexing ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPIt.getOperand()); if (!IdxVal) { // for dynamic index, use array size - 1 to check the offset *indicesIt = Constant::getIntegerValue( Type::getInt32Ty(GEPI->getContext()), APInt(32, arraySize - 1)); NonConstant = true; } indicesIt++; } // If this GEP is non-constant then the last operand must have been a // dynamic index into a vector. Pop this now as it has no impact on the // constant part of the offset. if (NonConstant) Indices.pop_back(); const DataLayout &DL = GEPI->getModule()->getDataLayout(); Offset += DL.getIndexedOffset(GEPI->getPointerOperandType(), Indices); if (!TypeHasComponent(Info.AI->getAllocatedType(), Offset, NonConstantIdxSize, DL)) MarkUnsafe(Info, GEPI); } /// isSafeMemAccess - Check if a load/store/memcpy operates on the entire AI /// alloca or has an offset and size that corresponds to a component element /// within it. The offset checked here may have been formed from a GEP with a /// pointer bitcasted to a different type. /// /// If AllowWholeAccess is true, then this allows uses of the entire alloca as a /// unit. If false, it only allows accesses known to be in a single element. void isSafeMemAccess(uint64_t Offset, uint64_t MemSize, Type *MemOpType, bool isStore, AllocaInfo &Info, Instruction *TheAccess, bool AllowWholeAccess) { // What hlsl cares is Info.hasVectorIndexing. // Do nothing here. } /// isSafePHIUseForScalarRepl - If we see a PHI node or select using a pointer /// derived from the alloca, we can often still split the alloca into elements. /// This is useful if we have a large alloca where one element is phi'd /// together somewhere: we can SRoA and promote all the other elements even if /// we end up not being able to promote this one. /// /// All we require is that the uses of the PHI do not index into other parts of /// the alloca. The most important use case for this is single load and stores /// that are PHI'd together, which can happen due to code sinking. void isSafePHISelectUseForScalarRepl(Instruction *I, uint64_t Offset, AllocaInfo &Info) { // If we've already checked this PHI, don't do it again. if (PHINode *PN = dyn_cast<PHINode>(I)) if (!Info.CheckedPHIs.insert(PN).second) return; const DataLayout &DL = I->getModule()->getDataLayout(); for (User *U : I->users()) { Instruction *UI = cast<Instruction>(U); if (BitCastInst *BC = dyn_cast<BitCastInst>(UI)) { isSafePHISelectUseForScalarRepl(BC, Offset, Info); } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(UI)) { // Only allow "bitcast" GEPs for simplicity. We could generalize this, // but would have to prove that we're staying inside of an element being // promoted. 
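// (A GEP whose indices are all zero does not move the pointer, so it acts
// as a bitcast here.)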
if (!GEPI->hasAllZeroIndices()) return MarkUnsafe(Info, UI); isSafePHISelectUseForScalarRepl(GEPI, Offset, Info); } else if (LoadInst *LI = dyn_cast<LoadInst>(UI)) { if (!LI->isSimple()) return MarkUnsafe(Info, UI); Type *LIType = LI->getType(); isSafeMemAccess(Offset, DL.getTypeAllocSize(LIType), LIType, false, Info, LI, false /*AllowWholeAccess*/); Info.hasALoadOrStore = true; } else if (StoreInst *SI = dyn_cast<StoreInst>(UI)) { // Store is ok if storing INTO the pointer, not storing the pointer if (!SI->isSimple() || SI->getOperand(0) == I) return MarkUnsafe(Info, UI); Type *SIType = SI->getOperand(0)->getType(); isSafeMemAccess(Offset, DL.getTypeAllocSize(SIType), SIType, true, Info, SI, false /*AllowWholeAccess*/); Info.hasALoadOrStore = true; } else if (isa<PHINode>(UI) || isa<SelectInst>(UI)) { isSafePHISelectUseForScalarRepl(UI, Offset, Info); } else { return MarkUnsafe(Info, UI); } if (Info.isUnsafe) return; } } /// isSafeForScalarRepl - Check if instruction I is a safe use with regard to /// performing scalar replacement of alloca AI. The results are flagged in /// the Info parameter. Offset indicates the position within AI that is /// referenced by this instruction. void isSafeForScalarRepl(Instruction *I, uint64_t Offset, AllocaInfo &Info) { if (I->getType()->isPointerTy()) { // Don't check object pointers. if (dxilutil::IsHLSLObjectType(I->getType()->getPointerElementType())) return; } const DataLayout &DL = I->getModule()->getDataLayout(); for (Use &U : I->uses()) { Instruction *User = cast<Instruction>(U.getUser()); if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) { isSafeForScalarRepl(BC, Offset, Info); } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) { uint64_t GEPOffset = Offset; isSafeGEP(GEPI, GEPOffset, Info); if (!Info.isUnsafe) isSafeForScalarRepl(GEPI, GEPOffset, Info); } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) { ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength()); if (!Length || Length->isNegative()) return MarkUnsafe(Info, User); isSafeMemAccess(Offset, Length->getZExtValue(), nullptr, U.getOperandNo() == 0, Info, MI, true /*AllowWholeAccess*/); } else if (LoadInst *LI = dyn_cast<LoadInst>(User)) { if (!LI->isSimple()) return MarkUnsafe(Info, User); Type *LIType = LI->getType(); isSafeMemAccess(Offset, DL.getTypeAllocSize(LIType), LIType, false, Info, LI, true /*AllowWholeAccess*/); Info.hasALoadOrStore = true; } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) { // Store is ok if storing INTO the pointer, not storing the pointer if (!SI->isSimple() || SI->getOperand(0) == I) return MarkUnsafe(Info, User); Type *SIType = SI->getOperand(0)->getType(); isSafeMemAccess(Offset, DL.getTypeAllocSize(SIType), SIType, true, Info, SI, true /*AllowWholeAccess*/); Info.hasALoadOrStore = true; } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(User)) { if (II->getIntrinsicID() != Intrinsic::lifetime_start && II->getIntrinsicID() != Intrinsic::lifetime_end) return MarkUnsafe(Info, User); } else if (isa<PHINode>(User) || isa<SelectInst>(User)) { isSafePHISelectUseForScalarRepl(User, Offset, Info); } else if (CallInst *CI = dyn_cast<CallInst>(User)) { HLOpcodeGroup group = GetHLOpcodeGroupByName(CI->getCalledFunction()); // Most HL functions are safe for scalar repl. if (HLOpcodeGroup::NotHL == group) return MarkUnsafe(Info, User); else if (HLOpcodeGroup::HLIntrinsic == group) { // TODO: should we check HL parameter type for UDT overload instead of // basing on IOP? 
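        // These intrinsics take user-defined aggregates (ray payloads /
        // attributes) by address, so an alloca passed to them must stay whole;
        // when such calls themselves need flattening, RewriteCallArg below
        // handles them with copy-in/copy-out through a temporary alloca.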
        IntrinsicOp opcode = static_cast<IntrinsicOp>(GetHLOpcode(CI));
        if (IntrinsicOp::IOP_TraceRay == opcode ||
            IntrinsicOp::IOP_ReportHit == opcode ||
            IntrinsicOp::IOP_CallShader == opcode) {
          return MarkUnsafe(Info, User);
        }
      }
    } else {
      return MarkUnsafe(Info, User);
    }
    if (Info.isUnsafe)
      return;
  }
}

/// HasPadding - Return true if the specified type has any structure or
/// alignment padding in between the elements that would be split apart
/// by SROA; return false otherwise.
static bool HasPadding(Type *Ty, const DataLayout &DL) {
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Ty = ATy->getElementType();
    return DL.getTypeSizeInBits(Ty) != DL.getTypeAllocSizeInBits(Ty);
  }

  // SROA currently handles only Arrays and Structs.
  StructType *STy = cast<StructType>(Ty);
  const StructLayout *SL = DL.getStructLayout(STy);
  unsigned PrevFieldBitOffset = 0;
  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
    unsigned FieldBitOffset = SL->getElementOffsetInBits(i);

    // Check to see if there is any padding between this element and the
    // previous one.
    if (i) {
      unsigned PrevFieldEnd =
          PrevFieldBitOffset + DL.getTypeSizeInBits(STy->getElementType(i - 1));
      if (PrevFieldEnd < FieldBitOffset)
        return true;
    }
    PrevFieldBitOffset = FieldBitOffset;
  }
  // Check for tail padding.
  if (unsigned EltCount = STy->getNumElements()) {
    unsigned PrevFieldEnd =
        PrevFieldBitOffset +
        DL.getTypeSizeInBits(STy->getElementType(EltCount - 1));
    if (PrevFieldEnd < SL->getSizeInBits())
      return true;
  }
  return false;
}

/// isSafeAllocaToScalarRepl - Check to see if the specified allocation of an
/// aggregate can be broken down into elements. Returns true if it is safe to
/// do so, false otherwise.
bool isSafeAllocaToScalarRepl(AllocaInst *AI) {
  // Loop over the use list of the alloca.  We can only transform it if all of
  // the users are safe to transform.
  AllocaInfo Info(AI);

  isSafeForScalarRepl(AI, 0, Info);
  if (Info.isUnsafe) {
    DEBUG(dbgs() << "Cannot transform: " << *AI << '\n');
    return false;
  }

  // Vector indexing requires translating the vector into an array first.
  if (Info.hasVectorIndexing)
    return false;

  const DataLayout &DL = AI->getModule()->getDataLayout();

  // Okay, we know all the users are promotable.  If the aggregate is a memcpy
  // source and destination, we have to be careful.  In particular, the memcpy
  // could be moving around elements that live in structure padding of the LLVM
  // types, but may actually be used.  In these cases, we refuse to promote the
  // struct.
  if (Info.isMemCpySrc && Info.isMemCpyDst &&
      HasPadding(AI->getAllocatedType(), DL))
    return false;

  return true;
}

} // namespace

namespace {

struct GVDbgOffset {
  GlobalVariable *base;
  unsigned debugOffset;
};

bool hasDynamicVectorIndexing(Value *V) {
  for (User *U : V->users()) {
    if (!U->getType()->isPointerTy())
      continue;

    if (isa<GEPOperator>(U)) {
      gep_type_iterator GEPIt = gep_type_begin(U), E = gep_type_end(U);

      for (; GEPIt != E; ++GEPIt) {
        if (isa<VectorType>(*GEPIt)) {
          Value *VecIdx = GEPIt.getOperand();
          if (!isa<ConstantInt>(VecIdx))
            return true;
        }
      }
    }

    // Also recursively check the uses of this User to find a possible
    // dynamically indexed GEP of this GEP.
if (hasDynamicVectorIndexing(U)) return true; } return false; } } // namespace namespace { void RemoveUnusedInternalGlobalVariable(Module &M) { std::vector<GlobalVariable *> staticGVs; for (GlobalVariable &GV : M.globals()) { if (dxilutil::IsStaticGlobal(&GV) || dxilutil::IsSharedMemoryGlobal(&GV)) { staticGVs.emplace_back(&GV); } } for (GlobalVariable *GV : staticGVs) { bool onlyStoreUse = true; for (User *user : GV->users()) { if (isa<StoreInst>(user)) continue; if (isa<ConstantExpr>(user) && user->user_empty()) continue; // Check matrix store. if (HLMatrixType::isa(GV->getType()->getPointerElementType())) { if (CallInst *CI = dyn_cast<CallInst>(user)) { if (GetHLOpcodeGroupByName(CI->getCalledFunction()) == HLOpcodeGroup::HLMatLoadStore) { HLMatLoadStoreOpcode opcode = static_cast<HLMatLoadStoreOpcode>(GetHLOpcode(CI)); if (opcode == HLMatLoadStoreOpcode::ColMatStore || opcode == HLMatLoadStoreOpcode::RowMatStore) continue; } } } onlyStoreUse = false; break; } if (onlyStoreUse) { for (auto UserIt = GV->user_begin(); UserIt != GV->user_end();) { Value *User = *(UserIt++); if (Instruction *I = dyn_cast<Instruction>(User)) { I->eraseFromParent(); } } GV->eraseFromParent(); } } } bool isGroupShareOrConstStaticArray(GlobalVariable *GV) { // Disable scalarization of groupshared/const_static vector arrays if (!(GV->getType()->getAddressSpace() == DXIL::kTGSMAddrSpace || (GV->isConstant() && GV->hasInitializer() && GV->getLinkage() == GlobalValue::LinkageTypes::InternalLinkage))) return false; Type *Ty = GV->getType()->getPointerElementType(); return Ty->isArrayTy(); } bool SROAGlobalAndAllocas(HLModule &HLM, bool bHasDbgInfo) { Module &M = *HLM.GetModule(); DxilTypeSystem &typeSys = HLM.GetTypeSystem(); const DataLayout &DL = M.getDataLayout(); // Make sure big alloca split first. // This will simplify memcpy check between part of big alloca and small // alloca. Big alloca will be split to smaller piece first, when process the // alloca, it will be alloca flattened from big alloca instead of a GEP of // big alloca. auto size_cmp = [&DL](const Value *a0, const Value *a1) -> bool { Type *a0ty = a0->getType()->getPointerElementType(); Type *a1ty = a1->getType()->getPointerElementType(); bool isUnitSzStruct0 = a0ty->isStructTy() && a0ty->getStructNumElements() == 1; bool isUnitSzStruct1 = a1ty->isStructTy() && a1ty->getStructNumElements() == 1; auto sz0 = DL.getTypeAllocSize(a0ty); auto sz1 = DL.getTypeAllocSize(a1ty); if (sz0 == sz1 && (isUnitSzStruct0 || isUnitSzStruct1)) { sz0 = getNestedLevelInStruct(a0ty); sz1 = getNestedLevelInStruct(a1ty); } // If sizes are equal, and the new value is a GV, // replace the existing node if it isn't GV or comes later alphabetically // Thus, entries are sorted by size, global variableness, and then name return sz0 < sz1 || (sz0 == sz1 && isa<GlobalVariable>(a1) && (!isa<GlobalVariable>(a0) || a0->getName() > a1->getName())); }; std::priority_queue<Value *, std::vector<Value *>, std::function<bool(Value *, Value *)>> WorkList(size_cmp); // Flatten internal global. llvm::SetVector<GlobalVariable *> staticGVs; DenseMap<GlobalVariable *, GVDbgOffset> GVDbgOffsetMap; for (GlobalVariable &GV : M.globals()) { if (GV.user_empty()) continue; if (dxilutil::IsStaticGlobal(&GV) || dxilutil::IsSharedMemoryGlobal(&GV)) { staticGVs.insert(&GV); GVDbgOffset &dbgOffset = GVDbgOffsetMap[&GV]; dbgOffset.base = &GV; dbgOffset.debugOffset = 0; } else { // merge GEP use for global. dxilutil::MergeGepUse(&GV); } } // Add static GVs to work list. 
for (GlobalVariable *GV : staticGVs) WorkList.push(GV); DenseMap<Function *, DominatorTree> domTreeMap; for (Function &F : M) { if (F.isDeclaration()) continue; // Collect domTree. domTreeMap[&F].recalculate(F); // Scan the entry basic block, adding allocas to the worklist. BasicBlock &BB = F.getEntryBlock(); for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I) if (AllocaInst *A = dyn_cast<AllocaInst>(I)) { if (!A->user_empty()) { WorkList.push(A); // merge GEP use for the allocs dxilutil::MergeGepUse(A); } } } // Establish debug metadata layout name in the context in advance so the name // is serialized in both debug and non-debug compilations. (void)M.getContext().getMDKindID( DxilMDHelper::kDxilVariableDebugLayoutMDName); DIBuilder DIB(M, /*AllowUnresolved*/ false); /// DeadInsts - Keep track of instructions we have made dead, so that /// we can remove them after we are done working. SmallVector<Value *, 32> DeadInsts; // Only used to create ConstantExpr. IRBuilder<> Builder(M.getContext()); std::unordered_map<Value *, StringRef> EltNameMap; bool Changed = false; while (!WorkList.empty()) { Value *V = WorkList.top(); WorkList.pop(); if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) { // Handle dead allocas trivially. These can be formed by SROA'ing arrays // with unused elements. if (AI->use_empty()) { AI->eraseFromParent(); Changed = true; continue; } Function *F = AI->getParent()->getParent(); const bool bAllowReplace = true; DominatorTree &DT = domTreeMap[F]; if (SROA_Helper::LowerMemcpy(AI, /*annotation*/ nullptr, typeSys, DL, &DT, bAllowReplace)) { if (AI->use_empty()) AI->eraseFromParent(); Changed = true; continue; } // If this alloca is impossible for us to promote, reject it early. if (AI->isArrayAllocation() || !AI->getAllocatedType()->isSized()) continue; // Check to see if we can perform the core SROA transformation. We cannot // transform the allocation instruction if it is an array allocation // (allocations OF arrays are ok though), and an allocation of a scalar // value cannot be decomposed at all. uint64_t AllocaSize = DL.getTypeAllocSize(AI->getAllocatedType()); // Do not promote [0 x %struct]. if (AllocaSize == 0) continue; Type *Ty = AI->getAllocatedType(); // Skip empty struct type. if (SROA_Helper::IsEmptyStructType(Ty, typeSys)) { SROA_Helper::MarkEmptyStructUsers(AI, DeadInsts); DeleteDeadInstructions(DeadInsts); continue; } if (Value *NewV = TranslatePtrIfUsedByLoweredFn(AI, typeSys)) { if (NewV != AI) { DXASSERT(AI->getNumUses() == 0, "must have zero users."); // Update debug declare. SmallVector<DbgDeclareInst *, 4> dbgDecls; llvm::FindAllocaDbgDeclare(AI, dbgDecls); for (DbgDeclareInst *DDI : dbgDecls) { DDI->setArgOperand( 0, MetadataAsValue::get(NewV->getContext(), ValueAsMetadata::get(NewV))); } AI->eraseFromParent(); Changed = true; } continue; } // If the alloca looks like a good candidate for scalar replacement, and // if // all its users can be transformed, then split up the aggregate into its // separate elements. if (ShouldAttemptScalarRepl(AI) && isSafeAllocaToScalarRepl(AI)) { std::vector<Value *> Elts; IRBuilder<> Builder(dxilutil::FindAllocaInsertionPt(AI)); bool hasPrecise = HLModule::HasPreciseAttributeWithMetadata(AI); Type *BrokenUpTy = nullptr; uint64_t NumInstances = 1; bool SROAed = SROA_Helper::DoScalarReplacement( AI, Elts, BrokenUpTy, NumInstances, Builder, /*bFlatVector*/ true, hasPrecise, typeSys, DL, DeadInsts, &DT); if (SROAed) { Type *Ty = AI->getAllocatedType(); // Skip empty struct parameters. 
if (StructType *ST = dyn_cast<StructType>(Ty)) { if (!HLMatrixType::isa(Ty)) { DxilStructAnnotation *SA = typeSys.GetStructAnnotation(ST); if (SA && SA->IsEmptyStruct()) { for (User *U : AI->users()) { if (StoreInst *SI = dyn_cast<StoreInst>(U)) DeadInsts.emplace_back(SI); } DeleteDeadInstructions(DeadInsts); AI->replaceAllUsesWith(UndefValue::get(AI->getType())); AI->eraseFromParent(); continue; } } } addDebugInfoForElements(AI, BrokenUpTy, NumInstances, Elts, DL, &DIB); // Push Elts into workList. for (unsigned EltIdx = 0; EltIdx < Elts.size(); ++EltIdx) { AllocaInst *EltAlloca = cast<AllocaInst>(Elts[EltIdx]); WorkList.push(EltAlloca); } // Now erase any instructions that were made dead while rewriting the // alloca. DeleteDeadInstructions(DeadInsts); ++NumReplaced; DXASSERT(AI->getNumUses() == 0, "must have zero users."); AI->eraseFromParent(); Changed = true; continue; } } } else { GlobalVariable *GV = cast<GlobalVariable>(V); // Handle dead GVs trivially. These can be formed by RAUWing one GV // with another, leaving the original in the worklist if (GV->use_empty()) { staticGVs.remove(GV); GV->eraseFromParent(); Changed = true; continue; } if (staticGVs.count(GV)) { Type *Ty = GV->getType()->getPointerElementType(); // Skip basic types. if (!Ty->isAggregateType() && !Ty->isVectorTy()) continue; // merge GEP use for global. dxilutil::MergeGepUse(GV); } const bool bAllowReplace = true; // SROA_Parameter_HLSL has no access to a domtree, if one is needed, it'll // be generated if (SROA_Helper::LowerMemcpy(GV, /*annotation*/ nullptr, typeSys, DL, nullptr /*DT */, bAllowReplace)) { continue; } // Flat Global vector if no dynamic vector indexing. bool bFlatVector = !hasDynamicVectorIndexing(GV); if (bFlatVector) { GVDbgOffset &dbgOffset = GVDbgOffsetMap[GV]; GlobalVariable *baseGV = dbgOffset.base; // Disable scalarization of groupshared/const_static vector arrays if (isGroupShareOrConstStaticArray(baseGV)) bFlatVector = false; } std::vector<Value *> Elts; bool SROAed = false; if (GlobalVariable *NewEltGV = dyn_cast_or_null<GlobalVariable>( TranslatePtrIfUsedByLoweredFn(GV, typeSys))) { GVDbgOffset dbgOffset = GVDbgOffsetMap[GV]; // Don't need to update when skip SROA on base GV. if (NewEltGV == dbgOffset.base) continue; if (GV != NewEltGV) { GVDbgOffsetMap[NewEltGV] = dbgOffset; // Remove GV from GVDbgOffsetMap. GVDbgOffsetMap.erase(GV); if (GV != dbgOffset.base) { // Remove GV when it is replaced by NewEltGV and is not a base GV. GV->removeDeadConstantUsers(); GV->eraseFromParent(); staticGVs.remove(GV); } GV = NewEltGV; } } else { // SROA_Parameter_HLSL has no access to a domtree, if one is needed, // it'll be generated SROAed = SROA_Helper::DoScalarReplacement( GV, Elts, Builder, bFlatVector, // TODO: set precise. /*hasPrecise*/ false, typeSys, DL, DeadInsts, /*DT*/ nullptr); } if (SROAed) { GVDbgOffset dbgOffset = GVDbgOffsetMap[GV]; unsigned offset = 0; // Push Elts into workList. 
for (auto iter = Elts.begin(); iter != Elts.end(); iter++) { WorkList.push(*iter); GlobalVariable *EltGV = cast<GlobalVariable>(*iter); if (bHasDbgInfo) { StringRef OriginEltName = EltGV->getName(); StringRef OriginName = dbgOffset.base->getName(); StringRef EltName = OriginEltName.substr(OriginName.size()); StringRef EltParentName = OriginEltName.substr(0, OriginName.size()); DXASSERT_LOCALVAR(EltParentName, EltParentName == OriginName, "parent name mismatch"); EltNameMap[EltGV] = EltName; } GVDbgOffset &EltDbgOffset = GVDbgOffsetMap[EltGV]; EltDbgOffset.base = dbgOffset.base; EltDbgOffset.debugOffset = dbgOffset.debugOffset + offset; unsigned size = DL.getTypeAllocSizeInBits(EltGV->getType()->getElementType()); offset += size; } GV->removeDeadConstantUsers(); // Now erase any instructions that were made dead while rewriting the // alloca. DeleteDeadInstructions(DeadInsts); ++NumReplaced; } else { // Add debug info for flattened globals. if (bHasDbgInfo && staticGVs.count(GV) == 0) { GVDbgOffset &dbgOffset = GVDbgOffsetMap[GV]; DebugInfoFinder &Finder = HLM.GetOrCreateDebugInfoFinder(); Type *Ty = GV->getType()->getElementType(); unsigned size = DL.getTypeAllocSizeInBits(Ty); unsigned align = DL.getPrefTypeAlignment(Ty); HLModule::CreateElementGlobalVariableDebugInfo( dbgOffset.base, Finder, GV, size, align, dbgOffset.debugOffset, EltNameMap[GV]); } } // Remove GV from GVDbgOffsetMap. GVDbgOffsetMap.erase(GV); } } // Remove unused internal global. RemoveUnusedInternalGlobalVariable(M); // Cleanup memcpy for allocas and mark precise. for (Function &F : M) { if (F.isDeclaration()) continue; Cleanup(F, typeSys); } return true; } } // namespace //===----------------------------------------------------------------------===// // SRoA Helper //===----------------------------------------------------------------------===// /// RewriteGEP - Rewrite the GEP to be relative to new element when can find a /// new element which is struct field. If cannot find, create new element GEPs /// and try to rewrite GEP with new GEPS. void SROA_Helper::RewriteForGEP(GEPOperator *GEP, IRBuilder<> &Builder) { assert(OldVal == GEP->getPointerOperand() && ""); Value *NewPointer = nullptr; SmallVector<Value *, 8> NewArgs; gep_type_iterator GEPIt = gep_type_begin(GEP), E = gep_type_end(GEP); for (; GEPIt != E; ++GEPIt) { if (GEPIt->isStructTy()) { // must be const ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPIt.getOperand()); assert(IdxVal->getLimitedValue() < NewElts.size() && ""); NewPointer = NewElts[IdxVal->getLimitedValue()]; // The idx is used for NewPointer, not part of newGEP idx, GEPIt++; break; } else if (GEPIt->isArrayTy()) { // Add array idx. NewArgs.push_back(GEPIt.getOperand()); } else if (GEPIt->isPointerTy()) { // Add pointer idx. NewArgs.push_back(GEPIt.getOperand()); } else if (GEPIt->isVectorTy()) { // Add vector idx. NewArgs.push_back(GEPIt.getOperand()); } else { llvm_unreachable("should break from structTy"); } } if (NewPointer) { // Struct split. // Add rest of idx. for (; GEPIt != E; ++GEPIt) { NewArgs.push_back(GEPIt.getOperand()); } // If only 1 level struct, just use the new pointer. Value *NewGEP = NewPointer; if (NewArgs.size() > 1) { NewGEP = Builder.CreateInBoundsGEP(NewPointer, NewArgs); NewGEP->takeName(GEP); } assert(NewGEP->getType() == GEP->getType() && "type mismatch"); GEP->replaceAllUsesWith(NewGEP); } else { // End at array of basic type. 
Type *Ty = GEP->getType()->getPointerElementType(); if (Ty->isVectorTy() || (Ty->isStructTy() && !dxilutil::IsHLSLObjectType(Ty)) || Ty->isArrayTy()) { SmallVector<Value *, 8> NewArgs; NewArgs.append(GEP->idx_begin(), GEP->idx_end()); SmallVector<Value *, 8> NewGEPs; // create new geps for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { Value *NewGEP = Builder.CreateGEP(nullptr, NewElts[i], NewArgs); NewGEPs.emplace_back(NewGEP); } const bool bAllowReplace = isa<AllocaInst>(OldVal); if (!SROA_Helper::LowerMemcpy(GEP, /*annotation*/ nullptr, typeSys, DL, DT, bAllowReplace)) { SROA_Helper helper(GEP, NewGEPs, DeadInsts, typeSys, DL, DT); helper.RewriteForScalarRepl(GEP, Builder); for (Value *NewGEP : NewGEPs) { if (NewGEP->user_empty() && isa<Instruction>(NewGEP)) { // Delete unused newGEP. cast<Instruction>(NewGEP)->eraseFromParent(); } } } } else { Value *vecIdx = NewArgs.back(); if (ConstantInt *immVecIdx = dyn_cast<ConstantInt>(vecIdx)) { // Replace vecArray[arrayIdx][immVecIdx] // with scalarArray_immVecIdx[arrayIdx] // Pop the vecIdx. NewArgs.pop_back(); Value *NewGEP = NewElts[immVecIdx->getLimitedValue()]; if (NewArgs.size() > 1) { NewGEP = Builder.CreateInBoundsGEP(NewGEP, NewArgs); NewGEP->takeName(GEP); } assert(NewGEP->getType() == GEP->getType() && "type mismatch"); GEP->replaceAllUsesWith(NewGEP); } else { // dynamic vector indexing. assert(0 && "should not reach here"); } } } // Remove the use so that the caller can keep iterating over its other users DXASSERT(GEP->user_empty(), "All uses of the GEP should have been eliminated"); if (isa<Instruction>(GEP)) { GEP->setOperand(GEP->getPointerOperandIndex(), UndefValue::get(GEP->getPointerOperand()->getType())); DeadInsts.push_back(GEP); } else { cast<Constant>(GEP)->destroyConstant(); } } /// isVectorOrStructArray - Check if T is array of vector or struct. static bool isVectorOrStructArray(Type *T) { if (!T->isArrayTy()) return false; T = dxilutil::GetArrayEltTy(T); return T->isStructTy() || T->isVectorTy(); } static void SimplifyStructValUsage(Value *StructVal, std::vector<Value *> Elts, SmallVectorImpl<Value *> &DeadInsts) { for (User *user : StructVal->users()) { if (ExtractValueInst *Extract = dyn_cast<ExtractValueInst>(user)) { DXASSERT(Extract->getNumIndices() == 1, "only support 1 index case"); unsigned index = Extract->getIndices()[0]; Value *Elt = Elts[index]; Extract->replaceAllUsesWith(Elt); DeadInsts.emplace_back(Extract); } else if (InsertValueInst *Insert = dyn_cast<InsertValueInst>(user)) { DXASSERT(Insert->getNumIndices() == 1, "only support 1 index case"); unsigned index = Insert->getIndices()[0]; if (Insert->getAggregateOperand() == StructVal) { // Update field. std::vector<Value *> NewElts = Elts; NewElts[index] = Insert->getInsertedValueOperand(); SimplifyStructValUsage(Insert, NewElts, DeadInsts); } else { // Insert to another bigger struct. IRBuilder<> Builder(Insert); Value *TmpStructVal = UndefValue::get(StructVal->getType()); for (unsigned i = 0; i < Elts.size(); i++) { TmpStructVal = Builder.CreateInsertValue(TmpStructVal, Elts[i], {i}); } Insert->replaceUsesOfWith(StructVal, TmpStructVal); } } } } /// RewriteForLoad - Replace OldVal with flattened NewElts in LoadInst. 
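/// Vector loads are rebuilt from per-element loads plus insertelement; struct
/// and array loads from per-element loads plus insertvalue (arrays of vectors
/// or structs go through LoadVectorOrStructArray); matrix-typed elements are
/// loaded via the HLMatLoadStore RowMatLoad intrinsic rather than a plain load.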
void SROA_Helper::RewriteForLoad(LoadInst *LI) { Type *LIType = LI->getType(); Type *ValTy = OldVal->getType()->getPointerElementType(); IRBuilder<> Builder(LI); if (LIType->isVectorTy()) { // Replace: // %res = load { 2 x i32 }* %alloc // with: // %load.0 = load i32* %alloc.0 // %insert.0 insertvalue { 2 x i32 } zeroinitializer, i32 %load.0, 0 // %load.1 = load i32* %alloc.1 // %insert = insertvalue { 2 x i32 } %insert.0, i32 %load.1, 1 Value *Insert = UndefValue::get(LIType); for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { Value *Load = Builder.CreateLoad(NewElts[i], "load"); Insert = Builder.CreateInsertElement(Insert, Load, i, "insert"); } LI->replaceAllUsesWith(Insert); } else if (isCompatibleAggregate(LIType, ValTy)) { if (isVectorOrStructArray(LIType)) { // Replace: // %res = load [2 x <2 x float>] * %alloc // with: // %load.0 = load [4 x float]* %alloc.0 // %insert.0 insertvalue [4 x float] zeroinitializer,i32 %load.0,0 // %load.1 = load [4 x float]* %alloc.1 // %insert = insertvalue [4 x float] %insert.0, i32 %load.1, 1 // ... Type *i32Ty = Type::getInt32Ty(LIType->getContext()); Value *zero = ConstantInt::get(i32Ty, 0); SmallVector<Value *, 8> idxList; idxList.emplace_back(zero); Value *newLd = LoadVectorOrStructArray(cast<ArrayType>(LIType), NewElts, idxList, Builder); LI->replaceAllUsesWith(newLd); } else { // Replace: // %res = load { i32, i32 }* %alloc // with: // %load.0 = load i32* %alloc.0 // %insert.0 insertvalue { i32, i32 } zeroinitializer, i32 %load.0, // 0 // %load.1 = load i32* %alloc.1 // %insert = insertvalue { i32, i32 } %insert.0, i32 %load.1, 1 // (Also works for arrays instead of structs) Module *M = LI->getModule(); Value *Insert = UndefValue::get(LIType); std::vector<Value *> LdElts(NewElts.size()); for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { Value *Ptr = NewElts[i]; Type *Ty = Ptr->getType()->getPointerElementType(); Value *Load = nullptr; if (!HLMatrixType::isa(Ty)) Load = Builder.CreateLoad(Ptr, "load"); else { // Generate Matrix Load. Load = HLModule::EmitHLOperationCall( Builder, HLOpcodeGroup::HLMatLoadStore, static_cast<unsigned>(HLMatLoadStoreOpcode::RowMatLoad), Ty, {Ptr}, *M); } LdElts[i] = Load; Insert = Builder.CreateInsertValue(Insert, Load, i, "insert"); } LI->replaceAllUsesWith(Insert); if (LIType->isStructTy()) { SimplifyStructValUsage(Insert, LdElts, DeadInsts); } } } else { llvm_unreachable("other type don't need rewrite"); } // Remove the use so that the caller can keep iterating over its other users LI->setOperand(LI->getPointerOperandIndex(), UndefValue::get(LI->getPointerOperand()->getType())); DeadInsts.push_back(LI); } /// RewriteForStore - Replace OldVal with flattened NewElts in StoreInst. 
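/// Vector stores become extractelement plus per-element stores; struct and
/// array stores become extractvalue plus per-element stores (arrays of vectors
/// or structs go through StoreVectorOrStructArray); matrix-typed elements are
/// stored via the HLMatLoadStore RowMatStore intrinsic.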
void SROA_Helper::RewriteForStore(StoreInst *SI) { Value *Val = SI->getOperand(0); Type *SIType = Val->getType(); IRBuilder<> Builder(SI); Type *ValTy = OldVal->getType()->getPointerElementType(); if (SIType->isVectorTy()) { // Replace: // store <2 x float> %val, <2 x float>* %alloc // with: // %val.0 = extractelement { 2 x float } %val, 0 // store i32 %val.0, i32* %alloc.0 // %val.1 = extractelement { 2 x float } %val, 1 // store i32 %val.1, i32* %alloc.1 for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { Value *Extract = Builder.CreateExtractElement(Val, i, Val->getName()); Builder.CreateStore(Extract, NewElts[i]); } } else if (isCompatibleAggregate(SIType, ValTy)) { if (isVectorOrStructArray(SIType)) { // Replace: // store [2 x <2 x i32>] %val, [2 x <2 x i32>]* %alloc, align 16 // with: // %val.0 = extractvalue [2 x <2 x i32>] %val, 0 // %all0c.0.0 = getelementptr inbounds [2 x i32], [2 x i32]* %alloc.0, // i32 0, i32 0 // %val.0.0 = extractelement <2 x i32> %243, i64 0 // store i32 %val.0.0, i32* %all0c.0.0 // %alloc.1.0 = getelementptr inbounds [2 x i32], [2 x i32]* %alloc.1, // i32 0, i32 0 // %val.0.1 = extractelement <2 x i32> %243, i64 1 // store i32 %val.0.1, i32* %alloc.1.0 // %val.1 = extractvalue [2 x <2 x i32>] %val, 1 // %alloc.0.0 = getelementptr inbounds [2 x i32], [2 x i32]* %alloc.0, // i32 0, i32 1 // %val.1.0 = extractelement <2 x i32> %248, i64 0 // store i32 %val.1.0, i32* %alloc.0.0 // %all0c.1.1 = getelementptr inbounds [2 x i32], [2 x i32]* %alloc.1, // i32 0, i32 1 // %val.1.1 = extractelement <2 x i32> %248, i64 1 // store i32 %val.1.1, i32* %all0c.1.1 ArrayType *AT = cast<ArrayType>(SIType); Type *i32Ty = Type::getInt32Ty(SIType->getContext()); Value *zero = ConstantInt::get(i32Ty, 0); SmallVector<Value *, 8> idxList; idxList.emplace_back(zero); StoreVectorOrStructArray(AT, Val, NewElts, idxList, Builder); } else { // Replace: // store { i32, i32 } %val, { i32, i32 }* %alloc // with: // %val.0 = extractvalue { i32, i32 } %val, 0 // store i32 %val.0, i32* %alloc.0 // %val.1 = extractvalue { i32, i32 } %val, 1 // store i32 %val.1, i32* %alloc.1 // (Also works for arrays instead of structs) Module *M = SI->getModule(); for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { Value *Extract = Builder.CreateExtractValue(Val, i, Val->getName()); if (!HLMatrixType::isa(Extract->getType())) { Builder.CreateStore(Extract, NewElts[i]); } else { // Generate Matrix Store. HLModule::EmitHLOperationCall( Builder, HLOpcodeGroup::HLMatLoadStore, static_cast<unsigned>(HLMatLoadStoreOpcode::RowMatStore), Extract->getType(), {NewElts[i], Extract}, *M); } } } } else { llvm_unreachable("other type don't need rewrite"); } // Remove the use so that the caller can keep iterating over its other users SI->setOperand(SI->getPointerOperandIndex(), UndefValue::get(SI->getPointerOperand()->getType())); DeadInsts.push_back(SI); } /// RewriteMemIntrin - MI is a memcpy/memset/memmove from or to AI. /// Rewrite it to copy or set the elements of the scalarized memory. void SROA_Helper::RewriteMemIntrin(MemIntrinsic *MI, Value *OldV) { // If this is a memcpy/memmove, construct the other pointer as the // appropriate type. The "Other" pointer is the pointer that goes to memory // that doesn't have anything to do with the alloca that we are promoting. For // memset, this Value* stays null. 
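  // The intrinsic is rewritten element by element: single-value elements are
  // copied with an explicit load/store (or a rebuilt constant store for a
  // constant memset), and the remaining cases fall back to smaller
  // memcpy/memmove/memset calls whose alignment is MinAlign of the original
  // alignment and the element's offset.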
Value *OtherPtr = nullptr; unsigned MemAlignment = MI->getAlignment(); if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { // memmove/memcopy if (OldV == MTI->getRawDest()) OtherPtr = MTI->getRawSource(); else { assert(OldV == MTI->getRawSource()); OtherPtr = MTI->getRawDest(); } } // If there is an other pointer, we want to convert it to the same pointer // type as AI has, so we can GEP through it safely. if (OtherPtr) { unsigned AddrSpace = cast<PointerType>(OtherPtr->getType())->getAddressSpace(); // Remove bitcasts and all-zero GEPs from OtherPtr. This is an // optimization, but it's also required to detect the corner case where // both pointer operands are referencing the same memory, and where // OtherPtr may be a bitcast or GEP that currently being rewritten. (This // function is only called for mem intrinsics that access the whole // aggregate, so non-zero GEPs are not an issue here.) OtherPtr = OtherPtr->stripPointerCasts(); // Copying the alloca to itself is a no-op: just delete it. if (OtherPtr == OldVal || OtherPtr == NewElts[0]) { // This code will run twice for a no-op memcpy -- once for each operand. // Put only one reference to MI on the DeadInsts list. for (SmallVectorImpl<Value *>::const_iterator I = DeadInsts.begin(), E = DeadInsts.end(); I != E; ++I) if (*I == MI) return; // Remove the uses so that the caller can keep iterating over its other // users MI->setOperand(0, UndefValue::get(MI->getOperand(0)->getType())); MI->setOperand(1, UndefValue::get(MI->getOperand(1)->getType())); DeadInsts.push_back(MI); return; } // If the pointer is not the right type, insert a bitcast to the right // type. Type *NewTy = PointerType::get(OldVal->getType()->getPointerElementType(), AddrSpace); if (OtherPtr->getType() != NewTy) OtherPtr = new BitCastInst(OtherPtr, NewTy, OtherPtr->getName(), MI); } // Process each element of the aggregate. bool SROADest = MI->getRawDest() == OldV; Constant *Zero = Constant::getNullValue(Type::getInt32Ty(MI->getContext())); const DataLayout &DL = MI->getModule()->getDataLayout(); for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { // If this is a memcpy/memmove, emit a GEP of the other element address. Value *OtherElt = nullptr; unsigned OtherEltAlign = MemAlignment; if (OtherPtr) { Value *Idx[2] = {Zero, ConstantInt::get(Type::getInt32Ty(MI->getContext()), i)}; OtherElt = GetElementPtrInst::CreateInBounds( OtherPtr, Idx, OtherPtr->getName() + "." + Twine(i), MI); uint64_t EltOffset; PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType()); Type *OtherTy = OtherPtrTy->getElementType(); if (StructType *ST = dyn_cast<StructType>(OtherTy)) { EltOffset = DL.getStructLayout(ST)->getElementOffset(i); } else { Type *EltTy = cast<SequentialType>(OtherTy)->getElementType(); EltOffset = DL.getTypeAllocSize(EltTy) * i; } // The alignment of the other pointer is the guaranteed alignment of the // element, which is affected by both the known alignment of the whole // mem intrinsic and the alignment of the element. If the alignment of // the memcpy (f.e.) is 32 but the element is at a 4-byte offset, then the // known alignment is just 4 bytes. OtherEltAlign = (unsigned)MinAlign(OtherEltAlign, EltOffset); } Value *EltPtr = NewElts[i]; Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType(); // If we got down to a scalar, insert a load or store as appropriate. if (EltTy->isSingleValueType()) { if (isa<MemTransferInst>(MI)) { if (SROADest) { // From Other to Alloca. 
Value *Elt = new LoadInst(OtherElt, "tmp", false, OtherEltAlign, MI); new StoreInst(Elt, EltPtr, MI); } else { // From Alloca to Other. Value *Elt = new LoadInst(EltPtr, "tmp", MI); new StoreInst(Elt, OtherElt, false, OtherEltAlign, MI); } continue; } assert(isa<MemSetInst>(MI)); // If the stored element is zero (common case), just store a null // constant. Constant *StoreVal; if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getArgOperand(1))) { if (CI->isZero()) { StoreVal = Constant::getNullValue(EltTy); // 0.0, null, 0, <0,0> } else { // If EltTy is a vector type, get the element type. Type *ValTy = EltTy->getScalarType(); // Construct an integer with the right value. unsigned EltSize = DL.getTypeSizeInBits(ValTy); APInt OneVal(EltSize, CI->getZExtValue()); APInt TotalVal(OneVal); // Set each byte. for (unsigned i = 0; 8 * i < EltSize; ++i) { TotalVal = TotalVal.shl(8); TotalVal |= OneVal; } // Convert the integer value to the appropriate type. StoreVal = ConstantInt::get(CI->getContext(), TotalVal); if (ValTy->isPointerTy()) StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy); else if (ValTy->isFloatingPointTy()) StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy); assert(StoreVal->getType() == ValTy && "Type mismatch!"); // If the requested value was a vector constant, create it. if (EltTy->isVectorTy()) { unsigned NumElts = cast<VectorType>(EltTy)->getNumElements(); StoreVal = ConstantVector::getSplat(NumElts, StoreVal); } } new StoreInst(StoreVal, EltPtr, MI); continue; } // Otherwise, if we're storing a byte variable, use a memset call for // this element. } unsigned EltSize = DL.getTypeAllocSize(EltTy); if (!EltSize) continue; IRBuilder<> Builder(MI); // Finally, insert the meminst for this element. if (isa<MemSetInst>(MI)) { Builder.CreateMemSet(EltPtr, MI->getArgOperand(1), EltSize, MI->isVolatile()); } else { assert(isa<MemTransferInst>(MI)); Value *Dst = SROADest ? EltPtr : OtherElt; // Dest ptr Value *Src = SROADest ? OtherElt : EltPtr; // Src ptr if (isa<MemCpyInst>(MI)) Builder.CreateMemCpy(Dst, Src, EltSize, OtherEltAlign, MI->isVolatile()); else Builder.CreateMemMove(Dst, Src, EltSize, OtherEltAlign, MI->isVolatile()); } } // Remove the use so that the caller can keep iterating over its other users MI->setOperand(0, UndefValue::get(MI->getOperand(0)->getType())); if (isa<MemTransferInst>(MI)) MI->setOperand(1, UndefValue::get(MI->getOperand(1)->getType())); DeadInsts.push_back(MI); } void SROA_Helper::RewriteBitCast(BitCastInst *BCI) { // Unused bitcast may be leftover from temporary memcpy if (BCI->use_empty()) { BCI->eraseFromParent(); return; } Type *DstTy = BCI->getType(); Value *Val = BCI->getOperand(0); Type *SrcTy = Val->getType(); if (!DstTy->isPointerTy()) { assert(0 && "Type mismatch."); return; } if (!SrcTy->isPointerTy()) { assert(0 && "Type mismatch."); return; } DstTy = DstTy->getPointerElementType(); SrcTy = SrcTy->getPointerElementType(); if (!DstTy->isStructTy()) { // This is an llvm.lifetime.* intrinsic. Replace bitcast by a bitcast for // each element. 
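    // Each lifetime marker is re-emitted once per new element, with the size
    // recomputed from the element's alloc size and the pointer re-cast to i8*.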
SmallVector<IntrinsicInst *, 16> ToReplace; DXASSERT(onlyUsedByLifetimeMarkers(BCI), "expected struct bitcast to only be used by lifetime intrinsics"); for (User *User : BCI->users()) { IntrinsicInst *Intrin = cast<IntrinsicInst>(User); ToReplace.push_back(Intrin); } const DataLayout &DL = BCI->getModule()->getDataLayout(); for (IntrinsicInst *Intrin : ToReplace) { IRBuilder<> Builder(Intrin); for (Value *Elt : NewElts) { assert(Elt->getType()->isPointerTy()); Type *ElPtrTy = Elt->getType(); Type *ElTy = ElPtrTy->getPointerElementType(); Value *SizeV = Builder.getInt64(DL.getTypeAllocSize(ElTy)); Value *Ptr = Builder.CreateBitCast(Elt, Builder.getInt8PtrTy()); Value *Args[] = {SizeV, Ptr}; CallInst *C = Builder.CreateCall(Intrin->getCalledFunction(), Args); C->setDoesNotThrow(); } assert(Intrin->use_empty()); Intrin->eraseFromParent(); } assert(BCI->use_empty()); BCI->eraseFromParent(); return; } if (!SrcTy->isStructTy()) { assert(0 && "Type mismatch."); return; } // Only support bitcast to parent struct type. StructType *DstST = cast<StructType>(DstTy); StructType *SrcST = cast<StructType>(SrcTy); bool bTypeMatch = false; unsigned level = 0; while (SrcST) { level++; Type *EltTy = SrcST->getElementType(0); if (EltTy == DstST) { bTypeMatch = true; break; } SrcST = dyn_cast<StructType>(EltTy); } if (!bTypeMatch) { // If the layouts match, just replace the type SrcST = cast<StructType>(SrcTy); if (SrcST->isLayoutIdentical(DstST)) { BCI->mutateType(Val->getType()); BCI->replaceAllUsesWith(Val); BCI->eraseFromParent(); return; } assert(0 && "Type mismatch."); return; } std::vector<Value *> idxList(level + 1); ConstantInt *zeroIdx = ConstantInt::get(Type::getInt32Ty(Val->getContext()), 0); for (unsigned i = 0; i < (level + 1); i++) idxList[i] = zeroIdx; IRBuilder<> Builder(BCI); Builder.AllowFolding = false; // We need an Instruction, so make sure we don't get a constant Instruction *GEP = cast<Instruction>(Builder.CreateInBoundsGEP(Val, idxList)); BCI->replaceAllUsesWith(GEP); BCI->eraseFromParent(); IRBuilder<> GEPBuilder(GEP); RewriteForGEP(cast<GEPOperator>(GEP), GEPBuilder); } /// RewriteCallArg - For Functions which don't flat, /// replace OldVal with alloca and /// copy in copy out data between alloca and flattened NewElts /// in CallInst. void SROA_Helper::RewriteCallArg(CallInst *CI, unsigned ArgIdx, bool bIn, bool bOut) { Function *F = CI->getParent()->getParent(); IRBuilder<> AllocaBuilder(dxilutil::FindAllocaInsertionPt(F)); const DataLayout &DL = F->getParent()->getDataLayout(); Value *userTyV = CI->getArgOperand(ArgIdx); PointerType *userTy = cast<PointerType>(userTyV->getType()); Type *userTyElt = userTy->getElementType(); Value *Alloca = AllocaBuilder.CreateAlloca(userTyElt); IRBuilder<> Builder(CI); if (bIn) { MemCpyInst *cpy = cast<MemCpyInst>(Builder.CreateMemCpy( Alloca, userTyV, DL.getTypeAllocSize(userTyElt), false)); RewriteMemIntrin(cpy, cpy->getRawSource()); } CI->setArgOperand(ArgIdx, Alloca); if (bOut) { Builder.SetInsertPoint(CI->getNextNode()); MemCpyInst *cpy = cast<MemCpyInst>(Builder.CreateMemCpy( userTyV, Alloca, DL.getTypeAllocSize(userTyElt), false)); RewriteMemIntrin(cpy, cpy->getRawSource()); } } // Flatten matching OldVal arg to NewElts, optionally loading values (loadElts). // Does not replace or clean up old CallInst. 
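// A new HL intrinsic function is obtained via GetOrCreateHLFunction with a
// signature matching the flattened operand list; when loadElts is set, pointer
// elements are loaded first so the call receives values instead of pointers.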
static CallInst *CreateFlattenedHLIntrinsicCall(CallInst *CI, Value *OldVal, ArrayRef<Value *> NewElts, bool loadElts) { HLOpcodeGroup group = GetHLOpcodeGroupByName(CI->getCalledFunction()); Function *F = CI->getCalledFunction(); DXASSERT_NOMSG(group == HLOpcodeGroup::HLIntrinsic); unsigned opcode = GetHLOpcode(CI); IRBuilder<> Builder(CI); SmallVector<Value *, 4> flatArgs; for (Value *arg : CI->arg_operands()) { if (arg == OldVal) { for (Value *Elt : NewElts) { if (loadElts && Elt->getType()->isPointerTy()) Elt = Builder.CreateLoad(Elt); flatArgs.emplace_back(Elt); } } else flatArgs.emplace_back(arg); } SmallVector<Type *, 4> flatParamTys; for (Value *arg : flatArgs) flatParamTys.emplace_back(arg->getType()); FunctionType *flatFuncTy = FunctionType::get(CI->getType(), flatParamTys, false); Function *flatF = GetOrCreateHLFunction(*F->getParent(), flatFuncTy, group, opcode, F->getAttributes().getFnAttributes()); return Builder.CreateCall(flatF, flatArgs); } static CallInst *RewriteWithFlattenedHLIntrinsicCall(CallInst *CI, Value *OldVal, ArrayRef<Value *> NewElts, bool loadElts) { CallInst *flatCI = CreateFlattenedHLIntrinsicCall(CI, OldVal, NewElts, /*loadElts*/ loadElts); CI->replaceAllUsesWith(flatCI); // Clear CI operands so we don't try to translate old call again for (auto &opit : CI->operands()) opit.set(UndefValue::get(opit->getType())); return flatCI; } /// RewriteCall - Replace OldVal with flattened NewElts in CallInst. void SROA_Helper::RewriteCall(CallInst *CI) { HLOpcodeGroup group = GetHLOpcodeGroupByName(CI->getCalledFunction()); if (group != HLOpcodeGroup::NotHL) { unsigned opcode = GetHLOpcode(CI); if (group == HLOpcodeGroup::HLIntrinsic) { IntrinsicOp IOP = static_cast<IntrinsicOp>(opcode); switch (IOP) { case IntrinsicOp::MOP_Append: { // Buffer Append already expand in code gen. // Must be OutputStream Append here. // Every Elt has a pointer type. // For Append, this is desired, so don't load. RewriteWithFlattenedHLIntrinsicCall(CI, OldVal, NewElts, /*loadElts*/ false); DeadInsts.push_back(CI); } break; case IntrinsicOp::IOP_TraceRay: { if (OldVal == CI->getArgOperand(HLOperandIndex::kTraceRayRayDescOpIdx)) { RewriteCallArg(CI, HLOperandIndex::kTraceRayRayDescOpIdx, /*bIn*/ true, /*bOut*/ false); } else { DXASSERT(OldVal == CI->getArgOperand(HLOperandIndex::kTraceRayPayLoadOpIdx), "else invalid TraceRay"); RewriteCallArg(CI, HLOperandIndex::kTraceRayPayLoadOpIdx, /*bIn*/ true, /*bOut*/ true); } } break; case IntrinsicOp::IOP_ReportHit: { RewriteCallArg(CI, HLOperandIndex::kReportIntersectionAttributeOpIdx, /*bIn*/ true, /*bOut*/ false); } break; case IntrinsicOp::IOP_CallShader: { RewriteCallArg(CI, HLOperandIndex::kCallShaderPayloadOpIdx, /*bIn*/ true, /*bOut*/ true); } break; case IntrinsicOp::MOP_TraceRayInline: { if (OldVal == CI->getArgOperand(HLOperandIndex::kTraceRayInlineRayDescOpIdx)) { RewriteWithFlattenedHLIntrinsicCall(CI, OldVal, NewElts, /*loadElts*/ true); DeadInsts.push_back(CI); break; } } LLVM_FALLTHROUGH; default: // RayQuery this pointer replacement. if (OldVal->getType()->isPointerTy() && CI->getNumArgOperands() >= HLOperandIndex::kHandleOpIdx && OldVal == CI->getArgOperand(HLOperandIndex::kHandleOpIdx) && dxilutil::IsHLSLRayQueryType( OldVal->getType()->getPointerElementType())) { // For RayQuery methods, we want to replace the RayQuery this pointer // with a load and use of the underlying handle value. // This will allow elimination of RayQuery types earlier. 
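        // loadElts is true here so the flattened call receives the loaded
        // handle value rather than a pointer to it.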
RewriteWithFlattenedHLIntrinsicCall(CI, OldVal, NewElts, /*loadElts*/ true); DeadInsts.push_back(CI); break; } DXASSERT(0, "cannot flatten hlsl intrinsic."); } } // TODO: check other high level dx operations if need to. } else { DXASSERT(0, "should done at inline"); } } /// RewriteForAddrSpaceCast - Rewrite the AddrSpaceCast, either ConstExpr or /// Inst. void SROA_Helper::RewriteForAddrSpaceCast(Value *CE, IRBuilder<> &Builder) { SmallVector<Value *, 8> NewCasts; // create new AddrSpaceCast. for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { Value *NewCast = Builder.CreateAddrSpaceCast( NewElts[i], PointerType::get(NewElts[i]->getType()->getPointerElementType(), CE->getType()->getPointerAddressSpace())); NewCasts.emplace_back(NewCast); } SROA_Helper helper(CE, NewCasts, DeadInsts, typeSys, DL, DT); helper.RewriteForScalarRepl(CE, Builder); // Remove the use so that the caller can keep iterating over its other users DXASSERT(CE->user_empty(), "All uses of the addrspacecast should have been eliminated"); if (Instruction *I = dyn_cast<Instruction>(CE)) I->eraseFromParent(); else cast<Constant>(CE)->destroyConstant(); } /// RewriteForConstExpr - Rewrite the GEP which is ConstantExpr. void SROA_Helper::RewriteForConstExpr(ConstantExpr *CE, IRBuilder<> &Builder) { if (GEPOperator *GEP = dyn_cast<GEPOperator>(CE)) { if (OldVal == GEP->getPointerOperand()) { // Flatten GEP. RewriteForGEP(GEP, Builder); return; } } if (CE->getOpcode() == Instruction::AddrSpaceCast) { if (OldVal == CE->getOperand(0)) { // Flatten AddrSpaceCast. RewriteForAddrSpaceCast(CE, Builder); return; } } for (Value::use_iterator UI = CE->use_begin(), E = CE->use_end(); UI != E;) { Use &TheUse = *UI++; if (Instruction *I = dyn_cast<Instruction>(TheUse.getUser())) { IRBuilder<> tmpBuilder(I); // Replace CE with constInst. Instruction *tmpInst = CE->getAsInstruction(); tmpBuilder.Insert(tmpInst); TheUse.set(tmpInst); } else { RewriteForConstExpr(cast<ConstantExpr>(TheUse.getUser()), Builder); } } // Remove the use so that the caller can keep iterating over its other users DXASSERT(CE->user_empty(), "All uses of the constantexpr should have been eliminated"); CE->destroyConstant(); } /// RewriteForScalarRepl - OldVal is being split into NewElts, so rewrite /// users of V, which references it, to use the separate elements. void SROA_Helper::RewriteForScalarRepl(Value *V, IRBuilder<> &Builder) { // Don't iterate upon the uses explicitly because we'll be removing them, // and potentially adding new ones (if expanding memcpys) during the // iteration. Use *PrevUse = nullptr; while (!V->use_empty()) { Use &TheUse = *V->use_begin(); DXASSERT_LOCALVAR( PrevUse, &TheUse != PrevUse, "Infinite loop while SROA'ing value, use isn't getting eliminated."); PrevUse = &TheUse; // Each of these must either call ->eraseFromParent() // or null out the use of V so that we make progress. 
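    // The Rewrite* helpers follow that contract by replacing the rewritten
    // pointer operand with undef and queueing the user on DeadInsts, or by
    // destroying constant users outright.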
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(TheUse.getUser())) { RewriteForConstExpr(CE, Builder); } else { Instruction *User = cast<Instruction>(TheUse.getUser()); if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) { IRBuilder<> Builder(GEP); RewriteForGEP(cast<GEPOperator>(GEP), Builder); } else if (LoadInst *ldInst = dyn_cast<LoadInst>(User)) RewriteForLoad(ldInst); else if (StoreInst *stInst = dyn_cast<StoreInst>(User)) RewriteForStore(stInst); else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) RewriteMemIntrin(MI, V); else if (CallInst *CI = dyn_cast<CallInst>(User)) RewriteCall(CI); else if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) RewriteBitCast(BCI); else if (AddrSpaceCastInst *CI = dyn_cast<AddrSpaceCastInst>(User)) { RewriteForAddrSpaceCast(CI, Builder); } else { assert(0 && "not support."); } } } } static ArrayType *CreateNestArrayTy(Type *FinalEltTy, ArrayRef<ArrayType *> nestArrayTys) { Type *newAT = FinalEltTy; for (auto ArrayTy = nestArrayTys.rbegin(), E = nestArrayTys.rend(); ArrayTy != E; ++ArrayTy) newAT = ArrayType::get(newAT, (*ArrayTy)->getNumElements()); return cast<ArrayType>(newAT); } /// DoScalarReplacement - Split V into AllocaInsts with Builder and save the new /// AllocaInsts into Elts. Then do SROA on V. bool SROA_Helper::DoScalarReplacement(Value *V, std::vector<Value *> &Elts, Type *&BrokenUpTy, uint64_t &NumInstances, IRBuilder<> &Builder, bool bFlatVector, bool hasPrecise, DxilTypeSystem &typeSys, const DataLayout &DL, SmallVector<Value *, 32> &DeadInsts, DominatorTree *DT) { DEBUG(dbgs() << "Found inst to SROA: " << *V << '\n'); Type *Ty = V->getType(); // Skip none pointer types. if (!Ty->isPointerTy()) return false; Ty = Ty->getPointerElementType(); // Skip none aggregate types. if (!Ty->isAggregateType()) return false; // Skip matrix types. if (HLMatrixType::isa(Ty)) return false; IRBuilder<> AllocaBuilder( dxilutil::FindAllocaInsertionPt(Builder.GetInsertPoint())); if (StructType *ST = dyn_cast<StructType>(Ty)) { // Skip HLSL object types and RayQuery. if (dxilutil::IsHLSLObjectType(ST)) { return false; } BrokenUpTy = ST; NumInstances = 1; unsigned numTypes = ST->getNumContainedTypes(); Elts.reserve(numTypes); DxilStructAnnotation *SA = typeSys.GetStructAnnotation(ST); // Skip empty struct. if (SA && SA->IsEmptyStruct()) return true; for (int i = 0, e = numTypes; i != e; ++i) { AllocaInst *NA = AllocaBuilder.CreateAlloca( ST->getContainedType(i), nullptr, V->getName() + "." + Twine(i)); bool markPrecise = hasPrecise; if (SA) { DxilFieldAnnotation &FA = SA->GetFieldAnnotation(i); markPrecise |= FA.IsPrecise(); } if (markPrecise) HLModule::MarkPreciseAttributeWithMetadata(NA); Elts.push_back(NA); } } else { ArrayType *AT = cast<ArrayType>(Ty); if (AT->getNumContainedTypes() == 0) { // Skip case like [0 x %struct]. return false; } Type *ElTy = AT->getElementType(); SmallVector<ArrayType *, 4> nestArrayTys; nestArrayTys.emplace_back(AT); NumInstances = AT->getNumElements(); // support multi level of array while (ElTy->isArrayTy()) { ArrayType *ElAT = cast<ArrayType>(ElTy); nestArrayTys.emplace_back(ElAT); NumInstances *= ElAT->getNumElements(); ElTy = ElAT->getElementType(); } BrokenUpTy = ElTy; if (ElTy->isStructTy() && // Skip Matrix type. 
!HLMatrixType::isa(ElTy)) { if (!dxilutil::IsHLSLObjectType(ElTy)) { // for array of struct // split into arrays of struct elements StructType *ElST = cast<StructType>(ElTy); unsigned numTypes = ElST->getNumContainedTypes(); Elts.reserve(numTypes); DxilStructAnnotation *SA = typeSys.GetStructAnnotation(ElST); // Skip empty struct. if (SA && SA->IsEmptyStruct()) return true; for (int i = 0, e = numTypes; i != e; ++i) { AllocaInst *NA = AllocaBuilder.CreateAlloca( CreateNestArrayTy(ElST->getContainedType(i), nestArrayTys), nullptr, V->getName() + "." + Twine(i)); bool markPrecise = hasPrecise; if (SA) { DxilFieldAnnotation &FA = SA->GetFieldAnnotation(i); markPrecise |= FA.IsPrecise(); } if (markPrecise) HLModule::MarkPreciseAttributeWithMetadata(NA); Elts.push_back(NA); } } else { // For local resource array which not dynamic indexing, // split it. if (dxilutil::HasDynamicIndexing(V) || // Only support 1 dim split. nestArrayTys.size() > 1) return false; BrokenUpTy = AT; NumInstances = 1; for (int i = 0, e = AT->getNumElements(); i != e; ++i) { AllocaInst *NA = AllocaBuilder.CreateAlloca( ElTy, nullptr, V->getName() + "." + Twine(i)); Elts.push_back(NA); } } } else if (ElTy->isVectorTy()) { // Skip vector if required. if (!bFlatVector) return false; // for array of vector // split into arrays of scalar VectorType *ElVT = cast<VectorType>(ElTy); BrokenUpTy = ElVT; Elts.reserve(ElVT->getNumElements()); ArrayType *scalarArrayTy = CreateNestArrayTy(ElVT->getElementType(), nestArrayTys); for (int i = 0, e = ElVT->getNumElements(); i != e; ++i) { AllocaInst *NA = AllocaBuilder.CreateAlloca( scalarArrayTy, nullptr, V->getName() + "." + Twine(i)); if (hasPrecise) HLModule::MarkPreciseAttributeWithMetadata(NA); Elts.push_back(NA); } } else // Skip array of basic types. return false; } // Now that we have created the new alloca instructions, rewrite all the // uses of the old alloca. SROA_Helper helper(V, Elts, DeadInsts, typeSys, DL, DT); helper.RewriteForScalarRepl(V, Builder); return true; } static Constant *GetEltInit(Type *Ty, Constant *Init, unsigned idx, Type *EltTy) { if (isa<UndefValue>(Init)) return UndefValue::get(EltTy); if (dyn_cast<StructType>(Ty)) { return Init->getAggregateElement(idx); } else if (dyn_cast<VectorType>(Ty)) { return Init->getAggregateElement(idx); } else { ArrayType *AT = cast<ArrayType>(Ty); ArrayType *EltArrayTy = cast<ArrayType>(EltTy); std::vector<Constant *> Elts; if (!AT->getElementType()->isArrayTy()) { for (unsigned i = 0; i < AT->getNumElements(); i++) { // Get Array[i] Constant *InitArrayElt = Init->getAggregateElement(i); // Get Array[i].idx InitArrayElt = InitArrayElt->getAggregateElement(idx); Elts.emplace_back(InitArrayElt); } return ConstantArray::get(EltArrayTy, Elts); } else { Type *EltTy = AT->getElementType(); ArrayType *NestEltArrayTy = cast<ArrayType>(EltArrayTy->getElementType()); // Nested array. for (unsigned i = 0; i < AT->getNumElements(); i++) { // Get Array[i] Constant *InitArrayElt = Init->getAggregateElement(i); // Get Array[i].idx InitArrayElt = GetEltInit(EltTy, InitArrayElt, idx, NestEltArrayTy); Elts.emplace_back(InitArrayElt); } return ConstantArray::get(EltArrayTy, Elts); } } } unsigned SROA_Helper::GetEltAlign(unsigned ValueAlign, const DataLayout &DL, Type *EltTy, unsigned Offset) { unsigned Alignment = ValueAlign; if (ValueAlign == 0) { // The minimum alignment which users can rely on when the explicit // alignment is omitted or zero is that required by the ABI for this // type. 
Alignment = DL.getABITypeAlignment(EltTy); } return MinAlign(Alignment, Offset); } /// DoScalarReplacement - Split V into AllocaInsts with Builder and save the new /// AllocaInsts into Elts. Then do SROA on V. bool SROA_Helper::DoScalarReplacement(GlobalVariable *GV, std::vector<Value *> &Elts, IRBuilder<> &Builder, bool bFlatVector, bool hasPrecise, DxilTypeSystem &typeSys, const DataLayout &DL, SmallVector<Value *, 32> &DeadInsts, DominatorTree *DT) { DEBUG(dbgs() << "Found inst to SROA: " << *GV << '\n'); Type *Ty = GV->getType(); // Skip none pointer types. if (!Ty->isPointerTy()) return false; Ty = Ty->getPointerElementType(); // Skip none aggregate types. if (!Ty->isAggregateType() && !bFlatVector) return false; // Skip basic types. if (Ty->isSingleValueType() && !Ty->isVectorTy()) return false; // Skip matrix types. if (HLMatrixType::isa(Ty)) return false; Module *M = GV->getParent(); Constant *Init = GV->hasInitializer() ? GV->getInitializer() : UndefValue::get(Ty); bool isConst = GV->isConstant(); GlobalVariable::ThreadLocalMode TLMode = GV->getThreadLocalMode(); unsigned AddressSpace = GV->getType()->getAddressSpace(); GlobalValue::LinkageTypes linkage = GV->getLinkage(); const unsigned Alignment = GV->getAlignment(); if (StructType *ST = dyn_cast<StructType>(Ty)) { // Skip HLSL object types. if (dxilutil::IsHLSLObjectType(ST)) return false; unsigned numTypes = ST->getNumContainedTypes(); Elts.reserve(numTypes); unsigned Offset = 0; // DxilStructAnnotation *SA = typeSys.GetStructAnnotation(ST); for (int i = 0, e = numTypes; i != e; ++i) { Type *EltTy = ST->getElementType(i); Constant *EltInit = GetEltInit(Ty, Init, i, EltTy); GlobalVariable *EltGV = new llvm::GlobalVariable( *M, ST->getContainedType(i), /*IsConstant*/ isConst, linkage, /*InitVal*/ EltInit, GV->getName() + "." + Twine(i), /*InsertBefore*/ nullptr, TLMode, AddressSpace); EltGV->setAlignment(GetEltAlign(Alignment, DL, EltTy, Offset)); Offset += DL.getTypeAllocSize(EltTy); // DxilFieldAnnotation &FA = SA->GetFieldAnnotation(i); // TODO: set precise. // if (hasPrecise || FA.IsPrecise()) // HLModule::MarkPreciseAttributeWithMetadata(NA); Elts.push_back(EltGV); } } else if (VectorType *VT = dyn_cast<VectorType>(Ty)) { // TODO: support dynamic indexing on vector by change it to array. unsigned numElts = VT->getNumElements(); Elts.reserve(numElts); Type *EltTy = VT->getElementType(); unsigned Offset = 0; // DxilStructAnnotation *SA = typeSys.GetStructAnnotation(ST); for (int i = 0, e = numElts; i != e; ++i) { Constant *EltInit = GetEltInit(Ty, Init, i, EltTy); GlobalVariable *EltGV = new llvm::GlobalVariable( *M, EltTy, /*IsConstant*/ isConst, linkage, /*InitVal*/ EltInit, GV->getName() + "." + Twine(i), /*InsertBefore*/ nullptr, TLMode, AddressSpace); EltGV->setAlignment(GetEltAlign(Alignment, DL, EltTy, Offset)); Offset += DL.getTypeAllocSize(EltTy); // DxilFieldAnnotation &FA = SA->GetFieldAnnotation(i); // TODO: set precise. // if (hasPrecise || FA.IsPrecise()) // HLModule::MarkPreciseAttributeWithMetadata(NA); Elts.push_back(EltGV); } } else { ArrayType *AT = cast<ArrayType>(Ty); if (AT->getNumContainedTypes() == 0) { // Skip case like [0 x %struct]. return false; } Type *ElTy = AT->getElementType(); SmallVector<ArrayType *, 4> nestArrayTys; nestArrayTys.emplace_back(AT); // support multi level of array while (ElTy->isArrayTy()) { ArrayType *ElAT = cast<ArrayType>(ElTy); nestArrayTys.emplace_back(ElAT); ElTy = ElAT->getElementType(); } if (ElTy->isStructTy() && // Skip Matrix and Resource type. 
!HLMatrixType::isa(ElTy) && !dxilutil::IsHLSLResourceType(ElTy)) { // for array of struct // split into arrays of struct elements StructType *ElST = cast<StructType>(ElTy); unsigned numTypes = ElST->getNumContainedTypes(); Elts.reserve(numTypes); unsigned Offset = 0; // DxilStructAnnotation *SA = typeSys.GetStructAnnotation(ElST); for (int i = 0, e = numTypes; i != e; ++i) { Type *EltTy = CreateNestArrayTy(ElST->getContainedType(i), nestArrayTys); Constant *EltInit = GetEltInit(Ty, Init, i, EltTy); GlobalVariable *EltGV = new llvm::GlobalVariable( *M, EltTy, /*IsConstant*/ isConst, linkage, /*InitVal*/ EltInit, GV->getName() + "." + Twine(i), /*InsertBefore*/ nullptr, TLMode, AddressSpace); EltGV->setAlignment(GetEltAlign(Alignment, DL, EltTy, Offset)); Offset += DL.getTypeAllocSize(EltTy); // DxilFieldAnnotation &FA = SA->GetFieldAnnotation(i); // TODO: set precise. // if (hasPrecise || FA.IsPrecise()) // HLModule::MarkPreciseAttributeWithMetadata(NA); Elts.push_back(EltGV); } } else if (ElTy->isVectorTy()) { // Skip vector if required. if (!bFlatVector) return false; // for array of vector // split into arrays of scalar VectorType *ElVT = cast<VectorType>(ElTy); Elts.reserve(ElVT->getNumElements()); ArrayType *scalarArrayTy = CreateNestArrayTy(ElVT->getElementType(), nestArrayTys); unsigned Offset = 0; for (int i = 0, e = ElVT->getNumElements(); i != e; ++i) { Constant *EltInit = GetEltInit(Ty, Init, i, scalarArrayTy); GlobalVariable *EltGV = new llvm::GlobalVariable( *M, scalarArrayTy, /*IsConstant*/ isConst, linkage, /*InitVal*/ EltInit, GV->getName() + "." + Twine(i), /*InsertBefore*/ nullptr, TLMode, AddressSpace); // TODO: set precise. // if (hasPrecise) // HLModule::MarkPreciseAttributeWithMetadata(NA); EltGV->setAlignment(GetEltAlign(Alignment, DL, scalarArrayTy, Offset)); Offset += DL.getTypeAllocSize(scalarArrayTy); Elts.push_back(EltGV); } } else // Skip array of basic types. return false; } // Now that we have created the new alloca instructions, rewrite all the // uses of the old alloca. SROA_Helper helper(GV, Elts, DeadInsts, typeSys, DL, DT); helper.RewriteForScalarRepl(GV, Builder); return true; } // Replaces uses of constant C in the current function // with V, when those uses are dominated by V. // Returns true if it was completely replaced. static bool ReplaceConstantWithInst(Constant *C, Value *V, IRBuilder<> &Builder) { bool bReplacedAll = true; Function *F = Builder.GetInsertBlock()->getParent(); Instruction *VInst = dyn_cast<Instruction>(V); // Lazily calculate dominance DominatorTree DT; bool Calculated = false; auto Dominates = [&](llvm::Instruction *Def, llvm::Instruction *User) { if (!Calculated) { DT.recalculate(*F); Calculated = true; } return DT.dominates(Def, User); }; for (auto it = C->user_begin(); it != C->user_end();) { User *U = *(it++); if (Instruction *I = dyn_cast<Instruction>(U)) { if (I->getParent()->getParent() != F) continue; if (VInst && Dominates(VInst, I)) I->replaceUsesOfWith(C, V); else bReplacedAll = false; } else { // Skip unused ConstantExpr. 
if (U->user_empty()) continue; ConstantExpr *CE = cast<ConstantExpr>(U); Instruction *Inst = CE->getAsInstruction(); Builder.Insert(Inst); Inst->replaceUsesOfWith(C, V); if (!ReplaceConstantWithInst(CE, Inst, Builder)) bReplacedAll = false; } } C->removeDeadConstantUsers(); return bReplacedAll; } static void ReplaceUnboundedArrayUses(Value *V, Value *Src) { for (auto it = V->user_begin(); it != V->user_end();) { User *U = *(it++); if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) { SmallVector<Value *, 4> idxList(GEP->idx_begin(), GEP->idx_end()); // Must set the insert point to the GEP itself (instead of the memcpy), // because the indices might not dominate the memcpy. IRBuilder<> Builder(GEP); Value *NewGEP = Builder.CreateGEP(Src, idxList); GEP->replaceAllUsesWith(NewGEP); } else if (BitCastInst *BC = dyn_cast<BitCastInst>(U)) { BC->setOperand(0, Src); } else { DXASSERT(false, "otherwise unbounded array used in unexpected instruction"); } } } static bool IsUnboundedArrayMemcpy(Type *destTy, Type *srcTy) { return (destTy->isArrayTy() && srcTy->isArrayTy()) && (destTy->getArrayNumElements() == 0 || srcTy->getArrayNumElements() == 0); } static bool ArePointersToStructsOfIdenticalLayouts(Type *DstTy, Type *SrcTy) { if (!SrcTy->isPointerTy() || !DstTy->isPointerTy()) return false; DstTy = DstTy->getPointerElementType(); SrcTy = SrcTy->getPointerElementType(); if (!SrcTy->isStructTy() || !DstTy->isStructTy()) return false; StructType *DstST = cast<StructType>(DstTy); StructType *SrcST = cast<StructType>(SrcTy); return SrcST->isLayoutIdentical(DstST); } static std::vector<Value *> GetConstValueIdxList(IRBuilder<> &builder, std::vector<unsigned> idxlist) { std::vector<Value *> idxConstList; for (unsigned idx : idxlist) { idxConstList.push_back(ConstantInt::get(builder.getInt32Ty(), idx)); } return idxConstList; } static void CopyElementsOfStructsWithIdenticalLayout(IRBuilder<> &builder, Value *destPtr, Value *srcPtr, Type *ty, std::vector<unsigned> &idxlist) { if (ty->isStructTy()) { for (unsigned i = 0; i < ty->getStructNumElements(); i++) { idxlist.push_back(i); CopyElementsOfStructsWithIdenticalLayout( builder, destPtr, srcPtr, ty->getStructElementType(i), idxlist); idxlist.pop_back(); } } else if (ty->isArrayTy()) { for (unsigned i = 0; i < ty->getArrayNumElements(); i++) { idxlist.push_back(i); CopyElementsOfStructsWithIdenticalLayout( builder, destPtr, srcPtr, ty->getArrayElementType(), idxlist); idxlist.pop_back(); } } else if (ty->isIntegerTy() || ty->isFloatTy() || ty->isDoubleTy() || ty->isHalfTy() || ty->isVectorTy()) { Value *srcGEP = builder.CreateInBoundsGEP( srcPtr, GetConstValueIdxList(builder, idxlist)); Value *destGEP = builder.CreateInBoundsGEP( destPtr, GetConstValueIdxList(builder, idxlist)); LoadInst *LI = builder.CreateLoad(srcGEP); builder.CreateStore(LI, destGEP); } else { DXASSERT(0, "encountered unsupported type when copying elements of " "identical structs."); } } static void removeLifetimeUsers(Value *V) { std::set<Value *> users(V->users().begin(), V->users().end()); for (Value *U : users) { if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) { if (II->getIntrinsicID() == Intrinsic::lifetime_start || II->getIntrinsicID() == Intrinsic::lifetime_end) { II->eraseFromParent(); } } else if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U) || isa<GetElementPtrInst>(U)) { // Recurse into bitcast, addrspacecast, GEP. 
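      // Lifetime markers may be reached through pointer casts; once those
      // markers are gone, erase the cast/GEP itself if it became dead.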
removeLifetimeUsers(U); if (U->use_empty()) cast<Instruction>(U)->eraseFromParent(); } } } // Conservatively remove all lifetime users of both source and target. // Otherwise, wrong lifetimes could be inherited either way. // TODO: We should be merging the lifetimes. For convenience, just remove them // for now to be safe. static void updateLifetimeForReplacement(Value *From, Value *To) { removeLifetimeUsers(From); removeLifetimeUsers(To); } static bool DominateAllUsers(Instruction *I, Value *V, DominatorTree *DT); namespace { bool replaceScalarArrayGEPWithVectorArrayGEP(User *GEP, Value *VectorArray, IRBuilder<> &Builder, unsigned sizeInDwords) { gep_type_iterator GEPIt = gep_type_begin(GEP), E = gep_type_end(GEP); Value *PtrOffset = GEPIt.getOperand(); ++GEPIt; Value *ArrayIdx = GEPIt.getOperand(); ++GEPIt; ArrayIdx = Builder.CreateAdd(PtrOffset, ArrayIdx); DXASSERT_LOCALVAR(E, GEPIt == E, "invalid gep on scalar array"); unsigned shift = 2; unsigned mask = 0x3; switch (sizeInDwords) { case 2: shift = 1; mask = 1; break; case 1: shift = 2; mask = 0x3; break; default: DXASSERT(0, "invalid scalar size"); break; } Value *VecIdx = Builder.CreateLShr(ArrayIdx, shift); Value *VecPtr = Builder.CreateGEP( VectorArray, {ConstantInt::get(VecIdx->getType(), 0), VecIdx}); Value *CompIdx = Builder.CreateAnd(ArrayIdx, mask); Value *NewGEP = Builder.CreateGEP( VecPtr, {ConstantInt::get(CompIdx->getType(), 0), CompIdx}); if (isa<ConstantExpr>(GEP) && isa<Instruction>(NewGEP)) { if (!ReplaceConstantWithInst(cast<Constant>(GEP), NewGEP, Builder)) { // If new instructions unable to be used, clean them up. if (NewGEP->user_empty()) cast<Instruction>(NewGEP)->eraseFromParent(); if (isa<Instruction>(VecPtr) && VecPtr->user_empty()) cast<Instruction>(VecPtr)->eraseFromParent(); if (isa<Instruction>(CompIdx) && CompIdx->user_empty()) cast<Instruction>(CompIdx)->eraseFromParent(); if (isa<Instruction>(VecIdx) && VecIdx->user_empty()) cast<Instruction>(VecIdx)->eraseFromParent(); return false; } return true; } else { GEP->replaceAllUsesWith(NewGEP); } return true; } bool replaceScalarArrayWithVectorArray(Value *ScalarArray, Value *VectorArray, MemCpyInst *MC, unsigned sizeInDwords) { bool bReplacedAll = true; LLVMContext &Context = ScalarArray->getContext(); // All users should be element type. // Replace users of AI or GV. for (auto it = ScalarArray->user_begin(); it != ScalarArray->user_end();) { User *U = *(it++); if (U->user_empty()) continue; if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) { // Avoid replacing the dest of the memcpy to support partial replacement. if (MC->getArgOperand(0) != BCI) BCI->setOperand(0, VectorArray); continue; } if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) { IRBuilder<> Builder(Context); // If we need to replace the constant with an instruction, start at the // memcpy, so we replace only users dominated by it. 
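      // (The ConstantExpr itself has no location, so the memcpy is the natural
      // dominance-safe insertion point when the replacement is an instruction.)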
if (isa<Instruction>(VectorArray)) Builder.SetInsertPoint(MC); if (GEPOperator *GEP = dyn_cast<GEPOperator>(U)) { if (!replaceScalarArrayGEPWithVectorArrayGEP(U, VectorArray, Builder, sizeInDwords)) bReplacedAll = false; } else if (CE->getOpcode() == Instruction::AddrSpaceCast) { Value *NewAddrSpaceCast = Builder.CreateAddrSpaceCast( VectorArray, PointerType::get(VectorArray->getType()->getPointerElementType(), CE->getType()->getPointerAddressSpace())); if (!replaceScalarArrayWithVectorArray(CE, NewAddrSpaceCast, MC, sizeInDwords)) { bReplacedAll = false; if (Instruction *NewInst = dyn_cast<Instruction>(NewAddrSpaceCast)) if (NewInst->user_empty()) NewInst->eraseFromParent(); } } else if (CE->hasOneUse() && CE->user_back() == MC) { continue; } else { DXASSERT(0, "not implemented"); } } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) { IRBuilder<> Builder(GEP); if (!replaceScalarArrayGEPWithVectorArrayGEP(U, VectorArray, Builder, sizeInDwords)) bReplacedAll = false; else GEP->eraseFromParent(); } else { DXASSERT(0, "not implemented"); } } return bReplacedAll; } // For pattern like // float4 cb[16]; // float v[64] = cb; bool tryToReplaceCBVec4ArrayToScalarArray(Value *V, Type *TyV, Value *Src, Type *TySrc, MemCpyInst *MC, const DataLayout &DL) { if (!isCBVec4ArrayToScalarArray(TyV, Src, TySrc, DL)) return false; ArrayType *AT = cast<ArrayType>(TyV); Type *EltTy = AT->getElementType(); unsigned sizeInBits = DL.getTypeSizeInBits(EltTy); // Convert array of float4 to array of float. if (replaceScalarArrayWithVectorArray(V, Src, MC, sizeInBits >> 5)) { Value *DstBC = MC->getArgOperand(0); MC->setArgOperand(0, UndefValue::get(MC->getArgOperand(0)->getType())); if (DstBC->user_empty()) { // Replacement won't include the memcpy dest. Now remove that use. if (BitCastInst *BCI = dyn_cast<BitCastInst>(DstBC)) { Value *Dst = BCI->getOperand(0); Type *DstTy = Dst->getType(); if (Dst == V) BCI->setOperand(0, UndefValue::get(DstTy)); else llvm_unreachable("Unexpected dest of memcpy."); } } else { llvm_unreachable("Unexpected users of memcpy bitcast."); } return true; } return false; } } // namespace static bool ReplaceMemcpy(Value *V, Value *Src, MemCpyInst *MC, DxilFieldAnnotation *annotation, DxilTypeSystem &typeSys, const DataLayout &DL, DominatorTree *DT) { // If the only user of the src and dst is the memcpy, // this memcpy was probably produced by splitting another. // Regardless, the goal here is to replace, not remove the memcpy // we won't have enough information to determine if we can do that before // mem2reg if (V != Src && V->hasOneUse() && Src->hasOneUse()) return false; // If the source of the memcpy (Src) doesn't dominate all users of dest (V), // full replacement isn't possible without complicated PHI insertion // This will likely replace with ld/st which will be replaced in mem2reg if (Instruction *SrcI = dyn_cast<Instruction>(Src)) if (!DominateAllUsers(SrcI, V, DT)) return false; Type *TyV = V->getType()->getPointerElementType(); Type *TySrc = Src->getType()->getPointerElementType(); if (Constant *C = dyn_cast<Constant>(V)) { updateLifetimeForReplacement(V, Src); if (TyV == TySrc) { if (isa<Constant>(Src)) { V->replaceAllUsesWith(Src); } else { // Replace Constant with a non-Constant. IRBuilder<> Builder(MC); if (!ReplaceConstantWithInst(C, Src, Builder)) return false; } } else { // Try convert special pattern for cbuffer which copy array of float4 to // array of float. 
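      // If the cbuffer pattern does not apply, fall back to bitcasting the
      // source to the destination type and rewriting the constant's dominated
      // users with it.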
if (!tryToReplaceCBVec4ArrayToScalarArray(V, TyV, Src, TySrc, MC, DL)) { IRBuilder<> Builder(MC); Src = Builder.CreateBitCast(Src, V->getType()); if (!ReplaceConstantWithInst(C, Src, Builder)) return false; } } } else { if (TyV == TySrc) { if (V != Src) { updateLifetimeForReplacement(V, Src); V->replaceAllUsesWith(Src); } } else if (!IsUnboundedArrayMemcpy(TyV, TySrc)) { Value *DestVal = MC->getRawDest(); Value *SrcVal = MC->getRawSource(); if (!isa<BitCastInst>(SrcVal) || !isa<BitCastInst>(DestVal)) { DXASSERT(0, "Encountered unexpected instruction sequence"); return false; } BitCastInst *DestBCI = cast<BitCastInst>(DestVal); BitCastInst *SrcBCI = cast<BitCastInst>(SrcVal); Type *DstTy = DestBCI->getSrcTy(); Type *SrcTy = SrcBCI->getSrcTy(); if (ArePointersToStructsOfIdenticalLayouts(DstTy, SrcTy)) { const DataLayout &DL = SrcBCI->getModule()->getDataLayout(); unsigned SrcSize = DL.getTypeAllocSize( SrcBCI->getOperand(0)->getType()->getPointerElementType()); unsigned MemcpySize = cast<ConstantInt>(MC->getLength())->getZExtValue(); if (SrcSize != MemcpySize) { DXASSERT(0, "Cannot handle partial memcpy"); return false; } if (DestBCI->hasOneUse() && SrcBCI->hasOneUse()) { IRBuilder<> Builder(MC); StructType *srcStTy = cast<StructType>( SrcBCI->getOperand(0)->getType()->getPointerElementType()); std::vector<unsigned> idxlist = {0}; CopyElementsOfStructsWithIdenticalLayout( Builder, DestBCI->getOperand(0), SrcBCI->getOperand(0), srcStTy, idxlist); } } else { if (DstTy == SrcTy) { Value *DstPtr = DestBCI->getOperand(0); Value *SrcPtr = SrcBCI->getOperand(0); if (isa<GEPOperator>(DstPtr) || isa<GEPOperator>(SrcPtr)) { MemcpySplitter::SplitMemCpy(MC, DL, annotation, typeSys); return true; } else { updateLifetimeForReplacement(V, Src); DstPtr->replaceAllUsesWith(SrcPtr); } } else { DXASSERT(0, "Can't handle structs of different layouts"); return false; } } } else { updateLifetimeForReplacement(V, Src); DXASSERT(IsUnboundedArrayMemcpy(TyV, TySrc), "otherwise mismatched types in memcpy are not unbounded array"); ReplaceUnboundedArrayUses(V, Src); } } if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Src)) { // For const GV, if has stored, mark as non-constant. if (GV->isConstant()) { hlutil::PointerStatus PS(GV, 0, /*bLdStOnly*/ true); PS.analyze(typeSys, /*bStructElt*/ false); if (PS.HasStored()) GV->setConstant(false); } } Value *RawDest = MC->getOperand(0); Value *RawSrc = MC->getOperand(1); MC->eraseFromParent(); if (Instruction *I = dyn_cast<Instruction>(RawDest)) { if (I->user_empty()) I->eraseFromParent(); } if (Instruction *I = dyn_cast<Instruction>(RawSrc)) { if (I->user_empty()) I->eraseFromParent(); } return true; } static bool ReplaceUseOfZeroInitEntry(Instruction *I, Value *V) { BasicBlock *BB = I->getParent(); Function *F = I->getParent()->getParent(); for (auto U = V->user_begin(); U != V->user_end();) { Instruction *UI = dyn_cast<Instruction>(*(U++)); if (!UI) continue; if (UI->getParent()->getParent() != F) continue; if (isa<GetElementPtrInst>(UI) || isa<BitCastInst>(UI)) { if (!ReplaceUseOfZeroInitEntry(I, UI)) return false; else continue; } if (BB != UI->getParent() || UI == I) continue; // I is the last inst in the block after split. // Any inst in current block is before I. if (LoadInst *LI = dyn_cast<LoadInst>(UI)) { // Replace uses of the load with a constant zero. 
Constant *replacement = Constant::getNullValue(LI->getType()); LI->replaceAllUsesWith(replacement); LI->eraseFromParent(); continue; } return false; } return true; } // If a V user is dominated by memcpy (I), // skip it - memcpy dest can simply alias to src for this user. // If the V user may follow the memcpy (I), // return false - memcpy dest not safe to replace with src. // Otherwise, // replace use with zeroinitializer. static bool ReplaceUseOfZeroInit(Instruction *I, Value *V, DominatorTree &DT, SmallPtrSet<BasicBlock *, 8> &Reachable) { BasicBlock *BB = I->getParent(); Function *F = I->getParent()->getParent(); for (auto U = V->user_begin(); U != V->user_end();) { Instruction *UI = dyn_cast<Instruction>(*(U++)); if (!UI || UI == I) continue; if (UI->getParent()->getParent() != F) continue; // Skip properly dominated users if (DT.properlyDominates(BB, UI->getParent())) continue; // If user is found in memcpy successor list // then the user is not safe to replace with zeroinitializer. if (Reachable.count(UI->getParent())) return false; // Remaining cases are where I: // - is at the end of the same block // - does not precede UI on any path if (isa<GetElementPtrInst>(UI) || isa<BitCastInst>(UI)) { if (ReplaceUseOfZeroInit(I, UI, DT, Reachable)) continue; } else if (LoadInst *LI = dyn_cast<LoadInst>(UI)) { // Replace uses of the load with a constant zero. Constant *replacement = Constant::getNullValue(LI->getType()); LI->replaceAllUsesWith(replacement); LI->eraseFromParent(); continue; } return false; } return true; } // Recursively collect all successors of BB and BB's successors. // BB will not be in set unless it's reachable through its successors. static void CollectReachableBBs(BasicBlock *BB, SmallPtrSet<BasicBlock *, 8> &Reachable) { for (auto S : successors(BB)) { if (Reachable.insert(S).second) CollectReachableBBs(S, Reachable); } } // When zero initialized GV has only one define, all uses before the def should // use zero. static bool ReplaceUseOfZeroInitBeforeDef(Instruction *I, GlobalVariable *GV) { BasicBlock *BB = I->getParent(); Function *F = I->getParent()->getParent(); // Make sure I is the last inst for BB. BasicBlock *NewBB = nullptr; if (I != BB->getTerminator()) NewBB = BB->splitBasicBlock(I->getNextNode()); bool bSuccess = false; if (&F->getEntryBlock() == I->getParent()) { bSuccess = ReplaceUseOfZeroInitEntry(I, GV); } else { DominatorTree DT; DT.recalculate(*F); SmallPtrSet<BasicBlock *, 8> Reachable; CollectReachableBBs(BB, Reachable); bSuccess = ReplaceUseOfZeroInit(I, GV, DT, Reachable); } // Re-merge basic block to keep things simpler if (NewBB) llvm::MergeBlockIntoPredecessor(NewBB); return bSuccess; } // Use `DT` to trace all users and make sure `I`'s BB dominates them all static bool DominateAllUsersDom(Instruction *I, Value *V, DominatorTree *DT) { BasicBlock *BB = I->getParent(); Function *F = I->getParent()->getParent(); for (auto U = V->user_begin(); U != V->user_end();) { Instruction *UI = dyn_cast<Instruction>(*(U++)); // If not an instruction or from a differnt function, nothing to check, move // along. 
if (!UI || UI->getParent()->getParent() != F) continue; if (!DT->dominates(BB, UI->getParent())) return false; if (isa<GetElementPtrInst>(UI) || isa<BitCastInst>(UI)) { if (!DominateAllUsersDom(I, UI, DT)) return false; } } return true; } // Determine if `I` dominates all the users of `V` static bool DominateAllUsers(Instruction *I, Value *V, DominatorTree *DT) { Function *F = I->getParent()->getParent(); // The Entry Block dominates everything, trivially true if (&F->getEntryBlock() == I->getParent()) return true; if (!DT) { DominatorTree TempDT; TempDT.recalculate(*F); return DominateAllUsersDom(I, V, &TempDT); } else { return DominateAllUsersDom(I, V, DT); } } // Return resource properties if handle value is HLAnnotateHandle static DxilResourceProperties GetResPropsFromHLAnnotateHandle(Value *handle) { if (CallInst *handleCI = dyn_cast<CallInst>(handle)) { hlsl::HLOpcodeGroup group = hlsl::GetHLOpcodeGroup(handleCI->getCalledFunction()); if (group == HLOpcodeGroup::HLAnnotateHandle) { Constant *Props = cast<Constant>(handleCI->getArgOperand( HLOperandIndex::kAnnotateHandleResourcePropertiesOpIdx)); return resource_helper::loadPropsFromConstant(*Props); } } return {}; } static bool isReadOnlyResSubscriptOrLoad(CallInst *PtrCI) { hlsl::HLOpcodeGroup group = hlsl::GetHLOpcodeGroup(PtrCI->getCalledFunction()); if (group == HLOpcodeGroup::HLSubscript) { HLSubscriptOpcode opcode = static_cast<HLSubscriptOpcode>(hlsl::GetHLOpcode(PtrCI)); if (opcode == HLSubscriptOpcode::CBufferSubscript) { // Ptr from CBuffer is readonly. return true; } else if (opcode == HLSubscriptOpcode::DefaultSubscript) { DxilResourceProperties RP = GetResPropsFromHLAnnotateHandle( PtrCI->getArgOperand(HLOperandIndex::kSubscriptObjectOpIdx)); if (RP.getResourceClass() == DXIL::ResourceClass::SRV) return true; } } else if (group == HLOpcodeGroup::HLIntrinsic) { IntrinsicOp hlOpcode = (IntrinsicOp)GetHLOpcode(PtrCI); if (hlOpcode == IntrinsicOp::MOP_Load) { // This is templated BAB.Load<UDT>() case. 
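      // A pointer produced by Load on an SRV handle is read-only, just like
      // the CBuffer/SRV subscript cases above.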
DxilResourceProperties RP = GetResPropsFromHLAnnotateHandle( PtrCI->getArgOperand(HLOperandIndex::kHandleOpIdx)); if (RP.getResourceClass() == DXIL::ResourceClass::SRV) return true; } } return false; } static void collectAllStores(const Value *V, SmallVector<const Instruction *, 4> &Stores) { for (const User *U : V->users()) { if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(U)) { collectAllStores(BC, Stores); } else if (const MemCpyInst *MC = dyn_cast<MemCpyInst>(U)) { if (MC->getRawDest() == V) Stores.emplace_back(MC); } else if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U)) { collectAllStores(GEP, Stores); } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) { Stores.emplace_back(SI); } else if (const CallInst *CI = dyn_cast<CallInst>(U)) { Function *F = CI->getCalledFunction(); if (F->isIntrinsic()) { if (F->getIntrinsicID() == Intrinsic::lifetime_start || F->getIntrinsicID() == Intrinsic::lifetime_end) continue; } HLOpcodeGroup group = hlsl::GetHLOpcodeGroupByName(F); switch (group) { case HLOpcodeGroup::HLMatLoadStore: { HLMatLoadStoreOpcode opcode = static_cast<HLMatLoadStoreOpcode>(hlsl::GetHLOpcode(CI)); switch (opcode) { case HLMatLoadStoreOpcode::ColMatLoad: case HLMatLoadStoreOpcode::RowMatLoad: break; case HLMatLoadStoreOpcode::ColMatStore: case HLMatLoadStoreOpcode::RowMatStore: Stores.emplace_back(CI); break; default: DXASSERT(0, "invalid opcode"); Stores.emplace_back(CI); break; } } break; case HLOpcodeGroup::HLSubscript: { HLSubscriptOpcode opcode = static_cast<HLSubscriptOpcode>(hlsl::GetHLOpcode(CI)); switch (opcode) { case HLSubscriptOpcode::VectorSubscript: case HLSubscriptOpcode::ColMatElement: case HLSubscriptOpcode::ColMatSubscript: case HLSubscriptOpcode::RowMatElement: case HLSubscriptOpcode::RowMatSubscript: collectAllStores(CI, Stores); break; default: // Rest are resource ptr like buf[i]. // Only read of resource handle. break; } } break; default: { // If not sure its out param or not. Take as out param. Stores.emplace_back(CI); } } } } } // Make sure all store on V dominate I. static bool allStoresDominateInst(Value *V, Instruction *I, DominatorTree *DT) { if (!DT) return false; SmallVector<const Instruction *, 4> Stores; collectAllStores(V, Stores); for (const Instruction *S : Stores) { if (!DT->dominates(S, I)) return false; } return true; } bool SROA_Helper::LowerMemcpy(Value *V, DxilFieldAnnotation *annotation, DxilTypeSystem &typeSys, const DataLayout &DL, DominatorTree *DT, bool bAllowReplace) { Type *Ty = V->getType(); if (!Ty->isPointerTy()) { return false; } // Get access status and collect memcpy uses. // if MemcpyOnce, replace with dest with src if dest is not out param. // else flat memcpy. unsigned size = DL.getTypeAllocSize(Ty->getPointerElementType()); hlutil::PointerStatus PS(V, size, /*bLdStOnly*/ false); const bool bStructElt = false; PS.analyze(typeSys, bStructElt); if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) { if (GV->hasInitializer() && !isa<UndefValue>(GV->getInitializer())) { if (PS.storedType == hlutil::PointerStatus::StoredType::NotStored) { PS.storedType = hlutil::PointerStatus::StoredType::InitializerStored; } else if (PS.storedType == hlutil::PointerStatus::StoredType::MemcopyDestOnce) { // For single mem store, if the store does not dominate all users. // Mark it as Stored. // In cases like: // struct A { float4 x[25]; }; // A a; // static A a2; // void set(A aa) { aa = a; } // call set inside entry function then use a2. 
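        // For a zero-initialized global with a single storing memcpy, uses
        // that can only observe the initializer are folded to zero; if that
        // rewrite is not possible, conservatively treat the global as Stored.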
if (isa<ConstantAggregateZero>(GV->getInitializer())) { Instruction *Memcpy = PS.StoringMemcpy; if (!ReplaceUseOfZeroInitBeforeDef(Memcpy, GV)) { PS.storedType = hlutil::PointerStatus::StoredType::Stored; } } } else { PS.storedType = hlutil::PointerStatus::StoredType::Stored; } } } if (bAllowReplace && !PS.HasMultipleAccessingFunctions) { if (PS.storedType == hlutil::PointerStatus::StoredType::MemcopyDestOnce && // Skip argument for input argument has input value, it is not dest once // anymore. !isa<Argument>(V)) { // Replace with src of memcpy. MemCpyInst *MC = PS.StoringMemcpy; if (MC->getSourceAddressSpace() == MC->getDestAddressSpace()) { Value *Src = MC->getOperand(1); // Only remove one level bitcast generated from inline. if (BitCastOperator *BC = dyn_cast<BitCastOperator>(Src)) Src = BC->getOperand(0); if (GEPOperator *GEP = dyn_cast<GEPOperator>(Src)) { // For GEP, the ptr could have other GEP read/write. // Only scan one GEP is not enough. Value *Ptr = GEP->getPointerOperand(); while (GEPOperator *NestedGEP = dyn_cast<GEPOperator>(Ptr)) Ptr = NestedGEP->getPointerOperand(); if (CallInst *PtrCI = dyn_cast<CallInst>(Ptr)) { if (isReadOnlyResSubscriptOrLoad(PtrCI)) { // Ptr from CBuffer/SRV is safe. if (ReplaceMemcpy(V, Src, MC, annotation, typeSys, DL, DT)) { if (V->user_empty()) return true; return LowerMemcpy(V, annotation, typeSys, DL, DT, bAllowReplace); } } } } else if (!isa<CallInst>(Src)) { // Resource ptr should not be replaced. // Need to make sure src not updated after current memcpy. // Check Src only have 1 store now. // If Src has more than 1 store but only used once by memcpy, check if // the stores dominate the memcpy. hlutil::PointerStatus SrcPS(Src, size, /*bLdStOnly*/ false); SrcPS.analyze(typeSys, bStructElt); if (SrcPS.storedType != hlutil::PointerStatus::StoredType::Stored || (SrcPS.loadedType == hlutil::PointerStatus::LoadedType::MemcopySrcOnce && allStoresDominateInst(Src, MC, DT))) { if (ReplaceMemcpy(V, Src, MC, annotation, typeSys, DL, DT)) { if (V->user_empty()) return true; return LowerMemcpy(V, annotation, typeSys, DL, DT, bAllowReplace); } } } } } else if (PS.loadedType == hlutil::PointerStatus::LoadedType::MemcopySrcOnce) { // Replace dst of memcpy. MemCpyInst *MC = PS.LoadingMemcpy; if (MC->getSourceAddressSpace() == MC->getDestAddressSpace()) { Value *Dest = MC->getOperand(0); // Only remove one level bitcast generated from inline. if (BitCastOperator *BC = dyn_cast<BitCastOperator>(Dest)) Dest = BC->getOperand(0); // For GEP, the ptr could have other GEP read/write. // Only scan one GEP is not enough. // And resource ptr should not be replaced. // Nor should (output) argument ptr be replaced. if (!isa<GEPOperator>(Dest) && !isa<CallInst>(Dest) && !isa<BitCastOperator>(Dest) && !isa<Argument>(Dest)) { // Need to make sure Dest not updated after current memcpy. // Check Dest only have 1 store now. hlutil::PointerStatus DestPS(Dest, size, /*bLdStOnly*/ false); DestPS.analyze(typeSys, bStructElt); if (DestPS.storedType != hlutil::PointerStatus::StoredType::Stored) { if (ReplaceMemcpy(Dest, V, MC, annotation, typeSys, DL, DT)) { // V still needs to be flattened. // Lower memcpy come from Dest. return LowerMemcpy(V, annotation, typeSys, DL, DT, bAllowReplace); } } } } } } for (MemCpyInst *MC : PS.memcpySet) { MemcpySplitter::SplitMemCpy(MC, DL, annotation, typeSys); } return false; } /// MarkEmptyStructUsers - Add instruction related to Empty struct to DeadInsts. 
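/// Memcpys touching the value are queued for deletion, call and return uses
/// are rewritten to undef, and constant/GEP/bitcast/load/store users are
/// visited recursively.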
void SROA_Helper::MarkEmptyStructUsers(Value *V, SmallVector<Value *, 32> &DeadInsts) { UndefValue *undef = UndefValue::get(V->getType()); for (auto itU = V->user_begin(), E = V->user_end(); itU != E;) { Value *U = *(itU++); // Kill memcpy, set operands to undef for call and ret, and recurse if (MemCpyInst *MC = dyn_cast<MemCpyInst>(U)) { DeadInsts.emplace_back(MC); } else if (CallInst *CI = dyn_cast<CallInst>(U)) { for (auto &operand : CI->operands()) { if (operand == V) operand.set(undef); } } else if (ReturnInst *Ret = dyn_cast<ReturnInst>(U)) { Ret->setOperand(0, undef); } else if (isa<Constant>(U) || isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<LoadInst>(U) || isa<StoreInst>(U)) { // Recurse users MarkEmptyStructUsers(U, DeadInsts); } else { DXASSERT(false, "otherwise, recursing unexpected empty struct user"); } } if (Instruction *I = dyn_cast<Instruction>(V)) { // Only need to add no use inst here. // DeleteDeadInst will delete everything. if (I->user_empty()) DeadInsts.emplace_back(I); } } bool SROA_Helper::IsEmptyStructType(Type *Ty, DxilTypeSystem &typeSys) { if (isa<ArrayType>(Ty)) Ty = Ty->getArrayElementType(); if (StructType *ST = dyn_cast<StructType>(Ty)) { if (!HLMatrixType::isa(Ty)) { DxilStructAnnotation *SA = typeSys.GetStructAnnotation(ST); if (SA && SA->IsEmptyStruct()) return true; } } return false; } // Recursively search all loads and stores of Ptr, and record all the scopes // they are in. static void FindAllScopesOfLoadsAndStores(Value *Ptr, std::unordered_set<MDNode *> *OutScopes) { for (User *U : Ptr->users()) { if (isa<GEPOperator>(U) || isa<BitCastOperator>(U)) { FindAllScopesOfLoadsAndStores(U, OutScopes); continue; } Instruction *I = dyn_cast<Instruction>(U); if (!I) continue; DebugLoc DL = I->getDebugLoc(); if (!DL) continue; if (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<MemCpyInst>(I) || isa<CallInst>(I)) // Could be some arbitrary HL op { DILocation *loc = DL.get(); while (loc) { DILocalScope *scope = dyn_cast<DILocalScope>(loc->getScope()); while (scope) { OutScopes->insert(scope); if (auto lexicalScope = dyn_cast<DILexicalBlockBase>(scope)) scope = lexicalScope->getScope(); else if (isa<DISubprogram>(scope)) break; } loc = loc->getInlinedAt(); } } } } //===----------------------------------------------------------------------===// // Delete all dbg.declare instructions for allocas that are never touched in // scopes. This could greatly improve compilation speed and binary size at the // cost of in-scope but unused (part of) variables being invisible in certain // functions. For example: // // struct Context { // float a, b, c; // }; // void bar(inout Context ctx) { // ctx.b = ctx.a * 2; // } // void foo(inout Context ctx) { // ctx.a = 10; // bar(ctx); // } // // float main() : SV_Target { // Context ctx = (Context)0; // foo(ctx); // return ctx.a + ctx.b + ctx.c; // } // // Before running this, shader would generate dbg.declare for members 'a', 'b', // and 'c' for variable 'ctx' in every scope ('main', 'foo', and 'bar'). In // the call stack with 'foo' and 'bar', member 'c' is never used in any way, so // it's a waste to generate dbg.declare for member 'c' for the 'ctx' in scope // 'foo' and scope 'bar', so they can be removed. 
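// Scopes are collected from the debug locations of loads, stores, memcpys and
// HL calls reaching each entry-block alloca; dbg.declares whose variable scope
// never appears in that set are erased.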
//===----------------------------------------------------------------------===// static void DeleteOutOfScopeDebugInfo(Function &F) { if (!llvm::hasDebugInfo(*F.getParent())) return; std::unordered_set<MDNode *> Scopes; for (Instruction &I : F.getEntryBlock()) { auto AI = dyn_cast<AllocaInst>(&I); if (!AI) continue; Scopes.clear(); FindAllScopesOfLoadsAndStores(AI, &Scopes); for (auto it = hlsl::dxilutil::mdv_users_begin(AI), end = hlsl::dxilutil::mdv_users_end(AI); it != end;) { User *U = *(it++); if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U)) { DILocalVariable *var = DDI->getVariable(); DILocalScope *scope = var->getScope(); if (!Scopes.count(scope)) { DDI->eraseFromParent(); } } } } } //===----------------------------------------------------------------------===// // SROA on function parameters. //===----------------------------------------------------------------------===// static void LegalizeDxilInputOutputs(Function *F, DxilFunctionAnnotation *EntryAnnotation, const DataLayout &DL, DxilTypeSystem &typeSys); static void InjectReturnAfterNoReturnPreserveOutput(HLModule &HLM); namespace { class SROA_Parameter_HLSL : public ModulePass { HLModule *m_pHLModule; public: static char ID; // Pass identification, replacement for typeid explicit SROA_Parameter_HLSL() : ModulePass(ID) {} StringRef getPassName() const override { return "SROA Parameter HLSL"; } static void RewriteBitcastWithIdenticalStructs(Function *F); static void RewriteBitcastWithIdenticalStructs(BitCastInst *BCI); static bool DeleteSimpleStoreOnlyAlloca(AllocaInst *AI); static bool IsSimpleStoreOnlyAlloca(AllocaInst *AI); bool runOnModule(Module &M) override { // Patch memcpy to cover case bitcast (gep ptr, 0,0) is transformed into // bitcast ptr. MemcpySplitter::PatchMemCpyWithZeroIdxGEP(M); m_pHLModule = &M.GetOrCreateHLModule(); const DataLayout &DL = M.getDataLayout(); // Load up debug information, to cross-reference values and the instructions // used to load them. m_HasDbgInfo = nullptr != M.getNamedMetadata("llvm.dbg.cu"); InjectReturnAfterNoReturnPreserveOutput(*m_pHLModule); std::deque<Function *> WorkList; std::vector<Function *> DeadHLFunctions; for (Function &F : M.functions()) { HLOpcodeGroup group = GetHLOpcodeGroup(&F); // Skip HL operations. if (group != HLOpcodeGroup::NotHL || group == HLOpcodeGroup::HLExtIntrinsic) { if (F.user_empty()) DeadHLFunctions.emplace_back(&F); continue; } if (F.isDeclaration()) { // Skip llvm intrinsic. if (F.isIntrinsic()) continue; // Skip unused external function. if (F.user_empty()) continue; } // Skip void(void) functions. if (F.getReturnType()->isVoidTy() && F.arg_size() == 0) continue; // Skip library function, except to LegalizeDxilInputOutputs if (&F != m_pHLModule->GetEntryFunction() && !m_pHLModule->IsEntryThatUsesSignatures(&F)) { if (!F.isDeclaration()) LegalizeDxilInputOutputs(&F, m_pHLModule->GetFunctionAnnotation(&F), DL, m_pHLModule->GetTypeSystem()); continue; } WorkList.emplace_back(&F); } // Remove dead hl functions here. // This is for hl functions which has body and always inline. for (Function *F : DeadHLFunctions) { F->eraseFromParent(); } // Preprocess aggregate function param used as function call arg. for (Function *F : WorkList) { preprocessArgUsedInCall(F); } // Process the worklist while (!WorkList.empty()) { Function *F = WorkList.front(); WorkList.pop_front(); RewriteBitcastWithIdenticalStructs(F); createFlattenedFunction(F); } // Replace functions with flattened version when we flat all the functions. 
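    // Call sites are rewritten only after every function in the worklist has
    // been flattened, so callers and callees are updated against consistent
    // signatures.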
for (auto Iter : funcMap) replaceCall(Iter.first, Iter.second); // Update patch constant function. for (Function &F : M.functions()) { if (F.isDeclaration()) continue; if (!m_pHLModule->HasDxilFunctionProps(&F)) continue; DxilFunctionProps &funcProps = m_pHLModule->GetDxilFunctionProps(&F); if (funcProps.shaderKind == DXIL::ShaderKind::Hull) { Function *oldPatchConstantFunc = funcProps.ShaderProps.HS.patchConstantFunc; if (funcMap.count(oldPatchConstantFunc)) m_pHLModule->SetPatchConstantFunctionForHS( &F, funcMap[oldPatchConstantFunc]); } } // Remove flattened functions. for (auto Iter : funcMap) { Function *F = Iter.first; Function *flatF = Iter.second; flatF->takeName(F); F->eraseFromParent(); } // SROA globals and allocas. SROAGlobalAndAllocas(*m_pHLModule, m_HasDbgInfo); // Move up allocas that might have been pushed down by instruction inserts SmallVector<AllocaInst *, 16> simpleStoreOnlyAllocas; for (Function &F : M) { if (F.isDeclaration()) continue; Instruction *insertPt = nullptr; simpleStoreOnlyAllocas.clear(); // SROA only potentially "incorrectly" inserts non-allocas into the entry // block. for (llvm::Instruction &I : F.getEntryBlock()) { // In really pathologically huge shaders, there could be thousands of // unused allocas (and hundreds of thousands of dbg.declares). Record // and remove them now so they don't horrifically slow down the // compilation. if (isa<AllocaInst>(I) && IsSimpleStoreOnlyAlloca(cast<AllocaInst>(&I))) simpleStoreOnlyAllocas.push_back(cast<AllocaInst>(&I)); if (!insertPt) { // Find the first non-alloca to move the allocas above if (!isa<AllocaInst>(I) && !isa<DbgInfoIntrinsic>(I)) insertPt = &I; } else if (isa<AllocaInst>(I)) { // Move any alloca to before the first non-alloca I.moveBefore(insertPt); } } for (AllocaInst *AI : simpleStoreOnlyAllocas) { DeleteSimpleStoreOnlyAlloca(AI); } DeleteOutOfScopeDebugInfo(F); } return true; } private: void DeleteDeadInstructions(); void preprocessArgUsedInCall(Function *F); void moveFunctionBody(Function *F, Function *flatF); void replaceCall(Function *F, Function *flatF); void createFlattenedFunction(Function *F); void flattenArgument(Function *F, Value *Arg, bool bForParam, DxilParameterAnnotation &paramAnnotation, std::vector<Value *> &FlatParamList, std::vector<DxilParameterAnnotation> &FlatRetAnnotationList, BasicBlock *EntryBlock, ArrayRef<DbgDeclareInst *> DDIs); Value *castResourceArgIfRequired(Value *V, Type *Ty, bool bOut, DxilParamInputQual inputQual, IRBuilder<> &Builder); Value *castArgumentIfRequired(Value *V, Type *Ty, bool bOut, DxilParamInputQual inputQual, DxilFieldAnnotation &annotation, IRBuilder<> &Builder, DxilTypeSystem &TypeSys); // Replace use of parameter which changed type when flatten. // Also add information to Arg if required. void replaceCastParameter(Value *NewParam, Value *OldParam, Function &F, Argument *Arg, const DxilParamInputQual inputQual, IRBuilder<> &Builder); void allocateSemanticIndex( std::vector<DxilParameterAnnotation> &FlatAnnotationList, unsigned startArgIndex, llvm::StringMap<Type *> &semanticTypeMap); // static std::vector<Value*> GetConstValueIdxList(IRBuilder<>& builder, // std::vector<unsigned> idxlist); /// DeadInsts - Keep track of instructions we have made dead, so that /// we can remove them after we are done working. SmallVector<Value *, 32> DeadInsts; // Map from orginal function to the flatten version. MapVector<Function *, Function *> funcMap; // Need deterministic order of iteration // Map from original arg/param to flatten cast version. 
std::unordered_map<Value *, std::pair<Value *, DxilParamInputQual>> castParamMap; // Map form first element of a vector the list of all elements of the vector. std::unordered_map<Value *, SmallVector<Value *, 4>> vectorEltsMap; // Set for row major matrix parameter. std::unordered_set<Value *> castRowMajorParamMap; bool m_HasDbgInfo; }; // When replacing aggregates by its scalar elements, // the first element will preserve the original semantic, // and the subsequent ones will temporarily use this value. // We then run a pass to fix the semantics and properly renumber them // once the aggregate has been fully expanded. // // For example: // struct Foo { float a; float b; }; // void main(Foo foo : TEXCOORD0, float bar : TEXCOORD0) // // Will be expanded to // void main(float a : TEXCOORD0, float b : *, float bar : TEXCOORD0) // // And then fixed up to // void main(float a : TEXCOORD0, float b : TEXCOORD1, float bar : TEXCOORD0) // // (which will later on fail validation due to duplicate semantics). constexpr const char *ContinuedPseudoSemantic = "*"; } // namespace char SROA_Parameter_HLSL::ID = 0; INITIALIZE_PASS(SROA_Parameter_HLSL, "scalarrepl-param-hlsl", "Scalar Replacement of Aggregates HLSL (parameters)", false, false) void SROA_Parameter_HLSL::RewriteBitcastWithIdenticalStructs(Function *F) { if (F->isDeclaration()) return; // Gather list of bitcast involving src and dest structs with identical layout std::vector<BitCastInst *> worklist; for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) { if (BitCastInst *BCI = dyn_cast<BitCastInst>(&*I)) { Type *DstTy = BCI->getDestTy(); Type *SrcTy = BCI->getSrcTy(); if (ArePointersToStructsOfIdenticalLayouts(DstTy, SrcTy)) worklist.push_back(BCI); } } // Replace bitcast involving src and dest structs with identical layout while (!worklist.empty()) { BitCastInst *BCI = worklist.back(); worklist.pop_back(); RewriteBitcastWithIdenticalStructs(BCI); } } bool SROA_Parameter_HLSL::IsSimpleStoreOnlyAlloca(AllocaInst *AI) { if (!AI->getAllocatedType()->isSingleValueType()) return false; for (User *U : AI->users()) { if (!isa<StoreInst>(U)) return false; } return true; } bool SROA_Parameter_HLSL::DeleteSimpleStoreOnlyAlloca(AllocaInst *AI) { assert(IsSimpleStoreOnlyAlloca(AI)); for (auto it = AI->user_begin(), end = AI->user_end(); it != end;) { StoreInst *Store = cast<StoreInst>(*(it++)); Store->eraseFromParent(); } // Delete dbg.declare's too for (auto it = hlsl::dxilutil::mdv_users_begin(AI), end = hlsl::dxilutil::mdv_users_end(AI); it != end;) { User *U = *(it++); if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U)) DDI->eraseFromParent(); } AI->eraseFromParent(); return true; } void SROA_Parameter_HLSL::RewriteBitcastWithIdenticalStructs(BitCastInst *BCI) { StructType *srcStTy = cast<StructType>(BCI->getSrcTy()->getPointerElementType()); StructType *destStTy = cast<StructType>(BCI->getDestTy()->getPointerElementType()); Value *srcPtr = BCI->getOperand(0); IRBuilder<> AllocaBuilder( dxilutil::FindAllocaInsertionPt(BCI->getParent()->getParent())); AllocaInst *destPtr = AllocaBuilder.CreateAlloca(destStTy); IRBuilder<> InstBuilder(BCI); std::vector<unsigned> idxlist = {0}; CopyElementsOfStructsWithIdenticalLayout(InstBuilder, destPtr, srcPtr, srcStTy, idxlist); BCI->replaceAllUsesWith(destPtr); BCI->eraseFromParent(); } /// DeleteDeadInstructions - Erase instructions on the DeadInstrs list, /// recursively including all their operands that become trivially dead. 
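/// Allocas are never added here; they remain on the main worklist and are
/// deleted separately.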
void SROA_Parameter_HLSL::DeleteDeadInstructions() { while (!DeadInsts.empty()) { Instruction *I = cast<Instruction>(DeadInsts.pop_back_val()); for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) if (Instruction *U = dyn_cast<Instruction>(*OI)) { // Zero out the operand and see if it becomes trivially dead. // (But, don't add allocas to the dead instruction list -- they are // already on the worklist and will be deleted separately.) *OI = nullptr; if (isInstructionTriviallyDead(U) && !isa<AllocaInst>(U)) DeadInsts.push_back(U); } I->eraseFromParent(); } } static DxilFieldAnnotation &GetEltAnnotation(Type *Ty, unsigned idx, DxilFieldAnnotation &annotation, DxilTypeSystem &dxilTypeSys) { while (Ty->isArrayTy()) Ty = Ty->getArrayElementType(); if (StructType *ST = dyn_cast<StructType>(Ty)) { if (HLMatrixType::isa(Ty)) return annotation; DxilStructAnnotation *SA = dxilTypeSys.GetStructAnnotation(ST); if (SA) { DxilFieldAnnotation &FA = SA->GetFieldAnnotation(idx); return FA; } } return annotation; } // Note: Semantic index allocation. // Semantic index is allocated base on linear layout. // For following code /* struct S { float4 m; float4 m2; }; S s[2] : semantic; struct S2 { float4 m[2]; float4 m2[2]; }; S2 s2 : semantic; */ // The semantic index is like this: // s[0].m : semantic0 // s[0].m2 : semantic1 // s[1].m : semantic2 // s[1].m2 : semantic3 // s2.m[0] : semantic0 // s2.m[1] : semantic1 // s2.m2[0] : semantic2 // s2.m2[1] : semantic3 // But when flatten argument, the result is like this: // float4 s_m[2], float4 s_m2[2]. // float4 s2_m[2], float4 s2_m2[2]. // To do the allocation, need to map from each element to its flattened // argument. Say arg index of float4 s_m[2] is 0, float4 s_m2[2] is 1. Need to // get 0 from s[0].m and s[1].m, get 1 from s[0].m2 and s[1].m2. // Allocate the argments with same semantic string from type where the // semantic starts( S2 for s2.m[2] and s2.m2[2]). // Iterate each elements of the type, save the semantic index and update it. // The map from element to the arg ( s[0].m2 -> s.m2[2]) is done by argIdx. // ArgIdx only inc by 1 when finish a struct field. static unsigned AllocateSemanticIndex( Type *Ty, unsigned &semIndex, unsigned argIdx, unsigned endArgIdx, std::vector<DxilParameterAnnotation> &FlatAnnotationList) { if (Ty->isPointerTy()) { return AllocateSemanticIndex(Ty->getPointerElementType(), semIndex, argIdx, endArgIdx, FlatAnnotationList); } else if (Ty->isArrayTy()) { unsigned arraySize = Ty->getArrayNumElements(); unsigned updatedArgIdx = argIdx; Type *EltTy = Ty->getArrayElementType(); for (unsigned i = 0; i < arraySize; i++) { updatedArgIdx = AllocateSemanticIndex(EltTy, semIndex, argIdx, endArgIdx, FlatAnnotationList); } return updatedArgIdx; } else if (Ty->isStructTy() && !HLMatrixType::isa(Ty)) { unsigned fieldsCount = Ty->getStructNumElements(); for (unsigned i = 0; i < fieldsCount; i++) { Type *EltTy = Ty->getStructElementType(i); argIdx = AllocateSemanticIndex(EltTy, semIndex, argIdx, endArgIdx, FlatAnnotationList); // Unwrap array types when checking whether this is a leaf node, // otherwise, array of struct will be misinterpreted as a leaf node. while (EltTy->isArrayTy()) EltTy = EltTy->getArrayElementType(); if (!(EltTy->isStructTy() && !HLMatrixType::isa(EltTy))) { // Update argIdx only when it is a leaf node. argIdx++; } } return argIdx; } else { DXASSERT(argIdx < endArgIdx, "arg index out of bound"); DxilParameterAnnotation &paramAnnotation = FlatAnnotationList[argIdx]; // Get element size. 
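    // A matrix leaf consumes one semantic index per row (row-major) or per
    // column (column-major); any other leaf consumes exactly one.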
unsigned rows = 1; if (paramAnnotation.HasMatrixAnnotation()) { const DxilMatrixAnnotation &matrix = paramAnnotation.GetMatrixAnnotation(); if (matrix.Orientation == MatrixOrientation::RowMajor) { rows = matrix.Rows; } else { DXASSERT_NOMSG(matrix.Orientation == MatrixOrientation::ColumnMajor); rows = matrix.Cols; } } // Save semIndex. for (unsigned i = 0; i < rows; i++) paramAnnotation.AppendSemanticIndex(semIndex + i); // Update semIndex. semIndex += rows; return argIdx; } } void SROA_Parameter_HLSL::allocateSemanticIndex( std::vector<DxilParameterAnnotation> &FlatAnnotationList, unsigned startArgIndex, llvm::StringMap<Type *> &semanticTypeMap) { unsigned endArgIndex = FlatAnnotationList.size(); // Allocate semantic index. for (unsigned i = startArgIndex; i < endArgIndex; ++i) { // Group by semantic names. DxilParameterAnnotation &flatParamAnnotation = FlatAnnotationList[i]; const std::string &semantic = flatParamAnnotation.GetSemanticString(); // If semantic is undefined, an error will be emitted elsewhere. For now, // we should avoid asserting. if (semantic.empty()) continue; StringRef baseSemName; // The 'FOO' in 'FOO1'. uint32_t semIndex; // The '1' in 'FOO1' // Split semName and index. Semantic::DecomposeNameAndIndex(semantic, &baseSemName, &semIndex); unsigned semGroupEnd = i + 1; while (semGroupEnd < endArgIndex && FlatAnnotationList[semGroupEnd].GetSemanticString() == ContinuedPseudoSemantic) { FlatAnnotationList[semGroupEnd].SetSemanticString(baseSemName); ++semGroupEnd; } DXASSERT(semanticTypeMap.count(semantic) > 0, "Must has semantic type"); Type *semanticTy = semanticTypeMap[semantic]; AllocateSemanticIndex(semanticTy, semIndex, /*argIdx*/ i, /*endArgIdx*/ semGroupEnd, FlatAnnotationList); // Update i. i = semGroupEnd - 1; } } // // Cast parameters. // static void CopyHandleToResourcePtr(Value *Handle, Value *ResPtr, HLModule &HLM, IRBuilder<> &Builder) { // Cast it to resource. Type *ResTy = ResPtr->getType()->getPointerElementType(); Value *Res = HLM.EmitHLOperationCall(Builder, HLOpcodeGroup::HLCast, (unsigned)HLCastOpcode::HandleToResCast, ResTy, {Handle}, *HLM.GetModule()); // Store casted resource to OldArg. Builder.CreateStore(Res, ResPtr); } static void CopyHandlePtrToResourcePtr(Value *HandlePtr, Value *ResPtr, HLModule &HLM, IRBuilder<> &Builder) { // Load the handle. Value *Handle = Builder.CreateLoad(HandlePtr); CopyHandleToResourcePtr(Handle, ResPtr, HLM, Builder); } static Value *CastResourcePtrToHandle(Value *Res, Type *HandleTy, HLModule &HLM, IRBuilder<> &Builder) { // Load OldArg. 
Value *LdRes = Builder.CreateLoad(Res); Value *Handle = HLM.EmitHLOperationCall( Builder, HLOpcodeGroup::HLCreateHandle, /*opcode*/ 0, HandleTy, {LdRes}, *HLM.GetModule()); return Handle; } static void CopyResourcePtrToHandlePtr(Value *Res, Value *HandlePtr, HLModule &HLM, IRBuilder<> &Builder) { Type *HandleTy = HandlePtr->getType()->getPointerElementType(); Value *Handle = CastResourcePtrToHandle(Res, HandleTy, HLM, Builder); Builder.CreateStore(Handle, HandlePtr); } static void CopyVectorPtrToEltsPtr(Value *VecPtr, ArrayRef<Value *> elts, unsigned vecSize, IRBuilder<> &Builder) { Value *Vec = Builder.CreateLoad(VecPtr); for (unsigned i = 0; i < vecSize; i++) { Value *Elt = Builder.CreateExtractElement(Vec, i); Builder.CreateStore(Elt, elts[i]); } } static void CopyEltsPtrToVectorPtr(ArrayRef<Value *> elts, Value *VecPtr, Type *VecTy, unsigned vecSize, IRBuilder<> &Builder) { Value *Vec = UndefValue::get(VecTy); for (unsigned i = 0; i < vecSize; i++) { Value *Elt = Builder.CreateLoad(elts[i]); Vec = Builder.CreateInsertElement(Vec, Elt, i); } Builder.CreateStore(Vec, VecPtr); } static void CopyMatToArrayPtr(Value *Mat, Value *ArrayPtr, unsigned arrayBaseIdx, HLModule &HLM, IRBuilder<> &Builder, bool bRowMajor) { // Mat val is row major. HLMatrixType MatTy = HLMatrixType::cast(Mat->getType()); Type *VecTy = MatTy.getLoweredVectorTypeForReg(); Value *Vec = HLM.EmitHLOperationCall(Builder, HLOpcodeGroup::HLCast, (unsigned)HLCastOpcode::RowMatrixToVecCast, VecTy, {Mat}, *HLM.GetModule()); Value *zero = Builder.getInt32(0); for (unsigned r = 0; r < MatTy.getNumRows(); r++) { for (unsigned c = 0; c < MatTy.getNumColumns(); c++) { unsigned matIdx = MatTy.getColumnMajorIndex(r, c); Value *Elt = Builder.CreateExtractElement(Vec, matIdx); Value *Ptr = Builder.CreateInBoundsGEP( ArrayPtr, {zero, Builder.getInt32(arrayBaseIdx + matIdx)}); Builder.CreateStore(Elt, Ptr); } } } static void CopyMatPtrToArrayPtr(Value *MatPtr, Value *ArrayPtr, unsigned arrayBaseIdx, HLModule &HLM, IRBuilder<> &Builder, bool bRowMajor) { Type *Ty = MatPtr->getType()->getPointerElementType(); Value *Mat = nullptr; if (bRowMajor) { Mat = HLM.EmitHLOperationCall(Builder, HLOpcodeGroup::HLMatLoadStore, (unsigned)HLMatLoadStoreOpcode::RowMatLoad, Ty, {MatPtr}, *HLM.GetModule()); } else { Mat = HLM.EmitHLOperationCall(Builder, HLOpcodeGroup::HLMatLoadStore, (unsigned)HLMatLoadStoreOpcode::ColMatLoad, Ty, {MatPtr}, *HLM.GetModule()); // Matrix value should be row major. Mat = HLM.EmitHLOperationCall(Builder, HLOpcodeGroup::HLCast, (unsigned)HLCastOpcode::ColMatrixToRowMatrix, Ty, {Mat}, *HLM.GetModule()); } CopyMatToArrayPtr(Mat, ArrayPtr, arrayBaseIdx, HLM, Builder, bRowMajor); } static Value *LoadArrayPtrToMat(Value *ArrayPtr, unsigned arrayBaseIdx, Type *Ty, HLModule &HLM, IRBuilder<> &Builder, bool bRowMajor) { HLMatrixType MatTy = HLMatrixType::cast(Ty); // HLInit operands are in row major. SmallVector<Value *, 16> Elts; Value *zero = Builder.getInt32(0); for (unsigned r = 0; r < MatTy.getNumRows(); r++) { for (unsigned c = 0; c < MatTy.getNumColumns(); c++) { unsigned matIdx = bRowMajor ? 
MatTy.getRowMajorIndex(r, c) : MatTy.getColumnMajorIndex(r, c); Value *Ptr = Builder.CreateInBoundsGEP( ArrayPtr, {zero, Builder.getInt32(arrayBaseIdx + matIdx)}); Value *Elt = Builder.CreateLoad(Ptr); Elts.emplace_back(Elt); } } return HLM.EmitHLOperationCall(Builder, HLOpcodeGroup::HLInit, /*opcode*/ 0, Ty, {Elts}, *HLM.GetModule()); } static void CopyArrayPtrToMatPtr(Value *ArrayPtr, unsigned arrayBaseIdx, Value *MatPtr, HLModule &HLM, IRBuilder<> &Builder, bool bRowMajor) { Type *Ty = MatPtr->getType()->getPointerElementType(); Value *Mat = LoadArrayPtrToMat(ArrayPtr, arrayBaseIdx, Ty, HLM, Builder, bRowMajor); if (bRowMajor) { HLM.EmitHLOperationCall(Builder, HLOpcodeGroup::HLMatLoadStore, (unsigned)HLMatLoadStoreOpcode::RowMatStore, Ty, {MatPtr, Mat}, *HLM.GetModule()); } else { // Mat is row major. // Cast it to col major before store. Mat = HLM.EmitHLOperationCall(Builder, HLOpcodeGroup::HLCast, (unsigned)HLCastOpcode::RowMatrixToColMatrix, Ty, {Mat}, *HLM.GetModule()); HLM.EmitHLOperationCall(Builder, HLOpcodeGroup::HLMatLoadStore, (unsigned)HLMatLoadStoreOpcode::ColMatStore, Ty, {MatPtr, Mat}, *HLM.GetModule()); } } using CopyFunctionTy = void(Value *FromPtr, Value *ToPtr, HLModule &HLM, Type *HandleTy, IRBuilder<> &Builder, bool bRowMajor); static void CastCopyArrayMultiDimTo1Dim(Value *FromArray, Value *ToArray, Type *CurFromTy, std::vector<Value *> &idxList, unsigned calcIdx, Type *HandleTy, HLModule &HLM, IRBuilder<> &Builder, CopyFunctionTy CastCopyFn, bool bRowMajor) { if (CurFromTy->isVectorTy()) { // Copy vector to array. Value *FromPtr = Builder.CreateInBoundsGEP(FromArray, idxList); Value *V = Builder.CreateLoad(FromPtr); unsigned vecSize = CurFromTy->getVectorNumElements(); Value *zeroIdx = Builder.getInt32(0); for (unsigned i = 0; i < vecSize; i++) { Value *ToPtr = Builder.CreateInBoundsGEP( ToArray, {zeroIdx, Builder.getInt32(calcIdx++)}); Value *Elt = Builder.CreateExtractElement(V, i); Builder.CreateStore(Elt, ToPtr); } } else if (HLMatrixType MatTy = HLMatrixType::dyn_cast(CurFromTy)) { // Copy matrix to array. // Calculate the offset. unsigned offset = calcIdx * MatTy.getNumElements(); Value *FromPtr = Builder.CreateInBoundsGEP(FromArray, idxList); CopyMatPtrToArrayPtr(FromPtr, ToArray, offset, HLM, Builder, bRowMajor); } else if (!CurFromTy->isArrayTy()) { Value *FromPtr = Builder.CreateInBoundsGEP(FromArray, idxList); Value *ToPtr = Builder.CreateInBoundsGEP( ToArray, {Builder.getInt32(0), Builder.getInt32(calcIdx)}); CastCopyFn(FromPtr, ToPtr, HLM, HandleTy, Builder, bRowMajor); } else { unsigned size = CurFromTy->getArrayNumElements(); Type *FromEltTy = CurFromTy->getArrayElementType(); for (unsigned i = 0; i < size; i++) { idxList.push_back(Builder.getInt32(i)); unsigned idx = calcIdx * size + i; CastCopyArrayMultiDimTo1Dim(FromArray, ToArray, FromEltTy, idxList, idx, HandleTy, HLM, Builder, CastCopyFn, bRowMajor); idxList.pop_back(); } } } static void CastCopyArray1DimToMultiDim(Value *FromArray, Value *ToArray, Type *CurToTy, std::vector<Value *> &idxList, unsigned calcIdx, Type *HandleTy, HLModule &HLM, IRBuilder<> &Builder, CopyFunctionTy CastCopyFn, bool bRowMajor) { if (CurToTy->isVectorTy()) { // Copy array to vector. Value *V = UndefValue::get(CurToTy); unsigned vecSize = CurToTy->getVectorNumElements(); // Calculate the offset. 
unsigned offset = calcIdx * vecSize; Value *zeroIdx = Builder.getInt32(0); Value *ToPtr = Builder.CreateInBoundsGEP(ToArray, idxList); for (unsigned i = 0; i < vecSize; i++) { Value *FromPtr = Builder.CreateInBoundsGEP( FromArray, {zeroIdx, Builder.getInt32(offset++)}); Value *Elt = Builder.CreateLoad(FromPtr); V = Builder.CreateInsertElement(V, Elt, i); } Builder.CreateStore(V, ToPtr); } else if (HLMatrixType MatTy = HLMatrixType::cast(CurToTy)) { // Copy array to matrix. // Calculate the offset. unsigned offset = calcIdx * MatTy.getNumElements(); Value *ToPtr = Builder.CreateInBoundsGEP(ToArray, idxList); CopyArrayPtrToMatPtr(FromArray, offset, ToPtr, HLM, Builder, bRowMajor); } else if (!CurToTy->isArrayTy()) { Value *FromPtr = Builder.CreateInBoundsGEP( FromArray, {Builder.getInt32(0), Builder.getInt32(calcIdx)}); Value *ToPtr = Builder.CreateInBoundsGEP(ToArray, idxList); CastCopyFn(FromPtr, ToPtr, HLM, HandleTy, Builder, bRowMajor); } else { unsigned size = CurToTy->getArrayNumElements(); Type *ToEltTy = CurToTy->getArrayElementType(); for (unsigned i = 0; i < size; i++) { idxList.push_back(Builder.getInt32(i)); unsigned idx = calcIdx * size + i; CastCopyArray1DimToMultiDim(FromArray, ToArray, ToEltTy, idxList, idx, HandleTy, HLM, Builder, CastCopyFn, bRowMajor); idxList.pop_back(); } } } static void CastCopyOldPtrToNewPtr(Value *OldPtr, Value *NewPtr, HLModule &HLM, Type *HandleTy, IRBuilder<> &Builder, bool bRowMajor) { Type *NewTy = NewPtr->getType()->getPointerElementType(); Type *OldTy = OldPtr->getType()->getPointerElementType(); if (NewTy == HandleTy) { CopyResourcePtrToHandlePtr(OldPtr, NewPtr, HLM, Builder); } else if (OldTy->isVectorTy()) { // Copy vector to array. Value *V = Builder.CreateLoad(OldPtr); unsigned vecSize = OldTy->getVectorNumElements(); Value *zeroIdx = Builder.getInt32(0); for (unsigned i = 0; i < vecSize; i++) { Value *EltPtr = Builder.CreateGEP(NewPtr, {zeroIdx, Builder.getInt32(i)}); Value *Elt = Builder.CreateExtractElement(V, i); Builder.CreateStore(Elt, EltPtr); } } else if (HLMatrixType::isa(OldTy)) { CopyMatPtrToArrayPtr(OldPtr, NewPtr, /*arrayBaseIdx*/ 0, HLM, Builder, bRowMajor); } else if (OldTy->isArrayTy()) { std::vector<Value *> idxList; idxList.emplace_back(Builder.getInt32(0)); CastCopyArrayMultiDimTo1Dim(OldPtr, NewPtr, OldTy, idxList, /*calcIdx*/ 0, HandleTy, HLM, Builder, CastCopyOldPtrToNewPtr, bRowMajor); } } static void CastCopyNewPtrToOldPtr(Value *NewPtr, Value *OldPtr, HLModule &HLM, Type *HandleTy, IRBuilder<> &Builder, bool bRowMajor) { Type *NewTy = NewPtr->getType()->getPointerElementType(); Type *OldTy = OldPtr->getType()->getPointerElementType(); if (NewTy == HandleTy) { CopyHandlePtrToResourcePtr(NewPtr, OldPtr, HLM, Builder); } else if (OldTy->isVectorTy()) { // Copy array to vector. 
Value *V = UndefValue::get(OldTy); unsigned vecSize = OldTy->getVectorNumElements(); Value *zeroIdx = Builder.getInt32(0); for (unsigned i = 0; i < vecSize; i++) { Value *EltPtr = Builder.CreateGEP(NewPtr, {zeroIdx, Builder.getInt32(i)}); Value *Elt = Builder.CreateLoad(EltPtr); V = Builder.CreateInsertElement(V, Elt, i); } Builder.CreateStore(V, OldPtr); } else if (HLMatrixType::isa(OldTy)) { CopyArrayPtrToMatPtr(NewPtr, /*arrayBaseIdx*/ 0, OldPtr, HLM, Builder, bRowMajor); } else if (OldTy->isArrayTy()) { std::vector<Value *> idxList; idxList.emplace_back(Builder.getInt32(0)); CastCopyArray1DimToMultiDim(NewPtr, OldPtr, OldTy, idxList, /*calcIdx*/ 0, HandleTy, HLM, Builder, CastCopyNewPtrToOldPtr, bRowMajor); } } void SROA_Parameter_HLSL::replaceCastParameter( Value *NewParam, Value *OldParam, Function &F, Argument *Arg, const DxilParamInputQual inputQual, IRBuilder<> &Builder) { Type *HandleTy = m_pHLModule->GetOP()->GetHandleType(); Type *NewTy = NewParam->getType(); Type *OldTy = OldParam->getType(); bool bIn = inputQual == DxilParamInputQual::Inout || inputQual == DxilParamInputQual::In; bool bOut = inputQual == DxilParamInputQual::Inout || inputQual == DxilParamInputQual::Out; // Make sure InsertPoint after OldParam inst. if (Instruction *I = dyn_cast<Instruction>(OldParam)) { Builder.SetInsertPoint(I->getNextNode()); } SmallVector<DbgDeclareInst *, 4> dbgDecls; llvm::FindAllocaDbgDeclare(OldParam, dbgDecls); for (DbgDeclareInst *DDI : dbgDecls) { // Add debug info to new param. DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false); DIExpression *DDIExp = DDI->getExpression(); DIB.insertDeclare(NewParam, DDI->getVariable(), DDIExp, DDI->getDebugLoc(), Builder.GetInsertPoint()); } if (isa<Argument>(OldParam) && OldTy->isPointerTy()) { // OldParam will be removed with Old function. // Create alloca to replace it. IRBuilder<> AllocaBuilder(dxilutil::FindAllocaInsertionPt(&F)); Value *AllocParam = AllocaBuilder.CreateAlloca(OldTy->getPointerElementType()); OldParam->replaceAllUsesWith(AllocParam); OldParam = AllocParam; } if (NewTy == HandleTy) { CopyHandleToResourcePtr(NewParam, OldParam, *m_pHLModule, Builder); } else if (vectorEltsMap.count(NewParam)) { // Vector is flattened to scalars. Type *VecTy = OldTy; if (VecTy->isPointerTy()) VecTy = VecTy->getPointerElementType(); // Flattened vector. SmallVector<Value *, 4> &elts = vectorEltsMap[NewParam]; unsigned vecSize = elts.size(); if (NewTy->isPointerTy()) { if (bIn) { // Copy NewParam to OldParam at entry. CopyEltsPtrToVectorPtr(elts, OldParam, VecTy, vecSize, Builder); } // bOut must be true here. // Store the OldParam to NewParam before every return. for (auto &BB : F.getBasicBlockList()) { if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator())) { IRBuilder<> RetBuilder(RI); CopyVectorPtrToEltsPtr(OldParam, elts, vecSize, RetBuilder); } } } else { // Must be in parameter. // Copy NewParam to OldParam at entry. Value *Vec = UndefValue::get(VecTy); for (unsigned i = 0; i < vecSize; i++) { Vec = Builder.CreateInsertElement(Vec, elts[i], i); } if (OldTy->isPointerTy()) { Builder.CreateStore(Vec, OldParam); } else { OldParam->replaceAllUsesWith(Vec); } } // Don't need elts anymore. vectorEltsMap.erase(NewParam); } else if (!NewTy->isPointerTy()) { // Ptr param is cast to non-ptr param. // Must be in param. // Store NewParam to OldParam at entry. 
Builder.CreateStore(NewParam, OldParam); } else if (HLMatrixType::isa(OldTy)) { bool bRowMajor = castRowMajorParamMap.count(NewParam); Value *Mat = LoadArrayPtrToMat(NewParam, /*arrayBaseIdx*/ 0, OldTy, *m_pHLModule, Builder, bRowMajor); OldParam->replaceAllUsesWith(Mat); } else { bool bRowMajor = castRowMajorParamMap.count(NewParam); // NewTy is pointer type. if (bIn) { // Copy NewParam to OldParam at entry. CastCopyNewPtrToOldPtr(NewParam, OldParam, *m_pHLModule, HandleTy, Builder, bRowMajor); } if (bOut) { // Store the OldParam to NewParam before every return. for (auto &BB : F.getBasicBlockList()) { if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator())) { IRBuilder<> RetBuilder(RI); CastCopyOldPtrToNewPtr(OldParam, NewParam, *m_pHLModule, HandleTy, RetBuilder, bRowMajor); } } } } } Value * SROA_Parameter_HLSL::castResourceArgIfRequired(Value *V, Type *Ty, bool bOut, DxilParamInputQual inputQual, IRBuilder<> &Builder) { Type *HandleTy = m_pHLModule->GetOP()->GetHandleType(); Module &M = *m_pHLModule->GetModule(); IRBuilder<> AllocaBuilder( dxilutil::FindAllocaInsertionPt(Builder.GetInsertPoint())); // Lower resource type to handle ty. if (dxilutil::IsHLSLResourceType(Ty)) { Value *Res = V; if (!bOut) { Value *LdRes = Builder.CreateLoad(Res); V = m_pHLModule->EmitHLOperationCall(Builder, HLOpcodeGroup::HLCreateHandle, /*opcode*/ 0, HandleTy, {LdRes}, M); } else { V = AllocaBuilder.CreateAlloca(HandleTy); } castParamMap[V] = std::make_pair(Res, inputQual); } else if (Ty->isArrayTy()) { unsigned arraySize = 1; Type *AT = Ty; while (AT->isArrayTy()) { arraySize *= AT->getArrayNumElements(); AT = AT->getArrayElementType(); } if (dxilutil::IsHLSLResourceType(AT)) { Value *Res = V; Type *Ty = ArrayType::get(HandleTy, arraySize); V = AllocaBuilder.CreateAlloca(Ty); castParamMap[V] = std::make_pair(Res, inputQual); } } return V; } Value *SROA_Parameter_HLSL::castArgumentIfRequired( Value *V, Type *Ty, bool bOut, DxilParamInputQual inputQual, DxilFieldAnnotation &annotation, IRBuilder<> &Builder, DxilTypeSystem &TypeSys) { Module &M = *m_pHLModule->GetModule(); IRBuilder<> AllocaBuilder( dxilutil::FindAllocaInsertionPt(Builder.GetInsertPoint())); if (inputQual == DxilParamInputQual::InPayload) { DXASSERT_NOMSG(isa<StructType>(Ty)); // Lower payload type here StructType *LoweredTy = GetLoweredUDT(cast<StructType>(Ty), &TypeSys); if (LoweredTy != Ty) { Value *Ptr = AllocaBuilder.CreateAlloca(LoweredTy); ReplaceUsesForLoweredUDT(V, Ptr); castParamMap[V] = std::make_pair(Ptr, inputQual); V = Ptr; } return V; } // Remove pointer for vector/scalar which is not out. if (V->getType()->isPointerTy() && !Ty->isAggregateType() && !bOut) { Value *Ptr = AllocaBuilder.CreateAlloca(Ty); V->replaceAllUsesWith(Ptr); // Create load here to make correct type. // The Ptr will be store with correct value in replaceCastParameter. if (Ptr->hasOneUse()) { // Load after existing user for call arg replace. // If not, call arg will load undef. // This will not hurt parameter, new load is only after first load. // It still before all the load users. Instruction *User = cast<Instruction>(*(Ptr->user_begin())); IRBuilder<> CallBuilder(User->getNextNode()); V = CallBuilder.CreateLoad(Ptr); } else { V = Builder.CreateLoad(Ptr); } castParamMap[V] = std::make_pair(Ptr, inputQual); } V = castResourceArgIfRequired(V, Ty, bOut, inputQual, Builder); // Entry function matrix value parameter has major. // Make sure its user use row major matrix value. 
bool updateToColMajor = annotation.HasMatrixAnnotation() && annotation.GetMatrixAnnotation().Orientation == MatrixOrientation::ColumnMajor; if (updateToColMajor) { if (V->getType()->isPointerTy()) { for (User *user : V->users()) { CallInst *CI = dyn_cast<CallInst>(user); if (!CI) continue; HLOpcodeGroup group = GetHLOpcodeGroupByName(CI->getCalledFunction()); if (group != HLOpcodeGroup::HLMatLoadStore) continue; HLMatLoadStoreOpcode opcode = static_cast<HLMatLoadStoreOpcode>(GetHLOpcode(CI)); Type *opcodeTy = Builder.getInt32Ty(); switch (opcode) { case HLMatLoadStoreOpcode::RowMatLoad: { // Update matrix function opcode to col major version. Value *rowOpArg = ConstantInt::get( opcodeTy, static_cast<unsigned>(HLMatLoadStoreOpcode::ColMatLoad)); CI->setOperand(HLOperandIndex::kOpcodeIdx, rowOpArg); // Cast it to row major. CallInst *RowMat = HLModule::EmitHLOperationCall( Builder, HLOpcodeGroup::HLCast, (unsigned)HLCastOpcode::ColMatrixToRowMatrix, Ty, {CI}, M); CI->replaceAllUsesWith(RowMat); // Set arg to CI again. RowMat->setArgOperand(HLOperandIndex::kUnaryOpSrc0Idx, CI); } break; case HLMatLoadStoreOpcode::RowMatStore: // Update matrix function opcode to col major version. Value *rowOpArg = ConstantInt::get( opcodeTy, static_cast<unsigned>(HLMatLoadStoreOpcode::ColMatStore)); CI->setOperand(HLOperandIndex::kOpcodeIdx, rowOpArg); Value *Mat = CI->getArgOperand(HLOperandIndex::kMatStoreValOpIdx); // Cast it to col major. CallInst *RowMat = HLModule::EmitHLOperationCall( Builder, HLOpcodeGroup::HLCast, (unsigned)HLCastOpcode::RowMatrixToColMatrix, Ty, {Mat}, M); CI->setArgOperand(HLOperandIndex::kMatStoreValOpIdx, RowMat); break; } } } else { CallInst *RowMat = HLModule::EmitHLOperationCall( Builder, HLOpcodeGroup::HLCast, (unsigned)HLCastOpcode::ColMatrixToRowMatrix, Ty, {V}, M); V->replaceAllUsesWith(RowMat); // Set arg to V again. RowMat->setArgOperand(HLOperandIndex::kUnaryOpSrc0Idx, V); } } return V; } struct AnnotatedValue { llvm::Value *Value; DxilFieldAnnotation Annotation; }; void SROA_Parameter_HLSL::flattenArgument( Function *F, Value *Arg, bool bForParam, DxilParameterAnnotation &paramAnnotation, std::vector<Value *> &FlatParamList, std::vector<DxilParameterAnnotation> &FlatAnnotationList, BasicBlock *EntryBlock, ArrayRef<DbgDeclareInst *> DDIs) { std::deque<AnnotatedValue> WorkList; WorkList.push_back({Arg, paramAnnotation}); unsigned startArgIndex = FlatAnnotationList.size(); DxilTypeSystem &dxilTypeSys = m_pHLModule->GetTypeSystem(); const std::string &semantic = paramAnnotation.GetSemanticString(); DxilParamInputQual inputQual = paramAnnotation.GetParamInputQual(); bool bOut = inputQual == DxilParamInputQual::Out || inputQual == DxilParamInputQual::Inout || inputQual == DxilParamInputQual::OutStream0 || inputQual == DxilParamInputQual::OutStream1 || inputQual == DxilParamInputQual::OutStream2 || inputQual == DxilParamInputQual::OutStream3; // Map from semantic string to type. llvm::StringMap<Type *> semanticTypeMap; // Original semantic type. 
if (!semantic.empty()) { // Unwrap top-level array if primitive if (inputQual == DxilParamInputQual::InputPatch || inputQual == DxilParamInputQual::OutputPatch || inputQual == DxilParamInputQual::InputPrimitive) { Type *Ty = Arg->getType(); if (Ty->isPointerTy()) Ty = Ty->getPointerElementType(); if (Ty->isArrayTy()) semanticTypeMap[semantic] = Ty->getArrayElementType(); } else { semanticTypeMap[semantic] = Arg->getType(); } } std::vector<Instruction *> deadAllocas; DIBuilder DIB(*F->getParent(), /*AllowUnresolved*/ false); unsigned debugOffset = 0; const DataLayout &DL = F->getParent()->getDataLayout(); // Process the worklist while (!WorkList.empty()) { AnnotatedValue AV = WorkList.front(); WorkList.pop_front(); // Do not skip unused parameter. Value *V = AV.Value; DxilFieldAnnotation &annotation = AV.Annotation; // We can never replace memcpy for arguments because they have an implicit // first memcpy that happens from argument passing, and pointer analysis // will not reveal that, especially if we've done a first SROA pass on V. // No DomTree needed for that reason const bool bAllowReplace = false; SROA_Helper::LowerMemcpy(V, &annotation, dxilTypeSys, DL, nullptr /*DT */, bAllowReplace); // Now is safe to create the IRBuilder. // If we create it before LowerMemcpy, the insertion pointer instruction may // get deleted IRBuilder<> Builder(dxilutil::FindAllocaInsertionPt(EntryBlock)); std::vector<Value *> Elts; // Not flat vector for entry function currently. bool SROAed = false; Type *BrokenUpTy = nullptr; uint64_t NumInstances = 1; if (inputQual != DxilParamInputQual::InPayload) { // DomTree isn't used by arguments SROAed = SROA_Helper::DoScalarReplacement( V, Elts, BrokenUpTy, NumInstances, Builder, /*bFlatVector*/ false, annotation.IsPrecise(), dxilTypeSys, DL, DeadInsts, /*DT*/ nullptr); } if (SROAed) { Type *Ty = V->getType()->getPointerElementType(); // Skip empty struct parameters. if (SROA_Helper::IsEmptyStructType(Ty, dxilTypeSys)) { SROA_Helper::MarkEmptyStructUsers(V, DeadInsts); DeleteDeadInstructions(); continue; } bool precise = annotation.IsPrecise(); const std::string &semantic = annotation.GetSemanticString(); hlsl::InterpolationMode interpMode = annotation.GetInterpolationMode(); // Find index of first non-empty field. unsigned firstNonEmptyIx = Elts.size(); for (unsigned ri = 0; ri < Elts.size(); ri++) { if (DL.getTypeSizeInBits(Ty->getContainedType(ri)) > 0) { firstNonEmptyIx = ri; break; } } // Push Elts into workList from right to left to preserve the order. for (unsigned ri = 0; ri < Elts.size(); ri++) { unsigned i = Elts.size() - ri - 1; DxilFieldAnnotation EltAnnotation = GetEltAnnotation(Ty, i, annotation, dxilTypeSys); const std::string &eltSem = EltAnnotation.GetSemanticString(); if (!semantic.empty()) { if (!eltSem.empty()) { // It doesn't look like we can provide source location information // from here F->getContext().emitWarning( Twine("semantic '") + eltSem + "' on field overridden by function or enclosing type"); } // Inherit semantic from parent, but only preserve it for the first // non-zero-sized element. // Subsequent elements are noted with a special value that gets // resolved once the argument is completely flattened. EltAnnotation.SetSemanticString( i == firstNonEmptyIx ? 
semantic : ContinuedPseudoSemantic); } else if (!eltSem.empty() && semanticTypeMap.count(eltSem) == 0) { Type *EltTy = dxilutil::GetArrayEltTy(Ty); DXASSERT(EltTy->isStructTy(), "must be a struct type to has semantic."); semanticTypeMap[eltSem] = EltTy->getStructElementType(i); } if (precise) EltAnnotation.SetPrecise(); if (EltAnnotation.GetInterpolationMode().GetKind() == DXIL::InterpolationMode::Undefined) EltAnnotation.SetInterpolationMode(interpMode); WorkList.push_front({Elts[i], EltAnnotation}); } ++NumReplaced; if (Instruction *I = dyn_cast<Instruction>(V)) deadAllocas.emplace_back(I); } else { Type *Ty = V->getType(); if (Ty->isPointerTy()) Ty = Ty->getPointerElementType(); // Flatten array of SV_Target. StringRef semanticStr = annotation.GetSemanticString(); if (semanticStr.upper().find("SV_TARGET") == 0 && Ty->isArrayTy()) { Type *Ty = cast<ArrayType>(V->getType()->getPointerElementType()); StringRef targetStr; unsigned targetIndex; Semantic::DecomposeNameAndIndex(semanticStr, &targetStr, &targetIndex); // Replace target parameter with local target. AllocaInst *localTarget = Builder.CreateAlloca(Ty); V->replaceAllUsesWith(localTarget); unsigned arraySize = 1; std::vector<unsigned> arraySizeList; while (Ty->isArrayTy()) { unsigned size = Ty->getArrayNumElements(); arraySizeList.emplace_back(size); arraySize *= size; Ty = Ty->getArrayElementType(); } unsigned arrayLevel = arraySizeList.size(); std::vector<unsigned> arrayIdxList(arrayLevel, 0); // Create flattened target. DxilFieldAnnotation EltAnnotation = annotation; for (unsigned i = 0; i < arraySize; i++) { Value *Elt = Builder.CreateAlloca(Ty); EltAnnotation.SetSemanticString(targetStr.str() + std::to_string(targetIndex + i)); // Add semantic type. semanticTypeMap[EltAnnotation.GetSemanticString()] = Ty; WorkList.push_front({Elt, EltAnnotation}); // Copy local target to flattened target. std::vector<Value *> idxList(arrayLevel + 1); idxList[0] = Builder.getInt32(0); for (unsigned idx = 0; idx < arrayLevel; idx++) { idxList[idx + 1] = Builder.getInt32(arrayIdxList[idx]); } if (bForParam) { // If Argument, copy before each return. for (auto &BB : F->getBasicBlockList()) { TerminatorInst *TI = BB.getTerminator(); if (isa<ReturnInst>(TI)) { IRBuilder<> RetBuilder(TI); Value *Ptr = RetBuilder.CreateGEP(localTarget, idxList); Value *V = RetBuilder.CreateLoad(Ptr); RetBuilder.CreateStore(V, Elt); } } } else { // Else, copy with Builder. Value *Ptr = Builder.CreateGEP(localTarget, idxList); Value *V = Builder.CreateLoad(Ptr); Builder.CreateStore(V, Elt); } // Update arrayIdxList. for (unsigned idx = arrayLevel; idx > 0; idx--) { arrayIdxList[idx - 1]++; if (arrayIdxList[idx - 1] < arraySizeList[idx - 1]) break; arrayIdxList[idx - 1] = 0; } } continue; } // Cast vector/matrix/resource parameter. V = castArgumentIfRequired(V, Ty, bOut, inputQual, annotation, Builder, dxilTypeSys); // Cannot SROA, save it to final parameter list. FlatParamList.emplace_back(V); // Create ParamAnnotation for V. 
FlatAnnotationList.emplace_back(DxilParameterAnnotation()); DxilParameterAnnotation &flatParamAnnotation = FlatAnnotationList.back(); flatParamAnnotation.SetParamInputQual( paramAnnotation.GetParamInputQual()); flatParamAnnotation.SetInterpolationMode( annotation.GetInterpolationMode()); flatParamAnnotation.SetSemanticString(annotation.GetSemanticString()); flatParamAnnotation.SetCompType(annotation.GetCompType().GetKind()); flatParamAnnotation.SetMatrixAnnotation(annotation.GetMatrixAnnotation()); flatParamAnnotation.SetPrecise(annotation.IsPrecise()); flatParamAnnotation.SetResourceProperties( annotation.GetResourceProperties()); // Add debug info. if (DDIs.size() && V != Arg) { Value *TmpV = V; // If V is casted, add debug into to original V. if (castParamMap.count(V)) { TmpV = castParamMap[V].first; // One more level for ptr of input vector. // It cast from ptr to non-ptr then cast to scalars. if (castParamMap.count(TmpV)) { TmpV = castParamMap[TmpV].first; } } Type *Ty = TmpV->getType(); if (Ty->isPointerTy()) Ty = Ty->getPointerElementType(); unsigned size = DL.getTypeAllocSize(Ty); #if 0 // HLSL Change DIExpression *DDIExp = DIB.createBitPieceExpression(debugOffset, size); #else // HLSL Change Type *argTy = Arg->getType(); if (argTy->isPointerTy()) argTy = argTy->getPointerElementType(); DIExpression *DDIExp = nullptr; if (debugOffset == 0 && DL.getTypeAllocSize(argTy) == size) { DDIExp = DIB.createExpression(); } else { DDIExp = DIB.createBitPieceExpression(debugOffset * 8, size * 8); } #endif // HLSL Change debugOffset += size; for (DbgDeclareInst *DDI : DDIs) { DIB.insertDeclare(TmpV, DDI->getVariable(), DDIExp, DDI->getDebugLoc(), Builder.GetInsertPoint()); } } // Flatten stream out. if (HLModule::IsStreamOutputPtrType(V->getType())) { // For stream output objects. // Create a value as output value. Type *outputType = V->getType()->getPointerElementType()->getStructElementType(0); Value *outputVal = Builder.CreateAlloca(outputType); // For each stream.Append(data) // transform into // d = load data // store outputVal, d // stream.Append(outputVal) for (User *user : V->users()) { if (CallInst *CI = dyn_cast<CallInst>(user)) { unsigned opcode = GetHLOpcode(CI); if (opcode == static_cast<unsigned>(IntrinsicOp::MOP_Append)) { // At this point, the stream append data argument might or not // have been SROA'd Value *firstDataPtr = CI->getArgOperand(HLOperandIndex::kStreamAppendDataOpIndex); DXASSERT(firstDataPtr->getType()->isPointerTy(), "Append value must be a pointer."); if (firstDataPtr->getType()->getPointerElementType() == outputType) { // The data has not been SROA'd DXASSERT(CI->getNumArgOperands() == (HLOperandIndex::kStreamAppendDataOpIndex + 1), "Unexpected number of arguments for non-SROA'd " "StreamOutput.Append"); IRBuilder<> Builder(CI); llvm::SmallVector<llvm::Value *, 16> idxList; SplitCpy(firstDataPtr->getType(), outputVal, firstDataPtr, idxList, Builder, DL, dxilTypeSys, &flatParamAnnotation); CI->setArgOperand(HLOperandIndex::kStreamAppendDataOpIndex, outputVal); } else { // Append has been SROA'd, we might be operating on multiple // values with types differing from the stream output type. // Flatten store outputVal. // Must be struct to be flatten. 
IRBuilder<> Builder(CI); llvm::SmallVector<llvm::Value *, 16> IdxList; llvm::SmallVector<llvm::Value *, 16> EltPtrList; llvm::SmallVector<const DxilFieldAnnotation *, 16> EltAnnotationList; // split SplitPtr(outputVal, IdxList, outputVal->getType(), flatParamAnnotation, EltPtrList, EltAnnotationList, dxilTypeSys, Builder); unsigned eltCount = CI->getNumArgOperands() - 2; DXASSERT_LOCALVAR(eltCount, eltCount == EltPtrList.size(), "invalid element count"); for (unsigned i = HLOperandIndex::kStreamAppendDataOpIndex; i < CI->getNumArgOperands(); i++) { Value *DataPtr = CI->getArgOperand(i); Value *EltPtr = EltPtrList[i - HLOperandIndex::kStreamAppendDataOpIndex]; const DxilFieldAnnotation *EltAnnotation = EltAnnotationList [i - HLOperandIndex::kStreamAppendDataOpIndex]; llvm::SmallVector<llvm::Value *, 16> IdxList; SplitCpy(DataPtr->getType(), EltPtr, DataPtr, IdxList, Builder, DL, dxilTypeSys, EltAnnotation); CI->setArgOperand(i, EltPtr); } } } } } // Then split output value to generate ParamQual. WorkList.push_front({outputVal, annotation}); } } } // Now erase any instructions that were made dead while rewriting the // alloca. DeleteDeadInstructions(); // Erase dead allocas after all uses deleted. for (Instruction *I : deadAllocas) I->eraseFromParent(); unsigned endArgIndex = FlatAnnotationList.size(); if (bForParam && startArgIndex < endArgIndex) { DxilParamInputQual inputQual = paramAnnotation.GetParamInputQual(); if (inputQual == DxilParamInputQual::OutStream0 || inputQual == DxilParamInputQual::OutStream1 || inputQual == DxilParamInputQual::OutStream2 || inputQual == DxilParamInputQual::OutStream3) startArgIndex++; DxilParameterAnnotation &flatParamAnnotation = FlatAnnotationList[startArgIndex]; const std::string &semantic = flatParamAnnotation.GetSemanticString(); if (!semantic.empty()) allocateSemanticIndex(FlatAnnotationList, startArgIndex, semanticTypeMap); } } static bool IsUsedAsCallArg(Value *V) { for (User *U : V->users()) { if (CallInst *CI = dyn_cast<CallInst>(U)) { Function *CalledF = CI->getCalledFunction(); HLOpcodeGroup group = GetHLOpcodeGroup(CalledF); // Skip HL operations. if (group != HLOpcodeGroup::NotHL || group == HLOpcodeGroup::HLExtIntrinsic) { continue; } // Skip llvm intrinsic. if (CalledF->isIntrinsic()) continue; return true; } if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) { if (IsUsedAsCallArg(GEP)) return true; } } return false; } // For function parameter which used in function call and need to be flattened. // Replace with tmp alloca. void SROA_Parameter_HLSL::preprocessArgUsedInCall(Function *F) { if (F->isDeclaration()) return; const DataLayout &DL = m_pHLModule->GetModule()->getDataLayout(); DxilTypeSystem &typeSys = m_pHLModule->GetTypeSystem(); DxilFunctionAnnotation *pFuncAnnot = typeSys.GetFunctionAnnotation(F); DXASSERT(pFuncAnnot, "else invalid function"); IRBuilder<> Builder(dxilutil::FindAllocaInsertionPt(F)); SmallVector<ReturnInst *, 2> retList; for (BasicBlock &bb : F->getBasicBlockList()) { if (ReturnInst *RI = dyn_cast<ReturnInst>(bb.getTerminator())) { retList.emplace_back(RI); } } for (Argument &arg : F->args()) { Type *Ty = arg.getType(); // Only check pointer types. if (!Ty->isPointerTy()) continue; Ty = Ty->getPointerElementType(); // Skip scalar types. if (!Ty->isAggregateType() && Ty->getScalarType() == Ty) continue; bool bUsedInCall = IsUsedAsCallArg(&arg); if (bUsedInCall) { // Create tmp. Value *TmpArg = Builder.CreateAlloca(Ty); // Replace arg with tmp. 
arg.replaceAllUsesWith(TmpArg); DxilParameterAnnotation &paramAnnot = pFuncAnnot->GetParameterAnnotation(arg.getArgNo()); DxilParamInputQual inputQual = paramAnnot.GetParamInputQual(); unsigned size = DL.getTypeAllocSize(Ty); // Copy between arg and tmp. if (inputQual == DxilParamInputQual::In || inputQual == DxilParamInputQual::Inout) { // copy arg to tmp. CallInst *argToTmp = Builder.CreateMemCpy(TmpArg, &arg, size, 0); // Split the memcpy. MemcpySplitter::SplitMemCpy(cast<MemCpyInst>(argToTmp), DL, nullptr, typeSys); } if (inputQual == DxilParamInputQual::Out || inputQual == DxilParamInputQual::Inout) { for (ReturnInst *RI : retList) { IRBuilder<> RetBuilder(RI); // copy tmp to arg. CallInst *tmpToArg = RetBuilder.CreateMemCpy(&arg, TmpArg, size, 0); // Split the memcpy. MemcpySplitter::SplitMemCpy(cast<MemCpyInst>(tmpToArg), DL, nullptr, typeSys); } } // TODO: support other DxilParamInputQual. } } } /// moveFunctionBlocks - Move body of F to flatF. void SROA_Parameter_HLSL::moveFunctionBody(Function *F, Function *flatF) { bool updateRetType = F->getReturnType() != flatF->getReturnType(); // Splice the body of the old function right into the new function. flatF->getBasicBlockList().splice(flatF->begin(), F->getBasicBlockList()); // Update Block uses. if (updateRetType) { for (BasicBlock &BB : flatF->getBasicBlockList()) { if (updateRetType) { // Replace ret with ret void. if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator())) { // Create store for return. IRBuilder<> Builder(RI); Builder.CreateRetVoid(); RI->eraseFromParent(); } } } } } static void SplitArrayCopy(Value *V, const DataLayout &DL, DxilTypeSystem &typeSys, DxilFieldAnnotation *fieldAnnotation) { for (auto U = V->user_begin(); U != V->user_end();) { User *user = *(U++); if (StoreInst *ST = dyn_cast<StoreInst>(user)) { Value *ptr = ST->getPointerOperand(); Value *val = ST->getValueOperand(); IRBuilder<> Builder(ST); SmallVector<Value *, 16> idxList; SplitCpy(ptr->getType(), ptr, val, idxList, Builder, DL, typeSys, fieldAnnotation); ST->eraseFromParent(); } } } static void CheckArgUsage(Value *V, bool &bLoad, bool &bStore) { if (bLoad && bStore) return; for (User *user : V->users()) { if (dyn_cast<LoadInst>(user)) { bLoad = true; } else if (dyn_cast<StoreInst>(user)) { bStore = true; } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(user)) { CheckArgUsage(GEP, bLoad, bStore); } else if (CallInst *CI = dyn_cast<CallInst>(user)) { if (CI->getType()->isPointerTy()) CheckArgUsage(CI, bLoad, bStore); else { HLOpcodeGroup group = GetHLOpcodeGroupByName(CI->getCalledFunction()); if (group == HLOpcodeGroup::HLMatLoadStore) { HLMatLoadStoreOpcode opcode = static_cast<HLMatLoadStoreOpcode>(GetHLOpcode(CI)); switch (opcode) { case HLMatLoadStoreOpcode::ColMatLoad: case HLMatLoadStoreOpcode::RowMatLoad: bLoad = true; break; case HLMatLoadStoreOpcode::ColMatStore: case HLMatLoadStoreOpcode::RowMatStore: bStore = true; break; } } } } } } // AcceptHitAndEndSearch and IgnoreHit both will not return, but require // outputs to have been written before the call. Do this by: // - inject a return immediately after the call if not there already // - LegalizeDxilInputOutputs will inject writes from temp alloca to // outputs before each return. // - in HLOperationLower, after lowering the intrinsic, move the intrinsic // to just before the return. 
static void InjectReturnAfterNoReturnPreserveOutput(HLModule &HLM) { for (Function &F : HLM.GetModule()->functions()) { if (GetHLOpcodeGroup(&F) == HLOpcodeGroup::HLIntrinsic) { for (auto U : F.users()) { if (CallInst *CI = dyn_cast<CallInst>(U)) { unsigned OpCode = GetHLOpcode(CI); if (OpCode == (unsigned)IntrinsicOp::IOP_AcceptHitAndEndSearch || OpCode == (unsigned)IntrinsicOp::IOP_IgnoreHit) { Instruction *pNextI = CI->getNextNode(); // Skip if already has a return immediatly following call if (isa<ReturnInst>(pNextI)) continue; // split block and add return: BasicBlock *BB = CI->getParent(); BB->splitBasicBlock(pNextI); TerminatorInst *Term = BB->getTerminator(); Term->eraseFromParent(); IRBuilder<> Builder(BB); llvm::Type *RetTy = CI->getParent()->getParent()->getReturnType(); if (RetTy->isVoidTy()) Builder.CreateRetVoid(); else Builder.CreateRet(UndefValue::get(RetTy)); } } } } } } // Support store to input and load from output. static void LegalizeDxilInputOutputs(Function *F, DxilFunctionAnnotation *EntryAnnotation, const DataLayout &DL, DxilTypeSystem &typeSys) { BasicBlock &EntryBlk = F->getEntryBlock(); Module *M = F->getParent(); // Map from output to the temp created for it. MapVector<Argument *, Value *> outputTempMap; // Need deterministic order of iteration for (Argument &arg : F->args()) { dxilutil::MergeGepUse(&arg); Type *Ty = arg.getType(); DxilParameterAnnotation &paramAnnotation = EntryAnnotation->GetParameterAnnotation(arg.getArgNo()); DxilParamInputQual qual = paramAnnotation.GetParamInputQual(); bool isColMajor = false; // Skip arg which is not a pointer. if (!Ty->isPointerTy()) { if (HLMatrixType::isa(Ty)) { // Replace matrix arg with cast to vec. It will be lowered in // DxilGenerationPass. isColMajor = paramAnnotation.GetMatrixAnnotation().Orientation == MatrixOrientation::ColumnMajor; IRBuilder<> Builder(dxilutil::FindAllocaInsertionPt(F)); HLCastOpcode opcode = isColMajor ? HLCastOpcode::ColMatrixToVecCast : HLCastOpcode::RowMatrixToVecCast; Value *undefVal = UndefValue::get(Ty); Value *Cast = HLModule::EmitHLOperationCall( Builder, HLOpcodeGroup::HLCast, static_cast<unsigned>(opcode), Ty, {undefVal}, *M); arg.replaceAllUsesWith(Cast); // Set arg as the operand. CallInst *CI = cast<CallInst>(Cast); CI->setArgOperand(HLOperandIndex::kUnaryOpSrc0Idx, &arg); } continue; } Ty = Ty->getPointerElementType(); bool bLoad = false; bool bStore = false; CheckArgUsage(&arg, bLoad, bStore); bool bStoreInputToTemp = false; bool bLoadOutputFromTemp = false; if (qual == DxilParamInputQual::In && bStore) { bStoreInputToTemp = true; } else if (qual == DxilParamInputQual::Out && bLoad) { bLoadOutputFromTemp = true; } else if (bLoad && bStore) { switch (qual) { case DxilParamInputQual::InPayload: case DxilParamInputQual::InputPrimitive: case DxilParamInputQual::InputPatch: case DxilParamInputQual::OutputPatch: case DxilParamInputQual::NodeIO: { bStoreInputToTemp = true; } break; case DxilParamInputQual::Inout: break; default: DXASSERT(0, "invalid input qual here"); } } else if (qual == DxilParamInputQual::Inout) { // Only replace inout when (bLoad && bStore) == false. 
bLoadOutputFromTemp = true; bStoreInputToTemp = true; } if (HLMatrixType::isa(Ty)) { if (qual == DxilParamInputQual::In) bStoreInputToTemp = bLoad; else if (qual == DxilParamInputQual::Out) bLoadOutputFromTemp = bStore; else if (qual == DxilParamInputQual::Inout) { bStoreInputToTemp = true; bLoadOutputFromTemp = true; } } if (bStoreInputToTemp || bLoadOutputFromTemp) { IRBuilder<> Builder(EntryBlk.getFirstInsertionPt()); AllocaInst *temp = Builder.CreateAlloca(Ty); // Replace all uses with temp. arg.replaceAllUsesWith(temp); // Copy input to temp. if (bStoreInputToTemp) { llvm::SmallVector<llvm::Value *, 16> idxList; // split copy. SplitCpy(temp->getType(), temp, &arg, idxList, Builder, DL, typeSys, &paramAnnotation); } // Generate store output, temp later. if (bLoadOutputFromTemp) { outputTempMap[&arg] = temp; } } } for (BasicBlock &BB : F->getBasicBlockList()) { if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator())) { IRBuilder<> Builder(RI); // Copy temp to output. for (auto It : outputTempMap) { Argument *output = It.first; Value *temp = It.second; llvm::SmallVector<llvm::Value *, 16> idxList; DxilParameterAnnotation &paramAnnotation = EntryAnnotation->GetParameterAnnotation(output->getArgNo()); auto Iter = Builder.GetInsertPoint(); if (RI != BB.begin()) Iter--; // split copy. SplitCpy(output->getType(), output, temp, idxList, Builder, DL, typeSys, &paramAnnotation); } // Clone the return. Builder.CreateRet(RI->getReturnValue()); RI->eraseFromParent(); } } } void SROA_Parameter_HLSL::createFlattenedFunction(Function *F) { DxilTypeSystem &typeSys = m_pHLModule->GetTypeSystem(); DXASSERT(F == m_pHLModule->GetEntryFunction() || m_pHLModule->IsEntryThatUsesSignatures(F), "otherwise, createFlattenedFunction called on library function " "that should not be flattened."); const DataLayout &DL = m_pHLModule->GetModule()->getDataLayout(); // Skip void (void) function. if (F->getReturnType()->isVoidTy() && F->getArgumentList().empty()) { return; } // Clear maps for cast. castParamMap.clear(); vectorEltsMap.clear(); DxilFunctionAnnotation *funcAnnotation = m_pHLModule->GetFunctionAnnotation(F); DXASSERT(funcAnnotation, "must find annotation for function"); std::deque<Value *> WorkList; LLVMContext &Ctx = m_pHLModule->GetCtx(); std::unique_ptr<BasicBlock> TmpBlockForFuncDecl; BasicBlock *EntryBlock; if (F->isDeclaration()) { // We still want to SROA the parameters, so creaty a dummy // function body block to avoid special cases. TmpBlockForFuncDecl.reset(BasicBlock::Create(Ctx)); // Create return as terminator. IRBuilder<> RetBuilder(TmpBlockForFuncDecl.get()); RetBuilder.CreateRetVoid(); EntryBlock = TmpBlockForFuncDecl.get(); } else { EntryBlock = &F->getEntryBlock(); } std::vector<Value *> FlatParamList; std::vector<DxilParameterAnnotation> FlatParamAnnotationList; std::vector<int> FlatParamOriArgNoList; const bool bForParamTrue = true; // Add all argument to worklist. for (Argument &Arg : F->args()) { // merge GEP use for arg. 
dxilutil::MergeGepUse(&Arg); unsigned prevFlatParamCount = FlatParamList.size(); DxilParameterAnnotation &paramAnnotation = funcAnnotation->GetParameterAnnotation(Arg.getArgNo()); SmallVector<DbgDeclareInst *, 4> DDIs; llvm::FindAllocaDbgDeclare(&Arg, DDIs); flattenArgument(F, &Arg, bForParamTrue, paramAnnotation, FlatParamList, FlatParamAnnotationList, EntryBlock, DDIs); unsigned newFlatParamCount = FlatParamList.size() - prevFlatParamCount; for (unsigned i = 0; i < newFlatParamCount; i++) { FlatParamOriArgNoList.emplace_back(Arg.getArgNo()); } } Type *retType = F->getReturnType(); std::vector<Value *> FlatRetList; std::vector<DxilParameterAnnotation> FlatRetAnnotationList; // Split and change to out parameter. if (!retType->isVoidTy()) { IRBuilder<> Builder(dxilutil::FindAllocaInsertionPt(EntryBlock)); Value *retValAddr = Builder.CreateAlloca(retType); DxilParameterAnnotation &retAnnotation = funcAnnotation->GetRetTypeAnnotation(); Module &M = *m_pHLModule->GetModule(); Type *voidTy = Type::getVoidTy(m_pHLModule->GetCtx()); #if 0 // We don't really want this to show up in debug info. // Create DbgDecl for the ret value. if (DISubprogram *funcDI = getDISubprogram(F)) { DITypeRef RetDITyRef = funcDI->getType()->getTypeArray()[0]; DITypeIdentifierMap EmptyMap; DIType * RetDIType = RetDITyRef.resolve(EmptyMap); DIBuilder DIB(*F->getParent(), /*AllowUnresolved*/ false); DILocalVariable *RetVar = DIB.createLocalVariable(llvm::dwarf::Tag::DW_TAG_arg_variable, funcDI, F->getName().str() + ".Ret", funcDI->getFile(), funcDI->getLine(), RetDIType); DIExpression *Expr = DIB.createExpression(); // TODO: how to get col? DILocation *DL = DILocation::get(F->getContext(), funcDI->getLine(), 0, funcDI); DIB.insertDeclare(retValAddr, RetVar, Expr, DL, Builder.GetInsertPoint()); } #endif for (BasicBlock &BB : F->getBasicBlockList()) { if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator())) { // Create store for return. IRBuilder<> RetBuilder(RI); if (!retAnnotation.HasMatrixAnnotation()) { RetBuilder.CreateStore(RI->getReturnValue(), retValAddr); } else { bool isRowMajor = retAnnotation.GetMatrixAnnotation().Orientation == MatrixOrientation::RowMajor; Value *RetVal = RI->getReturnValue(); if (!isRowMajor) { // Matrix value is row major. ColMatStore require col major. // Cast before store. RetVal = HLModule::EmitHLOperationCall( RetBuilder, HLOpcodeGroup::HLCast, static_cast<unsigned>(HLCastOpcode::RowMatrixToColMatrix), RetVal->getType(), {RetVal}, M); } unsigned opcode = static_cast<unsigned>( isRowMajor ? HLMatLoadStoreOpcode::RowMatStore : HLMatLoadStoreOpcode::ColMatStore); HLModule::EmitHLOperationCall(RetBuilder, HLOpcodeGroup::HLMatLoadStore, opcode, voidTy, {retValAddr, RetVal}, M); } } } // Create a fake store to keep retValAddr so it can be flattened. if (retValAddr->user_empty()) { Builder.CreateStore(UndefValue::get(retType), retValAddr); } SmallVector<DbgDeclareInst *, 4> DDIs; llvm::FindAllocaDbgDeclare(retValAddr, DDIs); flattenArgument(F, retValAddr, bForParamTrue, funcAnnotation->GetRetTypeAnnotation(), FlatRetList, FlatRetAnnotationList, EntryBlock, DDIs); const int kRetArgNo = -1; for (unsigned i = 0; i < FlatRetList.size(); i++) { FlatParamOriArgNoList.insert(FlatParamOriArgNoList.begin(), kRetArgNo); } } // Always change return type as parameter. // By doing this, no need to check return when generate storeOutput. if (FlatRetList.size() || // For empty struct return type. !retType->isVoidTy()) { // Return value is flattened. // Change return value into out parameter. 
retType = Type::getVoidTy(retType->getContext()); // Merge return data info param data. FlatParamList.insert(FlatParamList.begin(), FlatRetList.begin(), FlatRetList.end()); FlatParamAnnotationList.insert(FlatParamAnnotationList.begin(), FlatRetAnnotationList.begin(), FlatRetAnnotationList.end()); } std::vector<Type *> FinalTypeList; for (Value *arg : FlatParamList) { FinalTypeList.emplace_back(arg->getType()); } unsigned extraParamSize = 0; if (m_pHLModule->HasDxilFunctionProps(F)) { DxilFunctionProps &funcProps = m_pHLModule->GetDxilFunctionProps(F); if (funcProps.shaderKind == ShaderModel::Kind::Vertex) { auto &VS = funcProps.ShaderProps.VS; Type *outFloatTy = Type::getFloatPtrTy(F->getContext()); // Add out float parameter for each clip plane. unsigned i = 0; for (; i < DXIL::kNumClipPlanes; i++) { if (!VS.clipPlanes[i]) break; FinalTypeList.emplace_back(outFloatTy); } extraParamSize = i; } } FunctionType *flatFuncTy = FunctionType::get(retType, FinalTypeList, false); // Return if nothing changed. if (flatFuncTy == F->getFunctionType()) { // Copy semantic allocation. if (!FlatParamAnnotationList.empty()) { if (!FlatParamAnnotationList[0].GetSemanticString().empty()) { for (unsigned i = 0; i < FlatParamAnnotationList.size(); i++) { DxilParameterAnnotation &paramAnnotation = funcAnnotation->GetParameterAnnotation(i); DxilParameterAnnotation &flatParamAnnotation = FlatParamAnnotationList[i]; paramAnnotation.SetSemanticIndexVec( flatParamAnnotation.GetSemanticIndexVec()); paramAnnotation.SetSemanticString( flatParamAnnotation.GetSemanticString()); } } } if (!F->isDeclaration()) { // Support store to input and load from output. LegalizeDxilInputOutputs(F, funcAnnotation, DL, typeSys); } return; } std::string flatName = F->getName().str() + ".flat"; DXASSERT(nullptr == F->getParent()->getFunction(flatName), "else overwriting existing function"); Function *flatF = cast<Function>(F->getParent()->getOrInsertFunction(flatName, flatFuncTy)); funcMap[F] = flatF; // Update function debug info. if (DISubprogram *funcDI = getDISubprogram(F)) funcDI->replaceFunction(flatF); // Create FunctionAnnotation for flatF. DxilFunctionAnnotation *flatFuncAnnotation = m_pHLModule->AddFunctionAnnotation(flatF); // Don't need to set Ret Info, flatF always return void now. // Param Info for (unsigned ArgNo = 0; ArgNo < FlatParamAnnotationList.size(); ++ArgNo) { DxilParameterAnnotation &paramAnnotation = flatFuncAnnotation->GetParameterAnnotation(ArgNo); paramAnnotation = FlatParamAnnotationList[ArgNo]; } // Function Attr and Parameter Attr. // Remove sret first. if (F->hasStructRetAttr()) F->removeFnAttr(Attribute::StructRet); for (Argument &arg : F->args()) { if (arg.hasStructRetAttr()) { Attribute::AttrKind SRet[] = {Attribute::StructRet}; AttributeSet SRetAS = AttributeSet::get(Ctx, arg.getArgNo() + 1, SRet); arg.removeAttr(SRetAS); } } AttributeSet AS = F->getAttributes(); AttrBuilder FnAttrs(AS.getFnAttributes(), AttributeSet::FunctionIndex); AttributeSet flatAS; flatAS = flatAS.addAttributes( Ctx, AttributeSet::FunctionIndex, AttributeSet::get(Ctx, AttributeSet::FunctionIndex, FnAttrs)); if (!F->isDeclaration()) { // Only set Param attribute for function has a body. 
for (unsigned ArgNo = 0; ArgNo < FlatParamAnnotationList.size(); ++ArgNo) { unsigned oriArgNo = FlatParamOriArgNoList[ArgNo] + 1; AttrBuilder paramAttr(AS, oriArgNo); if (oriArgNo == AttributeSet::ReturnIndex) paramAttr.addAttribute(Attribute::AttrKind::NoAlias); flatAS = flatAS.addAttributes( Ctx, ArgNo + 1, AttributeSet::get(Ctx, ArgNo + 1, paramAttr)); } } flatF->setAttributes(flatAS); DXASSERT_LOCALVAR(extraParamSize, flatF->arg_size() == (extraParamSize + FlatParamAnnotationList.size()), "parameter count mismatch"); // ShaderProps. if (m_pHLModule->HasDxilFunctionProps(F)) { DxilFunctionProps &funcProps = m_pHLModule->GetDxilFunctionProps(F); std::unique_ptr<DxilFunctionProps> flatFuncProps = llvm::make_unique<DxilFunctionProps>(); *flatFuncProps = funcProps; m_pHLModule->AddDxilFunctionProps(flatF, flatFuncProps); if (funcProps.shaderKind == ShaderModel::Kind::Vertex) { auto &VS = funcProps.ShaderProps.VS; unsigned clipArgIndex = FlatParamAnnotationList.size(); // Add out float SV_ClipDistance for each clip plane. for (unsigned i = 0; i < DXIL::kNumClipPlanes; i++) { if (!VS.clipPlanes[i]) break; DxilParameterAnnotation &paramAnnotation = flatFuncAnnotation->GetParameterAnnotation(clipArgIndex + i); paramAnnotation.SetParamInputQual(DxilParamInputQual::Out); Twine semName = Twine("SV_ClipDistance") + Twine(i); paramAnnotation.SetSemanticString(semName.str()); paramAnnotation.SetCompType(DXIL::ComponentType::F32); paramAnnotation.AppendSemanticIndex(i); } } } if (!F->isDeclaration()) { // Move function body into flatF. moveFunctionBody(F, flatF); // Replace old parameters with flatF Arguments. auto argIter = flatF->arg_begin(); auto flatArgIter = FlatParamList.begin(); LLVMContext &Context = F->getContext(); // Parameter cast come from begining of entry block. IRBuilder<> Builder(dxilutil::FindAllocaInsertionPt(flatF)); while (argIter != flatF->arg_end()) { Argument *Arg = argIter++; if (flatArgIter == FlatParamList.end()) { DXASSERT(extraParamSize > 0, "parameter count mismatch"); break; } Value *flatArg = *(flatArgIter++); if (castParamMap.count(flatArg)) { replaceCastParameter(flatArg, castParamMap[flatArg].first, *flatF, Arg, castParamMap[flatArg].second, Builder); } // Update arg debug info. SmallVector<DbgDeclareInst *, 4> DDIs; llvm::FindAllocaDbgDeclare(flatArg, DDIs); if (DDIs.size()) { Value *VMD = nullptr; if (!flatArg->getType()->isPointerTy()) { // Create alloca to hold the debug info. Value *allocaArg = nullptr; if (flatArg->hasOneUse() && isa<StoreInst>(*flatArg->user_begin())) { StoreInst *SI = cast<StoreInst>(*flatArg->user_begin()); allocaArg = SI->getPointerOperand(); } else { allocaArg = Builder.CreateAlloca(flatArg->getType()); StoreInst *initArg = Builder.CreateStore(flatArg, allocaArg); Value *ldArg = Builder.CreateLoad(allocaArg); flatArg->replaceAllUsesWith(ldArg); initArg->setOperand(0, flatArg); } VMD = MetadataAsValue::get(Context, ValueAsMetadata::get(allocaArg)); } else { VMD = MetadataAsValue::get(Context, ValueAsMetadata::get(Arg)); } for (DbgDeclareInst *DDI : DDIs) { DDI->setArgOperand(0, VMD); } } flatArg->replaceAllUsesWith(Arg); if (isa<Instruction>(flatArg)) DeadInsts.emplace_back(flatArg); dxilutil::MergeGepUse(Arg); // Flatten store of array parameter. if (Arg->getType()->isPointerTy()) { Type *Ty = Arg->getType()->getPointerElementType(); if (Ty->isArrayTy()) SplitArrayCopy( Arg, DL, typeSys, &flatFuncAnnotation->GetParameterAnnotation(Arg->getArgNo())); } } // Support store to input and load from output. 
LegalizeDxilInputOutputs(flatF, flatFuncAnnotation, DL, typeSys); } } void SROA_Parameter_HLSL::replaceCall(Function *F, Function *flatF) { // Update entry function. if (F == m_pHLModule->GetEntryFunction()) { m_pHLModule->SetEntryFunction(flatF); } DXASSERT(F->user_empty(), "otherwise we flattened a library function."); } // Public interface to the SROA_Parameter_HLSL pass ModulePass *llvm::createSROA_Parameter_HLSL() { return new SROA_Parameter_HLSL(); } //===----------------------------------------------------------------------===// // Lower static global into Alloca. //===----------------------------------------------------------------------===// namespace { class LowerStaticGlobalIntoAlloca : public ModulePass { DebugInfoFinder m_DbgFinder; public: static char ID; // Pass identification, replacement for typeid explicit LowerStaticGlobalIntoAlloca() : ModulePass(ID) {} StringRef getPassName() const override { return "Lower static global into Alloca"; } bool runOnModule(Module &M) override { m_DbgFinder.processModule(M); Type *handleTy = nullptr; DxilTypeSystem *pTypeSys = nullptr; SetVector<Function *> entryAndInitFunctionSet; if (M.HasHLModule()) { auto &HLM = M.GetHLModule(); pTypeSys = &HLM.GetTypeSystem(); handleTy = HLM.GetOP()->GetHandleType(); if (!HLM.GetShaderModel()->IsLib()) { entryAndInitFunctionSet.insert(HLM.GetEntryFunction()); if (HLM.GetShaderModel()->IsHS()) { entryAndInitFunctionSet.insert(HLM.GetPatchConstantFunction()); } } else { for (Function &F : M) { if (F.isDeclaration() || !HLM.IsEntry(&F)) { continue; } entryAndInitFunctionSet.insert(&F); } } } else { DXASSERT(M.HasDxilModule(), "must have dxilModle or HLModule"); auto &DM = M.GetDxilModule(); pTypeSys = &DM.GetTypeSystem(); handleTy = DM.GetOP()->GetHandleType(); if (!DM.GetShaderModel()->IsLib()) { entryAndInitFunctionSet.insert(DM.GetEntryFunction()); if (DM.GetShaderModel()->IsHS()) { entryAndInitFunctionSet.insert(DM.GetPatchConstantFunction()); } } else { for (Function &F : M) { if (F.isDeclaration() || !DM.IsEntry(&F)) continue; entryAndInitFunctionSet.insert(&F); } } } // Collect init functions for static globals. if (GlobalVariable *Ctors = M.getGlobalVariable("llvm.global_ctors")) { if (ConstantArray *CA = dyn_cast<ConstantArray>(Ctors->getInitializer())) { for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) { if (isa<ConstantAggregateZero>(*i)) continue; ConstantStruct *CS = cast<ConstantStruct>(*i); if (isa<ConstantPointerNull>(CS->getOperand(1))) continue; // Must have a function or null ptr. if (!isa<Function>(CS->getOperand(1))) continue; Function *Ctor = cast<Function>(CS->getOperand(1)); assert(Ctor->getReturnType()->isVoidTy() && Ctor->arg_size() == 0 && "function type must be void (void)"); // Add Ctor. entryAndInitFunctionSet.insert(Ctor); } } } // Lower static global into allocas. std::vector<GlobalVariable *> staticGVs; for (GlobalVariable &GV : M.globals()) { // only for non-constant static globals if (!dxilutil::IsStaticGlobal(&GV) || GV.isConstant()) continue; // Skip dx.ishelper if (GV.getName().compare(DXIL::kDxIsHelperGlobalName) == 0) continue; // Skip if GV used in functions other than entry. 
if (!usedOnlyInEntry(&GV, entryAndInitFunctionSet)) continue; Type *EltTy = GV.getType()->getElementType(); if (!EltTy->isAggregateType()) { staticGVs.emplace_back(&GV); } else { EltTy = dxilutil::GetArrayEltTy(EltTy); // Lower static [array of] resources if (dxilutil::IsHLSLObjectType(EltTy) || EltTy == handleTy) { staticGVs.emplace_back(&GV); } } } bool bUpdated = false; const DataLayout &DL = M.getDataLayout(); // Create AI for each GV in each entry. // Replace all users of GV with AI. // Collect all users of GV within each entry. // Remove unused AI in the end. for (GlobalVariable *GV : staticGVs) { bUpdated |= lowerStaticGlobalIntoAlloca(GV, DL, *pTypeSys, entryAndInitFunctionSet); } return bUpdated; } private: bool lowerStaticGlobalIntoAlloca(GlobalVariable *GV, const DataLayout &DL, DxilTypeSystem &typeSys, SetVector<Function *> &entryAndInitFunctionSet); bool usedOnlyInEntry(Value *V, SetVector<Function *> &entryAndInitFunctionSet); }; } // namespace // Go through the base type chain of TyA and see if // we eventually get to TyB // // Note: Not necessarily about inheritance. Could be // typedef, const type, ref type, MEMBER type (TyA // being a member of TyB). // static bool IsDerivedTypeOf(DIType *TyA, DIType *TyB) { DITypeIdentifierMap EmptyMap; while (TyA) { if (DIDerivedType *Derived = dyn_cast<DIDerivedType>(TyA)) { if (Derived->getBaseType() == TyB) return true; else TyA = Derived->getBaseType().resolve(EmptyMap); } else { break; } } return false; } // See if 'DGV' a member type of some other variable, and return that variable // and the offset and size DGV is into it. // // If DGV is not a member, just return nullptr. // static DIGlobalVariable * FindGlobalVariableFragment(const DebugInfoFinder &DbgFinder, DIGlobalVariable *DGV, unsigned *Out_OffsetInBits, unsigned *Out_SizeInBits) { DITypeIdentifierMap EmptyMap; StringRef FullName = DGV->getName(); size_t FirstDot = FullName.find_first_of('.'); if (FirstDot == StringRef::npos) return nullptr; StringRef BaseName = FullName.substr(0, FirstDot); assert(BaseName.size()); DIType *Ty = DGV->getType().resolve(EmptyMap); assert(isa<DIDerivedType>(Ty) && Ty->getTag() == dwarf::DW_TAG_member); DIGlobalVariable *FinalResult = nullptr; for (DIGlobalVariable *DGV_It : DbgFinder.global_variables()) { if (DGV_It->getName() == BaseName && IsDerivedTypeOf(Ty, DGV_It->getType().resolve(EmptyMap))) { FinalResult = DGV_It; break; } } if (FinalResult) { *Out_OffsetInBits = Ty->getOffsetInBits(); *Out_SizeInBits = Ty->getSizeInBits(); } return FinalResult; } // Create a fake local variable for the GlobalVariable GV that has just been // lowered to local Alloca. 
// static void PatchDebugInfo(DebugInfoFinder &DbgFinder, Function *F, GlobalVariable *GV, AllocaInst *AI) { if (!DbgFinder.compile_unit_count()) return; // Find the subprogram for function DISubprogram *Subprogram = nullptr; for (DISubprogram *SP : DbgFinder.subprograms()) { if (SP->getFunction() == F) { Subprogram = SP; break; } } DIGlobalVariable *DGV = dxilutil::FindGlobalVariableDebugInfo(GV, DbgFinder); if (!DGV) return; DITypeIdentifierMap EmptyMap; DIBuilder DIB(*GV->getParent()); DIScope *Scope = Subprogram; DebugLoc Loc = DebugLoc::get(DGV->getLine(), 0, Scope); // If the variable is a member of another variable, find the offset and size bool IsFragment = false; unsigned OffsetInBits = 0, SizeInBits = 0; if (DIGlobalVariable *UnsplitDGV = FindGlobalVariableFragment( DbgFinder, DGV, &OffsetInBits, &SizeInBits)) { DGV = UnsplitDGV; IsFragment = true; } std::string Name = "global."; Name += DGV->getName(); // Using arg_variable instead of auto_variable because arg variables can use // Subprogram as its scope, so we don't have to make one up for it. llvm::dwarf::Tag Tag = llvm::dwarf::Tag::DW_TAG_arg_variable; DIType *Ty = DGV->getType().resolve(EmptyMap); DXASSERT(Ty->getTag() != dwarf::DW_TAG_member, "Member type is not allowed for variables."); DILocalVariable *ConvertedLocalVar = DIB.createLocalVariable( Tag, Scope, Name, DGV->getFile(), DGV->getLine(), Ty); DIExpression *Expr = nullptr; if (IsFragment) { Expr = DIB.createBitPieceExpression(OffsetInBits, SizeInBits); } else { Expr = DIB.createExpression(ArrayRef<int64_t>()); } DIB.insertDeclare(AI, ConvertedLocalVar, Expr, Loc, AI->getNextNode()); } // Collect instructions using GV and the value used by the instruction. // For direct use, the value == GV // For constant operator like GEP/Bitcast, the value is the operator used by the // instruction. This requires recursion to unwrap nested constant operators // using the GV. static void collectGVInstUsers(Value *V, DenseMap<Instruction *, Value *> &InstUserMap) { for (User *U : V->users()) { if (Instruction *I = dyn_cast<Instruction>(U)) { InstUserMap[I] = V; } else { collectGVInstUsers(U, InstUserMap); } } } static Instruction *replaceGVUseWithAI(GlobalVariable *GV, AllocaInst *AI, Value *U, IRBuilder<> &B) { if (U == GV) return AI; if (GEPOperator *GEP = dyn_cast<GEPOperator>(U)) { Instruction *PtrInst = replaceGVUseWithAI(GV, AI, GEP->getPointerOperand(), B); SmallVector<Value *, 2> Index(GEP->idx_begin(), GEP->idx_end()); return cast<Instruction>(B.CreateGEP(PtrInst, Index)); } if (BitCastOperator *BCO = dyn_cast<BitCastOperator>(U)) { Instruction *SrcInst = replaceGVUseWithAI(GV, AI, BCO->getOperand(0), B); return cast<Instruction>(B.CreateBitCast(SrcInst, BCO->getType())); } DXASSERT(false, "unsupported user of static global"); return nullptr; } bool LowerStaticGlobalIntoAlloca::lowerStaticGlobalIntoAlloca( GlobalVariable *GV, const DataLayout &DL, DxilTypeSystem &typeSys, SetVector<Function *> &entryAndInitFunctionSet) { GV->removeDeadConstantUsers(); bool bIsObjectTy = dxilutil::IsHLSLObjectType( dxilutil::StripArrayTypes(GV->getType()->getElementType())); // Create alloca for each entry. DenseMap<Function *, AllocaInst *> allocaMap; for (Function *F : entryAndInitFunctionSet) { IRBuilder<> Builder(dxilutil::FindAllocaInsertionPt(F)); AllocaInst *AI = Builder.CreateAlloca(GV->getType()->getElementType()); allocaMap[F] = AI; // Store initializer is exist. 
    if (GV->hasInitializer() && !isa<UndefValue>(GV->getInitializer()) &&
        !bIsObjectTy) { // Do not zero-initialize object allocas
      Builder.CreateStore(GV->getInitializer(), AI);
    }
  }

  DenseMap<Instruction *, Value *> InstUserMap;
  collectGVInstUsers(GV, InstUserMap);

  for (auto it : InstUserMap) {
    Instruction *I = it.first;
    Value *U = it.second;
    Function *F = I->getParent()->getParent();
    AllocaInst *AI = allocaMap[F];
    IRBuilder<> B(I);
    Instruction *UI = replaceGVUseWithAI(GV, AI, U, B);
    I->replaceUsesOfWith(U, UI);
  }

  for (Function *F : entryAndInitFunctionSet) {
    AllocaInst *AI = allocaMap[F];
    if (AI->user_empty())
      AI->eraseFromParent();
    else
      PatchDebugInfo(m_DbgFinder, F, GV, AI);
  }

  GV->removeDeadConstantUsers();
  if (GV->user_empty())
    GV->eraseFromParent();
  return true;
}

bool LowerStaticGlobalIntoAlloca::usedOnlyInEntry(
    Value *V, SetVector<Function *> &entryAndInitFunctionSet) {
  bool bResult = true;
  for (User *U : V->users()) {
    if (Instruction *I = dyn_cast<Instruction>(U)) {
      Function *F = I->getParent()->getParent();
      if (entryAndInitFunctionSet.count(F) == 0) {
        bResult = false;
        break;
      }
    } else {
      bResult = usedOnlyInEntry(U, entryAndInitFunctionSet);
      if (!bResult)
        break;
    }
  }
  return bResult;
}

char LowerStaticGlobalIntoAlloca::ID = 0;

INITIALIZE_PASS(LowerStaticGlobalIntoAlloca, "static-global-to-alloca",
                "Lower static global into Alloca", false, false)

// Public interface to the LowerStaticGlobalIntoAlloca pass
ModulePass *llvm::createLowerStaticGlobalIntoAlloca() {
  return new LowerStaticGlobalIntoAlloca();
}
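To make the per-entry lowering above concrete, here is a small standalone C++ model. It is not the DXC pass itself; ToyGlobal, ToyUse, and the index-chain representation are invented for illustration. It shows the same three steps: each entry function that touches a static global gets its own initialized copy, every use is re-resolved against that copy, and the now-unused global can be dropped.

// A minimal sketch, assuming a toy representation of globals and their uses.
// Names (ToyGlobal, ToyUse, perFunctionCopy) do not exist in DXC or LLVM.
#include <cassert>
#include <iostream>
#include <map>
#include <string>
#include <vector>

struct ToyGlobal {
  std::string name;
  std::vector<int> init; // flattened initializer
};

// One "use" of the global: a function name plus a constant index chain,
// playing the role of a constant GEP on the global.
struct ToyUse {
  std::string inFunction;
  std::vector<int> indexChain;
};

int main() {
  ToyGlobal gv{"g_scratch", {1, 2, 3, 4}};
  std::vector<ToyUse> uses = {{"main", {2}}, {"patchConstantFn", {0}}};

  // Step 1: each entry function gets a private copy, initialized from the
  // global's initializer (the "alloca + store initializer" step).
  std::map<std::string, std::vector<int>> perFunctionCopy;
  for (const ToyUse &u : uses)
    if (!perFunctionCopy.count(u.inFunction))
      perFunctionCopy[u.inFunction] = gv.init;

  // Step 2: re-resolve every use against the per-function copy instead of
  // the global (the replaceGVUseWithAI step, minus the IR plumbing).
  for (const ToyUse &u : uses) {
    const std::vector<int> &copy = perFunctionCopy[u.inFunction];
    int idx = u.indexChain.front();
    assert(idx >= 0 && idx < (int)copy.size());
    std::cout << u.inFunction << " now reads its local copy[" << idx
              << "] = " << copy[idx] << "\n";
  }

  // Step 3: with no users left, the global itself would be erased.
  return 0;
}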
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/ADCE.cpp
//===- DCE.cpp - Code to perform dead code elimination --------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the Aggressive Dead Code Elimination pass. This pass // optimistically assumes that all instructions are dead until proven otherwise, // allowing it to eliminate dead computations that other DCE passes do not // catch, particularly involving loop computations. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/DepthFirstIterator.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/CFG.h" #include "llvm/IR/InstIterator.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/Pass.h" using namespace llvm; #define DEBUG_TYPE "adce" STATISTIC(NumRemoved, "Number of instructions removed"); namespace { struct ADCE : public FunctionPass { static char ID; // Pass identification, replacement for typeid ADCE() : FunctionPass(ID) { initializeADCEPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function& F) override; void getAnalysisUsage(AnalysisUsage& AU) const override { AU.setPreservesCFG(); } }; } char ADCE::ID = 0; INITIALIZE_PASS(ADCE, "adce", "Aggressive Dead Code Elimination", false, false) bool ADCE::runOnFunction(Function& F) { if (skipOptnoneFunction(F)) return false; SmallPtrSet<Instruction*, 128> Alive; SmallVector<Instruction*, 128> Worklist; // Collect the set of "root" instructions that are known live. for (Instruction &I : inst_range(F)) { if (isa<TerminatorInst>(I) || isa<DbgInfoIntrinsic>(I) || isa<LandingPadInst>(I) || I.mayHaveSideEffects()) { Alive.insert(&I); Worklist.push_back(&I); } } // Propagate liveness backwards to operands. while (!Worklist.empty()) { Instruction *Curr = Worklist.pop_back_val(); for (Use &OI : Curr->operands()) { if (Instruction *Inst = dyn_cast<Instruction>(OI)) if (Alive.insert(Inst).second) Worklist.push_back(Inst); } } // The inverse of the live set is the dead set. These are those instructions // which have no side effects and do not influence the control flow or return // value of the function, and may therefore be deleted safely. // NOTE: We reuse the Worklist vector here for memory efficiency. for (Instruction &I : inst_range(F)) { if (!Alive.count(&I)) { Worklist.push_back(&I); I.dropAllReferences(); } } for (Instruction *&I : Worklist) { ++NumRemoved; I->eraseFromParent(); } return !Worklist.empty(); } FunctionPass *llvm::createAggressiveDCEPass() { return new ADCE(); }
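As a sanity check on the marking scheme in ADCE above, the following standalone C++ sketch replays the same optimistic worklist algorithm on a toy dependence graph. The ToyInst struct and the example nodes are illustrative, not LLVM types: roots stand in for terminators and side-effecting instructions, liveness flows backwards through operand edges, and anything left unmarked is dead.

// A minimal sketch, assuming a hand-built dependence graph in place of IR.
#include <cstdio>
#include <set>
#include <string>
#include <vector>

struct ToyInst {
  std::string name;
  bool isRoot;               // terminator / may have side effects
  std::vector<int> operands; // indices of instructions this one uses
};

int main() {
  // %0 = add, %1 = mul (uses %0 but feeds nothing live), %2 = store %0, %3 = ret
  std::vector<ToyInst> insts = {
      {"add", false, {}},
      {"mul", false, {0}},
      {"store", true, {0}},
      {"ret", true, {}},
  };

  // Optimistically assume everything is dead; seed the worklist with roots.
  std::set<int> alive;
  std::vector<int> worklist;
  for (int i = 0; i < (int)insts.size(); ++i)
    if (insts[i].isRoot) {
      alive.insert(i);
      worklist.push_back(i);
    }

  // Propagate liveness backwards through operand edges.
  while (!worklist.empty()) {
    int cur = worklist.back();
    worklist.pop_back();
    for (int op : insts[cur].operands)
      if (alive.insert(op).second)
        worklist.push_back(op);
  }

  // Whatever was never marked alive can be removed ("mul" in this example).
  for (int i = 0; i < (int)insts.size(); ++i)
    if (!alive.count(i))
      std::printf("would erase dead instruction: %s\n", insts[i].name.c_str());
  return 0;
}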
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/LowerAtomic.cpp
//===- LowerAtomic.cpp - Lower atomic intrinsics --------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass lowers atomic intrinsics to non-atomic form for use in a known // non-preemptible environment. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/Pass.h" using namespace llvm; #define DEBUG_TYPE "loweratomic" static bool LowerAtomicCmpXchgInst(AtomicCmpXchgInst *CXI) { IRBuilder<> Builder(CXI->getParent(), CXI); Value *Ptr = CXI->getPointerOperand(); Value *Cmp = CXI->getCompareOperand(); Value *Val = CXI->getNewValOperand(); LoadInst *Orig = Builder.CreateLoad(Ptr); Value *Equal = Builder.CreateICmpEQ(Orig, Cmp); Value *Res = Builder.CreateSelect(Equal, Val, Orig); Builder.CreateStore(Res, Ptr); Res = Builder.CreateInsertValue(UndefValue::get(CXI->getType()), Orig, 0); Res = Builder.CreateInsertValue(Res, Equal, 1); CXI->replaceAllUsesWith(Res); CXI->eraseFromParent(); return true; } static bool LowerAtomicRMWInst(AtomicRMWInst *RMWI) { IRBuilder<> Builder(RMWI->getParent(), RMWI); Value *Ptr = RMWI->getPointerOperand(); Value *Val = RMWI->getValOperand(); LoadInst *Orig = Builder.CreateLoad(Ptr); Value *Res = nullptr; switch (RMWI->getOperation()) { default: llvm_unreachable("Unexpected RMW operation"); case AtomicRMWInst::Xchg: Res = Val; break; case AtomicRMWInst::Add: Res = Builder.CreateAdd(Orig, Val); break; case AtomicRMWInst::Sub: Res = Builder.CreateSub(Orig, Val); break; case AtomicRMWInst::And: Res = Builder.CreateAnd(Orig, Val); break; case AtomicRMWInst::Nand: Res = Builder.CreateNot(Builder.CreateAnd(Orig, Val)); break; case AtomicRMWInst::Or: Res = Builder.CreateOr(Orig, Val); break; case AtomicRMWInst::Xor: Res = Builder.CreateXor(Orig, Val); break; case AtomicRMWInst::Max: Res = Builder.CreateSelect(Builder.CreateICmpSLT(Orig, Val), Val, Orig); break; case AtomicRMWInst::Min: Res = Builder.CreateSelect(Builder.CreateICmpSLT(Orig, Val), Orig, Val); break; case AtomicRMWInst::UMax: Res = Builder.CreateSelect(Builder.CreateICmpULT(Orig, Val), Val, Orig); break; case AtomicRMWInst::UMin: Res = Builder.CreateSelect(Builder.CreateICmpULT(Orig, Val), Orig, Val); break; } Builder.CreateStore(Res, Ptr); RMWI->replaceAllUsesWith(Orig); RMWI->eraseFromParent(); return true; } static bool LowerFenceInst(FenceInst *FI) { FI->eraseFromParent(); return true; } static bool LowerLoadInst(LoadInst *LI) { LI->setAtomic(NotAtomic); return true; } static bool LowerStoreInst(StoreInst *SI) { SI->setAtomic(NotAtomic); return true; } namespace { struct LowerAtomic : public BasicBlockPass { static char ID; LowerAtomic() : BasicBlockPass(ID) { initializeLowerAtomicPass(*PassRegistry::getPassRegistry()); } bool runOnBasicBlock(BasicBlock &BB) override { if (skipOptnoneFunction(BB)) return false; bool Changed = false; for (BasicBlock::iterator DI = BB.begin(), DE = BB.end(); DI != DE; ) { Instruction *Inst = DI++; if (FenceInst *FI = dyn_cast<FenceInst>(Inst)) Changed |= LowerFenceInst(FI); else if (AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(Inst)) Changed |= LowerAtomicCmpXchgInst(CXI); else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(Inst)) Changed |= 
LowerAtomicRMWInst(RMWI); else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) { if (LI->isAtomic()) LowerLoadInst(LI); } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) { if (SI->isAtomic()) LowerStoreInst(SI); } } return Changed; } }; } char LowerAtomic::ID = 0; INITIALIZE_PASS(LowerAtomic, "loweratomic", "Lower atomic intrinsics to non-atomic form", false, false) Pass *llvm::createLowerAtomicPass() { return new LowerAtomic(); }
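The shape of the cmpxchg lowering above is easier to see as ordinary C++. The sketch below mirrors the emitted load / compare / select / store / insertvalue sequence; loweredCmpXchg is an invented name, not an LLVM API, and the rewrite is only equivalent to the atomic form in the non-preemptible, single-threaded setting this pass targets.

// A minimal sketch of the non-atomic sequence LowerAtomicCmpXchgInst emits.
#include <cstdio>
#include <utility>

std::pair<int, bool> loweredCmpXchg(int *ptr, int expected, int desired) {
  int orig = *ptr;                 // LoadInst
  bool equal = (orig == expected); // ICmpEQ
  *ptr = equal ? desired : orig;   // Select + StoreInst
  return {orig, equal};            // InsertValue x2 into the {value, flag} pair
}

int main() {
  int v = 41;
  auto r1 = loweredCmpXchg(&v, 41, 42); // succeeds: v becomes 42
  auto r2 = loweredCmpXchg(&v, 41, 43); // fails: v stays 42
  std::printf("first:  old=%d matched=%d v=%d\n", r1.first, (int)r1.second, v);
  std::printf("second: old=%d matched=%d v=%d\n", r2.first, (int)r2.second, v);
  return 0;
}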
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
//===-- LoopIdiomRecognize.cpp - Loop idiom recognition -------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass implements an idiom recognizer that transforms simple loops into a // non-loop form. In cases that this kicks in, it can be a significant // performance win. // //===----------------------------------------------------------------------===// // // TODO List: // // Future loop memory idioms to recognize: // memcmp, memmove, strlen, etc. // Future floating point idioms to recognize in -ffast-math mode: // fpowi // Future integer operation idioms to recognize: // ctpop, ctlz, cttz // // Beware that isel's default lowering for ctpop is highly inefficient for // i64 and larger types when i64 is legal and the value has few bits set. It // would be good to enhance isel to emit a loop for ctpop in this case. // // We should enhance the memset/memcpy recognition to handle multiple stores in // the loop. This would handle things like: // void foo(_Complex float *P) // for (i) { __real__(*P) = 0; __imag__(*P) = 0; } // // We should enhance this to handle negative strides through memory. // Alternatively (and perhaps better) we could rely on an earlier pass to force // forward iteration through memory, which is generally better for cache // behavior. Negative strides *do* happen for memset/memcpy loops. // // This could recognize common matrix multiplies and dot product idioms and // replace them with calls to BLAS (if linked in??). // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/LoopPass.h" #include "llvm/Analysis/ScalarEvolutionExpander.h" #include "llvm/Analysis/ScalarEvolutionExpressions.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/Local.h" using namespace llvm; #define DEBUG_TYPE "loop-idiom" STATISTIC(NumMemSet, "Number of memset's formed from loop stores"); STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores"); namespace { class LoopIdiomRecognize; /// This class defines some utility functions for loop idiom recognization. class LIRUtil { public: /// Return true iff the block contains nothing but an uncondition branch /// (aka goto instruction). static bool isAlmostEmpty(BasicBlock *); static BranchInst *getBranch(BasicBlock *BB) { return dyn_cast<BranchInst>(BB->getTerminator()); } /// Derive the precondition block (i.e the block that guards the loop /// preheader) from the given preheader. static BasicBlock *getPrecondBb(BasicBlock *PreHead); }; /// This class is to recoginize idioms of population-count conducted in /// a noncountable loop. 
Currently it only recognizes this pattern: /// \code /// while(x) {cnt++; ...; x &= x - 1; ...} /// \endcode class NclPopcountRecognize { LoopIdiomRecognize &LIR; Loop *CurLoop; BasicBlock *PreCondBB; typedef IRBuilder<> IRBuilderTy; public: explicit NclPopcountRecognize(LoopIdiomRecognize &TheLIR); bool recognize(); private: /// Take a glimpse of the loop to see if we need to go ahead recoginizing /// the idiom. bool preliminaryScreen(); /// Check if the given conditional branch is based on the comparison /// between a variable and zero, and if the variable is non-zero, the /// control yields to the loop entry. If the branch matches the behavior, /// the variable involved in the comparion is returned. This function will /// be called to see if the precondition and postcondition of the loop /// are in desirable form. Value *matchCondition(BranchInst *Br, BasicBlock *NonZeroTarget) const; /// Return true iff the idiom is detected in the loop. and 1) \p CntInst /// is set to the instruction counting the population bit. 2) \p CntPhi /// is set to the corresponding phi node. 3) \p Var is set to the value /// whose population bits are being counted. bool detectIdiom (Instruction *&CntInst, PHINode *&CntPhi, Value *&Var) const; /// Insert ctpop intrinsic function and some obviously dead instructions. void transform(Instruction *CntInst, PHINode *CntPhi, Value *Var); /// Create llvm.ctpop.* intrinsic function. CallInst *createPopcntIntrinsic(IRBuilderTy &IRB, Value *Val, DebugLoc DL); }; class LoopIdiomRecognize : public LoopPass { Loop *CurLoop; DominatorTree *DT; ScalarEvolution *SE; TargetLibraryInfo *TLI; const TargetTransformInfo *TTI; public: static char ID; explicit LoopIdiomRecognize() : LoopPass(ID) { initializeLoopIdiomRecognizePass(*PassRegistry::getPassRegistry()); DT = nullptr; SE = nullptr; TLI = nullptr; TTI = nullptr; } bool runOnLoop(Loop *L, LPPassManager &LPM) override; bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount, SmallVectorImpl<BasicBlock*> &ExitBlocks); bool processLoopStore(StoreInst *SI, const SCEV *BECount); bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount); bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize, unsigned StoreAlignment, Value *SplatValue, Instruction *TheStore, const SCEVAddRecExpr *Ev, const SCEV *BECount); bool processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize, const SCEVAddRecExpr *StoreEv, const SCEVAddRecExpr *LoadEv, const SCEV *BECount); /// This transformation requires natural loop information & requires that /// loop preheaders be inserted into the CFG. /// void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<LoopInfoWrapperPass>(); AU.addPreserved<LoopInfoWrapperPass>(); AU.addRequiredID(LoopSimplifyID); AU.addPreservedID(LoopSimplifyID); AU.addRequiredID(LCSSAID); AU.addPreservedID(LCSSAID); AU.addRequired<AliasAnalysis>(); AU.addPreserved<AliasAnalysis>(); AU.addRequired<ScalarEvolution>(); AU.addPreserved<ScalarEvolution>(); AU.addPreserved<DominatorTreeWrapperPass>(); AU.addRequired<DominatorTreeWrapperPass>(); AU.addRequired<TargetLibraryInfoWrapperPass>(); AU.addRequired<TargetTransformInfoWrapperPass>(); } DominatorTree *getDominatorTree() { return DT ? DT : (DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree()); } ScalarEvolution *getScalarEvolution() { return SE ? 
SE : (SE = &getAnalysis<ScalarEvolution>()); } TargetLibraryInfo *getTargetLibraryInfo() { if (!TLI) TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); return TLI; } const TargetTransformInfo *getTargetTransformInfo() { return TTI ? TTI : (TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI( *CurLoop->getHeader()->getParent())); } Loop *getLoop() const { return CurLoop; } private: bool runOnNoncountableLoop(); bool runOnCountableLoop(); }; } char LoopIdiomRecognize::ID = 0; INITIALIZE_PASS_BEGIN(LoopIdiomRecognize, "loop-idiom", "Recognize loop idioms", false, false) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopSimplify) INITIALIZE_PASS_DEPENDENCY(LCSSA) INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) INITIALIZE_PASS_END(LoopIdiomRecognize, "loop-idiom", "Recognize loop idioms", false, false) Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognize(); } /// deleteDeadInstruction - Delete this instruction. Before we do, go through /// and zero out all the operands of this instruction. If any of them become /// dead, delete them and the computation tree that feeds them. /// static void deleteDeadInstruction(Instruction *I, const TargetLibraryInfo *TLI) { SmallVector<Value *, 16> Operands(I->value_op_begin(), I->value_op_end()); I->replaceAllUsesWith(UndefValue::get(I->getType())); I->eraseFromParent(); for (Value *Op : Operands) RecursivelyDeleteTriviallyDeadInstructions(Op, TLI); } //===----------------------------------------------------------------------===// // // Implementation of LIRUtil // //===----------------------------------------------------------------------===// // This function will return true iff the given block contains nothing but goto. // A typical usage of this function is to check if the preheader function is // "almost" empty such that generated intrinsic functions can be moved across // the preheader and be placed at the end of the precondition block without // the concern of breaking data dependence. bool LIRUtil::isAlmostEmpty(BasicBlock *BB) { if (BranchInst *Br = getBranch(BB)) { return Br->isUnconditional() && Br == BB->begin(); } return false; } BasicBlock *LIRUtil::getPrecondBb(BasicBlock *PreHead) { if (BasicBlock *BB = PreHead->getSinglePredecessor()) { BranchInst *Br = getBranch(BB); return Br && Br->isConditional() ? BB : nullptr; } return nullptr; } //===----------------------------------------------------------------------===// // // Implementation of NclPopcountRecognize // //===----------------------------------------------------------------------===// NclPopcountRecognize::NclPopcountRecognize(LoopIdiomRecognize &TheLIR): LIR(TheLIR), CurLoop(TheLIR.getLoop()), PreCondBB(nullptr) { } bool NclPopcountRecognize::preliminaryScreen() { const TargetTransformInfo *TTI = LIR.getTargetTransformInfo(); if (TTI->getPopcntSupport(32) != TargetTransformInfo::PSK_FastHardware) return false; // Counting population are usually conducted by few arithmetic instructions. // Such instructions can be easilly "absorbed" by vacant slots in a // non-compact loop. Therefore, recognizing popcount idiom only makes sense // in a compact loop. // Give up if the loop has multiple blocks or multiple backedges. 
if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1) return false; BasicBlock *LoopBody = *(CurLoop->block_begin()); if (LoopBody->size() >= 20) { // The loop is too big, bail out. return false; } // It should have a preheader containing nothing but a goto instruction. BasicBlock *PreHead = CurLoop->getLoopPreheader(); if (!PreHead || !LIRUtil::isAlmostEmpty(PreHead)) return false; // It should have a precondition block where the generated popcount instrinsic // function will be inserted. PreCondBB = LIRUtil::getPrecondBb(PreHead); if (!PreCondBB) return false; return true; } Value *NclPopcountRecognize::matchCondition(BranchInst *Br, BasicBlock *LoopEntry) const { if (!Br || !Br->isConditional()) return nullptr; ICmpInst *Cond = dyn_cast<ICmpInst>(Br->getCondition()); if (!Cond) return nullptr; ConstantInt *CmpZero = dyn_cast<ConstantInt>(Cond->getOperand(1)); if (!CmpZero || !CmpZero->isZero()) return nullptr; ICmpInst::Predicate Pred = Cond->getPredicate(); if ((Pred == ICmpInst::ICMP_NE && Br->getSuccessor(0) == LoopEntry) || (Pred == ICmpInst::ICMP_EQ && Br->getSuccessor(1) == LoopEntry)) return Cond->getOperand(0); return nullptr; } bool NclPopcountRecognize::detectIdiom(Instruction *&CntInst, PHINode *&CntPhi, Value *&Var) const { // Following code tries to detect this idiom: // // if (x0 != 0) // goto loop-exit // the precondition of the loop // cnt0 = init-val; // do { // x1 = phi (x0, x2); // cnt1 = phi(cnt0, cnt2); // // cnt2 = cnt1 + 1; // ... // x2 = x1 & (x1 - 1); // ... // } while(x != 0); // // loop-exit: // // step 1: Check to see if the look-back branch match this pattern: // "if (a!=0) goto loop-entry". BasicBlock *LoopEntry; Instruction *DefX2, *CountInst; Value *VarX1, *VarX0; PHINode *PhiX, *CountPhi; DefX2 = CountInst = nullptr; VarX1 = VarX0 = nullptr; PhiX = CountPhi = nullptr; LoopEntry = *(CurLoop->block_begin()); // step 1: Check if the loop-back branch is in desirable form. { if (Value *T = matchCondition (LIRUtil::getBranch(LoopEntry), LoopEntry)) DefX2 = dyn_cast<Instruction>(T); else return false; } // step 2: detect instructions corresponding to "x2 = x1 & (x1 - 1)" { if (!DefX2 || DefX2->getOpcode() != Instruction::And) return false; BinaryOperator *SubOneOp; if ((SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(0)))) VarX1 = DefX2->getOperand(1); else { VarX1 = DefX2->getOperand(0); SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(1)); } if (!SubOneOp) return false; Instruction *SubInst = cast<Instruction>(SubOneOp); ConstantInt *Dec = dyn_cast<ConstantInt>(SubInst->getOperand(1)); if (!Dec || !((SubInst->getOpcode() == Instruction::Sub && Dec->isOne()) || (SubInst->getOpcode() == Instruction::Add && Dec->isAllOnesValue()))) { return false; } } // step 3: Check the recurrence of variable X { PhiX = dyn_cast<PHINode>(VarX1); if (!PhiX || (PhiX->getOperand(0) != DefX2 && PhiX->getOperand(1) != DefX2)) { return false; } } // step 4: Find the instruction which count the population: cnt2 = cnt1 + 1 { CountInst = nullptr; for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI(), IterE = LoopEntry->end(); Iter != IterE; Iter++) { Instruction *Inst = Iter; if (Inst->getOpcode() != Instruction::Add) continue; ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1)); if (!Inc || !Inc->isOne()) continue; PHINode *Phi = dyn_cast<PHINode>(Inst->getOperand(0)); if (!Phi || Phi->getParent() != LoopEntry) continue; // Check if the result of the instruction is live of the loop. 
bool LiveOutLoop = false; for (User *U : Inst->users()) { if ((cast<Instruction>(U))->getParent() != LoopEntry) { LiveOutLoop = true; break; } } if (LiveOutLoop) { CountInst = Inst; CountPhi = Phi; break; } } if (!CountInst) return false; } // step 5: check if the precondition is in this form: // "if (x != 0) goto loop-head ; else goto somewhere-we-don't-care;" { BranchInst *PreCondBr = LIRUtil::getBranch(PreCondBB); Value *T = matchCondition (PreCondBr, CurLoop->getLoopPreheader()); if (T != PhiX->getOperand(0) && T != PhiX->getOperand(1)) return false; CntInst = CountInst; CntPhi = CountPhi; Var = T; } return true; } void NclPopcountRecognize::transform(Instruction *CntInst, PHINode *CntPhi, Value *Var) { ScalarEvolution *SE = LIR.getScalarEvolution(); TargetLibraryInfo *TLI = LIR.getTargetLibraryInfo(); BasicBlock *PreHead = CurLoop->getLoopPreheader(); BranchInst *PreCondBr = LIRUtil::getBranch(PreCondBB); const DebugLoc DL = CntInst->getDebugLoc(); // Assuming before transformation, the loop is following: // if (x) // the precondition // do { cnt++; x &= x - 1; } while(x); // Step 1: Insert the ctpop instruction at the end of the precondition block IRBuilderTy Builder(PreCondBr); Value *PopCnt, *PopCntZext, *NewCount, *TripCnt; { PopCnt = createPopcntIntrinsic(Builder, Var, DL); NewCount = PopCntZext = Builder.CreateZExtOrTrunc(PopCnt, cast<IntegerType>(CntPhi->getType())); if (NewCount != PopCnt) (cast<Instruction>(NewCount))->setDebugLoc(DL); // TripCnt is exactly the number of iterations the loop has TripCnt = NewCount; // If the population counter's initial value is not zero, insert Add Inst. Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead); ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal); if (!InitConst || !InitConst->isZero()) { NewCount = Builder.CreateAdd(NewCount, CntInitVal); (cast<Instruction>(NewCount))->setDebugLoc(DL); } } // Step 2: Replace the precondition from "if(x == 0) goto loop-exit" to // "if(NewCount == 0) loop-exit". Withtout this change, the intrinsic // function would be partial dead code, and downstream passes will drag // it back from the precondition block to the preheader. { ICmpInst *PreCond = cast<ICmpInst>(PreCondBr->getCondition()); Value *Opnd0 = PopCntZext; Value *Opnd1 = ConstantInt::get(PopCntZext->getType(), 0); if (PreCond->getOperand(0) != Var) std::swap(Opnd0, Opnd1); ICmpInst *NewPreCond = cast<ICmpInst>(Builder.CreateICmp(PreCond->getPredicate(), Opnd0, Opnd1)); PreCondBr->setCondition(NewPreCond); RecursivelyDeleteTriviallyDeadInstructions(PreCond, TLI); } // Step 3: Note that the population count is exactly the trip count of the // loop in question, which enble us to to convert the loop from noncountable // loop into a countable one. The benefit is twofold: // // - If the loop only counts population, the entire loop become dead after // the transformation. It is lots easier to prove a countable loop dead // than to prove a noncountable one. (In some C dialects, a infite loop // isn't dead even if it computes nothing useful. In general, DCE needs // to prove a noncountable loop finite before safely delete it.) // // - If the loop also performs something else, it remains alive. // Since it is transformed to countable form, it can be aggressively // optimized by some optimizations which are in general not applicable // to a noncountable loop. 
// // After this step, this loop (conceptually) would look like following: // newcnt = __builtin_ctpop(x); // t = newcnt; // if (x) // do { cnt++; x &= x-1; t--) } while (t > 0); BasicBlock *Body = *(CurLoop->block_begin()); { BranchInst *LbBr = LIRUtil::getBranch(Body); ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition()); Type *Ty = TripCnt->getType(); PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", Body->begin()); Builder.SetInsertPoint(LbCond); Value *Opnd1 = cast<Value>(TcPhi); Value *Opnd2 = cast<Value>(ConstantInt::get(Ty, 1)); Instruction *TcDec = cast<Instruction>(Builder.CreateSub(Opnd1, Opnd2, "tcdec", false, true)); TcPhi->addIncoming(TripCnt, PreHead); TcPhi->addIncoming(TcDec, Body); CmpInst::Predicate Pred = (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_UGT : CmpInst::ICMP_SLE; LbCond->setPredicate(Pred); LbCond->setOperand(0, TcDec); LbCond->setOperand(1, cast<Value>(ConstantInt::get(Ty, 0))); } // Step 4: All the references to the original population counter outside // the loop are replaced with the NewCount -- the value returned from // __builtin_ctpop(). CntInst->replaceUsesOutsideBlock(NewCount, Body); // step 5: Forget the "non-computable" trip-count SCEV associated with the // loop. The loop would otherwise not be deleted even if it becomes empty. SE->forgetLoop(CurLoop); } CallInst *NclPopcountRecognize::createPopcntIntrinsic(IRBuilderTy &IRBuilder, Value *Val, DebugLoc DL) { Value *Ops[] = { Val }; Type *Tys[] = { Val->getType() }; Module *M = (*(CurLoop->block_begin()))->getParent()->getParent(); Value *Func = Intrinsic::getDeclaration(M, Intrinsic::ctpop, Tys); CallInst *CI = IRBuilder.CreateCall(Func, Ops); CI->setDebugLoc(DL); return CI; } /// recognize - detect population count idiom in a non-countable loop. If /// detected, transform the relevant code to popcount intrinsic function /// call, and return true; otherwise, return false. bool NclPopcountRecognize::recognize() { if (!LIR.getTargetTransformInfo()) return false; LIR.getScalarEvolution(); if (!preliminaryScreen()) return false; Instruction *CntInst; PHINode *CntPhi; Value *Val; if (!detectIdiom(CntInst, CntPhi, Val)) return false; transform(CntInst, CntPhi, Val); return true; } //===----------------------------------------------------------------------===// // // Implementation of LoopIdiomRecognize // //===----------------------------------------------------------------------===// bool LoopIdiomRecognize::runOnCountableLoop() { const SCEV *BECount = SE->getBackedgeTakenCount(CurLoop); assert(!isa<SCEVCouldNotCompute>(BECount) && "runOnCountableLoop() called on a loop without a predictable" "backedge-taken count"); // If this loop executes exactly one time, then it should be peeled, not // optimized by this pass. if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount)) if (BECst->getValue()->getValue() == 0) return false; // set DT (void)getDominatorTree(); LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); // set TLI (void)getTargetLibraryInfo(); SmallVector<BasicBlock*, 8> ExitBlocks; CurLoop->getUniqueExitBlocks(ExitBlocks); DEBUG(dbgs() << "loop-idiom Scanning: F[" << CurLoop->getHeader()->getParent()->getName() << "] Loop %" << CurLoop->getHeader()->getName() << "\n"); bool MadeChange = false; // Scan all the blocks in the loop that are not in subloops. for (auto *BB : CurLoop->getBlocks()) { // Ignore blocks in subloops. 
if (LI.getLoopFor(BB) != CurLoop) continue; MadeChange |= runOnLoopBlock(BB, BECount, ExitBlocks); } return MadeChange; } bool LoopIdiomRecognize::runOnNoncountableLoop() { NclPopcountRecognize Popcount(*this); if (Popcount.recognize()) return true; return false; } bool LoopIdiomRecognize::runOnLoop(Loop *L, LPPassManager &LPM) { if (skipOptnoneFunction(L)) return false; CurLoop = L; // If the loop could not be converted to canonical form, it must have an // indirectbr in it, just give up. if (!L->getLoopPreheader()) return false; // Disable loop idiom recognition if the function's name is a common idiom. StringRef Name = L->getHeader()->getParent()->getName(); if (Name == "memset" || Name == "memcpy") return false; SE = &getAnalysis<ScalarEvolution>(); if (SE->hasLoopInvariantBackedgeTakenCount(L)) return runOnCountableLoop(); return runOnNoncountableLoop(); } /// runOnLoopBlock - Process the specified block, which lives in a counted loop /// with the specified backedge count. This block is known to be in the current /// loop and not in any subloops. bool LoopIdiomRecognize::runOnLoopBlock(BasicBlock *BB, const SCEV *BECount, SmallVectorImpl<BasicBlock*> &ExitBlocks) { // We can only promote stores in this block if they are unconditionally // executed in the loop. For a block to be unconditionally executed, it has // to dominate all the exit blocks of the loop. Verify this now. for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) if (!DT->dominates(BB, ExitBlocks[i])) return false; bool MadeChange = false; for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) { Instruction *Inst = I++; // Look for store instructions, which may be optimized to memset/memcpy. if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) { WeakTrackingVH InstPtr(I); if (!processLoopStore(SI, BECount)) continue; MadeChange = true; // If processing the store invalidated our iterator, start over from the // top of the block. if (!InstPtr) I = BB->begin(); continue; } // Look for memset instructions, which may be optimized to a larger memset. if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) { WeakTrackingVH InstPtr(I); if (!processLoopMemSet(MSI, BECount)) continue; MadeChange = true; // If processing the memset invalidated our iterator, start over from the // top of the block. if (!InstPtr) I = BB->begin(); continue; } } return MadeChange; } /// processLoopStore - See if this store can be promoted to a memset or memcpy. bool LoopIdiomRecognize::processLoopStore(StoreInst *SI, const SCEV *BECount) { if (!SI->isSimple()) return false; Value *StoredVal = SI->getValueOperand(); Value *StorePtr = SI->getPointerOperand(); // Reject stores that are so large that they overflow an unsigned. auto &DL = CurLoop->getHeader()->getModule()->getDataLayout(); uint64_t SizeInBits = DL.getTypeSizeInBits(StoredVal->getType()); if ((SizeInBits & 7) || (SizeInBits >> 32) != 0) return false; // See if the pointer expression is an AddRec like {base,+,1} on the current // loop, which indicates a strided store. If we have something else, it's a // random store we can't handle. const SCEVAddRecExpr *StoreEv = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr)); if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine()) return false; // Check to see if the stride matches the size of the store. If so, then we // know that every byte is touched in the loop. 
unsigned StoreSize = (unsigned)SizeInBits >> 3; const SCEVConstant *Stride = dyn_cast<SCEVConstant>(StoreEv->getOperand(1)); if (!Stride || StoreSize != Stride->getValue()->getValue()) { // TODO: Could also handle negative stride here someday, that will require // the validity check in mayLoopAccessLocation to be updated though. // Enable this to print exact negative strides. #if 0 // HLSL Change - suppress '0 &&' warning if (0 && Stride && StoreSize == -Stride->getValue()->getValue()) { dbgs() << "NEGATIVE STRIDE: " << *SI << "\n"; dbgs() << "BB: " << *SI->getParent(); } #endif // HLSL Change - suppress '0 &&' warning return false; } // See if we can optimize just this store in isolation. if (processLoopStridedStore(StorePtr, StoreSize, SI->getAlignment(), StoredVal, SI, StoreEv, BECount)) return true; // If the stored value is a strided load in the same loop with the same stride // this this may be transformable into a memcpy. This kicks in for stuff like // for (i) A[i] = B[i]; if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) { const SCEVAddRecExpr *LoadEv = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getOperand(0))); if (LoadEv && LoadEv->getLoop() == CurLoop && LoadEv->isAffine() && StoreEv->getOperand(1) == LoadEv->getOperand(1) && LI->isSimple()) if (processLoopStoreOfLoopLoad(SI, StoreSize, StoreEv, LoadEv, BECount)) return true; } //errs() << "UNHANDLED strided store: " << *StoreEv << " - " << *SI << "\n"; return false; } /// processLoopMemSet - See if this memset can be promoted to a large memset. bool LoopIdiomRecognize:: processLoopMemSet(MemSetInst *MSI, const SCEV *BECount) { // We can only handle non-volatile memsets with a constant size. if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength())) return false; // If we're not allowed to hack on memset, we fail. if (!TLI->has(LibFunc::memset)) return false; Value *Pointer = MSI->getDest(); // See if the pointer expression is an AddRec like {base,+,1} on the current // loop, which indicates a strided store. If we have something else, it's a // random store we can't handle. const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer)); if (!Ev || Ev->getLoop() != CurLoop || !Ev->isAffine()) return false; // Reject memsets that are so large that they overflow an unsigned. uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue(); if ((SizeInBytes >> 32) != 0) return false; // Check to see if the stride matches the size of the memset. If so, then we // know that every byte is touched in the loop. const SCEVConstant *Stride = dyn_cast<SCEVConstant>(Ev->getOperand(1)); // TODO: Could also handle negative stride here someday, that will require the // validity check in mayLoopAccessLocation to be updated though. if (!Stride || MSI->getLength() != Stride->getValue()) return false; return processLoopStridedStore(Pointer, (unsigned)SizeInBytes, MSI->getAlignment(), MSI->getValue(), MSI, Ev, BECount); } /// mayLoopAccessLocation - Return true if the specified loop might access the /// specified pointer location, which is a loop-strided access. The 'Access' /// argument specifies what the verboten forms of access are (read or write). static bool mayLoopAccessLocation(Value *Ptr,AliasAnalysis::ModRefResult Access, Loop *L, const SCEV *BECount, unsigned StoreSize, AliasAnalysis &AA, Instruction *IgnoredStore) { // Get the location that may be stored across the loop. Since the access is // strided positively through memory, we say that the modified location starts // at the pointer and has infinite size. 
uint64_t AccessSize = MemoryLocation::UnknownSize; // If the loop iterates a fixed number of times, we can refine the access size // to be exactly the size of the memset, which is (BECount+1)*StoreSize if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount)) AccessSize = (BECst->getValue()->getZExtValue()+1)*StoreSize; // TODO: For this to be really effective, we have to dive into the pointer // operand in the store. Store to &A[i] of 100 will always return may alias // with store of &A[100], we need to StoreLoc to be "A" with size of 100, // which will then no-alias a store to &A[100]. MemoryLocation StoreLoc(Ptr, AccessSize); for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E; ++BI) for (BasicBlock::iterator I = (*BI)->begin(), E = (*BI)->end(); I != E; ++I) if (&*I != IgnoredStore && (AA.getModRefInfo(I, StoreLoc) & Access)) return true; return false; } /// getMemSetPatternValue - If a strided store of the specified value is safe to /// turn into a memset_pattern16, return a ConstantArray of 16 bytes that should /// be passed in. Otherwise, return null. /// /// Note that we don't ever attempt to use memset_pattern8 or 4, because these /// just replicate their input array and then pass on to memset_pattern16. static Constant *getMemSetPatternValue(Value *V, const DataLayout &DL) { // If the value isn't a constant, we can't promote it to being in a constant // array. We could theoretically do a store to an alloca or something, but // that doesn't seem worthwhile. Constant *C = dyn_cast<Constant>(V); if (!C) return nullptr; // Only handle simple values that are a power of two bytes in size. uint64_t Size = DL.getTypeSizeInBits(V->getType()); if (Size == 0 || (Size & 7) || (Size & (Size-1))) return nullptr; // Don't care enough about darwin/ppc to implement this. if (DL.isBigEndian()) return nullptr; // Convert to size in bytes. Size /= 8; // TODO: If CI is larger than 16-bytes, we can try slicing it in half to see // if the top and bottom are the same (e.g. for vectors and large integers). if (Size > 16) return nullptr; // If the constant is exactly 16 bytes, just use it. if (Size == 16) return C; // Otherwise, we'll use an array of the constants. unsigned ArraySize = 16/Size; ArrayType *AT = ArrayType::get(V->getType(), ArraySize); return ConstantArray::get(AT, std::vector<Constant*>(ArraySize, C)); } /// processLoopStridedStore - We see a strided store of some value. If we can /// transform this into a memset or memset_pattern in the loop preheader, do so. bool LoopIdiomRecognize:: processLoopStridedStore(Value *DestPtr, unsigned StoreSize, unsigned StoreAlignment, Value *StoredVal, Instruction *TheStore, const SCEVAddRecExpr *Ev, const SCEV *BECount) { // If the stored value is a byte-wise value (like i32 -1), then it may be // turned into a memset of i8 -1, assuming that all the consecutive bytes // are stored. A store of i32 0x01020304 can never be turned into a memset, // but it can be turned into memset_pattern if the target supports it. Value *SplatValue = isBytewiseValue(StoredVal); Constant *PatternValue = nullptr; auto &DL = CurLoop->getHeader()->getModule()->getDataLayout(); unsigned DestAS = DestPtr->getType()->getPointerAddressSpace(); // If we're allowed to form a memset, and the stored value would be acceptable // for memset, use it. if (SplatValue && TLI->has(LibFunc::memset) && // Verify that the stored value is loop invariant. If not, we can't // promote the memset. CurLoop->isLoopInvariant(SplatValue)) { // Keep and use SplatValue. 
PatternValue = nullptr; } else if (DestAS == 0 && TLI->has(LibFunc::memset_pattern16) && (PatternValue = getMemSetPatternValue(StoredVal, DL))) { // Don't create memset_pattern16s with address spaces. // It looks like we can use PatternValue! SplatValue = nullptr; } else { // Otherwise, this isn't an idiom we can transform. For example, we can't // do anything with a 3-byte store. return false; } // The trip count of the loop and the base pointer of the addrec SCEV is // guaranteed to be loop invariant, which means that it should dominate the // header. This allows us to insert code for it in the preheader. BasicBlock *Preheader = CurLoop->getLoopPreheader(); IRBuilder<> Builder(Preheader->getTerminator()); SCEVExpander Expander(*SE, DL, "loop-idiom"); Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS); // Okay, we have a strided store "p[i]" of a splattable value. We can turn // this into a memset in the loop preheader now if we want. However, this // would be unsafe to do if there is anything else in the loop that may read // or write to the aliased location. Check for any overlap by generating the // base pointer and checking the region. Value *BasePtr = Expander.expandCodeFor(Ev->getStart(), DestInt8PtrTy, Preheader->getTerminator()); if (mayLoopAccessLocation(BasePtr, AliasAnalysis::ModRef, CurLoop, BECount, StoreSize, getAnalysis<AliasAnalysis>(), TheStore)) { Expander.clear(); // If we generated new code for the base pointer, clean up. RecursivelyDeleteTriviallyDeadInstructions(BasePtr, TLI); return false; } // Okay, everything looks good, insert the memset. // The # stored bytes is (BECount+1)*Size. Expand the trip count out to // pointer size if it isn't already. Type *IntPtr = Builder.getIntPtrTy(DL, DestAS); BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr); const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1), SCEV::FlagNUW); if (StoreSize != 1) { NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize), SCEV::FlagNUW); } Value *NumBytes = Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator()); CallInst *NewCall; if (SplatValue) { NewCall = Builder.CreateMemSet(BasePtr, SplatValue, NumBytes, StoreAlignment); } else { // Everything is emitted in default address space Type *Int8PtrTy = DestInt8PtrTy; Module *M = TheStore->getParent()->getParent()->getParent(); Value *MSP = M->getOrInsertFunction("memset_pattern16", Builder.getVoidTy(), Int8PtrTy, Int8PtrTy, IntPtr, (void*)nullptr); // Otherwise we should form a memset_pattern16. PatternValue is known to be // an constant array of 16-bytes. Plop the value into a mergable global. GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true, GlobalValue::PrivateLinkage, PatternValue, ".memset_pattern"); GV->setUnnamedAddr(true); // Ok to merge these. GV->setAlignment(16); Value *PatternPtr = ConstantExpr::getBitCast(GV, Int8PtrTy); NewCall = Builder.CreateCall(MSP, {BasePtr, PatternPtr, NumBytes}); } DEBUG(dbgs() << " Formed memset: " << *NewCall << "\n" << " from store to: " << *Ev << " at: " << *TheStore << "\n"); NewCall->setDebugLoc(TheStore->getDebugLoc()); // Okay, the memset has been formed. Zap the original store and anything that // feeds into it. deleteDeadInstruction(TheStore, TLI); ++NumMemSet; return true; } /// processLoopStoreOfLoopLoad - We see a strided store whose value is a /// same-strided load. 
bool LoopIdiomRecognize:: processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize, const SCEVAddRecExpr *StoreEv, const SCEVAddRecExpr *LoadEv, const SCEV *BECount) { // If we're not allowed to form memcpy, we fail. if (!TLI->has(LibFunc::memcpy)) return false; LoadInst *LI = cast<LoadInst>(SI->getValueOperand()); // The trip count of the loop and the base pointer of the addrec SCEV is // guaranteed to be loop invariant, which means that it should dominate the // header. This allows us to insert code for it in the preheader. BasicBlock *Preheader = CurLoop->getLoopPreheader(); IRBuilder<> Builder(Preheader->getTerminator()); const DataLayout &DL = Preheader->getModule()->getDataLayout(); SCEVExpander Expander(*SE, DL, "loop-idiom"); // Okay, we have a strided store "p[i]" of a loaded value. We can turn // this into a memcpy in the loop preheader now if we want. However, this // would be unsafe to do if there is anything else in the loop that may read // or write the memory region we're storing to. This includes the load that // feeds the stores. Check for an alias by generating the base address and // checking everything. Value *StoreBasePtr = Expander.expandCodeFor(StoreEv->getStart(), Builder.getInt8PtrTy(SI->getPointerAddressSpace()), Preheader->getTerminator()); if (mayLoopAccessLocation(StoreBasePtr, AliasAnalysis::ModRef, CurLoop, BECount, StoreSize, getAnalysis<AliasAnalysis>(), SI)) { Expander.clear(); // If we generated new code for the base pointer, clean up. RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI); return false; } // For a memcpy, we have to make sure that the input array is not being // mutated by the loop. Value *LoadBasePtr = Expander.expandCodeFor(LoadEv->getStart(), Builder.getInt8PtrTy(LI->getPointerAddressSpace()), Preheader->getTerminator()); if (mayLoopAccessLocation(LoadBasePtr, AliasAnalysis::Mod, CurLoop, BECount, StoreSize, getAnalysis<AliasAnalysis>(), SI)) { Expander.clear(); // If we generated new code for the base pointer, clean up. RecursivelyDeleteTriviallyDeadInstructions(LoadBasePtr, TLI); RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI); return false; } // Okay, everything is safe, we can transform this! // The # stored bytes is (BECount+1)*Size. Expand the trip count out to // pointer size if it isn't already. Type *IntPtrTy = Builder.getIntPtrTy(DL, SI->getPointerAddressSpace()); BECount = SE->getTruncateOrZeroExtend(BECount, IntPtrTy); const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtrTy, 1), SCEV::FlagNUW); if (StoreSize != 1) NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtrTy, StoreSize), SCEV::FlagNUW); Value *NumBytes = Expander.expandCodeFor(NumBytesS, IntPtrTy, Preheader->getTerminator()); CallInst *NewCall = Builder.CreateMemCpy(StoreBasePtr, LoadBasePtr, NumBytes, std::min(SI->getAlignment(), LI->getAlignment())); NewCall->setDebugLoc(SI->getDebugLoc()); DEBUG(dbgs() << " Formed memcpy: " << *NewCall << "\n" << " from load ptr=" << *LoadEv << " at: " << *LI << "\n" << " from store ptr=" << *StoreEv << " at: " << *SI << "\n"); // Okay, the memset has been formed. Zap the original store and anything that // feeds into it. deleteDeadInstruction(SI, TLI); ++NumMemCpy; return true; }
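Before moving on, a short illustrative C++ sketch (hypothetical function names, not taken from the pass) of the source-level loop shapes LoopIdiomRecognize looks for and what they are rewritten into:

#include <cstdint>

// Unit-stride store of a splattable value: the loop is equivalent to a single
// memset over N * sizeof(int) bytes, which the pass emits in the preheader
// once it proves nothing else in the loop reads or writes the region.
void zeroFill(int *A, unsigned N) {
  for (unsigned i = 0; i != N; ++i)
    A[i] = 0;                   // becomes memset(A, 0, N * sizeof(int))
}

// A same-strided load feeding the store: the memcpy form handled by
// processLoopStoreOfLoopLoad, provided the source region is not written
// inside the loop.
void copyArray(int *Dst, const int *Src, unsigned N) {
  for (unsigned i = 0; i != N; ++i)
    Dst[i] = Src[i];            // becomes memcpy(Dst, Src, N * sizeof(int))
}

// The non-countable popcount idiom handled by NclPopcountRecognize. The trip
// count equals the number of set bits in x, so the loop is rewritten around a
// ctpop intrinsic and becomes a countable (and often dead) loop.
unsigned popcountIdiom(uint32_t x) {
  unsigned cnt = 0;
  while (x) {
    ++cnt;
    x &= x - 1;                 // clears the lowest set bit each iteration
  }
  return cnt;
}

Each rewrite is only profitable when the target provides the corresponding library call or instruction, which is why the pass consults TargetLibraryInfo (memset/memcpy/memset_pattern16) and TargetTransformInfo::getPopcntSupport before transforming.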
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/LoopDistribute.cpp
//===- LoopDistribute.cpp - Loop Distribution Pass ------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the Loop Distribution Pass. Its main focus is to // distribute loops that cannot be vectorized due to dependence cycles. It // tries to isolate the offending dependences into a new loop allowing // vectorization of the remaining parts. // // For dependence analysis, the pass uses the LoopVectorizer's // LoopAccessAnalysis. Because this analysis presumes no change in the order of // memory operations, special care is taken to preserve the lexical order of // these operations. // // Similarly to the Vectorizer, the pass also supports loop versioning to // run-time disambiguate potentially overlapping arrays. // //===----------------------------------------------------------------------===// #include "llvm/ADT/DepthFirstIterator.h" #include "llvm/ADT/EquivalenceClasses.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/LoopAccessAnalysis.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/IR/Dominators.h" #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Cloning.h" #include "llvm/Transforms/Utils/LoopVersioning.h" #include <list> #define LDIST_NAME "loop-distribute" #define DEBUG_TYPE LDIST_NAME using namespace llvm; #if 0 // HLSL Change Starts - option pending static cl::opt<bool> LDistVerify("loop-distribute-verify", cl::Hidden, cl::desc("Turn on DominatorTree and LoopInfo verification " "after Loop Distribution"), cl::init(false)); static cl::opt<bool> DistributeNonIfConvertible( "loop-distribute-non-if-convertible", cl::Hidden, cl::desc("Whether to distribute into a loop that may not be " "if-convertible by the loop vectorizer"), cl::init(false)); #else static const bool LDistVerify = false; static const bool DistributeNonIfConvertible = false; #endif STATISTIC(NumLoopsDistributed, "Number of loops distributed"); namespace { /// \brief Maintains the set of instructions of the loop for a partition before /// cloning. After cloning, it hosts the new loop. class InstPartition { typedef SmallPtrSet<Instruction *, 8> InstructionSet; public: InstPartition(Instruction *I, Loop *L, bool DepCycle = false) : DepCycle(DepCycle), OrigLoop(L), ClonedLoop(nullptr) { Set.insert(I); } /// \brief Returns whether this partition contains a dependence cycle. bool hasDepCycle() const { return DepCycle; } /// \brief Adds an instruction to this partition. void add(Instruction *I) { Set.insert(I); } /// \brief Collection accessors. InstructionSet::iterator begin() { return Set.begin(); } InstructionSet::iterator end() { return Set.end(); } InstructionSet::const_iterator begin() const { return Set.begin(); } InstructionSet::const_iterator end() const { return Set.end(); } bool empty() const { return Set.empty(); } /// \brief Moves this partition into \p Other. This partition becomes empty /// after this. void moveTo(InstPartition &Other) { Other.Set.insert(Set.begin(), Set.end()); Set.clear(); Other.DepCycle |= DepCycle; } /// \brief Populates the partition with a transitive closure of all the /// instructions that the seeded instructions dependent on. 
void populateUsedSet() { // FIXME: We currently don't use control-dependence but simply include all // blocks (possibly empty at the end) and let simplifycfg mostly clean this // up. for (auto *B : OrigLoop->getBlocks()) Set.insert(B->getTerminator()); // Follow the use-def chains to form a transitive closure of all the // instructions that the originally seeded instructions depend on. SmallVector<Instruction *, 8> Worklist(Set.begin(), Set.end()); while (!Worklist.empty()) { Instruction *I = Worklist.pop_back_val(); // Insert instructions from the loop that we depend on. for (Value *V : I->operand_values()) { auto *I = dyn_cast<Instruction>(V); if (I && OrigLoop->contains(I->getParent()) && Set.insert(I).second) Worklist.push_back(I); } } } /// \brief Clones the original loop. /// /// Updates LoopInfo and DominatorTree using the information that block \p /// LoopDomBB dominates the loop. Loop *cloneLoopWithPreheader(BasicBlock *InsertBefore, BasicBlock *LoopDomBB, unsigned Index, LoopInfo *LI, DominatorTree *DT) { ClonedLoop = ::cloneLoopWithPreheader(InsertBefore, LoopDomBB, OrigLoop, VMap, Twine(".ldist") + Twine(Index), LI, DT, ClonedLoopBlocks); return ClonedLoop; } /// \brief The cloned loop. If this partition is mapped to the original loop, /// this is null. const Loop *getClonedLoop() const { return ClonedLoop; } /// \brief Returns the loop where this partition ends up after distribution. /// If this partition is mapped to the original loop then use the block from /// the loop. const Loop *getDistributedLoop() const { return ClonedLoop ? ClonedLoop : OrigLoop; } /// \brief The VMap that is populated by cloning and then used in /// remapinstruction to remap the cloned instructions. ValueToValueMapTy &getVMap() { return VMap; } /// \brief Remaps the cloned instructions using VMap. void remapInstructions() { remapInstructionsInBlocks(ClonedLoopBlocks, VMap); } /// \brief Based on the set of instructions selected for this partition, /// removes the unnecessary ones. void removeUnusedInsts() { SmallVector<Instruction *, 8> Unused; for (auto *Block : OrigLoop->getBlocks()) for (auto &Inst : *Block) if (!Set.count(&Inst)) { Instruction *NewInst = &Inst; if (!VMap.empty()) NewInst = cast<Instruction>(VMap[NewInst]); assert(!isa<BranchInst>(NewInst) && "Branches are marked used early on"); Unused.push_back(NewInst); } // Delete the instructions backwards, as it has a reduced likelihood of // having to update as many def-use and use-def chains. for (auto I = Unused.rbegin(), E = Unused.rend(); I != E; ++I) { auto *Inst = *I; if (!Inst->use_empty()) Inst->replaceAllUsesWith(UndefValue::get(Inst->getType())); Inst->eraseFromParent(); } } void print() const { if (DepCycle) dbgs() << " (cycle)\n"; for (auto *I : Set) // Prefix with the block name. dbgs() << " " << I->getParent()->getName() << ":" << *I << "\n"; } void printBlocks() const { for (auto *BB : getDistributedLoop()->getBlocks()) dbgs() << *BB; } private: /// \brief Instructions from OrigLoop selected for this partition. InstructionSet Set; /// \brief Whether this partition contains a dependence cycle. bool DepCycle; /// \brief The original loop. Loop *OrigLoop; /// \brief The cloned loop. If this partition is mapped to the original loop, /// this is null. Loop *ClonedLoop; /// \brief The blocks of ClonedLoop including the preheader. If this /// partition is mapped to the original loop, this is empty. SmallVector<BasicBlock *, 8> ClonedLoopBlocks; /// \brief These gets populated once the set of instructions have been /// finalized. 
If this partition is mapped to the original loop, these are not /// set. ValueToValueMapTy VMap; }; /// \brief Holds the set of Partitions. It populates them, merges them and then /// clones the loops. class InstPartitionContainer { typedef DenseMap<Instruction *, int> InstToPartitionIdT; public: InstPartitionContainer(Loop *L, LoopInfo *LI, DominatorTree *DT) : L(L), LI(LI), DT(DT) {} /// \brief Returns the number of partitions. unsigned getSize() const { return PartitionContainer.size(); } /// \brief Adds \p Inst into the current partition if that is marked to /// contain cycles. Otherwise start a new partition for it. void addToCyclicPartition(Instruction *Inst) { // If the current partition is non-cyclic. Start a new one. if (PartitionContainer.empty() || !PartitionContainer.back().hasDepCycle()) PartitionContainer.emplace_back(Inst, L, /*DepCycle=*/true); else PartitionContainer.back().add(Inst); } /// \brief Adds \p Inst into a partition that is not marked to contain /// dependence cycles. /// // Initially we isolate memory instructions into as many partitions as // possible, then later we may merge them back together. void addToNewNonCyclicPartition(Instruction *Inst) { PartitionContainer.emplace_back(Inst, L); } /// \brief Merges adjacent non-cyclic partitions. /// /// The idea is that we currently only want to isolate the non-vectorizable /// partition. We could later allow more distribution among these partition /// too. void mergeAdjacentNonCyclic() { mergeAdjacentPartitionsIf( [](const InstPartition *P) { return !P->hasDepCycle(); }); } /// \brief If a partition contains only conditional stores, we won't vectorize /// it. Try to merge it with a previous cyclic partition. void mergeNonIfConvertible() { mergeAdjacentPartitionsIf([&](const InstPartition *Partition) { if (Partition->hasDepCycle()) return true; // Now, check if all stores are conditional in this partition. bool seenStore = false; for (auto *Inst : *Partition) if (isa<StoreInst>(Inst)) { seenStore = true; if (!LoopAccessInfo::blockNeedsPredication(Inst->getParent(), L, DT)) return false; } return seenStore; }); } /// \brief Merges the partitions according to various heuristics. void mergeBeforePopulating() { mergeAdjacentNonCyclic(); if (!DistributeNonIfConvertible) mergeNonIfConvertible(); } /// \brief Merges partitions in order to ensure that no loads are duplicated. /// /// We can't duplicate loads because that could potentially reorder them. /// LoopAccessAnalysis provides dependency information with the context that /// the order of memory operation is preserved. /// /// Return if any partitions were merged. bool mergeToAvoidDuplicatedLoads() { typedef DenseMap<Instruction *, InstPartition *> LoadToPartitionT; typedef EquivalenceClasses<InstPartition *> ToBeMergedT; LoadToPartitionT LoadToPartition; ToBeMergedT ToBeMerged; // Step through the partitions and create equivalence between partitions // that contain the same load. Also put partitions in between them in the // same equivalence class to avoid reordering of memory operations. for (PartitionContainerT::iterator I = PartitionContainer.begin(), E = PartitionContainer.end(); I != E; ++I) { auto *PartI = &*I; // If a load occurs in two partitions PartI and PartJ, merge all // partitions (PartI, PartJ] into PartI. 
for (Instruction *Inst : *PartI) if (isa<LoadInst>(Inst)) { bool NewElt; LoadToPartitionT::iterator LoadToPart; std::tie(LoadToPart, NewElt) = LoadToPartition.insert(std::make_pair(Inst, PartI)); if (!NewElt) { DEBUG(dbgs() << "Merging partitions due to this load in multiple " << "partitions: " << PartI << ", " << LoadToPart->second << "\n" << *Inst << "\n"); auto PartJ = I; do { --PartJ; ToBeMerged.unionSets(PartI, &*PartJ); } while (&*PartJ != LoadToPart->second); } } } if (ToBeMerged.empty()) return false; // Merge the member of an equivalence class into its class leader. This // makes the members empty. for (ToBeMergedT::iterator I = ToBeMerged.begin(), E = ToBeMerged.end(); I != E; ++I) { if (!I->isLeader()) continue; auto PartI = I->getData(); for (auto PartJ : make_range(std::next(ToBeMerged.member_begin(I)), ToBeMerged.member_end())) { PartJ->moveTo(*PartI); } } // Remove the empty partitions. PartitionContainer.remove_if( [](const InstPartition &P) { return P.empty(); }); return true; } /// \brief Sets up the mapping between instructions to partitions. If the /// instruction is duplicated across multiple partitions, set the entry to -1. void setupPartitionIdOnInstructions() { int PartitionID = 0; for (const auto &Partition : PartitionContainer) { for (Instruction *Inst : Partition) { bool NewElt; InstToPartitionIdT::iterator Iter; std::tie(Iter, NewElt) = InstToPartitionId.insert(std::make_pair(Inst, PartitionID)); if (!NewElt) Iter->second = -1; } ++PartitionID; } } /// \brief Populates the partition with everything that the seeding /// instructions require. void populateUsedSet() { for (auto &P : PartitionContainer) P.populateUsedSet(); } /// \brief This performs the main chunk of the work of cloning the loops for /// the partitions. void cloneLoops(Pass *P) { BasicBlock *OrigPH = L->getLoopPreheader(); // At this point the predecessor of the preheader is either the memcheck // block or the top part of the original preheader. BasicBlock *Pred = OrigPH->getSinglePredecessor(); assert(Pred && "Preheader does not have a single predecessor"); BasicBlock *ExitBlock = L->getExitBlock(); assert(ExitBlock && "No single exit block"); Loop *NewLoop; assert(!PartitionContainer.empty() && "at least two partitions expected"); // We're cloning the preheader along with the loop so we already made sure // it was empty. assert(&*OrigPH->begin() == OrigPH->getTerminator() && "preheader not empty"); // Create a loop for each partition except the last. Clone the original // loop before PH along with adding a preheader for the cloned loop. Then // update PH to point to the newly added preheader. BasicBlock *TopPH = OrigPH; unsigned Index = getSize() - 1; for (auto I = std::next(PartitionContainer.rbegin()), E = PartitionContainer.rend(); I != E; ++I, --Index, TopPH = NewLoop->getLoopPreheader()) { auto *Part = &*I; NewLoop = Part->cloneLoopWithPreheader(TopPH, Pred, Index, LI, DT); Part->getVMap()[ExitBlock] = TopPH; Part->remapInstructions(); } Pred->getTerminator()->replaceUsesOfWith(OrigPH, TopPH); // Now go in forward order and update the immediate dominator for the // preheaders with the exiting block of the previous loop. Dominance // within the loop is updated in cloneLoopWithPreheader. 
for (auto Curr = PartitionContainer.cbegin(), Next = std::next(PartitionContainer.cbegin()), E = PartitionContainer.cend(); Next != E; ++Curr, ++Next) DT->changeImmediateDominator( Next->getDistributedLoop()->getLoopPreheader(), Curr->getDistributedLoop()->getExitingBlock()); } /// \brief Removes the dead instructions from the cloned loops. void removeUnusedInsts() { for (auto &Partition : PartitionContainer) Partition.removeUnusedInsts(); } /// \brief For each memory pointer, it computes the partitionId the pointer is /// used in. /// /// This returns an array of int where the I-th entry corresponds to I-th /// entry in LAI.getRuntimePointerCheck(). If the pointer is used in multiple /// partitions its entry is set to -1. SmallVector<int, 8> computePartitionSetForPointers(const LoopAccessInfo &LAI) { const RuntimePointerChecking *RtPtrCheck = LAI.getRuntimePointerChecking(); unsigned N = RtPtrCheck->Pointers.size(); SmallVector<int, 8> PtrToPartitions(N); for (unsigned I = 0; I < N; ++I) { Value *Ptr = RtPtrCheck->Pointers[I].PointerValue; auto Instructions = LAI.getInstructionsForAccess(Ptr, RtPtrCheck->Pointers[I].IsWritePtr); int &Partition = PtrToPartitions[I]; // First set it to uninitialized. Partition = -2; for (Instruction *Inst : Instructions) { // Note that this could be -1 if Inst is duplicated across multiple // partitions. int ThisPartition = this->InstToPartitionId[Inst]; if (Partition == -2) Partition = ThisPartition; // -1 means belonging to multiple partitions. else if (Partition == -1) break; else if (Partition != (int)ThisPartition) Partition = -1; } assert(Partition != -2 && "Pointer not belonging to any partition"); } return PtrToPartitions; } void print(raw_ostream &OS) const { unsigned Index = 0; for (const auto &P : PartitionContainer) { OS << "Partition " << Index++ << " (" << &P << "):\n"; P.print(); } } void dump() const { print(dbgs()); } #ifndef NDEBUG friend raw_ostream &operator<<(raw_ostream &OS, const InstPartitionContainer &Partitions) { Partitions.print(OS); return OS; } #endif void printBlocks() const { unsigned Index = 0; for (const auto &P : PartitionContainer) { dbgs() << "\nPartition " << Index++ << " (" << &P << "):\n"; P.printBlocks(); } } private: typedef std::list<InstPartition> PartitionContainerT; /// \brief List of partitions. PartitionContainerT PartitionContainer; /// \brief Mapping from Instruction to partition Id. If the instruction /// belongs to multiple partitions the entry contains -1. InstToPartitionIdT InstToPartitionId; Loop *L; LoopInfo *LI; DominatorTree *DT; /// \brief The control structure to merge adjacent partitions if both satisfy /// the \p Predicate. template <class UnaryPredicate> void mergeAdjacentPartitionsIf(UnaryPredicate Predicate) { InstPartition *PrevMatch = nullptr; for (auto I = PartitionContainer.begin(); I != PartitionContainer.end();) { auto DoesMatch = Predicate(&*I); if (PrevMatch == nullptr && DoesMatch) { PrevMatch = &*I; ++I; } else if (PrevMatch != nullptr && DoesMatch) { I->moveTo(*PrevMatch); I = PartitionContainer.erase(I); } else { PrevMatch = nullptr; ++I; } } } }; /// \brief For each memory instruction, this class maintains difference of the /// number of unsafe dependences that start out from this instruction minus /// those that end here. /// /// By traversing the memory instructions in program order and accumulating this /// number, we know whether any unsafe dependence crosses over a program point. 
class MemoryInstructionDependences { typedef MemoryDepChecker::Dependence Dependence; public: struct Entry { Instruction *Inst; unsigned NumUnsafeDependencesStartOrEnd; Entry(Instruction *Inst) : Inst(Inst), NumUnsafeDependencesStartOrEnd(0) {} }; typedef SmallVector<Entry, 8> AccessesType; AccessesType::const_iterator begin() const { return Accesses.begin(); } AccessesType::const_iterator end() const { return Accesses.end(); } MemoryInstructionDependences( const SmallVectorImpl<Instruction *> &Instructions, const SmallVectorImpl<Dependence> &InterestingDependences) { Accesses.append(Instructions.begin(), Instructions.end()); DEBUG(dbgs() << "Backward dependences:\n"); for (auto &Dep : InterestingDependences) if (Dep.isPossiblyBackward()) { // Note that the designations source and destination follow the program // order, i.e. source is always first. (The direction is given by the // DepType.) ++Accesses[Dep.Source].NumUnsafeDependencesStartOrEnd; --Accesses[Dep.Destination].NumUnsafeDependencesStartOrEnd; DEBUG(Dep.print(dbgs(), 2, Instructions)); } } private: AccessesType Accesses; }; /// \brief Returns the instructions that use values defined in the loop. static SmallVector<Instruction *, 8> findDefsUsedOutsideOfLoop(Loop *L) { SmallVector<Instruction *, 8> UsedOutside; for (auto *Block : L->getBlocks()) // FIXME: I believe that this could use copy_if if the Inst reference could // be adapted into a pointer. for (auto &Inst : *Block) { auto Users = Inst.users(); if (std::any_of(Users.begin(), Users.end(), [&](User *U) { auto *Use = cast<Instruction>(U); return !L->contains(Use->getParent()); })) UsedOutside.push_back(&Inst); } return UsedOutside; } /// \brief The pass class. class LoopDistribute : public FunctionPass { public: LoopDistribute() : FunctionPass(ID) { initializeLoopDistributePass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function &F) override { LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); LAA = &getAnalysis<LoopAccessAnalysis>(); DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); // Build up a worklist of inner-loops to vectorize. This is necessary as the // act of distributing a loop creates new loops and can invalidate iterators // across the loops. SmallVector<Loop *, 8> Worklist; for (Loop *TopLevelLoop : *LI) for (Loop *L : depth_first(TopLevelLoop)) // We only handle inner-most loops. if (L->empty()) Worklist.push_back(L); // Now walk the identified inner loops. bool Changed = false; for (Loop *L : Worklist) Changed |= processLoop(L); // Process each loop nest in the function. return Changed; } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<LoopInfoWrapperPass>(); AU.addPreserved<LoopInfoWrapperPass>(); AU.addRequired<LoopAccessAnalysis>(); AU.addRequired<DominatorTreeWrapperPass>(); AU.addPreserved<DominatorTreeWrapperPass>(); } static char ID; private: /// \brief Try to distribute an inner-most loop. bool processLoop(Loop *L) { assert(L->empty() && "Only process inner loops."); DEBUG(dbgs() << "\nLDist: In \"" << L->getHeader()->getParent()->getName() << "\" checking " << *L << "\n"); BasicBlock *PH = L->getLoopPreheader(); if (!PH) { DEBUG(dbgs() << "Skipping; no preheader"); return false; } if (!L->getExitBlock()) { DEBUG(dbgs() << "Skipping; multiple exit blocks"); return false; } // LAA will check that we only have a single exiting block. 
const LoopAccessInfo &LAI = LAA->getInfo(L, ValueToValueMap()); // Currently, we only distribute to isolate the part of the loop with // dependence cycles to enable partial vectorization. if (LAI.canVectorizeMemory()) { DEBUG(dbgs() << "Skipping; memory operations are safe for vectorization"); return false; } auto *InterestingDependences = LAI.getDepChecker().getInterestingDependences(); if (!InterestingDependences || InterestingDependences->empty()) { DEBUG(dbgs() << "Skipping; No unsafe dependences to isolate"); return false; } InstPartitionContainer Partitions(L, LI, DT); // First, go through each memory operation and assign them to consecutive // partitions (the order of partitions follows program order). Put those // with unsafe dependences into "cyclic" partition otherwise put each store // in its own "non-cyclic" partition (we'll merge these later). // // Note that a memory operation (e.g. Load2 below) at a program point that // has an unsafe dependence (Store3->Load1) spanning over it must be // included in the same cyclic partition as the dependent operations. This // is to preserve the original program order after distribution. E.g.: // // NumUnsafeDependencesStartOrEnd NumUnsafeDependencesActive // Load1 -. 1 0->1 // Load2 | /Unsafe/ 0 1 // Store3 -' -1 1->0 // Load4 0 0 // // NumUnsafeDependencesActive > 0 indicates this situation and in this case // we just keep assigning to the same cyclic partition until // NumUnsafeDependencesActive reaches 0. const MemoryDepChecker &DepChecker = LAI.getDepChecker(); MemoryInstructionDependences MID(DepChecker.getMemoryInstructions(), *InterestingDependences); int NumUnsafeDependencesActive = 0; for (auto &InstDep : MID) { Instruction *I = InstDep.Inst; // We update NumUnsafeDependencesActive post-instruction, catch the // start of a dependence directly via NumUnsafeDependencesStartOrEnd. if (NumUnsafeDependencesActive || InstDep.NumUnsafeDependencesStartOrEnd > 0) Partitions.addToCyclicPartition(I); else Partitions.addToNewNonCyclicPartition(I); NumUnsafeDependencesActive += InstDep.NumUnsafeDependencesStartOrEnd; assert(NumUnsafeDependencesActive >= 0 && "Negative number of dependences active"); } // Add partitions for values used outside. These partitions can be out of // order from the original program order. This is OK because if the // partition uses a load we will merge this partition with the original // partition of the load that we set up in the previous loop (see // mergeToAvoidDuplicatedLoads). auto DefsUsedOutside = findDefsUsedOutsideOfLoop(L); for (auto *Inst : DefsUsedOutside) Partitions.addToNewNonCyclicPartition(Inst); DEBUG(dbgs() << "Seeded partitions:\n" << Partitions); if (Partitions.getSize() < 2) return false; // Run the merge heuristics: Merge non-cyclic adjacent partitions since we // should be able to vectorize these together. Partitions.mergeBeforePopulating(); DEBUG(dbgs() << "\nMerged partitions:\n" << Partitions); if (Partitions.getSize() < 2) return false; // Now, populate the partitions with non-memory operations. Partitions.populateUsedSet(); DEBUG(dbgs() << "\nPopulated partitions:\n" << Partitions); // In order to preserve original lexical order for loads, keep them in the // partition that we set up in the MemoryInstructionDependences loop. 
    if (Partitions.mergeToAvoidDuplicatedLoads()) {
      DEBUG(dbgs() << "\nPartitions merged to ensure unique loads:\n"
                   << Partitions);
      if (Partitions.getSize() < 2)
        return false;
    }

    DEBUG(dbgs() << "\nDistributing loop: " << *L << "\n");
    // We're done forming the partitions; set up the reverse mapping from
    // instructions to partitions.
    Partitions.setupPartitionIdOnInstructions();

    // To keep things simple, have an empty preheader before we version or clone
    // the loop.  (Also split if this has no predecessor, i.e. entry, because we
    // rely on PH having a predecessor.)
    if (!PH->getSinglePredecessor() || &*PH->begin() != PH->getTerminator())
      SplitBlock(PH, PH->getTerminator(), DT, LI);

    // If we need run-time checks to disambiguate pointers at run-time, version
    // the loop now.
    auto PtrToPartition = Partitions.computePartitionSetForPointers(LAI);
    LoopVersioning LVer(LAI, L, LI, DT, &PtrToPartition);
    if (LVer.needsRuntimeChecks()) {
      DEBUG(dbgs() << "\nPointers:\n");
      DEBUG(LAI.getRuntimePointerChecking()->print(dbgs(), 0, &PtrToPartition));
      LVer.versionLoop(this);
      LVer.addPHINodes(DefsUsedOutside);
    }

    // Create identical copies of the original loop for each partition and hook
    // them up sequentially.
    Partitions.cloneLoops(this);

    // Now, we remove the instructions from each loop that don't belong to that
    // partition.
    Partitions.removeUnusedInsts();
    DEBUG(dbgs() << "\nAfter removing unused Instrs:\n");
    DEBUG(Partitions.printBlocks());

    if (LDistVerify) {
      LI->verify();
      DT->verifyDomTree();
    }

    ++NumLoopsDistributed;
    return true;
  }

  // Analyses used.
  LoopInfo *LI;
  LoopAccessAnalysis *LAA;
  DominatorTree *DT;
};
} // anonymous namespace

char LoopDistribute::ID;
static const char ldist_name[] = "Loop Distribution";

INITIALIZE_PASS_BEGIN(LoopDistribute, LDIST_NAME, ldist_name, false, false)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopAccessAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(LoopDistribute, LDIST_NAME, ldist_name, false, false)

namespace llvm {
FunctionPass *createLoopDistributePass() { return new LoopDistribute(); }
}
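// A minimal, self-contained sketch (not part of the pass above) of the
// partition-seeding heuristic that processLoop applies: walk the memory
// accesses in program order, accumulate the per-access "unsafe dependence
// starts minus ends" counter, and keep assigning to the cyclic partition while
// any unsafe dependence is still open.  The Access struct and partition tags
// below are simplified stand-ins for MemoryInstructionDependences::Entry and
// InstPartition; they are not the real LLVM types.
#include <cstdio>
#include <vector>

namespace {

struct Access {
  const char *Name;
  // +1 for each unsafe dependence starting here, -1 for each ending here.
  int NumUnsafeDependencesStartOrEnd;
};

enum class PartitionKind { Cyclic, NonCyclic };

struct Assignment {
  const char *Name;
  PartitionKind Kind;
};

std::vector<Assignment> seedPartitions(const std::vector<Access> &Accesses) {
  std::vector<Assignment> Result;
  int NumUnsafeDependencesActive = 0;
  for (const Access &A : Accesses) {
    // If a dependence is still open, or one starts at this access, the access
    // must stay in the cyclic partition to preserve program order.
    bool Cyclic =
        NumUnsafeDependencesActive > 0 || A.NumUnsafeDependencesStartOrEnd > 0;
    Result.push_back({A.Name, Cyclic ? PartitionKind::Cyclic
                                     : PartitionKind::NonCyclic});
    NumUnsafeDependencesActive += A.NumUnsafeDependencesStartOrEnd;
  }
  return Result;
}

} // namespace

int main() {
  // The Load1/Load2/Store3/Load4 example from the comment in processLoop: the
  // Store3->Load1 backward dependence keeps Load2 in the cyclic partition.
  std::vector<Access> Accesses = {
      {"Load1", +1}, {"Load2", 0}, {"Store3", -1}, {"Load4", 0}};
  for (const Assignment &A : seedPartitions(Accesses))
    std::printf("%s -> %s\n", A.Name,
                A.Kind == PartitionKind::Cyclic ? "cyclic" : "non-cyclic");
  return 0;
}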
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/PlaceSafepoints.cpp
//===- PlaceSafepoints.cpp - Place GC Safepoints --------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Place garbage collection safepoints at appropriate locations in the IR. This // does not make relocation semantics or variable liveness explicit. That's // done by RewriteStatepointsForGC. // // Terminology: // - A call is said to be "parseable" if there is a stack map generated for the // return PC of the call. A runtime can determine where values listed in the // deopt arguments and (after RewriteStatepointsForGC) gc arguments are located // on the stack when the code is suspended inside such a call. Every parse // point is represented by a call wrapped in an gc.statepoint intrinsic. // - A "poll" is an explicit check in the generated code to determine if the // runtime needs the generated code to cooperate by calling a helper routine // and thus suspending its execution at a known state. The call to the helper // routine will be parseable. The (gc & runtime specific) logic of a poll is // assumed to be provided in a function of the name "gc.safepoint_poll". // // We aim to insert polls such that running code can quickly be brought to a // well defined state for inspection by the collector. In the current // implementation, this is done via the insertion of poll sites at method entry // and the backedge of most loops. We try to avoid inserting more polls than // are neccessary to ensure a finite period between poll sites. This is not // because the poll itself is expensive in the generated code; it's not. Polls // do tend to impact the optimizer itself in negative ways; we'd like to avoid // perturbing the optimization of the method as much as we can. // // We also need to make most call sites parseable. The callee might execute a // poll (or otherwise be inspected by the GC). If so, the entire stack // (including the suspended frame of the current method) must be parseable. // // This pass will insert: // - Call parse points ("call safepoints") for any call which may need to // reach a safepoint during the execution of the callee function. // - Backedge safepoint polls and entry safepoint polls to ensure that // executing code reaches a safepoint poll in a finite amount of time. // // We do not currently support return statepoints, but adding them would not // be hard. They are not required for correctness - entry safepoints are an // alternative - but some GCs may prefer them. Patches welcome. 
// //===----------------------------------------------------------------------===// #include "llvm/Pass.h" #include "llvm/IR/LegacyPassManager.h" #include "llvm/ADT/SetOperations.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/ADT/StringRef.h" #include "llvm/Analysis/LoopPass.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/ScalarEvolutionExpressions.h" #include "llvm/Analysis/CFG.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/InstIterator.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/IR/Statepoint.h" #include "llvm/IR/Value.h" #include "llvm/IR/Verifier.h" #include "llvm/Support/Debug.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Cloning.h" #include "llvm/Transforms/Utils/Local.h" #define DEBUG_TYPE "safepoint-placement" STATISTIC(NumEntrySafepoints, "Number of entry safepoints inserted"); STATISTIC(NumCallSafepoints, "Number of call safepoints inserted"); STATISTIC(NumBackedgeSafepoints, "Number of backedge safepoints inserted"); STATISTIC(CallInLoop, "Number of loops w/o safepoints due to calls in loop"); STATISTIC(FiniteExecution, "Number of loops w/o safepoints finite execution"); using namespace llvm; // Ignore oppurtunities to avoid placing safepoints on backedges, useful for // validation static cl::opt<bool> AllBackedges("spp-all-backedges", cl::Hidden, cl::init(false)); /// If true, do not place backedge safepoints in counted loops. static cl::opt<bool> SkipCounted("spp-counted", cl::Hidden, cl::init(true)); // If true, split the backedge of a loop when placing the safepoint, otherwise // split the latch block itself. Both are useful to support for // experimentation, but in practice, it looks like splitting the backedge // optimizes better. static cl::opt<bool> SplitBackedge("spp-split-backedge", cl::Hidden, cl::init(false)); // Print tracing output static cl::opt<bool> TraceLSP("spp-trace", cl::Hidden, cl::init(false)); namespace { /// An analysis pass whose purpose is to identify each of the backedges in /// the function which require a safepoint poll to be inserted. struct PlaceBackedgeSafepointsImpl : public FunctionPass { static char ID; /// The output of the pass - gives a list of each backedge (described by /// pointing at the branch) which need a poll inserted. std::vector<TerminatorInst *> PollLocations; /// True unless we're running spp-no-calls in which case we need to disable /// the call dependend placement opts. 
bool CallSafepointsEnabled; ScalarEvolution *SE = nullptr; DominatorTree *DT = nullptr; LoopInfo *LI = nullptr; PlaceBackedgeSafepointsImpl(bool CallSafepoints = false) : FunctionPass(ID), CallSafepointsEnabled(CallSafepoints) { initializePlaceBackedgeSafepointsImplPass(*PassRegistry::getPassRegistry()); } bool runOnLoop(Loop *); void runOnLoopAndSubLoops(Loop *L) { // Visit all the subloops for (auto I = L->begin(), E = L->end(); I != E; I++) runOnLoopAndSubLoops(*I); runOnLoop(L); } bool runOnFunction(Function &F) override { SE = &getAnalysis<ScalarEvolution>(); DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); for (auto I = LI->begin(), E = LI->end(); I != E; I++) { runOnLoopAndSubLoops(*I); } return false; } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<DominatorTreeWrapperPass>(); AU.addRequired<ScalarEvolution>(); AU.addRequired<LoopInfoWrapperPass>(); // We no longer modify the IR at all in this pass. Thus all // analysis are preserved. AU.setPreservesAll(); } }; } static cl::opt<bool> NoEntry("spp-no-entry", cl::Hidden, cl::init(false)); static cl::opt<bool> NoCall("spp-no-call", cl::Hidden, cl::init(false)); static cl::opt<bool> NoBackedge("spp-no-backedge", cl::Hidden, cl::init(false)); namespace { struct PlaceSafepoints : public FunctionPass { static char ID; // Pass identification, replacement for typeid PlaceSafepoints() : FunctionPass(ID) { initializePlaceSafepointsPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function &F) override; void getAnalysisUsage(AnalysisUsage &AU) const override { // We modify the graph wholesale (inlining, block insertion, etc). We // preserve nothing at the moment. We could potentially preserve dom tree // if that was worth doing } }; } // Insert a safepoint poll immediately before the given instruction. Does // not handle the parsability of state at the runtime call, that's the // callers job. static void InsertSafepointPoll(Instruction *InsertBefore, std::vector<CallSite> &ParsePointsNeeded /*rval*/); static bool isGCLeafFunction(const CallSite &CS); static bool needsStatepoint(const CallSite &CS) { if (isGCLeafFunction(CS)) return false; if (CS.isCall()) { CallInst *call = cast<CallInst>(CS.getInstruction()); if (call->isInlineAsm()) return false; } if (isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS)) { return false; } return true; } static Value *ReplaceWithStatepoint(const CallSite &CS, Pass *P); /// Returns true if this loop is known to contain a call safepoint which /// must unconditionally execute on any iteration of the loop which returns /// to the loop header via an edge from Pred. Returns a conservative correct /// answer; i.e. false is always valid. static bool containsUnconditionalCallSafepoint(Loop *L, BasicBlock *Header, BasicBlock *Pred, DominatorTree &DT) { // In general, we're looking for any cut of the graph which ensures // there's a call safepoint along every edge between Header and Pred. // For the moment, we look only for the 'cuts' that consist of a single call // instruction in a block which is dominated by the Header and dominates the // loop latch (Pred) block. Somewhat surprisingly, walking the entire chain // of such dominating blocks gets substaintially more occurences than just // checking the Pred and Header blocks themselves. This may be due to the // density of loop exit conditions caused by range and null checks. 
// TODO: structure this as an analysis pass, cache the result for subloops, // avoid dom tree recalculations assert(DT.dominates(Header, Pred) && "loop latch not dominated by header?"); BasicBlock *Current = Pred; while (true) { for (Instruction &I : *Current) { if (auto CS = CallSite(&I)) // Note: Technically, needing a safepoint isn't quite the right // condition here. We should instead be checking if the target method // has an // unconditional poll. In practice, this is only a theoretical concern // since we don't have any methods with conditional-only safepoint // polls. if (needsStatepoint(CS)) return true; } if (Current == Header) break; Current = DT.getNode(Current)->getIDom()->getBlock(); } return false; } /// Returns true if this loop is known to terminate in a finite number of /// iterations. Note that this function may return false for a loop which /// does actual terminate in a finite constant number of iterations due to /// conservatism in the analysis. static bool mustBeFiniteCountedLoop(Loop *L, ScalarEvolution *SE, BasicBlock *Pred) { // Only used when SkipCounted is off const unsigned upperTripBound = 8192; // A conservative bound on the loop as a whole. const SCEV *MaxTrips = SE->getMaxBackedgeTakenCount(L); if (MaxTrips != SE->getCouldNotCompute()) { if (SE->getUnsignedRange(MaxTrips).getUnsignedMax().ult(upperTripBound)) return true; if (SkipCounted && SE->getUnsignedRange(MaxTrips).getUnsignedMax().isIntN(32)) return true; } // If this is a conditional branch to the header with the alternate path // being outside the loop, we can ask questions about the execution frequency // of the exit block. if (L->isLoopExiting(Pred)) { // This returns an exact expression only. TODO: We really only need an // upper bound here, but SE doesn't expose that. const SCEV *MaxExec = SE->getExitCount(L, Pred); if (MaxExec != SE->getCouldNotCompute()) { if (SE->getUnsignedRange(MaxExec).getUnsignedMax().ult(upperTripBound)) return true; if (SkipCounted && SE->getUnsignedRange(MaxExec).getUnsignedMax().isIntN(32)) return true; } } return /* not finite */ false; } static void scanOneBB(Instruction *start, Instruction *end, std::vector<CallInst *> &calls, std::set<BasicBlock *> &seen, std::vector<BasicBlock *> &worklist) { for (BasicBlock::iterator itr(start); itr != start->getParent()->end() && itr != BasicBlock::iterator(end); itr++) { if (CallInst *CI = dyn_cast<CallInst>(&*itr)) { calls.push_back(CI); } // FIXME: This code does not handle invokes assert(!dyn_cast<InvokeInst>(&*itr) && "support for invokes in poll code needed"); // Only add the successor blocks if we reach the terminator instruction // without encountering end first if (itr->isTerminator()) { BasicBlock *BB = itr->getParent(); for (BasicBlock *Succ : successors(BB)) { if (seen.count(Succ) == 0) { worklist.push_back(Succ); seen.insert(Succ); } } } } } static void scanInlinedCode(Instruction *start, Instruction *end, std::vector<CallInst *> &calls, std::set<BasicBlock *> &seen) { calls.clear(); std::vector<BasicBlock *> worklist; seen.insert(start->getParent()); scanOneBB(start, end, calls, seen, worklist); while (!worklist.empty()) { BasicBlock *BB = worklist.back(); worklist.pop_back(); scanOneBB(&*BB->begin(), end, calls, seen, worklist); } } bool PlaceBackedgeSafepointsImpl::runOnLoop(Loop *L) { // Loop through all loop latches (branches controlling backedges). We need // to place a safepoint on every backedge (potentially). 
// Note: In common usage, there will be only one edge due to LoopSimplify // having run sometime earlier in the pipeline, but this code must be correct // w.r.t. loops with multiple backedges. BasicBlock *header = L->getHeader(); SmallVector<BasicBlock*, 16> LoopLatches; L->getLoopLatches(LoopLatches); for (BasicBlock *pred : LoopLatches) { assert(L->contains(pred)); // Make a policy decision about whether this loop needs a safepoint or // not. Note that this is about unburdening the optimizer in loops, not // avoiding the runtime cost of the actual safepoint. if (!AllBackedges) { if (mustBeFiniteCountedLoop(L, SE, pred)) { if (TraceLSP) errs() << "skipping safepoint placement in finite loop\n"; FiniteExecution++; continue; } if (CallSafepointsEnabled && containsUnconditionalCallSafepoint(L, header, pred, *DT)) { // Note: This is only semantically legal since we won't do any further // IPO or inlining before the actual call insertion.. If we hadn't, we // might latter loose this call safepoint. if (TraceLSP) errs() << "skipping safepoint placement due to unconditional call\n"; CallInLoop++; continue; } } // TODO: We can create an inner loop which runs a finite number of // iterations with an outer loop which contains a safepoint. This would // not help runtime performance that much, but it might help our ability to // optimize the inner loop. // Safepoint insertion would involve creating a new basic block (as the // target of the current backedge) which does the safepoint (of all live // variables) and branches to the true header TerminatorInst *term = pred->getTerminator(); if (TraceLSP) { errs() << "[LSP] terminator instruction: "; term->dump(); } PollLocations.push_back(term); } return false; } /// Returns true if an entry safepoint is not required before this callsite in /// the caller function. static bool doesNotRequireEntrySafepointBefore(const CallSite &CS) { Instruction *Inst = CS.getInstruction(); if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) { switch (II->getIntrinsicID()) { case Intrinsic::experimental_gc_statepoint: case Intrinsic::experimental_patchpoint_void: case Intrinsic::experimental_patchpoint_i64: // The can wrap an actual call which may grow the stack by an unbounded // amount or run forever. return false; default: // Most LLVM intrinsics are things which do not expand to actual calls, or // at least if they do, are leaf functions that cause only finite stack // growth. In particular, the optimizer likes to form things like memsets // out of stores in the original IR. Another important example is // llvm.localescape which must occur in the entry block. Inserting a // safepoint before it is not legal since it could push the localescape // out of the entry block. return true; } } return false; } static Instruction *findLocationForEntrySafepoint(Function &F, DominatorTree &DT) { // Conceptually, this poll needs to be on method entry, but in // practice, we place it as late in the entry block as possible. We // can place it as late as we want as long as it dominates all calls // that can grow the stack. This, combined with backedge polls, // give us all the progress guarantees we need. // hasNextInstruction and nextInstruction are used to iterate // through a "straight line" execution sequence. 
auto hasNextInstruction = [](Instruction *I) { if (!I->isTerminator()) { return true; } BasicBlock *nextBB = I->getParent()->getUniqueSuccessor(); return nextBB && (nextBB->getUniquePredecessor() != nullptr); }; auto nextInstruction = [&hasNextInstruction](Instruction *I) { assert(hasNextInstruction(I) && "first check if there is a next instruction!"); (void)hasNextInstruction; // HLSL Change - unused var if (I->isTerminator()) { return I->getParent()->getUniqueSuccessor()->begin(); } else { return std::next(BasicBlock::iterator(I)); } }; Instruction *cursor = nullptr; for (cursor = F.getEntryBlock().begin(); hasNextInstruction(cursor); cursor = nextInstruction(cursor)) { // We need to ensure a safepoint poll occurs before any 'real' call. The // easiest way to ensure finite execution between safepoints in the face of // recursive and mutually recursive functions is to enforce that each take // a safepoint. Additionally, we need to ensure a poll before any call // which can grow the stack by an unbounded amount. This isn't required // for GC semantics per se, but is a common requirement for languages // which detect stack overflow via guard pages and then throw exceptions. if (auto CS = CallSite(cursor)) { if (doesNotRequireEntrySafepointBefore(CS)) continue; break; } } assert((hasNextInstruction(cursor) || cursor->isTerminator()) && "either we stopped because of a call, or because of terminator"); return cursor; } /// Identify the list of call sites which need to be have parseable state static void findCallSafepoints(Function &F, std::vector<CallSite> &Found /*rval*/) { assert(Found.empty() && "must be empty!"); for (Instruction &I : inst_range(F)) { Instruction *inst = &I; if (isa<CallInst>(inst) || isa<InvokeInst>(inst)) { CallSite CS(inst); // No safepoint needed or wanted if (!needsStatepoint(CS)) { continue; } Found.push_back(CS); } } } /// Implement a unique function which doesn't require we sort the input /// vector. Doing so has the effect of changing the output of a couple of /// tests in ways which make them less useful in testing fused safepoints. template <typename T> static void unique_unsorted(std::vector<T> &vec) { std::set<T> seen; std::vector<T> tmp; vec.reserve(vec.size()); std::swap(tmp, vec); for (auto V : tmp) { if (seen.insert(V).second) { vec.push_back(V); } } } static const char *const GCSafepointPollName = "gc.safepoint_poll"; static bool isGCSafepointPoll(Function &F) { return F.getName().equals(GCSafepointPollName); } /// Returns true if this function should be rewritten to include safepoint /// polls and parseable call sites. The main point of this function is to be /// an extension point for custom logic. static bool shouldRewriteFunction(Function &F) { // TODO: This should check the GCStrategy if (F.hasGC()) { const char *FunctionGCName = F.getGC(); const StringRef StatepointExampleName("statepoint-example"); const StringRef CoreCLRName("coreclr"); return (StatepointExampleName == FunctionGCName) || (CoreCLRName == FunctionGCName); } else return false; } // TODO: These should become properties of the GCStrategy, possibly with // command line overrides. static bool enableEntrySafepoints(Function &F) { return !NoEntry; } static bool enableBackedgeSafepoints(Function &F) { return !NoBackedge; } static bool enableCallSafepoints(Function &F) { return !NoCall; } // Normalize basic block to make it ready to be target of invoke statepoint. // Ensure that 'BB' does not have phi nodes. It may require spliting it. 
static BasicBlock *normalizeForInvokeSafepoint(BasicBlock *BB, BasicBlock *InvokeParent) { BasicBlock *ret = BB; if (!BB->getUniquePredecessor()) { ret = SplitBlockPredecessors(BB, InvokeParent, ""); } // Now that 'ret' has unique predecessor we can safely remove all phi nodes // from it FoldSingleEntryPHINodes(ret); assert(!isa<PHINode>(ret->begin())); return ret; } bool PlaceSafepoints::runOnFunction(Function &F) { if (F.isDeclaration() || F.empty()) { // This is a declaration, nothing to do. Must exit early to avoid crash in // dom tree calculation return false; } if (isGCSafepointPoll(F)) { // Given we're inlining this inside of safepoint poll insertion, this // doesn't make any sense. Note that we do make any contained calls // parseable after we inline a poll. return false; } if (!shouldRewriteFunction(F)) return false; bool modified = false; // In various bits below, we rely on the fact that uses are reachable from // defs. When there are basic blocks unreachable from the entry, dominance // and reachablity queries return non-sensical results. Thus, we preprocess // the function to ensure these properties hold. modified |= removeUnreachableBlocks(F); // STEP 1 - Insert the safepoint polling locations. We do not need to // actually insert parse points yet. That will be done for all polls and // calls in a single pass. DominatorTree DT; DT.recalculate(F); SmallVector<Instruction *, 16> PollsNeeded; std::vector<CallSite> ParsePointNeeded; if (enableBackedgeSafepoints(F)) { // Construct a pass manager to run the LoopPass backedge logic. We // need the pass manager to handle scheduling all the loop passes // appropriately. Doing this by hand is painful and just not worth messing // with for the moment. legacy::FunctionPassManager FPM(F.getParent()); bool CanAssumeCallSafepoints = enableCallSafepoints(F); PlaceBackedgeSafepointsImpl *PBS = new PlaceBackedgeSafepointsImpl(CanAssumeCallSafepoints); FPM.add(PBS); FPM.run(F); // We preserve dominance information when inserting the poll, otherwise // we'd have to recalculate this on every insert DT.recalculate(F); auto &PollLocations = PBS->PollLocations; auto OrderByBBName = [](Instruction *a, Instruction *b) { return a->getParent()->getName() < b->getParent()->getName(); }; // We need the order of list to be stable so that naming ends up stable // when we split edges. This makes test cases much easier to write. std::sort(PollLocations.begin(), PollLocations.end(), OrderByBBName); // We can sometimes end up with duplicate poll locations. This happens if // a single loop is visited more than once. The fact this happens seems // wrong, but it does happen for the split-backedge.ll test case. PollLocations.erase(std::unique(PollLocations.begin(), PollLocations.end()), PollLocations.end()); // Insert a poll at each point the analysis pass identified // The poll location must be the terminator of a loop latch block. for (TerminatorInst *Term : PollLocations) { // We are inserting a poll, the function is modified modified = true; if (SplitBackedge) { // Split the backedge of the loop and insert the poll within that new // basic block. This creates a loop with two latches per original // latch (which is non-ideal), but this appears to be easier to // optimize in practice than inserting the poll immediately before the // latch test. // Since this is a latch, at least one of the successors must dominate // it. Its possible that we have a) duplicate edges to the same header // and b) edges to distinct loop headers. We need to insert pools on // each. 
SetVector<BasicBlock *> Headers; for (unsigned i = 0; i < Term->getNumSuccessors(); i++) { BasicBlock *Succ = Term->getSuccessor(i); if (DT.dominates(Succ, Term->getParent())) { Headers.insert(Succ); } } assert(!Headers.empty() && "poll location is not a loop latch?"); // The split loop structure here is so that we only need to recalculate // the dominator tree once. Alternatively, we could just keep it up to // date and use a more natural merged loop. SetVector<BasicBlock *> SplitBackedges; for (BasicBlock *Header : Headers) { BasicBlock *NewBB = SplitEdge(Term->getParent(), Header, &DT); PollsNeeded.push_back(NewBB->getTerminator()); NumBackedgeSafepoints++; } } else { // Split the latch block itself, right before the terminator. PollsNeeded.push_back(Term); NumBackedgeSafepoints++; } } } if (enableEntrySafepoints(F)) { Instruction *Location = findLocationForEntrySafepoint(F, DT); if (!Location) { // policy choice not to insert? } else { PollsNeeded.push_back(Location); modified = true; NumEntrySafepoints++; } } // Now that we've identified all the needed safepoint poll locations, insert // safepoint polls themselves. for (Instruction *PollLocation : PollsNeeded) { std::vector<CallSite> RuntimeCalls; InsertSafepointPoll(PollLocation, RuntimeCalls); ParsePointNeeded.insert(ParsePointNeeded.end(), RuntimeCalls.begin(), RuntimeCalls.end()); } PollsNeeded.clear(); // make sure we don't accidentally use // The dominator tree has been invalidated by the inlining performed in the // above loop. TODO: Teach the inliner how to update the dom tree? DT.recalculate(F); if (enableCallSafepoints(F)) { std::vector<CallSite> Calls; findCallSafepoints(F, Calls); NumCallSafepoints += Calls.size(); ParsePointNeeded.insert(ParsePointNeeded.end(), Calls.begin(), Calls.end()); } // Unique the vectors since we can end up with duplicates if we scan the call // site for call safepoints after we add it for entry or backedge. The // only reason we need tracking at all is that some functions might have // polls but not call safepoints and thus we might miss marking the runtime // calls for the polls. (This is useful in test cases!) unique_unsorted(ParsePointNeeded); // Any parse point (no matter what source) will be handled here // We're about to start modifying the function if (!ParsePointNeeded.empty()) modified = true; // Now run through and insert the safepoints, but do _NOT_ update or remove // any existing uses. We have references to live variables that need to // survive to the last iteration of this loop. std::vector<Value *> Results; Results.reserve(ParsePointNeeded.size()); for (size_t i = 0; i < ParsePointNeeded.size(); i++) { CallSite &CS = ParsePointNeeded[i]; // For invoke statepoints we need to remove all phi nodes at the normal // destination block. // Reason for this is that we can place gc_result only after last phi node // in basic block. We will get malformed code after RAUW for the // gc_result if one of this phi nodes uses result from the invoke. 
if (InvokeInst *Invoke = dyn_cast<InvokeInst>(CS.getInstruction())) { normalizeForInvokeSafepoint(Invoke->getNormalDest(), Invoke->getParent()); } Value *GCResult = ReplaceWithStatepoint(CS, nullptr); Results.push_back(GCResult); } assert(Results.size() == ParsePointNeeded.size()); // Adjust all users of the old call sites to use the new ones instead for (size_t i = 0; i < ParsePointNeeded.size(); i++) { CallSite &CS = ParsePointNeeded[i]; Value *GCResult = Results[i]; if (GCResult) { // Can not RAUW for the invoke gc result in case of phi nodes preset. assert(CS.isCall() || !isa<PHINode>(cast<Instruction>(GCResult)->getParent()->begin())); // Replace all uses with the new call CS.getInstruction()->replaceAllUsesWith(GCResult); } // Now that we've handled all uses, remove the original call itself // Note: The insert point can't be the deleted instruction! CS.getInstruction()->eraseFromParent(); } return modified; } char PlaceBackedgeSafepointsImpl::ID = 0; char PlaceSafepoints::ID = 0; FunctionPass *llvm::createPlaceSafepointsPass() { return new PlaceSafepoints(); } INITIALIZE_PASS_BEGIN(PlaceBackedgeSafepointsImpl, "place-backedge-safepoints-impl", "Place Backedge Safepoints", false, false) INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_END(PlaceBackedgeSafepointsImpl, "place-backedge-safepoints-impl", "Place Backedge Safepoints", false, false) INITIALIZE_PASS_BEGIN(PlaceSafepoints, "place-safepoints", "Place Safepoints", false, false) INITIALIZE_PASS_END(PlaceSafepoints, "place-safepoints", "Place Safepoints", false, false) static bool isGCLeafFunction(const CallSite &CS) { Instruction *inst = CS.getInstruction(); if (isa<IntrinsicInst>(inst)) { // Most LLVM intrinsics are things which can never take a safepoint. // As a result, we don't need to have the stack parsable at the // callsite. This is a highly useful optimization since intrinsic // calls are fairly prevelent, particularly in debug builds. return true; } // If this function is marked explicitly as a leaf call, we don't need to // place a safepoint of it. In fact, for correctness we *can't* in many // cases. Note: Indirect calls return Null for the called function, // these obviously aren't runtime functions with attributes // TODO: Support attributes on the call site as well. const Function *F = CS.getCalledFunction(); bool isLeaf = F && F->getFnAttribute("gc-leaf-function").getValueAsString().equals("true"); if (isLeaf) { return true; } return false; } static void InsertSafepointPoll(Instruction *InsertBefore, std::vector<CallSite> &ParsePointsNeeded /*rval*/) { BasicBlock *OrigBB = InsertBefore->getParent(); Module *M = InsertBefore->getModule(); assert(M && "must be part of a module"); // Inline the safepoint poll implementation - this will get all the branch, // control flow, etc.. Most importantly, it will introduce the actual slow // path call - where we need to insert a safepoint (parsepoint). 
auto *F = M->getFunction(GCSafepointPollName); assert(F->getType()->getElementType() == FunctionType::get(Type::getVoidTy(M->getContext()), false) && "gc.safepoint_poll declared with wrong type"); assert(!F->empty() && "gc.safepoint_poll must be a non-empty function"); CallInst *PollCall = CallInst::Create(F, "", InsertBefore); // Record some information about the call site we're replacing BasicBlock::iterator before(PollCall), after(PollCall); bool isBegin(false); if (before == OrigBB->begin()) { isBegin = true; } else { before--; } after++; assert(after != OrigBB->end() && "must have successor"); // do the actual inlining InlineFunctionInfo IFI; bool InlineStatus = InlineFunction(PollCall, IFI); assert(InlineStatus && "inline must succeed"); (void)InlineStatus; // suppress warning in release-asserts // Check post conditions assert(IFI.StaticAllocas.empty() && "can't have allocs"); std::vector<CallInst *> calls; // new calls std::set<BasicBlock *> BBs; // new BBs + insertee // Include only the newly inserted instructions, Note: begin may not be valid // if we inserted to the beginning of the basic block BasicBlock::iterator start; if (isBegin) { start = OrigBB->begin(); } else { start = before; start++; } // If your poll function includes an unreachable at the end, that's not // valid. Bugpoint likes to create this, so check for it. assert(isPotentiallyReachable(&*start, &*after, nullptr, nullptr) && "malformed poll function"); scanInlinedCode(&*(start), &*(after), calls, BBs); assert(!calls.empty() && "slow path not found for safepoint poll"); // Record the fact we need a parsable state at the runtime call contained in // the poll function. This is required so that the runtime knows how to // parse the last frame when we actually take the safepoint (i.e. execute // the slow path) assert(ParsePointsNeeded.empty()); for (size_t i = 0; i < calls.size(); i++) { // No safepoint needed or wanted if (!needsStatepoint(calls[i])) { continue; } // These are likely runtime calls. Should we assert that via calling // convention or something? ParsePointsNeeded.push_back(CallSite(calls[i])); } assert(ParsePointsNeeded.size() <= calls.size()); } /// Replaces the given call site (Call or Invoke) with a gc.statepoint /// intrinsic with an empty deoptimization arguments list. This does /// NOT do explicit relocation for GC support. static Value *ReplaceWithStatepoint(const CallSite &CS, /* to replace */ Pass *P) { assert(CS.getInstruction()->getParent()->getParent()->getParent() && "must be set"); // TODO: technically, a pass is not allowed to get functions from within a // function pass since it might trigger a new function addition. Refactor // this logic out to the initialization of the pass. Doesn't appear to // matter in practice. // Then go ahead and use the builder do actually do the inserts. We insert // immediately before the previous instruction under the assumption that all // arguments will be available here. We can't insert afterwards since we may // be replacing a terminator. IRBuilder<> Builder(CS.getInstruction()); // Note: The gc args are not filled in at this time, that's handled by // RewriteStatepointsForGC (which is currently under review). 
// Create the statepoint given all the arguments Instruction *Token = nullptr; uint64_t ID; uint32_t NumPatchBytes; AttributeSet OriginalAttrs = CS.getAttributes(); Attribute AttrID = OriginalAttrs.getAttribute(AttributeSet::FunctionIndex, "statepoint-id"); Attribute AttrNumPatchBytes = OriginalAttrs.getAttribute( AttributeSet::FunctionIndex, "statepoint-num-patch-bytes"); AttrBuilder AttrsToRemove; bool HasID = AttrID.isStringAttribute() && !AttrID.getValueAsString().getAsInteger(10, ID); if (HasID) AttrsToRemove.addAttribute("statepoint-id"); else ID = 0xABCDEF00; bool HasNumPatchBytes = AttrNumPatchBytes.isStringAttribute() && !AttrNumPatchBytes.getValueAsString().getAsInteger(10, NumPatchBytes); if (HasNumPatchBytes) AttrsToRemove.addAttribute("statepoint-num-patch-bytes"); else NumPatchBytes = 0; OriginalAttrs = OriginalAttrs.removeAttributes( CS.getInstruction()->getContext(), AttributeSet::FunctionIndex, AttrsToRemove); Value *StatepointTarget = NumPatchBytes == 0 ? CS.getCalledValue() : ConstantPointerNull::get(cast<PointerType>( CS.getCalledValue()->getType())); if (CS.isCall()) { CallInst *ToReplace = cast<CallInst>(CS.getInstruction()); CallInst *Call = Builder.CreateGCStatepointCall( ID, NumPatchBytes, StatepointTarget, makeArrayRef(CS.arg_begin(), CS.arg_end()), None, None, "safepoint_token"); Call->setTailCall(ToReplace->isTailCall()); Call->setCallingConv(ToReplace->getCallingConv()); // In case if we can handle this set of attributes - set up function // attributes directly on statepoint and return attributes later for // gc_result intrinsic. Call->setAttributes(OriginalAttrs.getFnAttributes()); Token = Call; // Put the following gc_result and gc_relocate calls immediately after the // the old call (which we're about to delete). assert(ToReplace->getNextNode() && "not a terminator, must have next"); Builder.SetInsertPoint(ToReplace->getNextNode()); Builder.SetCurrentDebugLocation(ToReplace->getNextNode()->getDebugLoc()); } else if (CS.isInvoke()) { InvokeInst *ToReplace = cast<InvokeInst>(CS.getInstruction()); // Insert the new invoke into the old block. We'll remove the old one in a // moment at which point this will become the new terminator for the // original block. Builder.SetInsertPoint(ToReplace->getParent()); InvokeInst *Invoke = Builder.CreateGCStatepointInvoke( ID, NumPatchBytes, StatepointTarget, ToReplace->getNormalDest(), ToReplace->getUnwindDest(), makeArrayRef(CS.arg_begin(), CS.arg_end()), None, None, "safepoint_token"); Invoke->setCallingConv(ToReplace->getCallingConv()); // In case if we can handle this set of attributes - set up function // attributes directly on statepoint and return attributes later for // gc_result intrinsic. Invoke->setAttributes(OriginalAttrs.getFnAttributes()); Token = Invoke; // We'll insert the gc.result into the normal block BasicBlock *NormalDest = ToReplace->getNormalDest(); // Can not insert gc.result in case of phi nodes preset. // Should have removed this cases prior to runnning this function assert(!isa<PHINode>(NormalDest->begin())); Instruction *IP = &*(NormalDest->getFirstInsertionPt()); Builder.SetInsertPoint(IP); } else { llvm_unreachable("unexpect type of CallSite"); } assert(Token); // Handle the return value of the original call - update all uses to use a // gc_result hanging off the statepoint node we just inserted // Only add the gc_result iff there is actually a used result if (!CS.getType()->isVoidTy() && !CS.getInstruction()->use_empty()) { std::string TakenName = CS.getInstruction()->hasName() ? 
CS.getInstruction()->getName() : ""; CallInst *GCResult = Builder.CreateGCResult(Token, CS.getType(), TakenName); GCResult->setAttributes(OriginalAttrs.getRetAttributes()); return GCResult; } else { // No return value for the call. return nullptr; } }
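// A standalone sketch (separate from the pass above) of the per-latch policy
// that PlaceBackedgeSafepointsImpl::runOnLoop applies before recording a poll
// location.  The LatchInfo fields below are hypothetical summaries of what the
// real pass derives from ScalarEvolution and the dominator tree; they are not
// LLVM types.
#include <cstdio>
#include <vector>

namespace {

struct LatchInfo {
  const char *Name;
  // Loop provably executes a small, finite number of iterations
  // (mustBeFiniteCountedLoop in the real pass).
  bool FiniteCounted;
  // Every path from the header back to this latch passes through a call that
  // will itself be a safepoint (containsUnconditionalCallSafepoint).
  bool DominatedByCallSafepoint;
};

struct Policy {
  bool AllBackedges;          // corresponds to -spp-all-backedges
  bool CallSafepointsEnabled; // false when running with -spp-no-call
};

bool needsBackedgePoll(const LatchInfo &L, const Policy &P) {
  if (P.AllBackedges)
    return true; // validation mode: poll every backedge
  if (L.FiniteCounted)
    return false; // finite loops make progress without a poll
  if (P.CallSafepointsEnabled && L.DominatedByCallSafepoint)
    return false; // an existing call safepoint already bounds poll latency
  return true;
}

} // namespace

int main() {
  Policy P = {/*AllBackedges=*/false, /*CallSafepointsEnabled=*/true};
  std::vector<LatchInfo> Latches = {{"counted_loop.latch", true, false},
                                    {"call_loop.latch", false, true},
                                    {"spin_loop.latch", false, false}};
  for (const LatchInfo &L : Latches)
    std::printf("%s: %s\n", L.Name,
                needsBackedgePoll(L, P) ? "insert poll" : "skip");
  return 0;
}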
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/HoistConstantArray.cpp
//===- HoistConstantArray.cpp - Code to perform constant array hoisting ---===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // // Copyright (C) Microsoft Corporation. All rights reserved. // //===----------------------------------------------------------------------===// // // This file implements hoisting of constant local arrays to global arrays. // The idea is to change the array initialization from function local memory // using alloca and stores to global constant memory using a global variable // and constant initializer. We only hoist arrays that have all constant // elements. The frontend will hoist the arrays if they are declared static, but // we can hoist any array that is only ever initialized with constant data. // // This transformation was developed to work with the dxil produced from the // hlsl compiler. Hoisting the array to use a constant initializer should allow // a dxil backend compiler to generate more efficent code than a local array. // For example, it could use an immediate constant pool to represent the array. // // We limit hoisting to those arrays that are initialized by constant values. // We still hoist if the array is partially initialized as long as no // non-constant values are written. The uninitialized values will be hoisted // as undef values. // // Improvements: // Currently we do not merge arrays that have the same constant values. We // create the global variables with `unnamed_addr` set which means they // can be merged with other constants. We should probably use a separate // pass to merge all the unnamed_addr constants. // // Example: // // float main(int i : I) : SV_Target{ // float A[] = { 1, 2, 3 }; // return A[i]; // } // // Without array hoisting, we generate the following dxil // // define void @main() { // entry: // %0 = call i32 @dx.op.loadInput.i32(i32 4, i32 0, i32 0, i8 0, i32 undef) // %A = alloca[3 x float], align 4 // %1 = getelementptr inbounds[3 x float], [3 x float] * %A, i32 0, i32 0 // store float 1.000000e+00, float* %1, align 4 // %2 = getelementptr inbounds[3 x float], [3 x float] * %A, i32 0, i32 1 // store float 2.000000e+00, float* %2, align 4 // %3 = getelementptr inbounds[3 x float], [3 x float] * %A, i32 0, i32 2 // store float 3.000000e+00, float* %3, align 4 // %arrayidx = getelementptr inbounds[3 x float], [3 x float] * %A, i32 0, i32 // %0 %4 = load float, float* %arrayidx, align 4, !tbaa !14 call void // @dx.op.storeOutput.f32(i32 5, i32 0, i32 0, i8 0, float %4); ret void // } // // With array hoisting enabled we generate this dxil // // @A.hca = internal unnamed_addr constant [3 x float] [float 1.000000e+00, // float 2.000000e+00, float 3.000000e+00] define void @main() { entry: // %0 = call i32 @dx.op.loadInput.i32(i32 4, i32 0, i32 0, i8 0, i32 undef) // %arrayidx = getelementptr inbounds[3 x float], [3 x float] * @A.hca, i32 0, // i32 %0 %1 = load float, float* %arrayidx, align 4, !tbaa !14 call void // @dx.op.storeOutput.f32(i32 5, i32 0, i32 0, i8 0, float %1) ret void // } // //===----------------------------------------------------------------------===// #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/Constant.h" #include "llvm/IR/Function.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/IR/Operator.h" #include "llvm/IR/Type.h" #include "llvm/Pass.h" #include "llvm/Support/Casting.h" 
#include "llvm/Transforms/Scalar.h" using namespace llvm; namespace { class CandidateArray; //===--------------------------------------------------------------------===// // HoistConstantArray pass implementation // class HoistConstantArray : public ModulePass { public: static char ID; // Pass identification, replacement for typeid HoistConstantArray() : ModulePass(ID) { initializeHoistConstantArrayPass(*PassRegistry::getPassRegistry()); } bool runOnModule(Module &M) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesCFG(); } private: bool runOnFunction(Function &F); std::vector<AllocaInst *> findCandidateAllocas(Function &F); void hoistArray(const CandidateArray &candidate); void removeLocalArrayStores(const CandidateArray &candidate); }; // Represents an array we are considering for hoisting. // Contains helper routines for analyzing if hoisting is possible // and creating the global variable for the hoisted array. class CandidateArray { public: explicit CandidateArray(AllocaInst *); bool IsConstArray() const { return m_IsConstArray; } void AnalyzeUses(); GlobalVariable *GetGlobalArray() const; AllocaInst *GetLocalArray() const { return m_Alloca; } std::vector<StoreInst *> GetArrayStores() const; private: AllocaInst *m_Alloca; ArrayType *m_ArrayType; std::vector<Constant *> m_Values; bool m_IsConstArray; bool AnalyzeStore(StoreInst *SI); bool StoreConstant(int64_t index, Constant *value); void EnsureSize(); void GetArrayStores(GEPOperator *gep, std::vector<StoreInst *> &stores) const; bool AllArrayUsersAreGEPOrLifetime(std::vector<GEPOperator *> &geps); bool AllGEPUsersAreValid(GEPOperator *gep); UndefValue *UndefElement(); }; } // namespace // Returns the ArrayType for the alloca or nullptr if the alloca // does not allocate an array. static ArrayType *getAllocaArrayType(AllocaInst *allocaInst) { return dyn_cast<ArrayType>(allocaInst->getType()->getPointerElementType()); } // Check if the instruction is an alloca that we should consider for hoisting. // The alloca must allocate and array of primitive types. static AllocaInst *isHoistableArrayAlloca(Instruction *I) { AllocaInst *allocaInst = dyn_cast<AllocaInst>(I); if (!allocaInst) return nullptr; ArrayType *arrayTy = getAllocaArrayType(allocaInst); if (!arrayTy) return nullptr; if (!arrayTy->getElementType()->isSingleValueType()) return nullptr; return allocaInst; } // ---------------------------------------------------------------------------- // CandidateArray implementation // ---------------------------------------------------------------------------- // Create the candidate array for the alloca. CandidateArray::CandidateArray(AllocaInst *AI) : m_Alloca(AI), m_Values(), m_IsConstArray(false) { assert(isHoistableArrayAlloca(AI)); m_ArrayType = getAllocaArrayType(AI); } // Get the global variable with a constant initializer for the array. // Only valid to call if the array has been analyzed as a constant array. GlobalVariable *CandidateArray::GetGlobalArray() const { assert(IsConstArray()); Constant *initializer = ConstantArray::get(m_ArrayType, m_Values); Module *M = m_Alloca->getModule(); GlobalVariable *GV = new GlobalVariable( *M, m_ArrayType, true, GlobalVariable::LinkageTypes::InternalLinkage, initializer, Twine(m_Alloca->getName()) + ".hca"); GV->setUnnamedAddr(true); return GV; } // Get a list of all the stores that write to the array through one or more // GetElementPtrInst operations. 
std::vector<StoreInst *> CandidateArray::GetArrayStores() const { std::vector<StoreInst *> stores; for (User *U : m_Alloca->users()) if (GEPOperator *gep = dyn_cast<GEPOperator>(U)) GetArrayStores(gep, stores); return stores; } // Recursively collect all the stores that write to the pointer/buffer // referred to by this GetElementPtrInst. void CandidateArray::GetArrayStores(GEPOperator *gep, std::vector<StoreInst *> &stores) const { for (User *GU : gep->users()) { if (StoreInst *SI = dyn_cast<StoreInst>(GU)) { stores.push_back(SI); } else if (GEPOperator *GEPI = dyn_cast<GEPOperator>(GU)) { GetArrayStores(GEPI, stores); } } } // Check to see that all the users of the array are GEPs or lifetime intrinsics. // If so, populate the `geps` vector with a list of all geps that use the array. bool CandidateArray::AllArrayUsersAreGEPOrLifetime( std::vector<GEPOperator *> &geps) { for (User *U : m_Alloca->users()) { // Allow users that are only used by lifetime intrinsics. if (isa<BitCastInst>(U) && onlyUsedByLifetimeMarkers(U)) continue; GEPOperator *gep = dyn_cast<GEPOperator>(U); if (!gep) return false; geps.push_back(gep); } return true; } // Check that all gep uses are valid. // A valid use is either // 1. A store of a constant value that does not overwrite an existing constant // with a different value. // 2. A load instruction. // 3. Another GetElementPtrInst that itself only has valid uses (recursively) // Any other use is considered invalid. bool CandidateArray::AllGEPUsersAreValid(GEPOperator *gep) { for (User *U : gep->users()) { if (StoreInst *SI = dyn_cast<StoreInst>(U)) { if (!AnalyzeStore(SI)) return false; } else if (GEPOperator *recursive_gep = dyn_cast<GEPOperator>(U)) { if (!AllGEPUsersAreValid(recursive_gep)) return false; } else if (!isa<LoadInst>(U)) { return false; } } return true; } // Analyze all uses of the array to see if it qualifes as a constant array. // We check the following conditions: // 1. Make sure alloca is only used by GEP and lifetime intrinsics. // 2. Make sure GEP is only used in load/store. // 3. Make sure all stores have constant indicies. // 4. Make sure all stores are constants. // 5. Make sure all stores to same location are the same constant. void CandidateArray::AnalyzeUses() { m_IsConstArray = false; std::vector<GEPOperator *> geps; if (!AllArrayUsersAreGEPOrLifetime(geps)) return; for (GEPOperator *gep : geps) if (!AllGEPUsersAreValid(gep)) return; m_IsConstArray = true; } // Analyze a store to see if it is a valid constant store. // A valid store will write a constant value to a known (constant) location. bool CandidateArray::AnalyzeStore(StoreInst *SI) { if (!isa<Constant>(SI->getValueOperand())) return false; // Walk up the ladder of GetElementPtr instructions to accumulate the index int64_t index = 0; for (auto iter = SI->getPointerOperand(); iter != m_Alloca;) { GEPOperator *gep = cast<GEPOperator>(iter); if (!gep->hasAllConstantIndices()) return false; // Deal with the 'extra 0' index from what might have been a global pointer // https://www.llvm.org/docs/GetElementPtr.html#why-is-the-extra-0-index-required if ((gep->getNumIndices() == 2) && (gep->getPointerOperand() == m_Alloca)) { // Non-zero offset is unexpected, but could occur in the wild. Bail out if // we see it. 
ConstantInt *ptrOffset = cast<ConstantInt>(gep->getOperand(1)); if (!ptrOffset->isZero()) return false; } else if (gep->getNumIndices() != 1) { return false; } // Accumulate the index ConstantInt *c = cast<ConstantInt>(gep->getOperand(gep->getNumIndices())); index += c->getSExtValue(); iter = gep->getPointerOperand(); } return StoreConstant(index, cast<Constant>(SI->getValueOperand())); } // Check if the store is valid and record the value if so. // A valid constant store is either: // 1. A store of a new constant // 2. A store of the same constant to the same location bool CandidateArray::StoreConstant(int64_t index, Constant *value) { EnsureSize(); size_t i = static_cast<size_t>(index); if (i >= m_Values.size()) return false; if (m_Values[i] == UndefElement()) m_Values[i] = value; return m_Values[i] == value; } // We lazily create the values array until we have a store of a // constant that we need to remember. This avoids memory overhead // for obviously non-constant arrays. void CandidateArray::EnsureSize() { if (m_Values.size() == 0) { m_Values.resize(m_ArrayType->getNumElements(), UndefElement()); } assert(m_Values.size() == m_ArrayType->getNumElements()); } // Get an undef value of the correct type for the array. UndefValue *CandidateArray::UndefElement() { return UndefValue::get(m_ArrayType->getElementType()); } // ---------------------------------------------------------------------------- // Pass Implementation // ---------------------------------------------------------------------------- // Find the allocas that are candidates for array hoisting in the function. std::vector<AllocaInst *> HoistConstantArray::findCandidateAllocas(Function &F) { std::vector<AllocaInst *> candidates; for (Instruction &I : F.getEntryBlock()) if (AllocaInst *allocaInst = isHoistableArrayAlloca(&I)) candidates.push_back(allocaInst); return candidates; } // Remove local stores to the array. // We remove them explicitly rather than relying on DCE to find they are dead. // Other uses (e.g. geps) can be easily cleaned up by DCE. void HoistConstantArray::removeLocalArrayStores( const CandidateArray &candidate) { std::vector<StoreInst *> stores = candidate.GetArrayStores(); for (StoreInst *store : stores) store->eraseFromParent(); } // Hoist an array from a local to a global. void HoistConstantArray::hoistArray(const CandidateArray &candidate) { assert(candidate.IsConstArray()); removeLocalArrayStores(candidate); AllocaInst *local = candidate.GetLocalArray(); GlobalVariable *global = candidate.GetGlobalArray(); local->replaceAllUsesWith(global); local->eraseFromParent(); } // Perform array hoisting on a single function. bool HoistConstantArray::runOnFunction(Function &F) { bool changed = false; std::vector<AllocaInst *> candidateAllocas = findCandidateAllocas(F); for (AllocaInst *AI : candidateAllocas) { CandidateArray candidate(AI); candidate.AnalyzeUses(); if (candidate.IsConstArray()) { hoistArray(candidate); changed |= true; } } return changed; } char HoistConstantArray::ID = 0; INITIALIZE_PASS(HoistConstantArray, "hlsl-hca", "Hoist constant arrays", false, false) bool HoistConstantArray::runOnModule(Module &M) { bool changed = false; for (Function &F : M) { if (F.isDeclaration()) continue; changed |= runOnFunction(F); } return changed; } ModulePass *llvm::createHoistConstantArrayPass() { return new HoistConstantArray(); }
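// A small standalone model (not the pass code above) of the rule that
// CandidateArray::StoreConstant enforces: a local array stays hoistable as
// long as every constant store either fills a previously unwritten slot or
// rewrites a slot with the exact same constant.  The int values and the
// ConstArrayModel class are simplifications of llvm::Constant and the lazily
// sized m_Values vector; they are not the pass's actual types.
#include <cstdio>
#include <optional>
#include <vector>

namespace {

class ConstArrayModel {
public:
  explicit ConstArrayModel(size_t NumElements) : Values(NumElements) {}

  // Returns false if the store would make the array non-constant
  // (out of bounds, or a conflicting value for an already-written slot).
  bool storeConstant(size_t Index, int Value) {
    if (Index >= Values.size())
      return false;
    if (!Values[Index]) {
      Values[Index] = Value; // first write: remember the constant
      return true;
    }
    return *Values[Index] == Value; // rewrite: only the same constant is OK
  }

private:
  // Unwritten slots stay empty; the real pass hoists them as undef elements.
  std::vector<std::optional<int>> Values;
};

} // namespace

int main() {
  ConstArrayModel A(3);
  std::printf("store A[0]=1: %d\n", A.storeConstant(0, 1)); // ok
  std::printf("store A[0]=1: %d\n", A.storeConstant(0, 1)); // same value, ok
  std::printf("store A[0]=2: %d\n", A.storeConstant(0, 2)); // conflict, fails
  return 0;
}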
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
//===-- StraightLineStrengthReduce.cpp - ------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements straight-line strength reduction (SLSR). Unlike loop // strength reduction, this algorithm is designed to reduce arithmetic // redundancy in straight-line code instead of loops. It has proven to be // effective in simplifying arithmetic statements derived from an unrolled loop. // It can also simplify the logic of SeparateConstOffsetFromGEP. // // There are many optimizations we can perform in the domain of SLSR. This file // for now contains only an initial step. Specifically, we look for strength // reduction candidates in the following forms: // // Form 1: B + i * S // Form 2: (B + i) * S // Form 3: &B[i * S] // // where S is an integer variable, and i is a constant integer. If we found two // candidates S1 and S2 in the same form and S1 dominates S2, we may rewrite S2 // in a simpler way with respect to S1. For example, // // S1: X = B + i * S // S2: Y = B + i' * S => X + (i' - i) * S // // S1: X = (B + i) * S // S2: Y = (B + i') * S => X + (i' - i) * S // // S1: X = &B[i * S] // S2: Y = &B[i' * S] => &X[(i' - i) * S] // // Note: (i' - i) * S is folded to the extent possible. // // This rewriting is in general a good idea. The code patterns we focus on // usually come from loop unrolling, so (i' - i) * S is likely the same // across iterations and can be reused. When that happens, the optimized form // takes only one add starting from the second iteration. // // When such rewriting is possible, we call S1 a "basis" of S2. When S2 has // multiple bases, we choose to rewrite S2 with respect to its "immediate" // basis, the basis that is the closest ancestor in the dominator tree. // // TODO: // // - Floating point arithmetics when fast math is enabled. // // - SLSR may decrease ILP at the architecture level. Targets that are very // sensitive to ILP may want to disable it. Having SLSR to consider ILP is // left as future work. // // - When (i' - i) is constant but i and i' are not, we could still perform // SLSR. #include <vector> // // /////////////////////////////////////////////////////////////////////////////// #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/FoldingSet.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Module.h" #include "llvm/IR/PatternMatch.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/Local.h" using namespace llvm; using namespace PatternMatch; namespace { class StraightLineStrengthReduce : public FunctionPass { public: // SLSR candidate. Such a candidate must be in one of the forms described in // the header comments. struct Candidate : public ilist_node<Candidate> { enum Kind { Invalid, // reserved for the default constructor Add, // B + i * S Mul, // (B + i) * S GEP, // &B[..][i * S][..] 
}; Candidate() : CandidateKind(Invalid), Base(nullptr), Index(nullptr), Stride(nullptr), Ins(nullptr), Basis(nullptr) {} Candidate(Kind CT, const SCEV *B, ConstantInt *Idx, Value *S, Instruction *I) : CandidateKind(CT), Base(B), Index(Idx), Stride(S), Ins(I), Basis(nullptr) {} Kind CandidateKind; const SCEV *Base; // Note that Index and Stride of a GEP candidate do not necessarily have the // same integer type. In that case, during rewriting, Stride will be // sign-extended or truncated to Index's type. ConstantInt *Index; Value *Stride; // The instruction this candidate corresponds to. It helps us to rewrite a // candidate with respect to its immediate basis. Note that one instruction // can correspond to multiple candidates depending on how you associate the // expression. For instance, // // (a + 1) * (b + 2) // // can be treated as // // <Base: a, Index: 1, Stride: b + 2> // // or // // <Base: b, Index: 2, Stride: a + 1> Instruction *Ins; // Points to the immediate basis of this candidate, or nullptr if we cannot // find any basis for this candidate. Candidate *Basis; }; static char ID; StraightLineStrengthReduce() : FunctionPass(ID), DL(nullptr), DT(nullptr), TTI(nullptr) { initializeStraightLineStrengthReducePass(*PassRegistry::getPassRegistry()); } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<DominatorTreeWrapperPass>(); AU.addRequired<ScalarEvolution>(); AU.addRequired<TargetTransformInfoWrapperPass>(); // We do not modify the shape of the CFG. AU.setPreservesCFG(); } bool doInitialization(Module &M) override { DL = &M.getDataLayout(); return false; } bool runOnFunction(Function &F) override; private: // Returns true if Basis is a basis for C, i.e., Basis dominates C and they // share the same base and stride. bool isBasisFor(const Candidate &Basis, const Candidate &C); // Returns whether the candidate can be folded into an addressing mode. bool isFoldable(const Candidate &C, TargetTransformInfo *TTI, const DataLayout *DL); // Returns true if C is already in a simplest form and not worth being // rewritten. bool isSimplestForm(const Candidate &C); // Checks whether I is in a candidate form. If so, adds all the matching forms // to Candidates, and tries to find the immediate basis for each of them. void allocateCandidatesAndFindBasis(Instruction *I); // Allocate candidates and find bases for Add instructions. void allocateCandidatesAndFindBasisForAdd(Instruction *I); // Given I = LHS + RHS, factors RHS into i * S and makes (LHS + i * S) a // candidate. void allocateCandidatesAndFindBasisForAdd(Value *LHS, Value *RHS, Instruction *I); // Allocate candidates and find bases for Mul instructions. void allocateCandidatesAndFindBasisForMul(Instruction *I); // Splits LHS into Base + Index and, if succeeds, calls // allocateCandidatesAndFindBasis. void allocateCandidatesAndFindBasisForMul(Value *LHS, Value *RHS, Instruction *I); // Allocate candidates and find bases for GetElementPtr instructions. void allocateCandidatesAndFindBasisForGEP(GetElementPtrInst *GEP); // A helper function that scales Idx with ElementSize before invoking // allocateCandidatesAndFindBasis. void allocateCandidatesAndFindBasisForGEP(const SCEV *B, ConstantInt *Idx, Value *S, uint64_t ElementSize, Instruction *I); // Adds the given form <CT, B, Idx, S> to Candidates, and finds its immediate // basis. void allocateCandidatesAndFindBasis(Candidate::Kind CT, const SCEV *B, ConstantInt *Idx, Value *S, Instruction *I); // Rewrites candidate C with respect to Basis. 
void rewriteCandidateWithBasis(const Candidate &C, const Candidate &Basis); // A helper function that factors ArrayIdx to a product of a stride and a // constant index, and invokes allocateCandidatesAndFindBasis with the // factorings. void factorArrayIndex(Value *ArrayIdx, const SCEV *Base, uint64_t ElementSize, GetElementPtrInst *GEP); // Emit code that computes the "bump" from Basis to C. If the candidate is a // GEP and the bump is not divisible by the element size of the GEP, this // function sets the BumpWithUglyGEP flag to notify its caller to bump the // basis using an ugly GEP. static Value *emitBump(const Candidate &Basis, const Candidate &C, IRBuilder<> &Builder, const DataLayout *DL, bool &BumpWithUglyGEP); const DataLayout *DL; DominatorTree *DT; ScalarEvolution *SE; TargetTransformInfo *TTI; ilist<Candidate> Candidates; // Temporarily holds all instructions that are unlinked (but not deleted) by // rewriteCandidateWithBasis. These instructions will be actually removed // after all rewriting finishes. std::vector<Instruction *> UnlinkedInstructions; }; } // anonymous namespace char StraightLineStrengthReduce::ID = 0; INITIALIZE_PASS_BEGIN(StraightLineStrengthReduce, "slsr", "Straight line strength reduction", false, false) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) INITIALIZE_PASS_END(StraightLineStrengthReduce, "slsr", "Straight line strength reduction", false, false) FunctionPass *llvm::createStraightLineStrengthReducePass() { return new StraightLineStrengthReduce(); } bool StraightLineStrengthReduce::isBasisFor(const Candidate &Basis, const Candidate &C) { return (Basis.Ins != C.Ins && // skip the same instruction // They must have the same type too. Basis.Base == C.Base doesn't // guarantee their types are the same (PR23975). Basis.Ins->getType() == C.Ins->getType() && // Basis must dominate C in order to rewrite C with respect to Basis. DT->dominates(Basis.Ins->getParent(), C.Ins->getParent()) && // They share the same base, stride, and candidate kind. Basis.Base == C.Base && Basis.Stride == C.Stride && Basis.CandidateKind == C.CandidateKind); } static bool isGEPFoldable(GetElementPtrInst *GEP, const TargetTransformInfo *TTI, const DataLayout *DL) { GlobalVariable *BaseGV = nullptr; int64_t BaseOffset = 0; bool HasBaseReg = false; int64_t Scale = 0; if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getPointerOperand())) BaseGV = GV; else HasBaseReg = true; gep_type_iterator GTI = gep_type_begin(GEP); for (auto I = GEP->idx_begin(); I != GEP->idx_end(); ++I, ++GTI) { if (isa<SequentialType>(*GTI)) { int64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType()); if (ConstantInt *ConstIdx = dyn_cast<ConstantInt>(*I)) { BaseOffset += ConstIdx->getSExtValue() * ElementSize; } else { // Needs scale register. if (Scale != 0) { // No addressing mode takes two scale registers. return false; } Scale = ElementSize; } } else { StructType *STy = cast<StructType>(*GTI); uint64_t Field = cast<ConstantInt>(*I)->getZExtValue(); BaseOffset += DL->getStructLayout(STy)->getElementOffset(Field); } } unsigned AddrSpace = GEP->getPointerAddressSpace(); return TTI->isLegalAddressingMode(GEP->getType()->getElementType(), BaseGV, BaseOffset, HasBaseReg, Scale, AddrSpace); } // Returns whether (Base + Index * Stride) can be folded to an addressing mode. 
static bool isAddFoldable(const SCEV *Base, ConstantInt *Index, Value *Stride, TargetTransformInfo *TTI) { return TTI->isLegalAddressingMode(Base->getType(), nullptr, 0, true, Index->getSExtValue()); } bool StraightLineStrengthReduce::isFoldable(const Candidate &C, TargetTransformInfo *TTI, const DataLayout *DL) { if (C.CandidateKind == Candidate::Add) return isAddFoldable(C.Base, C.Index, C.Stride, TTI); if (C.CandidateKind == Candidate::GEP) return isGEPFoldable(cast<GetElementPtrInst>(C.Ins), TTI, DL); return false; } // Returns true if GEP has zero or one non-zero index. static bool hasOnlyOneNonZeroIndex(GetElementPtrInst *GEP) { unsigned NumNonZeroIndices = 0; for (auto I = GEP->idx_begin(); I != GEP->idx_end(); ++I) { ConstantInt *ConstIdx = dyn_cast<ConstantInt>(*I); if (ConstIdx == nullptr || !ConstIdx->isZero()) ++NumNonZeroIndices; } return NumNonZeroIndices <= 1; } bool StraightLineStrengthReduce::isSimplestForm(const Candidate &C) { if (C.CandidateKind == Candidate::Add) { // B + 1 * S or B + (-1) * S return C.Index->isOne() || C.Index->isMinusOne(); } if (C.CandidateKind == Candidate::Mul) { // (B + 0) * S return C.Index->isZero(); } if (C.CandidateKind == Candidate::GEP) { // (char*)B + S or (char*)B - S return ((C.Index->isOne() || C.Index->isMinusOne()) && hasOnlyOneNonZeroIndex(cast<GetElementPtrInst>(C.Ins))); } return false; } // TODO: We currently implement an algorithm whose time complexity is linear in // the number of existing candidates. However, we could do better by using // ScopedHashTable. Specifically, while traversing the dominator tree, we could // maintain all the candidates that dominate the basic block being traversed in // a ScopedHashTable. This hash table is indexed by the base and the stride of // a candidate. Therefore, finding the immediate basis of a candidate boils down // to one hash-table look up. void StraightLineStrengthReduce::allocateCandidatesAndFindBasis( Candidate::Kind CT, const SCEV *B, ConstantInt *Idx, Value *S, Instruction *I) { Candidate C(CT, B, Idx, S, I); // SLSR can complicate an instruction in two cases: // // 1. If we can fold I into an addressing mode, computing I is likely free or // takes only one instruction. // // 2. I is already in a simplest form. For example, when // X = B + 8 * S // Y = B + S, // rewriting Y to X - 7 * S is probably a bad idea. // // In the above cases, we still add I to the candidate list so that I can be // the basis of other candidates, but we leave I's basis blank so that I // won't be rewritten. if (!isFoldable(C, TTI, DL) && !isSimplestForm(C)) { // Try to compute the immediate basis of C. unsigned NumIterations = 0; // Limit the scan radius to avoid running in quadratice time. static const unsigned MaxNumIterations = 50; for (auto Basis = Candidates.rbegin(); Basis != Candidates.rend() && NumIterations < MaxNumIterations; ++Basis, ++NumIterations) { if (isBasisFor(*Basis, C)) { C.Basis = &(*Basis); break; } } } // Regardless of whether we find a basis for C, we need to push C to the // candidate list so that it can be the basis of other candidates. 
Candidates.push_back(C); } void StraightLineStrengthReduce::allocateCandidatesAndFindBasis( Instruction *I) { switch (I->getOpcode()) { case Instruction::Add: allocateCandidatesAndFindBasisForAdd(I); break; case Instruction::Mul: allocateCandidatesAndFindBasisForMul(I); break; case Instruction::GetElementPtr: allocateCandidatesAndFindBasisForGEP(cast<GetElementPtrInst>(I)); break; } } void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForAdd( Instruction *I) { // Try matching B + i * S. if (!isa<IntegerType>(I->getType())) return; assert(I->getNumOperands() == 2 && "isn't I an add?"); Value *LHS = I->getOperand(0), *RHS = I->getOperand(1); allocateCandidatesAndFindBasisForAdd(LHS, RHS, I); if (LHS != RHS) allocateCandidatesAndFindBasisForAdd(RHS, LHS, I); } void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForAdd( Value *LHS, Value *RHS, Instruction *I) { Value *S = nullptr; ConstantInt *Idx = nullptr; if (match(RHS, m_Mul(m_Value(S), m_ConstantInt(Idx)))) { // I = LHS + RHS = LHS + Idx * S allocateCandidatesAndFindBasis(Candidate::Add, SE->getSCEV(LHS), Idx, S, I); } else if (match(RHS, m_Shl(m_Value(S), m_ConstantInt(Idx)))) { // I = LHS + RHS = LHS + (S << Idx) = LHS + S * (1 << Idx) APInt One(Idx->getBitWidth(), 1); Idx = ConstantInt::get(Idx->getContext(), One << Idx->getValue()); allocateCandidatesAndFindBasis(Candidate::Add, SE->getSCEV(LHS), Idx, S, I); } else { // At least, I = LHS + 1 * RHS ConstantInt *One = ConstantInt::get(cast<IntegerType>(I->getType()), 1); allocateCandidatesAndFindBasis(Candidate::Add, SE->getSCEV(LHS), One, RHS, I); } } // Returns true if A matches B + C where C is constant. static bool matchesAdd(Value *A, Value *&B, ConstantInt *&C) { return (match(A, m_Add(m_Value(B), m_ConstantInt(C))) || match(A, m_Add(m_ConstantInt(C), m_Value(B)))); } // Returns true if A matches B | C where C is constant. static bool matchesOr(Value *A, Value *&B, ConstantInt *&C) { return (match(A, m_Or(m_Value(B), m_ConstantInt(C))) || match(A, m_Or(m_ConstantInt(C), m_Value(B)))); } void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForMul( Value *LHS, Value *RHS, Instruction *I) { Value *B = nullptr; ConstantInt *Idx = nullptr; if (matchesAdd(LHS, B, Idx)) { // If LHS is in the form of "Base + Index", then I is in the form of // "(Base + Index) * RHS". allocateCandidatesAndFindBasis(Candidate::Mul, SE->getSCEV(B), Idx, RHS, I); } else if (matchesOr(LHS, B, Idx) && haveNoCommonBitsSet(B, Idx, *DL)) { // If LHS is in the form of "Base | Index" and Base and Index have no common // bits set, then // Base | Index = Base + Index // and I is thus in the form of "(Base + Index) * RHS". allocateCandidatesAndFindBasis(Candidate::Mul, SE->getSCEV(B), Idx, RHS, I); } else { // Otherwise, at least try the form (LHS + 0) * RHS. ConstantInt *Zero = ConstantInt::get(cast<IntegerType>(I->getType()), 0); allocateCandidatesAndFindBasis(Candidate::Mul, SE->getSCEV(LHS), Zero, RHS, I); } } void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForMul( Instruction *I) { // Try matching (B + i) * S. // TODO: we could extend SLSR to float and vector types. if (!isa<IntegerType>(I->getType())) return; assert(I->getNumOperands() == 2 && "isn't I a mul?"); Value *LHS = I->getOperand(0), *RHS = I->getOperand(1); allocateCandidatesAndFindBasisForMul(LHS, RHS, I); if (LHS != RHS) { // Symmetrically, try to split RHS to Base + Index. 
allocateCandidatesAndFindBasisForMul(RHS, LHS, I); } } void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP( const SCEV *B, ConstantInt *Idx, Value *S, uint64_t ElementSize, Instruction *I) { // I = B + sext(Idx *nsw S) * ElementSize // = B + (sext(Idx) * sext(S)) * ElementSize // = B + (sext(Idx) * ElementSize) * sext(S) // Casting to IntegerType is safe because we skipped vector GEPs. IntegerType *IntPtrTy = cast<IntegerType>(DL->getIntPtrType(I->getType())); ConstantInt *ScaledIdx = ConstantInt::get( IntPtrTy, Idx->getSExtValue() * (int64_t)ElementSize, true); allocateCandidatesAndFindBasis(Candidate::GEP, B, ScaledIdx, S, I); } void StraightLineStrengthReduce::factorArrayIndex(Value *ArrayIdx, const SCEV *Base, uint64_t ElementSize, GetElementPtrInst *GEP) { // At least, ArrayIdx = ArrayIdx *nsw 1. allocateCandidatesAndFindBasisForGEP( Base, ConstantInt::get(cast<IntegerType>(ArrayIdx->getType()), 1), ArrayIdx, ElementSize, GEP); Value *LHS = nullptr; ConstantInt *RHS = nullptr; // One alternative is matching the SCEV of ArrayIdx instead of ArrayIdx // itself. This would allow us to handle the shl case for free. However, // matching SCEVs has two issues: // // 1. this would complicate rewriting because the rewriting procedure // would have to translate SCEVs back to IR instructions. This translation // is difficult when LHS is further evaluated to a composite SCEV. // // 2. ScalarEvolution is designed to be control-flow oblivious. It tends // to strip nsw/nuw flags which are critical for SLSR to trace into // sext'ed multiplication. if (match(ArrayIdx, m_NSWMul(m_Value(LHS), m_ConstantInt(RHS)))) { // SLSR is currently unsafe if i * S may overflow. // GEP = Base + sext(LHS *nsw RHS) * ElementSize allocateCandidatesAndFindBasisForGEP(Base, RHS, LHS, ElementSize, GEP); } else if (match(ArrayIdx, m_NSWShl(m_Value(LHS), m_ConstantInt(RHS)))) { // GEP = Base + sext(LHS <<nsw RHS) * ElementSize // = Base + sext(LHS *nsw (1 << RHS)) * ElementSize APInt One(RHS->getBitWidth(), 1); ConstantInt *PowerOf2 = ConstantInt::get(RHS->getContext(), One << RHS->getValue()); allocateCandidatesAndFindBasisForGEP(Base, PowerOf2, LHS, ElementSize, GEP); } } void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP( GetElementPtrInst *GEP) { // TODO: handle vector GEPs if (GEP->getType()->isVectorTy()) return; SmallVector<const SCEV *, 4> IndexExprs; for (auto I = GEP->idx_begin(); I != GEP->idx_end(); ++I) IndexExprs.push_back(SE->getSCEV(*I)); gep_type_iterator GTI = gep_type_begin(GEP); for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) { if (!isa<SequentialType>(*GTI++)) continue; const SCEV *OrigIndexExpr = IndexExprs[I - 1]; IndexExprs[I - 1] = SE->getConstant(OrigIndexExpr->getType(), 0); // The base of this candidate is GEP's base plus the offsets of all // indices except this current one. const SCEV *BaseExpr = SE->getGEPExpr(GEP->getSourceElementType(), SE->getSCEV(GEP->getPointerOperand()), IndexExprs, GEP->isInBounds()); Value *ArrayIdx = GEP->getOperand(I); uint64_t ElementSize = DL->getTypeAllocSize(*GTI); factorArrayIndex(ArrayIdx, BaseExpr, ElementSize, GEP); // When ArrayIdx is the sext of a value, we try to factor that value as // well. Handling this case is important because array indices are // typically sign-extended to the pointer size. 
Value *TruncatedArrayIdx = nullptr; if (match(ArrayIdx, m_SExt(m_Value(TruncatedArrayIdx)))) factorArrayIndex(TruncatedArrayIdx, BaseExpr, ElementSize, GEP); IndexExprs[I - 1] = OrigIndexExpr; } } // A helper function that unifies the bitwidth of A and B. static void unifyBitWidth(APInt &A, APInt &B) { if (A.getBitWidth() < B.getBitWidth()) A = A.sext(B.getBitWidth()); else if (A.getBitWidth() > B.getBitWidth()) B = B.sext(A.getBitWidth()); } Value *StraightLineStrengthReduce::emitBump(const Candidate &Basis, const Candidate &C, IRBuilder<> &Builder, const DataLayout *DL, bool &BumpWithUglyGEP) { APInt Idx = C.Index->getValue(), BasisIdx = Basis.Index->getValue(); unifyBitWidth(Idx, BasisIdx); APInt IndexOffset = Idx - BasisIdx; BumpWithUglyGEP = false; if (Basis.CandidateKind == Candidate::GEP) { APInt ElementSize( IndexOffset.getBitWidth(), DL->getTypeAllocSize( cast<GetElementPtrInst>(Basis.Ins)->getType()->getElementType())); APInt Q, R; APInt::sdivrem(IndexOffset, ElementSize, Q, R); if (R.getSExtValue() == 0) IndexOffset = Q; else BumpWithUglyGEP = true; } // Compute Bump = C - Basis = (i' - i) * S. // Common case 1: if (i' - i) is 1, Bump = S. if (IndexOffset.getSExtValue() == 1) return C.Stride; // Common case 2: if (i' - i) is -1, Bump = -S. if (IndexOffset.getSExtValue() == -1) return Builder.CreateNeg(C.Stride); // Otherwise, Bump = (i' - i) * sext/trunc(S). Note that (i' - i) and S may // have different bit widths. IntegerType *DeltaType = IntegerType::get(Basis.Ins->getContext(), IndexOffset.getBitWidth()); Value *ExtendedStride = Builder.CreateSExtOrTrunc(C.Stride, DeltaType); if (IndexOffset.isPowerOf2()) { // If (i' - i) is a power of 2, Bump = sext/trunc(S) << log(i' - i). ConstantInt *Exponent = ConstantInt::get(DeltaType, IndexOffset.logBase2()); return Builder.CreateShl(ExtendedStride, Exponent); } if ((-IndexOffset).isPowerOf2()) { // If (i - i') is a power of 2, Bump = -sext/trunc(S) << log(i' - i). ConstantInt *Exponent = ConstantInt::get(DeltaType, (-IndexOffset).logBase2()); return Builder.CreateNeg(Builder.CreateShl(ExtendedStride, Exponent)); } Constant *Delta = ConstantInt::get(DeltaType, IndexOffset); return Builder.CreateMul(ExtendedStride, Delta); } void StraightLineStrengthReduce::rewriteCandidateWithBasis( const Candidate &C, const Candidate &Basis) { assert(C.CandidateKind == Basis.CandidateKind && C.Base == Basis.Base && C.Stride == Basis.Stride); // We run rewriteCandidateWithBasis on all candidates in a post-order, so the // basis of a candidate cannot be unlinked before the candidate. assert(Basis.Ins->getParent() != nullptr && "the basis is unlinked"); // An instruction can correspond to multiple candidates. Therefore, instead of // simply deleting an instruction when we rewrite it, we mark its parent as // nullptr (i.e. unlink it) so that we can skip the candidates whose // instruction is already rewritten. if (!C.Ins->getParent()) return; IRBuilder<> Builder(C.Ins); bool BumpWithUglyGEP; Value *Bump = emitBump(Basis, C, Builder, DL, BumpWithUglyGEP); Value *Reduced = nullptr; // equivalent to but weaker than C.Ins switch (C.CandidateKind) { case Candidate::Add: case Candidate::Mul: // C = Basis + Bump if (BinaryOperator::isNeg(Bump)) { // If Bump is a neg instruction, emit C = Basis - (-Bump). Reduced = Builder.CreateSub(Basis.Ins, BinaryOperator::getNegArgument(Bump)); // We only use the negative argument of Bump, and Bump itself may be // trivially dead. 
RecursivelyDeleteTriviallyDeadInstructions(Bump); } else { // It's tempting to preserve nsw on Bump and/or Reduced. However, it's // usually unsound, e.g., // // X = (-2 +nsw 1) *nsw INT_MAX // Y = (-2 +nsw 3) *nsw INT_MAX // => // Y = X + 2 * INT_MAX // // Neither + and * in the resultant expression are nsw. Reduced = Builder.CreateAdd(Basis.Ins, Bump); } break; case Candidate::GEP: { Type *IntPtrTy = DL->getIntPtrType(C.Ins->getType()); bool InBounds = cast<GetElementPtrInst>(C.Ins)->isInBounds(); if (BumpWithUglyGEP) { // C = (char *)Basis + Bump unsigned AS = Basis.Ins->getType()->getPointerAddressSpace(); Type *CharTy = Type::getInt8PtrTy(Basis.Ins->getContext(), AS); Reduced = Builder.CreateBitCast(Basis.Ins, CharTy); if (InBounds) Reduced = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), Reduced, Bump); else Reduced = Builder.CreateGEP(Builder.getInt8Ty(), Reduced, Bump); Reduced = Builder.CreateBitCast(Reduced, C.Ins->getType()); } else { // C = gep Basis, Bump // Canonicalize bump to pointer size. Bump = Builder.CreateSExtOrTrunc(Bump, IntPtrTy); if (InBounds) Reduced = Builder.CreateInBoundsGEP(nullptr, Basis.Ins, Bump); else Reduced = Builder.CreateGEP(nullptr, Basis.Ins, Bump); } } break; default: llvm_unreachable("C.CandidateKind is invalid"); }; Reduced->takeName(C.Ins); C.Ins->replaceAllUsesWith(Reduced); // Unlink C.Ins so that we can skip other candidates also corresponding to // C.Ins. The actual deletion is postponed to the end of runOnFunction. C.Ins->removeFromParent(); UnlinkedInstructions.push_back(C.Ins); } bool StraightLineStrengthReduce::runOnFunction(Function &F) { if (skipOptnoneFunction(F)) return false; TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); SE = &getAnalysis<ScalarEvolution>(); // Traverse the dominator tree in the depth-first order. This order makes sure // all bases of a candidate are in Candidates when we process it. for (auto node = GraphTraits<DominatorTree *>::nodes_begin(DT); node != GraphTraits<DominatorTree *>::nodes_end(DT); ++node) { for (auto &I : *node->getBlock()) allocateCandidatesAndFindBasis(&I); } // Rewrite candidates in the reverse depth-first order. This order makes sure // a candidate being rewritten is not a basis for any other candidate. while (!Candidates.empty()) { const Candidate &C = Candidates.back(); if (C.Basis != nullptr) { rewriteCandidateWithBasis(C, *C.Basis); } Candidates.pop_back(); } // Delete all unlink instructions. for (auto *UnlinkedInst : UnlinkedInstructions) { for (unsigned I = 0, E = UnlinkedInst->getNumOperands(); I != E; ++I) { Value *Op = UnlinkedInst->getOperand(I); UnlinkedInst->setOperand(I, nullptr); RecursivelyDeleteTriviallyDeadInstructions(Op); } delete UnlinkedInst; } bool Ret = !UnlinkedInstructions.empty(); UnlinkedInstructions.clear(); return Ret; }
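To make the Form 1 rewrite described in the file's header comment concrete, here is a small source-level analogy (hypothetical ints b and s; the pass itself operates on IR, so this only mirrors the arithmetic, not the actual transformation mechanics):

// Before: two candidates sharing base b and stride s, e.g. from a 2x unrolled loop.
int x = b + 2 * s;   // S1: B + i * S,  i  = 2
int y = b + 5 * s;   // S2: B + i' * S, i' = 5

// After rewriting S2 with respect to its basis S1: y = x + (i' - i) * s.
int x2 = b + 2 * s;
int y2 = x2 + 3 * s; // one add reuses x2; the 3 * s bump can be shared when it repeats

The same rewrite applies to Form 2 and Form 3 candidates, with GEP candidates bumped either by a scaled index or, when the bump is not a multiple of the element size, through the byte-wise "ugly GEP" path handled in emitBump.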
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/DxilRemoveUnstructuredLoopExits.h
//===- DxilRemoveUnstructuredLoopExits.h - Make unrolled loops structured
//---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include <unordered_set>

namespace llvm {
class Loop;
class LoopInfo;
class DominatorTree;
class BasicBlock;
} // namespace llvm

namespace hlsl {

// exclude_set is a list of *EXIT BLOCKS* to exclude (NOTE: not *exiting*
// blocks)
bool RemoveUnstructuredLoopExits(
    llvm::Loop *L, llvm::LoopInfo *LI, llvm::DominatorTree *DT,
    std::unordered_set<llvm::BasicBlock *> *exclude_set = nullptr);

} // namespace hlsl
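A caller-side sketch against the interface declared above, assuming the caller already holds the Loop, LoopInfo, and DominatorTree for the function; KeepExit is a hypothetical exit block the caller wants left alone:

#include <unordered_set>
#include "DxilRemoveUnstructuredLoopExits.h"

bool tryRestructureLoop(llvm::Loop *L, llvm::LoopInfo *LI,
                        llvm::DominatorTree *DT, llvm::BasicBlock *KeepExit) {
  // Per the note above, the set holds *exit* blocks (targets outside the loop),
  // not the *exiting* blocks inside the loop. Pass nullptr to consider all exits.
  std::unordered_set<llvm::BasicBlock *> Exclude = {KeepExit};
  return hlsl::RemoveUnstructuredLoopExits(L, LI, DT, &Exclude);
}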
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/Reg2Mem.cpp
//===- Reg2Mem.cpp - Convert registers to allocas -------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file demotes all registers to memory references. It is intended to be // the inverse of PromoteMemoryToRegister. By converting to loads, the only // values live across basic blocks are allocas and loads before phi nodes. // It is intended that this should make CFG hacking much easier. // To make later hacking easier, the entry block is split into two, such that // all introduced allocas and nothing else are in the entry block. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/Statistic.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/CFG.h" #include "llvm/IR/Function.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Transforms/Utils/Local.h" #include <list> using namespace llvm; #define DEBUG_TYPE "reg2mem" STATISTIC(NumRegsDemoted, "Number of registers demoted"); STATISTIC(NumPhisDemoted, "Number of phi-nodes demoted"); namespace { struct RegToMem : public FunctionPass { static char ID; // Pass identification, replacement for typeid RegToMem() : FunctionPass(ID) { initializeRegToMemPass(*PassRegistry::getPassRegistry()); } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequiredID(BreakCriticalEdgesID); AU.addPreservedID(BreakCriticalEdgesID); } bool valueEscapes(const Instruction *Inst) const { const BasicBlock *BB = Inst->getParent(); for (const User *U : Inst->users()) { const Instruction *UI = cast<Instruction>(U); if (UI->getParent() != BB || isa<PHINode>(UI)) return true; } return false; } bool runOnFunction(Function &F) override; }; } char RegToMem::ID = 0; INITIALIZE_PASS_BEGIN(RegToMem, "reg2mem", "Demote all values to stack slots", false, false) INITIALIZE_PASS_DEPENDENCY(BreakCriticalEdges) INITIALIZE_PASS_END(RegToMem, "reg2mem", "Demote all values to stack slots", false, false) bool RegToMem::runOnFunction(Function &F) { if (F.isDeclaration()) return false; // Insert all new allocas into entry block. BasicBlock *BBEntry = &F.getEntryBlock(); assert(pred_empty(BBEntry) && "Entry block to function must not have predecessors!"); // Find first non-alloca instruction and create insertion point. This is // safe if block is well-formed: it always have terminator, otherwise // we'll get and assertion. BasicBlock::iterator I = BBEntry->begin(); while (isa<AllocaInst>(I)) ++I; CastInst *AllocaInsertionPoint = new BitCastInst(Constant::getNullValue(Type::getInt32Ty(F.getContext())), Type::getInt32Ty(F.getContext()), "reg2mem alloca point", I); // Find the escaped instructions. But don't create stack slots for // allocas in entry block. 
std::list<Instruction*> WorkList; for (Function::iterator ibb = F.begin(), ibe = F.end(); ibb != ibe; ++ibb) for (BasicBlock::iterator iib = ibb->begin(), iie = ibb->end(); iib != iie; ++iib) { if (!(isa<AllocaInst>(iib) && iib->getParent() == BBEntry) && valueEscapes(iib)) { WorkList.push_front(&*iib); } } // Demote escaped instructions NumRegsDemoted += WorkList.size(); for (std::list<Instruction*>::iterator ilb = WorkList.begin(), ile = WorkList.end(); ilb != ile; ++ilb) DemoteRegToStack(**ilb, false, AllocaInsertionPoint); WorkList.clear(); // Find all phi's for (Function::iterator ibb = F.begin(), ibe = F.end(); ibb != ibe; ++ibb) for (BasicBlock::iterator iib = ibb->begin(), iie = ibb->end(); iib != iie; ++iib) if (isa<PHINode>(iib)) WorkList.push_front(&*iib); // Demote phi nodes NumPhisDemoted += WorkList.size(); for (std::list<Instruction*>::iterator ilb = WorkList.begin(), ile = WorkList.end(); ilb != ile; ++ilb) DemotePHIToStack(cast<PHINode>(*ilb), AllocaInsertionPoint); return true; } // createDemoteRegisterToMemory - Provide an entry point to create this pass. char &llvm::DemoteRegisterToMemoryID = RegToMem::ID; FunctionPass *llvm::createDemoteRegisterToMemoryPass() { return new RegToMem(); }
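A sketch of driving the demotion per function with the legacy function pass manager (module M owned by the caller); the required BreakCriticalEdges pass should be scheduled automatically from the addRequiredID dependency above, so only the factory needs to be added:

#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Scalar.h"

// Sketch only: demote SSA values and PHIs of every defined function to stack slots.
void demoteToMemory(llvm::Module &M) {
  llvm::legacy::FunctionPassManager FPM(&M);
  FPM.add(llvm::createDemoteRegisterToMemoryPass()); // factory defined above
  FPM.doInitialization();
  for (llvm::Function &F : M)
    if (!F.isDeclaration())
      FPM.run(F); // escaping values and PHIs now live in entry-block allocas
  FPM.doFinalization();
}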
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/CMakeLists.txt
add_llvm_library(LLVMScalarOpts
  ADCE.cpp
  AlignmentFromAssumptions.cpp
  BDCE.cpp
  ConstantHoisting.cpp
  ConstantProp.cpp
  CorrelatedValuePropagation.cpp
  DCE.cpp
  DeadStoreElimination.cpp
  EarlyCSE.cpp
  FlattenCFGPass.cpp
  Float2Int.cpp
  GVN.cpp
  HoistConstantArray.cpp
  InductiveRangeCheckElimination.cpp
  IndVarSimplify.cpp
  JumpThreading.cpp
  LICM.cpp
  LoadCombine.cpp
  LoopDeletion.cpp
  LoopDistribute.cpp
  LoopIdiomRecognize.cpp
  LoopInstSimplify.cpp
  LoopInterchange.cpp
  LoopRerollPass.cpp
  LoopRotation.cpp
  LoopStrengthReduce.cpp
  LoopUnrollPass.cpp
  LoopUnswitch.cpp
  LowerAtomic.cpp
  LowerExpectIntrinsic.cpp
  LowerTypePasses.cpp
  MemCpyOptimizer.cpp
  MergedLoadStoreMotion.cpp
  NaryReassociate.cpp
  PartiallyInlineLibCalls.cpp
  PlaceSafepoints.cpp
  Reassociate.cpp
  Reg2Mem.cpp
  Reg2MemHLSL.cpp
  RewriteStatepointsForGC.cpp
  SCCP.cpp
  SROA.cpp
  SampleProfile.cpp
  Scalar.cpp
  ScalarReplAggregates.cpp
  ScalarReplAggregatesHLSL.cpp # HLSL Change
  DxilLoopUnroll.cpp # HLSL Change
  DxilRemoveDeadBlocks.cpp # HLSL Change
  DxilEraseDeadRegion.cpp # HLSL Change
  DxilFixConstArrayInitializer.cpp # HLSL Change
  DxilEliminateVector.cpp # HLSL Change
  DxilConditionalMem2Reg.cpp # HLSL Change
  DxilRemoveUnstructuredLoopExits.cpp # HLSL Change
  Scalarizer.cpp
  SeparateConstOffsetFromGEP.cpp
  SimplifyCFGPass.cpp
  Sink.cpp
  SpeculativeExecution.cpp
  StraightLineStrengthReduce.cpp
  StructurizeCFG.cpp
  TailRecursionElimination.cpp

  ADDITIONAL_HEADER_DIRS
  ${LLVM_MAIN_INCLUDE_DIR}/llvm/Transforms
  ${LLVM_MAIN_INCLUDE_DIR}/llvm/Transforms/Scalar
)

add_dependencies(LLVMScalarOpts intrinsics_gen)

target_link_libraries(LLVMScalarOpts PUBLIC LLVMDXIL LLVMHLSL) # HLSL Change
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/LICM.cpp
//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass performs loop invariant code motion, attempting to remove as much // code from the body of a loop as possible. It does this by either hoisting // code into the preheader block, or by sinking code to the exit blocks if it is // safe. This pass also promotes must-aliased memory locations in the loop to // live in registers, thus hoisting and sinking "invariant" loads and stores. // // This pass uses alias analysis for two purposes: // // 1. Moving loop invariant loads and calls out of loops. If we can determine // that a load or call inside of a loop never aliases anything stored to, // we can hoist it or sink it like any other instruction. // 2. Scalar Promotion of Memory - If there is a store instruction inside of // the loop, we try to move the store to happen AFTER the loop instead of // inside of the loop. This can only happen if a few conditions are true: // A. The pointer stored through is loop invariant // B. There are no stores or loads in the loop which _may_ alias the // pointer. There are no calls in the loop which mod/ref the pointer. // If these conditions are true, we can promote the loads and stores in the // loop of the pointer to use a temporary alloca'd variable. We then use // the SSAUpdater to construct the appropriate SSA form for the value. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/AliasSetTracker.h" #include "llvm/Analysis/ConstantFolding.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/LoopPass.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/CFG.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Metadata.h" #include "llvm/IR/PredIteratorCache.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/LoopUtils.h" #include "llvm/Transforms/Utils/SSAUpdater.h" #include <algorithm> using namespace llvm; #define DEBUG_TYPE "licm" STATISTIC(NumSunk , "Number of instructions sunk out of loop"); STATISTIC(NumHoisted , "Number of instructions hoisted out of loop"); STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk"); STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk"); STATISTIC(NumPromoted , "Number of memory locations promoted to registers"); #if 0 // HLSL Change Starts - option pending static cl::opt<bool> DisablePromotion("disable-licm-promotion", cl::Hidden, cl::desc("Disable memory promotion in LICM pass")); #else static bool DisablePromotion = false; #endif // HLSL Change Ends static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI); static bool isNotUsedInLoop(const Instruction &I, const Loop *CurLoop); static bool hoist(Instruction &I, BasicBlock *Preheader); static bool sink(Instruction &I, 
const LoopInfo *LI, const DominatorTree *DT, const Loop *CurLoop, AliasSetTracker *CurAST ); static bool isGuaranteedToExecute(const Instruction &Inst, const DominatorTree *DT, const Loop *CurLoop, const LICMSafetyInfo *SafetyInfo); static bool isSafeToExecuteUnconditionally(const Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI, const Loop *CurLoop, const LICMSafetyInfo *SafetyInfo, const Instruction *CtxI = nullptr); static bool pointerInvalidatedByLoop(Value *V, uint64_t Size, const AAMDNodes &AAInfo, AliasSetTracker *CurAST); static Instruction *CloneInstructionInExitBlock(const Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI); static bool canSinkOrHoistInst(Instruction &I, AliasAnalysis *AA, DominatorTree *DT, TargetLibraryInfo *TLI, Loop *CurLoop, AliasSetTracker *CurAST, LICMSafetyInfo *SafetyInfo); namespace { struct LICM : public LoopPass { static char ID; // Pass identification, replacement for typeid LICM() : LoopPass(ID) { initializeLICMPass(*PassRegistry::getPassRegistry()); } bool runOnLoop(Loop *L, LPPassManager &LPM) override; /// This transformation requires natural loop information & requires that /// loop preheaders be inserted into the CFG... /// void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesCFG(); AU.addRequired<DominatorTreeWrapperPass>(); AU.addRequired<LoopInfoWrapperPass>(); AU.addRequiredID(LoopSimplifyID); AU.addPreservedID(LoopSimplifyID); AU.addRequiredID(LCSSAID); AU.addPreservedID(LCSSAID); AU.addRequired<AliasAnalysis>(); AU.addPreserved<AliasAnalysis>(); AU.addPreserved<ScalarEvolution>(); AU.addRequired<TargetLibraryInfoWrapperPass>(); } using llvm::Pass::doFinalization; bool doFinalization() override { assert(LoopToAliasSetMap.empty() && "Didn't free loop alias sets"); return false; } private: AliasAnalysis *AA; // Current AliasAnalysis information LoopInfo *LI; // Current LoopInfo DominatorTree *DT; // Dominator Tree for the current Loop. TargetLibraryInfo *TLI; // TargetLibraryInfo for constant folding. // State that is updated as we process loops. bool Changed; // Set to true when we change anything. BasicBlock *Preheader; // The preheader block of the current loop... Loop *CurLoop; // The current loop we are working on... AliasSetTracker *CurAST; // AliasSet information for the current loop... DenseMap<Loop*, AliasSetTracker*> LoopToAliasSetMap; /// cloneBasicBlockAnalysis - Simple Analysis hook. Clone alias set info. void cloneBasicBlockAnalysis(BasicBlock *From, BasicBlock *To, Loop *L) override; /// deleteAnalysisValue - Simple Analysis hook. Delete value V from alias /// set. void deleteAnalysisValue(Value *V, Loop *L) override; /// Simple Analysis hook. Delete loop L from alias set map. void deleteAnalysisLoop(Loop *L) override; }; } char LICM::ID = 0; INITIALIZE_PASS_BEGIN(LICM, "licm", "Loop Invariant Code Motion", false, false) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopSimplify) INITIALIZE_PASS_DEPENDENCY(LCSSA) INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_END(LICM, "licm", "Loop Invariant Code Motion", false, false) Pass *llvm::createLICMPass() { return new LICM(); } /// Hoist expressions out of the specified loop. Note, alias info for inner /// loop is not preserved so it is not a good idea to run LICM multiple /// times on one loop. 
/// bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) { if (skipOptnoneFunction(L)) return false; Changed = false; // Get our Loop and Alias Analysis information... LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); AA = &getAnalysis<AliasAnalysis>(); DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form."); CurAST = new AliasSetTracker(*AA); // Collect Alias info from subloops. for (Loop::iterator LoopItr = L->begin(), LoopItrE = L->end(); LoopItr != LoopItrE; ++LoopItr) { Loop *InnerL = *LoopItr; AliasSetTracker *InnerAST = LoopToAliasSetMap[InnerL]; assert(InnerAST && "Where is my AST?"); // What if InnerLoop was modified by other passes ? CurAST->add(*InnerAST); // Once we've incorporated the inner loop's AST into ours, we don't need the // subloop's anymore. delete InnerAST; LoopToAliasSetMap.erase(InnerL); } CurLoop = L; // Get the preheader block to move instructions into... Preheader = L->getLoopPreheader(); // Loop over the body of this loop, looking for calls, invokes, and stores. // Because subloops have already been incorporated into AST, we skip blocks in // subloops. // for (Loop::block_iterator I = L->block_begin(), E = L->block_end(); I != E; ++I) { BasicBlock *BB = *I; if (LI->getLoopFor(BB) == L) // Ignore blocks in subloops. CurAST->add(*BB); // Incorporate the specified basic block } // Compute loop safety information. LICMSafetyInfo SafetyInfo; computeLICMSafetyInfo(&SafetyInfo, CurLoop); // We want to visit all of the instructions in this loop... that are not parts // of our subloops (they have already had their invariants hoisted out of // their loop, into this loop, so there is no need to process the BODIES of // the subloops). // // Traverse the body of the loop in depth first order on the dominator tree so // that we are guaranteed to see definitions before we see uses. This allows // us to sink instructions in one pass, without iteration. After sinking // instructions, we perform another pass to hoist them out of the loop. // if (L->hasDedicatedExits()) Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, CurLoop, CurAST, &SafetyInfo); if (Preheader) Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, CurLoop, CurAST, &SafetyInfo); // Now that all loop invariants have been removed from the loop, promote any // memory references to scalars that we can. if (!DisablePromotion && (Preheader || L->hasDedicatedExits())) { SmallVector<BasicBlock *, 8> ExitBlocks; SmallVector<Instruction *, 8> InsertPts; PredIteratorCache PIC; // Loop over all of the alias sets in the tracker object. for (AliasSetTracker::iterator I = CurAST->begin(), E = CurAST->end(); I != E; ++I) Changed |= promoteLoopAccessesToScalars(*I, ExitBlocks, InsertPts, PIC, LI, DT, CurLoop, CurAST, &SafetyInfo); // Once we have promoted values across the loop body we have to recursively // reform LCSSA as any nested loop may now have values defined within the // loop used in the outer loop. // FIXME: This is really heavy handed. It would be a bit better to use an // SSAUpdater strategy during promotion that was LCSSA aware and reformed // it as it went. if (Changed) formLCSSARecursively(*L, *DT, LI, getAnalysisIfAvailable<ScalarEvolution>()); } // Check that neither this loop nor its parent have had LCSSA broken. LICM is // specifically moving instructions across the loop boundary and so it is // especially in need of sanity checking here. 
assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!"); assert((!L->getParentLoop() || L->getParentLoop()->isLCSSAForm(*DT)) && "Parent loop not left in LCSSA form after LICM!"); // Clear out loops state information for the next iteration CurLoop = nullptr; Preheader = nullptr; // If this loop is nested inside of another one, save the alias information // for when we process the outer loop. if (L->getParentLoop()) LoopToAliasSetMap[L] = CurAST; else delete CurAST; return Changed; } /// Walk the specified region of the CFG (defined by all blocks dominated by /// the specified block, and that are in the current loop) in reverse depth /// first order w.r.t the DominatorTree. This allows us to visit uses before /// definitions, allowing us to sink a loop body in one pass without iteration. /// bool llvm::sinkRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI, DominatorTree *DT, TargetLibraryInfo *TLI, Loop *CurLoop, AliasSetTracker *CurAST, LICMSafetyInfo *SafetyInfo) { // Verify inputs. assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr && CurLoop != nullptr && CurAST != nullptr && SafetyInfo != nullptr && "Unexpected input to sinkRegion"); // Set changed as false. bool Changed = false; // Get basic block BasicBlock *BB = N->getBlock(); // If this subregion is not in the top level loop at all, exit. if (!CurLoop->contains(BB)) return Changed; // We are processing blocks in reverse dfo, so process children first. const std::vector<DomTreeNode*> &Children = N->getChildren(); for (unsigned i = 0, e = Children.size(); i != e; ++i) Changed |= sinkRegion(Children[i], AA, LI, DT, TLI, CurLoop, CurAST, SafetyInfo); // Only need to process the contents of this block if it is not part of a // subloop (which would already have been processed). if (inSubLoop(BB,CurLoop,LI)) return Changed; for (BasicBlock::iterator II = BB->end(); II != BB->begin(); ) { Instruction &I = *--II; // If the instruction is dead, we would try to sink it because it isn't used // in the loop, instead, just delete it. if (isInstructionTriviallyDead(&I, TLI)) { DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n'); ++II; CurAST->deleteValue(&I); I.eraseFromParent(); Changed = true; continue; } // Check to see if we can sink this instruction to the exit blocks // of the loop. We can do this if the all users of the instruction are // outside of the loop. In this case, it doesn't even matter if the // operands of the instruction are loop invariant. // if (isNotUsedInLoop(I, CurLoop) && canSinkOrHoistInst(I, AA, DT, TLI, CurLoop, CurAST, SafetyInfo)) { ++II; Changed |= sink(I, LI, DT, CurLoop, CurAST); } } return Changed; } /// Walk the specified region of the CFG (defined by all blocks dominated by /// the specified block, and that are in the current loop) in depth first /// order w.r.t the DominatorTree. This allows us to visit definitions before /// uses, allowing us to hoist a loop body in one pass without iteration. /// bool llvm::hoistRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI, DominatorTree *DT, TargetLibraryInfo *TLI, Loop *CurLoop, AliasSetTracker *CurAST, LICMSafetyInfo *SafetyInfo) { // Verify inputs. assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr && CurLoop != nullptr && CurAST != nullptr && SafetyInfo != nullptr && "Unexpected input to hoistRegion"); // Set changed as false. bool Changed = false; // Get basic block BasicBlock *BB = N->getBlock(); // If this subregion is not in the top level loop at all, exit. 
if (!CurLoop->contains(BB)) return Changed; // Only need to process the contents of this block if it is not part of a // subloop (which would already have been processed). if (!inSubLoop(BB, CurLoop, LI)) for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ) { Instruction &I = *II++; // Try constant folding this instruction. If all the operands are // constants, it is technically hoistable, but it would be better to just // fold it. if (Constant *C = ConstantFoldInstruction( &I, I.getModule()->getDataLayout(), TLI)) { DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C << '\n'); CurAST->copyValue(&I, C); CurAST->deleteValue(&I); I.replaceAllUsesWith(C); I.eraseFromParent(); continue; } // Try hoisting the instruction out to the preheader. We can only do this // if all of the operands of the instruction are loop invariant and if it // is safe to hoist the instruction. // if (CurLoop->hasLoopInvariantOperands(&I) && canSinkOrHoistInst(I, AA, DT, TLI, CurLoop, CurAST, SafetyInfo) && isSafeToExecuteUnconditionally(I, DT, TLI, CurLoop, SafetyInfo, CurLoop->getLoopPreheader()->getTerminator())) Changed |= hoist(I, CurLoop->getLoopPreheader()); } const std::vector<DomTreeNode*> &Children = N->getChildren(); for (unsigned i = 0, e = Children.size(); i != e; ++i) Changed |= hoistRegion(Children[i], AA, LI, DT, TLI, CurLoop, CurAST, SafetyInfo); return Changed; } /// Computes loop safety information, checks loop body & header /// for the possiblity of may throw exception. /// void llvm::computeLICMSafetyInfo(LICMSafetyInfo * SafetyInfo, Loop * CurLoop) { assert(CurLoop != nullptr && "CurLoop cant be null"); BasicBlock *Header = CurLoop->getHeader(); // Setting default safety values. SafetyInfo->MayThrow = false; SafetyInfo->HeaderMayThrow = false; // Iterate over header and compute dafety info. for (BasicBlock::iterator I = Header->begin(), E = Header->end(); (I != E) && !SafetyInfo->HeaderMayThrow; ++I) SafetyInfo->HeaderMayThrow |= I->mayThrow(); SafetyInfo->MayThrow = SafetyInfo->HeaderMayThrow; // Iterate over loop instructions and compute safety info. for (Loop::block_iterator BB = CurLoop->block_begin(), BBE = CurLoop->block_end(); (BB != BBE) && !SafetyInfo->MayThrow ; ++BB) for (BasicBlock::iterator I = (*BB)->begin(), E = (*BB)->end(); (I != E) && !SafetyInfo->MayThrow; ++I) SafetyInfo->MayThrow |= I->mayThrow(); } /// canSinkOrHoistInst - Return true if the hoister and sinker can handle this /// instruction. /// bool canSinkOrHoistInst(Instruction &I, AliasAnalysis *AA, DominatorTree *DT, TargetLibraryInfo *TLI, Loop *CurLoop, AliasSetTracker *CurAST, LICMSafetyInfo *SafetyInfo) { // Loads have extra constraints we have to verify before we can hoist them. if (LoadInst *LI = dyn_cast<LoadInst>(&I)) { if (!LI->isUnordered()) return false; // Don't hoist volatile/atomic loads! // Loads from constant memory are always safe to move, even if they end up // in the same alias set as something that ends up being modified. if (AA->pointsToConstantMemory(LI->getOperand(0))) return true; if (LI->getMetadata(LLVMContext::MD_invariant_load)) return true; // Don't hoist loads which have may-aliased stores in loop. uint64_t Size = 0; if (LI->getType()->isSized()) Size = AA->getTypeStoreSize(LI->getType()); AAMDNodes AAInfo; LI->getAAMetadata(AAInfo); return !pointerInvalidatedByLoop(LI->getOperand(0), Size, AAInfo, CurAST); } else if (CallInst *CI = dyn_cast<CallInst>(&I)) { // Don't sink or hoist dbg info; it's legal, but not useful. 
if (isa<DbgInfoIntrinsic>(I)) return false; // Handle simple cases by querying alias analysis. AliasAnalysis::ModRefBehavior Behavior = AA->getModRefBehavior(CI); if (Behavior == AliasAnalysis::DoesNotAccessMemory) return true; if (AliasAnalysis::onlyReadsMemory(Behavior)) { // If this call only reads from memory and there are no writes to memory // in the loop, we can hoist or sink the call as appropriate. bool FoundMod = false; for (AliasSetTracker::iterator I = CurAST->begin(), E = CurAST->end(); I != E; ++I) { AliasSet &AS = *I; if (!AS.isForwardingAliasSet() && AS.isMod()) { FoundMod = true; break; } } if (!FoundMod) return true; } // FIXME: This should use mod/ref information to see if we can hoist or // sink the call. return false; } // Only these instructions are hoistable/sinkable. if (!isa<BinaryOperator>(I) && !isa<CastInst>(I) && !isa<SelectInst>(I) && !isa<GetElementPtrInst>(I) && !isa<CmpInst>(I) && !isa<InsertElementInst>(I) && !isa<ExtractElementInst>(I) && !isa<ShuffleVectorInst>(I) && !isa<ExtractValueInst>(I) && !isa<InsertValueInst>(I)) return false; // TODO: Plumb the context instruction through to make hoisting and sinking // more powerful. Hoisting of loads already works due to the special casing // above. return isSafeToExecuteUnconditionally(I, DT, TLI, CurLoop, SafetyInfo, nullptr); } /// Returns true if a PHINode is a trivially replaceable with an /// Instruction. /// This is true when all incoming values are that instruction. /// This pattern occurs most often with LCSSA PHI nodes. /// static bool isTriviallyReplacablePHI(const PHINode &PN, const Instruction &I) { for (const Value *IncValue : PN.incoming_values()) if (IncValue != &I) return false; return true; } /// Return true if the only users of this instruction are outside of /// the loop. If this is true, we can sink the instruction to the exit /// blocks of the loop. /// static bool isNotUsedInLoop(const Instruction &I, const Loop *CurLoop) { for (const User *U : I.users()) { const Instruction *UI = cast<Instruction>(U); if (const PHINode *PN = dyn_cast<PHINode>(UI)) { // A PHI node where all of the incoming values are this instruction are // special -- they can just be RAUW'ed with the instruction and thus // don't require a use in the predecessor. This is a particular important // special case because it is the pattern found in LCSSA form. if (isTriviallyReplacablePHI(*PN, I)) { if (CurLoop->contains(PN)) return false; else continue; } // Otherwise, PHI node uses occur in predecessor blocks if the incoming // values. Check for such a use being inside the loop. for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) if (PN->getIncomingValue(i) == &I) if (CurLoop->contains(PN->getIncomingBlock(i))) return false; continue; } if (CurLoop->contains(UI)) return false; } return true; } static Instruction *CloneInstructionInExitBlock(const Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI) { Instruction *New = I.clone(); ExitBlock.getInstList().insert(ExitBlock.getFirstInsertionPt(), New); if (!I.getName().empty()) New->setName(I.getName() + ".le"); // Build LCSSA PHI nodes for any in-loop operands. Note that this is // particularly cheap because we can rip off the PHI node that we're // replacing for the number and blocks of the predecessors. // OPT: If this shows up in a profile, we can instead finish sinking all // invariant instructions, and then walk their operands to re-establish // LCSSA. That will eliminate creating PHI nodes just to nuke them when // sinking bottom-up. 
for (User::op_iterator OI = New->op_begin(), OE = New->op_end(); OI != OE; ++OI) if (Instruction *OInst = dyn_cast<Instruction>(*OI)) if (Loop *OLoop = LI->getLoopFor(OInst->getParent())) if (!OLoop->contains(&PN)) { PHINode *OpPN = PHINode::Create(OInst->getType(), PN.getNumIncomingValues(), OInst->getName() + ".lcssa", ExitBlock.begin()); for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) OpPN->addIncoming(OInst, PN.getIncomingBlock(i)); *OI = OpPN; } return New; } /// When an instruction is found to only be used outside of the loop, this /// function moves it to the exit blocks and patches up SSA form as needed. /// This method is guaranteed to remove the original instruction from its /// position, and may either delete it or move it to outside of the loop. /// static bool sink(Instruction &I, const LoopInfo *LI, const DominatorTree *DT, const Loop *CurLoop, AliasSetTracker *CurAST ) { DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n"); bool Changed = false; if (isa<LoadInst>(I)) ++NumMovedLoads; else if (isa<CallInst>(I)) ++NumMovedCalls; ++NumSunk; Changed = true; #ifndef NDEBUG SmallVector<BasicBlock *, 32> ExitBlocks; CurLoop->getUniqueExitBlocks(ExitBlocks); SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(), ExitBlocks.end()); #endif // Clones of this instruction. Don't create more than one per exit block! SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies; // If this instruction is only used outside of the loop, then all users are // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of // the instruction. while (!I.use_empty()) { Value::user_iterator UI = I.user_begin(); auto *User = cast<Instruction>(*UI); if (!DT->isReachableFromEntry(User->getParent())) { User->replaceUsesOfWith(&I, UndefValue::get(I.getType())); continue; } // The user must be a PHI node. PHINode *PN = cast<PHINode>(User); // Surprisingly, instructions can be used outside of loops without any // exits. This can only happen in PHI nodes if the incoming block is // unreachable. Use &U = UI.getUse(); BasicBlock *BB = PN->getIncomingBlock(U); if (!DT->isReachableFromEntry(BB)) { U = UndefValue::get(I.getType()); continue; } BasicBlock *ExitBlock = PN->getParent(); assert(ExitBlockSet.count(ExitBlock) && "The LCSSA PHI is not in an exit block!"); Instruction *New; auto It = SunkCopies.find(ExitBlock); if (It != SunkCopies.end()) New = It->second; else New = SunkCopies[ExitBlock] = CloneInstructionInExitBlock(I, *ExitBlock, *PN, LI); PN->replaceAllUsesWith(New); PN->eraseFromParent(); } CurAST->deleteValue(&I); I.eraseFromParent(); return Changed; } /// When an instruction is found to only use loop invariant operands that /// is safe to hoist, this instruction is called to do the dirty work. /// static bool hoist(Instruction &I, BasicBlock *Preheader) { DEBUG(dbgs() << "LICM hoisting to " << Preheader->getName() << ": " << I << "\n"); // Move the new node to the Preheader, before its terminator. I.moveBefore(Preheader->getTerminator()); if (isa<LoadInst>(I)) ++NumMovedLoads; else if (isa<CallInst>(I)) ++NumMovedCalls; ++NumHoisted; return true; } /// Only sink or hoist an instruction if it is not a trapping instruction, /// or if the instruction is known not to trap when moved to the preheader. /// or if it is a trapping instruction and is guaranteed to execute. 
static bool isSafeToExecuteUnconditionally(const Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI, const Loop *CurLoop, const LICMSafetyInfo *SafetyInfo, const Instruction *CtxI) { if (isSafeToSpeculativelyExecute(&Inst, CtxI, DT, TLI)) return true; return isGuaranteedToExecute(Inst, DT, CurLoop, SafetyInfo); } static bool isGuaranteedToExecute(const Instruction &Inst, const DominatorTree *DT, const Loop *CurLoop, const LICMSafetyInfo * SafetyInfo) { // We have to check to make sure that the instruction dominates all // of the exit blocks. If it doesn't, then there is a path out of the loop // which does not execute this instruction, so we can't hoist it. // If the instruction is in the header block for the loop (which is very // common), it is always guaranteed to dominate the exit blocks. Since this // is a common case, and can save some work, check it now. if (Inst.getParent() == CurLoop->getHeader()) // If there's a throw in the header block, we can't guarantee we'll reach // Inst. return !SafetyInfo->HeaderMayThrow; // Somewhere in this loop there is an instruction which may throw and make us // exit the loop. if (SafetyInfo->MayThrow) return false; // Get the exit blocks for the current loop. SmallVector<BasicBlock*, 8> ExitBlocks; CurLoop->getExitBlocks(ExitBlocks); // Verify that the block dominates each of the exit blocks of the loop. for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) if (!DT->dominates(Inst.getParent(), ExitBlocks[i])) return false; // As a degenerate case, if the loop is statically infinite then we haven't // proven anything since there are no exit blocks. if (ExitBlocks.empty()) return false; return true; } namespace { class LoopPromoter : public LoadAndStorePromoter { Value *SomePtr; // Designated pointer to store to. SmallPtrSetImpl<Value*> &PointerMustAliases; SmallVectorImpl<BasicBlock*> &LoopExitBlocks; SmallVectorImpl<Instruction*> &LoopInsertPts; PredIteratorCache &PredCache; AliasSetTracker &AST; LoopInfo &LI; DebugLoc DL; int Alignment; AAMDNodes AATags; Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const { if (Instruction *I = dyn_cast<Instruction>(V)) if (Loop *L = LI.getLoopFor(I->getParent())) if (!L->contains(BB)) { // We need to create an LCSSA PHI node for the incoming value and // store that. PHINode *PN = PHINode::Create( I->getType(), PredCache.size(BB), I->getName() + ".lcssa", BB->begin()); for (BasicBlock *Pred : PredCache.get(BB)) PN->addIncoming(I, Pred); return PN; } return V; } public: LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S, SmallPtrSetImpl<Value *> &PMA, SmallVectorImpl<BasicBlock *> &LEB, SmallVectorImpl<Instruction *> &LIP, PredIteratorCache &PIC, AliasSetTracker &ast, LoopInfo &li, DebugLoc dl, int alignment, const AAMDNodes &AATags) : LoadAndStorePromoter(Insts, S), SomePtr(SP), PointerMustAliases(PMA), LoopExitBlocks(LEB), LoopInsertPts(LIP), PredCache(PIC), AST(ast), LI(li), DL(dl), Alignment(alignment), AATags(AATags) {} bool isInstInList(Instruction *I, const SmallVectorImpl<Instruction*> &) const override { Value *Ptr; if (LoadInst *LI = dyn_cast<LoadInst>(I)) Ptr = LI->getOperand(0); else Ptr = cast<StoreInst>(I)->getPointerOperand(); return PointerMustAliases.count(Ptr); } void doExtraRewritesBeforeFinalDeletion() const override { // Insert stores after in the loop exit blocks. Each exit block gets a // store of the live-out values that feed them. 
Since we've already told // the SSA updater about the defs in the loop and the preheader // definition, it is all set and we can start using it. for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) { BasicBlock *ExitBlock = LoopExitBlocks[i]; Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock); LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock); Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock); Instruction *InsertPos = LoopInsertPts[i]; StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos); NewSI->setAlignment(Alignment); NewSI->setDebugLoc(DL); if (AATags) NewSI->setAAMetadata(AATags); } } void replaceLoadWithValue(LoadInst *LI, Value *V) const override { // Update alias analysis. AST.copyValue(LI, V); } void instructionDeleted(Instruction *I) const override { AST.deleteValue(I); } }; } // end anon namespace /// Try to promote memory values to scalars by sinking stores out of the /// loop and moving loads to before the loop. We do this by looping over /// the stores in the loop, looking for stores to Must pointers which are /// loop invariant. /// bool llvm::promoteLoopAccessesToScalars(AliasSet &AS, SmallVectorImpl<BasicBlock*>&ExitBlocks, SmallVectorImpl<Instruction*>&InsertPts, PredIteratorCache &PIC, LoopInfo *LI, DominatorTree *DT, Loop *CurLoop, AliasSetTracker *CurAST, LICMSafetyInfo * SafetyInfo) { // Verify inputs. assert(LI != nullptr && DT != nullptr && CurLoop != nullptr && CurAST != nullptr && SafetyInfo != nullptr && "Unexpected Input to promoteLoopAccessesToScalars"); // Initially set Changed status to false. bool Changed = false; // We can promote this alias set if it has a store, if it is a "Must" alias // set, if the pointer is loop invariant, and if we are not eliminating any // volatile loads or stores. if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() || AS.isVolatile() || !CurLoop->isLoopInvariant(AS.begin()->getValue())) return Changed; assert(!AS.empty() && "Must alias set should have at least one pointer element in it!"); Value *SomePtr = AS.begin()->getValue(); BasicBlock * Preheader = CurLoop->getLoopPreheader(); // It isn't safe to promote a load/store from the loop if the load/store is // conditional. For example, turning: // // for () { if (c) *P += 1; } // // into: // // tmp = *P; for () { if (c) tmp +=1; } *P = tmp; // // is not safe, because *P may only be valid to access if 'c' is true. // // It is safe to promote P if all uses are direct load/stores and if at // least one is guaranteed to be executed. bool GuaranteedToExecute = false; SmallVector<Instruction*, 64> LoopUses; SmallPtrSet<Value*, 4> PointerMustAliases; // We start with an alignment of one and try to find instructions that allow // us to prove better alignment. unsigned Alignment = 1; AAMDNodes AATags; bool HasDedicatedExits = CurLoop->hasDedicatedExits(); // Check that all of the pointers in the alias set have the same type. We // cannot (yet) promote a memory location that is loaded and stored in // different sizes. While we are at it, collect alignment and AA info. for (AliasSet::iterator ASI = AS.begin(), E = AS.end(); ASI != E; ++ASI) { Value *ASIV = ASI->getValue(); PointerMustAliases.insert(ASIV); // Check that all of the pointers in the alias set have the same type. We // cannot (yet) promote a memory location that is loaded and stored in // different sizes. if (SomePtr->getType() != ASIV->getType()) return Changed; for (User *U : ASIV->users()) { // Ignore instructions that are outside the loop. 
Instruction *UI = dyn_cast<Instruction>(U); if (!UI || !CurLoop->contains(UI)) continue; // If there is an non-load/store instruction in the loop, we can't promote // it. if (const LoadInst *load = dyn_cast<LoadInst>(UI)) { assert(!load->isVolatile() && "AST broken"); if (!load->isSimple()) return Changed; } else if (const StoreInst *store = dyn_cast<StoreInst>(UI)) { // Stores *of* the pointer are not interesting, only stores *to* the // pointer. if (UI->getOperand(1) != ASIV) continue; assert(!store->isVolatile() && "AST broken"); if (!store->isSimple()) return Changed; // Don't sink stores from loops without dedicated block exits. Exits // containing indirect branches are not transformed by loop simplify, // make sure we catch that. An additional load may be generated in the // preheader for SSA updater, so also avoid sinking when no preheader // is available. if (!HasDedicatedExits || !Preheader) return Changed; // Note that we only check GuaranteedToExecute inside the store case // so that we do not introduce stores where they did not exist before // (which would break the LLVM concurrency model). // If the alignment of this instruction allows us to specify a more // restrictive (and performant) alignment and if we are sure this // instruction will be executed, update the alignment. // Larger is better, with the exception of 0 being the best alignment. unsigned InstAlignment = store->getAlignment(); if ((InstAlignment > Alignment || InstAlignment == 0) && Alignment != 0) if (isGuaranteedToExecute(*UI, DT, CurLoop, SafetyInfo)) { GuaranteedToExecute = true; Alignment = InstAlignment; } if (!GuaranteedToExecute) GuaranteedToExecute = isGuaranteedToExecute(*UI, DT, CurLoop, SafetyInfo); } else return Changed; // Not a load or store. // Merge the AA tags. if (LoopUses.empty()) { // On the first load/store, just take its AA tags. UI->getAAMetadata(AATags); } else if (AATags) { UI->getAAMetadata(AATags, /* Merge = */ true); } LoopUses.push_back(UI); } } // If there isn't a guaranteed-to-execute instruction, we can't promote. if (!GuaranteedToExecute) return Changed; // Otherwise, this is safe to promote, lets do it! DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " <<*SomePtr<<'\n'); Changed = true; ++NumPromoted; // Grab a debug location for the inserted loads/stores; given that the // inserted loads/stores have little relation to the original loads/stores, // this code just arbitrarily picks a location from one, since any debug // location is better than none. DebugLoc DL = LoopUses[0]->getDebugLoc(); // Figure out the loop exits and their insertion points, if this is the // first promotion. if (ExitBlocks.empty()) { CurLoop->getUniqueExitBlocks(ExitBlocks); InsertPts.resize(ExitBlocks.size()); for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) InsertPts[i] = ExitBlocks[i]->getFirstInsertionPt(); } // We use the SSAUpdater interface to insert phi nodes as required. SmallVector<PHINode*, 16> NewPHIs; SSAUpdater SSA(&NewPHIs); LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks, InsertPts, PIC, *CurAST, *LI, DL, Alignment, AATags); // Set up the preheader to have a definition of the value. It is the live-out // value from the preheader that uses in the loop will use. 
LoadInst *PreheaderLoad = new LoadInst(SomePtr, SomePtr->getName()+".promoted", Preheader->getTerminator()); PreheaderLoad->setAlignment(Alignment); PreheaderLoad->setDebugLoc(DL); if (AATags) PreheaderLoad->setAAMetadata(AATags); SSA.AddAvailableValue(Preheader, PreheaderLoad); // Rewrite all the loads in the loop and remember all the definitions from // stores in the loop. Promoter.run(LoopUses); // If the SSAUpdater didn't use the load in the preheader, just zap it now. if (PreheaderLoad->use_empty()) PreheaderLoad->eraseFromParent(); return Changed; } /// Simple Analysis hook. Clone alias set info. /// void LICM::cloneBasicBlockAnalysis(BasicBlock *From, BasicBlock *To, Loop *L) { AliasSetTracker *AST = LoopToAliasSetMap.lookup(L); if (!AST) return; AST->copyValue(From, To); } /// Simple Analysis hook. Delete value V from alias set /// void LICM::deleteAnalysisValue(Value *V, Loop *L) { AliasSetTracker *AST = LoopToAliasSetMap.lookup(L); if (!AST) return; AST->deleteValue(V); } /// Simple Analysis hook. Delete value L from alias set map. /// void LICM::deleteAnalysisLoop(Loop *L) { AliasSetTracker *AST = LoopToAliasSetMap.lookup(L); if (!AST) return; delete AST; LoopToAliasSetMap.erase(L); } /// Return true if the body of this loop may store into the memory /// location pointed to by V. /// static bool pointerInvalidatedByLoop(Value *V, uint64_t Size, const AAMDNodes &AAInfo, AliasSetTracker *CurAST) { // Check to see if any of the basic blocks in CurLoop invalidate *V. return CurAST->getAliasSetForPointer(V, Size, AAInfo).isMod(); } /// Little predicate that returns true if the specified basic block is in /// a subloop of the current one, not the current one itself. /// static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) { assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop"); return LI->getLoopFor(BB) != CurLoop; }
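// --- Illustrative sketch (hedged): the names below (sumInto, sumIntoPromoted)
// are hypothetical and not part of LLVM. This is a hand-written, source-level
// analogy for the scalar promotion that promoteLoopAccessesToScalars performs
// on IR, assuming the pointer is loop-invariant and the access is guaranteed
// to execute on every iteration: load the location once before the loop, keep
// it in a register inside the loop, and store the live-out value once at the
// loop exit.
#include <cassert>

static int sumInto(int *P, const int *A, int N) {
  // Before promotion: one load of *P and one store to *P per iteration.
  for (int i = 0; i < N; ++i)
    *P += A[i];
  return *P;
}

static int sumIntoPromoted(int *P, const int *A, int N) {
  int Tmp = *P;            // "preheader" load of the promoted location.
  for (int i = 0; i < N; ++i)
    Tmp += A[i];           // the loop body only touches the register.
  *P = Tmp;                // single store in the loop exit block.
  return Tmp;
}

int main() {
  int A[] = {1, 2, 3, 4};
  int X = 10, Y = 10;
  // Both versions compute the same result; only the memory traffic differs.
  assert(sumInto(&X, A, 4) == sumIntoPromoted(&Y, A, 4));
  return 0;
}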
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/SpeculativeExecution.cpp
//===- SpeculativeExecution.cpp ---------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass hoists instructions to enable speculative execution on // targets where branches are expensive. This is aimed at GPUs. It // currently works on simple if-then and if-then-else // patterns. // // Removing branches is not the only motivation for this // pass. E.g. consider this code and assume that there is no // addressing mode for multiplying by sizeof(*a): // // if (b > 0) // c = a[i + 1] // if (d > 0) // e = a[i + 2] // // turns into // // p = &a[i + 1]; // if (b > 0) // c = *p; // q = &a[i + 2]; // if (d > 0) // e = *q; // // which could later be optimized to // // r = &a[i]; // if (b > 0) // c = r[1]; // if (d > 0) // e = r[2]; // // Later passes sink back much of the speculated code that did not enable // further optimization. // // This pass is more aggressive than the function SpeculativeyExecuteBB in // SimplifyCFG. SimplifyCFG will not speculate if no selects are introduced and // it will speculate at most one instruction. It also will not speculate if // there is a value defined in the if-block that is only used in the then-block. // These restrictions make sense since the speculation in SimplifyCFG seems // aimed at introducing cheap selects, while this pass is intended to do more // aggressive speculation while counting on later passes to either capitalize on // that or clean it up. // //===----------------------------------------------------------------------===// #include "llvm/ADT/SmallSet.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/IR/Operator.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" using namespace llvm; #define DEBUG_TYPE "speculative-execution" // The risk that speculation will not pay off increases with the // number of instructions speculated, so we put a limit on that. static cl::opt<unsigned> SpecExecMaxSpeculationCost( "spec-exec-max-speculation-cost", cl::init(7), cl::Hidden, cl::desc("Speculative execution is not applied to basic blocks where " "the cost of the instructions to speculatively execute " "exceeds this limit.")); // Speculating just a few instructions from a larger block tends not // to be profitable and this limit prevents that. A reason for that is // that small basic blocks are more likely to be candidates for // further optimization. 
static cl::opt<unsigned> SpecExecMaxNotHoisted( "spec-exec-max-not-hoisted", cl::init(5), cl::Hidden, cl::desc("Speculative execution is not applied to basic blocks where the " "number of instructions that would not be speculatively executed " "exceeds this limit.")); namespace { class SpeculativeExecution : public FunctionPass { public: static char ID; SpeculativeExecution(): FunctionPass(ID) {} void getAnalysisUsage(AnalysisUsage &AU) const override; bool runOnFunction(Function &F) override; private: bool runOnBasicBlock(BasicBlock &B); bool considerHoistingFromTo(BasicBlock &FromBlock, BasicBlock &ToBlock); const TargetTransformInfo *TTI = nullptr; }; } // namespace char SpeculativeExecution::ID = 0; INITIALIZE_PASS_BEGIN(SpeculativeExecution, "speculative-execution", "Speculatively execute instructions", false, false) INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) INITIALIZE_PASS_END(SpeculativeExecution, "speculative-execution", "Speculatively execute instructions", false, false) void SpeculativeExecution::getAnalysisUsage(AnalysisUsage &AU) const { AU.addRequired<TargetTransformInfoWrapperPass>(); } bool SpeculativeExecution::runOnFunction(Function &F) { if (skipOptnoneFunction(F)) return false; TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); bool Changed = false; for (auto& B : F) { Changed |= runOnBasicBlock(B); } return Changed; } bool SpeculativeExecution::runOnBasicBlock(BasicBlock &B) { BranchInst *BI = dyn_cast<BranchInst>(B.getTerminator()); if (BI == nullptr) return false; if (BI->getNumSuccessors() != 2) return false; BasicBlock &Succ0 = *BI->getSuccessor(0); BasicBlock &Succ1 = *BI->getSuccessor(1); if (&B == &Succ0 || &B == &Succ1 || &Succ0 == &Succ1) { return false; } // Hoist from if-then (triangle). if (Succ0.getSinglePredecessor() != nullptr && Succ0.getSingleSuccessor() == &Succ1) { return considerHoistingFromTo(Succ0, B); } // Hoist from if-else (triangle). if (Succ1.getSinglePredecessor() != nullptr && Succ1.getSingleSuccessor() == &Succ0) { return considerHoistingFromTo(Succ1, B); } // Hoist from if-then-else (diamond), but only if it is equivalent to // an if-else or if-then due to one of the branches doing nothing. if (Succ0.getSinglePredecessor() != nullptr && Succ1.getSinglePredecessor() != nullptr && Succ1.getSingleSuccessor() != nullptr && Succ1.getSingleSuccessor() != &B && Succ1.getSingleSuccessor() == Succ0.getSingleSuccessor()) { // If a block has only one instruction, then that is a terminator // instruction so that the block does nothing. This does happen. if (Succ1.size() == 1) // equivalent to if-then return considerHoistingFromTo(Succ0, B); if (Succ0.size() == 1) // equivalent to if-else return considerHoistingFromTo(Succ1, B); } return false; } static unsigned ComputeSpeculationCost(const Instruction *I, const TargetTransformInfo &TTI) { switch (Operator::getOpcode(I)) { case Instruction::GetElementPtr: case Instruction::Add: case Instruction::Mul: case Instruction::And: case Instruction::Or: case Instruction::Select: case Instruction::Shl: case Instruction::Sub: case Instruction::LShr: case Instruction::AShr: case Instruction::Xor: case Instruction::ZExt: case Instruction::SExt: return TTI.getUserCost(I); default: return UINT_MAX; // Disallow anything not whitelisted. 
} } bool SpeculativeExecution::considerHoistingFromTo(BasicBlock &FromBlock, BasicBlock &ToBlock) { SmallSet<const Instruction *, 8> NotHoisted; const auto AllPrecedingUsesFromBlockHoisted = [&NotHoisted](User *U) { for (Value* V : U->operand_values()) { if (Instruction *I = dyn_cast<Instruction>(V)) { if (NotHoisted.count(I) > 0) return false; } } return true; }; unsigned TotalSpeculationCost = 0; for (auto& I : FromBlock) { const unsigned Cost = ComputeSpeculationCost(&I, *TTI); if (Cost != UINT_MAX && isSafeToSpeculativelyExecute(&I) && AllPrecedingUsesFromBlockHoisted(&I)) { TotalSpeculationCost += Cost; if (TotalSpeculationCost > SpecExecMaxSpeculationCost) return false; // too much to hoist } else { NotHoisted.insert(&I); if (NotHoisted.size() > SpecExecMaxNotHoisted) return false; // too much left behind } } if (TotalSpeculationCost == 0) return false; // nothing to hoist for (auto I = FromBlock.begin(); I != FromBlock.end();) { // We have to increment I before moving Current as moving Current // changes the list that I is iterating through. auto Current = I; ++I; if (!NotHoisted.count(Current)) { Current->moveBefore(ToBlock.getTerminator()); } } return true; } namespace llvm { FunctionPass *createSpeculativeExecutionPass() { return new SpeculativeExecution(); } } // namespace llvm
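// --- Illustrative sketch (hedged): a hand-written, source-level analogy for
// the hoisting this pass performs, mirroring the a[i + 1] / a[i + 2] example
// in the file header. The pass itself operates on IR, not C++ source, and the
// function names below (beforeSpeculation, afterSpeculation) are hypothetical.
// Cheap, side-effect-free address computations are speculated above the
// branches; only the guarded loads remain conditional.
#include <cassert>

static void beforeSpeculation(const int *a, int i, bool b, bool d,
                              int &c, int &e) {
  if (b)
    c = a[i + 1];
  if (d)
    e = a[i + 2];
}

static void afterSpeculation(const int *a, int i, bool b, bool d,
                             int &c, int &e) {
  const int *p = &a[i + 1]; // speculated: hoisted above the first branch
  if (b)
    c = *p;
  const int *q = &a[i + 2]; // speculated: hoisted above the second branch
  if (d)
    e = *q;
}

int main() {
  int a[] = {0, 10, 20, 30};
  int c1 = -1, e1 = -1, c2 = -1, e2 = -1;
  beforeSpeculation(a, 0, /*b=*/true, /*d=*/false, c1, e1);
  afterSpeculation(a, 0, /*b=*/true, /*d=*/false, c2, e2);
  // Hoisting the address math does not change observable results.
  assert(c1 == c2 && e1 == e2);
  return 0;
}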
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/Reassociate.cpp
//===- Reassociate.cpp - Reassociate binary expressions -------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass reassociates commutative expressions in an order that is designed // to promote better constant propagation, GCSE, LICM, PRE, etc. // // For example: 4 + (x + 5) -> x + (4 + 5) // // In the implementation of this algorithm, constants are assigned rank = 0, // function arguments are rank = 1, and other values are assigned ranks // corresponding to the reverse post order traversal of current function // (starting at 2), which effectively gives values in deep loops higher rank // than values not in loops. // //===----------------------------------------------------------------------===// #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/PostOrderIterator.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/Statistic.h" #include "llvm/IR/CFG.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/ValueHandle.h" #include "llvm/Pass.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/Local.h" #include <algorithm> using namespace llvm; #define DEBUG_TYPE "reassociate" STATISTIC(NumChanged, "Number of insts reassociated"); STATISTIC(NumAnnihil, "Number of expr tree annihilated"); STATISTIC(NumFactor , "Number of multiplies factored"); namespace { struct ValueEntry { unsigned Rank; Value *Op; ValueEntry(unsigned R, Value *O) : Rank(R), Op(O) {} }; inline bool operator<(const ValueEntry &LHS, const ValueEntry &RHS) { return LHS.Rank > RHS.Rank; // Sort so that highest rank goes to start. } } #ifndef NDEBUG /// Print out the expression identified in the Ops list. /// static void PrintOps(Instruction *I, const SmallVectorImpl<ValueEntry> &Ops) { Module *M = I->getParent()->getParent()->getParent(); dbgs() << Instruction::getOpcodeName(I->getOpcode()) << " " << *Ops[0].Op->getType() << '\t'; for (unsigned i = 0, e = Ops.size(); i != e; ++i) { dbgs() << "[ "; Ops[i].Op->printAsOperand(dbgs(), false, M); dbgs() << ", #" << Ops[i].Rank << "] "; } } #endif namespace { /// \brief Utility class representing a base and exponent pair which form one /// factor of some product. struct Factor { Value *Base; unsigned Power; Factor(Value *Base, unsigned Power) : Base(Base), Power(Power) {} /// \brief Sort factors by their Base. struct BaseSorter { bool operator()(const Factor &LHS, const Factor &RHS) { return LHS.Base < RHS.Base; } }; /// \brief Compare factors for equal bases. struct BaseEqual { bool operator()(const Factor &LHS, const Factor &RHS) { return LHS.Base == RHS.Base; } }; /// \brief Sort factors in descending order by their power. struct PowerDescendingSorter { bool operator()(const Factor &LHS, const Factor &RHS) { return LHS.Power > RHS.Power; } }; /// \brief Compare factors for equal powers. struct PowerEqual { bool operator()(const Factor &LHS, const Factor &RHS) { return LHS.Power == RHS.Power; } }; }; /// Utility class representing a non-constant Xor-operand. 
We classify /// non-constant Xor-Operands into two categories: /// C1) The operand is in the form "X & C", where C is a constant and C != ~0 /// C2) /// C2.1) The operand is in the form of "X | C", where C is a non-zero /// constant. /// C2.2) Any operand E which doesn't fall into C1 and C2.1, we view this /// operand as "E | 0" class XorOpnd { public: XorOpnd(Value *V); bool isInvalid() const { return SymbolicPart == nullptr; } bool isOrExpr() const { return isOr; } Value *getValue() const { return OrigVal; } Value *getSymbolicPart() const { return SymbolicPart; } unsigned getSymbolicRank() const { return SymbolicRank; } const APInt &getConstPart() const { return ConstPart; } void Invalidate() { SymbolicPart = OrigVal = nullptr; } void setSymbolicRank(unsigned R) { SymbolicRank = R; } // Sort the XorOpnd-Pointer in ascending order of symbolic-value-rank. // The purpose is twofold: // 1) Cluster together the operands sharing the same symbolic-value. // 2) Operand having smaller symbolic-value-rank is permuted earlier, which // could potentially shorten crital path, and expose more loop-invariants. // Note that values' rank are basically defined in RPO order (FIXME). // So, if Rank(X) < Rank(Y) < Rank(Z), it means X is defined earlier // than Y which is defined earlier than Z. Permute "x | 1", "Y & 2", // "z" in the order of X-Y-Z is better than any other orders. struct PtrSortFunctor { bool operator()(XorOpnd * const &LHS, XorOpnd * const &RHS) { return LHS->getSymbolicRank() < RHS->getSymbolicRank(); } }; private: Value *OrigVal; Value *SymbolicPart; APInt ConstPart; unsigned SymbolicRank; bool isOr; }; } namespace { class Reassociate : public FunctionPass { DenseMap<BasicBlock*, unsigned> RankMap; DenseMap<AssertingVH<Value>, unsigned> ValueRankMap; SetVector<AssertingVH<Instruction> > RedoInsts; // Arbitrary, but prevents quadratic behavior. 
static const unsigned GlobalReassociateLimit = 10; static const unsigned NumBinaryOps = Instruction::BinaryOpsEnd - Instruction::BinaryOpsBegin; struct PairMapValue { WeakVH Value1; WeakVH Value2; unsigned Score; bool isValid() const { return Value1 && Value2; } }; DenseMap<std::pair<Value *, Value *>, PairMapValue> PairMap[NumBinaryOps]; bool MadeChange; public: static char ID; // Pass identification, replacement for typeid Reassociate() : FunctionPass(ID) { initializeReassociatePass(*PassRegistry::getPassRegistry()); } // HLSL Change - begin // Enable global reassociation when HLSLEnableAggressiveReassociation is // set bool HLSLEnableAggressiveReassociation = true; Reassociate(bool HLSLEnableAggressiveReassociation) : Reassociate() { this->HLSLEnableAggressiveReassociation = HLSLEnableAggressiveReassociation; } void applyOptions(PassOptions O) override { GetPassOptionBool(O, "EnableAggressiveReassociation", &HLSLEnableAggressiveReassociation, /*defaultValue*/ true); } // HLSL Change - end bool runOnFunction(Function &F) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesCFG(); } private: void BuildRankMap(Function &F); unsigned getRank(Value *V); void canonicalizeOperands(Instruction *I); void ReassociateExpression(BinaryOperator *I); void RewriteExprTree(BinaryOperator *I, SmallVectorImpl<ValueEntry> &Ops); Value *OptimizeExpression(BinaryOperator *I, SmallVectorImpl<ValueEntry> &Ops); Value *OptimizeAdd(Instruction *I, SmallVectorImpl<ValueEntry> &Ops); Value *OptimizeXor(Instruction *I, SmallVectorImpl<ValueEntry> &Ops); bool CombineXorOpnd(Instruction *I, XorOpnd *Opnd1, APInt &ConstOpnd, Value *&Res); bool CombineXorOpnd(Instruction *I, XorOpnd *Opnd1, XorOpnd *Opnd2, APInt &ConstOpnd, Value *&Res); bool collectMultiplyFactors(SmallVectorImpl<ValueEntry> &Ops, SmallVectorImpl<Factor> &Factors); Value *buildMinimalMultiplyDAG(IRBuilder<> &Builder, SmallVectorImpl<Factor> &Factors); Value *OptimizeMul(BinaryOperator *I, SmallVectorImpl<ValueEntry> &Ops); Value *RemoveFactorFromExpression(Value *V, Value *Factor); void EraseInst(Instruction *I); void OptimizeInst(Instruction *I); Instruction *canonicalizeNegConstExpr(Instruction *I); void BuildPairMap(ReversePostOrderTraversal<Function *> &RPOT); }; } XorOpnd::XorOpnd(Value *V) { assert(!isa<ConstantInt>(V) && "No ConstantInt"); OrigVal = V; Instruction *I = dyn_cast<Instruction>(V); SymbolicRank = 0; if (I && (I->getOpcode() == Instruction::Or || I->getOpcode() == Instruction::And)) { Value *V0 = I->getOperand(0); Value *V1 = I->getOperand(1); if (isa<ConstantInt>(V0)) std::swap(V0, V1); if (ConstantInt *C = dyn_cast<ConstantInt>(V1)) { ConstPart = C->getValue(); SymbolicPart = V0; isOr = (I->getOpcode() == Instruction::Or); return; } } // view the operand as "V | 0" SymbolicPart = V; ConstPart = APInt::getNullValue(V->getType()->getIntegerBitWidth()); isOr = true; } char Reassociate::ID = 0; INITIALIZE_PASS(Reassociate, "reassociate", "Reassociate expressions", false, false) // Public interface to the Reassociate pass FunctionPass *llvm::createReassociatePass() { return new Reassociate(); } // HLSL Change - begin FunctionPass * llvm::createReassociatePass(bool HLSLEnableAggressiveReassociation) { return new Reassociate(HLSLEnableAggressiveReassociation); } // HLSL Change - end /// Return true if V is an instruction of the specified opcode and if it /// only has one use. 
static BinaryOperator *isReassociableOp(Value *V, unsigned Opcode) { if (V->hasOneUse() && isa<Instruction>(V) && cast<Instruction>(V)->getOpcode() == Opcode && (!isa<FPMathOperator>(V) || cast<Instruction>(V)->hasUnsafeAlgebra())) return cast<BinaryOperator>(V); return nullptr; } static BinaryOperator *isReassociableOp(Value *V, unsigned Opcode1, unsigned Opcode2) { if (V->hasOneUse() && isa<Instruction>(V) && (cast<Instruction>(V)->getOpcode() == Opcode1 || cast<Instruction>(V)->getOpcode() == Opcode2) && (!isa<FPMathOperator>(V) || cast<Instruction>(V)->hasUnsafeAlgebra())) return cast<BinaryOperator>(V); return nullptr; } static bool isUnmovableInstruction(Instruction *I) { switch (I->getOpcode()) { case Instruction::PHI: case Instruction::LandingPad: case Instruction::Alloca: case Instruction::Load: case Instruction::Invoke: case Instruction::UDiv: case Instruction::SDiv: case Instruction::FDiv: case Instruction::URem: case Instruction::SRem: case Instruction::FRem: return true; case Instruction::Call: return !isa<DbgInfoIntrinsic>(I); default: return false; } } void Reassociate::BuildRankMap(Function &F) { unsigned i = 2; // Assign distinct ranks to function arguments. for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) { ValueRankMap[&*I] = ++i; DEBUG(dbgs() << "Calculated Rank[" << I->getName() << "] = " << i << "\n"); } ReversePostOrderTraversal<Function*> RPOT(&F); for (ReversePostOrderTraversal<Function*>::rpo_iterator I = RPOT.begin(), E = RPOT.end(); I != E; ++I) { BasicBlock *BB = *I; unsigned BBRank = RankMap[BB] = ++i << 16; // Walk the basic block, adding precomputed ranks for any instructions that // we cannot move. This ensures that the ranks for these instructions are // all different in the block. for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) if (isUnmovableInstruction(I)) ValueRankMap[&*I] = ++BBRank; } } unsigned Reassociate::getRank(Value *V) { Instruction *I = dyn_cast<Instruction>(V); if (!I) { if (isa<Argument>(V)) return ValueRankMap[V]; // Function argument. return 0; // Otherwise it's a global or constant, rank 0. } if (unsigned Rank = ValueRankMap[I]) return Rank; // Rank already known? // If this is an expression, return the 1+MAX(rank(LHS), rank(RHS)) so that // we can reassociate expressions for code motion! Since we do not recurse // for PHI nodes, we cannot have infinite recursion here, because there // cannot be loops in the value graph that do not go through PHI nodes. unsigned Rank = 0, MaxRank = RankMap[I->getParent()]; for (unsigned i = 0, e = I->getNumOperands(); i != e && Rank != MaxRank; ++i) Rank = std::max(Rank, getRank(I->getOperand(i))); // If this is a not or neg instruction, do not count it for rank. This // assures us that X and ~X will have the same rank. if (!BinaryOperator::isNot(I) && !BinaryOperator::isNeg(I) && !BinaryOperator::isFNeg(I)) ++Rank; DEBUG(dbgs() << "Calculated Rank[" << V->getName() << "] = " << Rank << "\n"); return ValueRankMap[I] = Rank; } // Canonicalize constants to RHS. Otherwise, sort the operands by rank. 
void Reassociate::canonicalizeOperands(Instruction *I) { assert(isa<BinaryOperator>(I) && "Expected binary operator."); assert(I->isCommutative() && "Expected commutative operator."); Value *LHS = I->getOperand(0); Value *RHS = I->getOperand(1); unsigned LHSRank = getRank(LHS); unsigned RHSRank = getRank(RHS); if (isa<Constant>(RHS)) return; if (isa<Constant>(LHS) || RHSRank < LHSRank) cast<BinaryOperator>(I)->swapOperands(); } static BinaryOperator *CreateAdd(Value *S1, Value *S2, const Twine &Name, Instruction *InsertBefore, Value *FlagsOp) { if (S1->getType()->isIntOrIntVectorTy()) return BinaryOperator::CreateAdd(S1, S2, Name, InsertBefore); else { BinaryOperator *Res = BinaryOperator::CreateFAdd(S1, S2, Name, InsertBefore); Res->setFastMathFlags(cast<FPMathOperator>(FlagsOp)->getFastMathFlags()); return Res; } } static BinaryOperator *CreateMul(Value *S1, Value *S2, const Twine &Name, Instruction *InsertBefore, Value *FlagsOp) { if (S1->getType()->isIntOrIntVectorTy()) return BinaryOperator::CreateMul(S1, S2, Name, InsertBefore); else { BinaryOperator *Res = BinaryOperator::CreateFMul(S1, S2, Name, InsertBefore); Res->setFastMathFlags(cast<FPMathOperator>(FlagsOp)->getFastMathFlags()); return Res; } } static BinaryOperator *CreateNeg(Value *S1, const Twine &Name, Instruction *InsertBefore, Value *FlagsOp) { if (S1->getType()->isIntOrIntVectorTy()) return BinaryOperator::CreateNeg(S1, Name, InsertBefore); else { BinaryOperator *Res = BinaryOperator::CreateFNeg(S1, Name, InsertBefore); Res->setFastMathFlags(cast<FPMathOperator>(FlagsOp)->getFastMathFlags()); return Res; } } /// Replace 0-X with X*-1. static BinaryOperator *LowerNegateToMultiply(Instruction *Neg) { Type *Ty = Neg->getType(); Constant *NegOne = Ty->isIntOrIntVectorTy() ? ConstantInt::getAllOnesValue(Ty) : ConstantFP::get(Ty, -1.0); BinaryOperator *Res = CreateMul(Neg->getOperand(1), NegOne, "", Neg, Neg); Neg->setOperand(1, Constant::getNullValue(Ty)); // Drop use of op. Res->takeName(Neg); Neg->replaceAllUsesWith(Res); Res->setDebugLoc(Neg->getDebugLoc()); return Res; } /// Returns k such that lambda(2^Bitwidth) = 2^k, where lambda is the Carmichael /// function. This means that x^(2^k) === 1 mod 2^Bitwidth for /// every odd x, i.e. x^(2^k) = 1 for every odd x in Bitwidth-bit arithmetic. /// Note that 0 <= k < Bitwidth, and if Bitwidth > 3 then x^(2^k) = 0 for every /// even x in Bitwidth-bit arithmetic. static unsigned CarmichaelShift(unsigned Bitwidth) { if (Bitwidth < 3) return Bitwidth - 1; return Bitwidth - 2; } /// Add the extra weight 'RHS' to the existing weight 'LHS', /// reducing the combined weight using any special properties of the operation. /// The existing weight LHS represents the computation X op X op ... op X where /// X occurs LHS times. The combined weight represents X op X op ... op X with /// X occurring LHS + RHS times. If op is "Xor" for example then the combined /// operation is equivalent to X if LHS + RHS is odd, or 0 if LHS + RHS is even; /// the routine returns 1 in LHS in the first case, and 0 in LHS in the second. static void IncorporateWeight(APInt &LHS, const APInt &RHS, unsigned Opcode) { // If we were working with infinite precision arithmetic then the combined // weight would be LHS + RHS. 
But we are using finite precision arithmetic, // and the APInt sum LHS + RHS may not be correct if it wraps (it is correct // for nilpotent operations and addition, but not for idempotent operations // and multiplication), so it is important to correctly reduce the combined // weight back into range if wrapping would be wrong. // If RHS is zero then the weight didn't change. if (RHS.isMinValue()) return; // If LHS is zero then the combined weight is RHS. if (LHS.isMinValue()) { LHS = RHS; return; } // From this point on we know that neither LHS nor RHS is zero. if (Instruction::isIdempotent(Opcode)) { // Idempotent means X op X === X, so any non-zero weight is equivalent to a // weight of 1. Keeping weights at zero or one also means that wrapping is // not a problem. assert(LHS == 1 && RHS == 1 && "Weights not reduced!"); return; // Return a weight of 1. } if (Instruction::isNilpotent(Opcode)) { // Nilpotent means X op X === 0, so reduce weights modulo 2. assert(LHS == 1 && RHS == 1 && "Weights not reduced!"); LHS = 0; // 1 + 1 === 0 modulo 2. return; } if (Opcode == Instruction::Add || Opcode == Instruction::FAdd) { // TODO: Reduce the weight by exploiting nsw/nuw? LHS += RHS; return; } assert((Opcode == Instruction::Mul || Opcode == Instruction::FMul) && "Unknown associative operation!"); unsigned Bitwidth = LHS.getBitWidth(); // If CM is the Carmichael number then a weight W satisfying W >= CM+Bitwidth // can be replaced with W-CM. That's because x^W=x^(W-CM) for every Bitwidth // bit number x, since either x is odd in which case x^CM = 1, or x is even in // which case both x^W and x^(W - CM) are zero. By subtracting off multiples // of CM like this weights can always be reduced to the range [0, CM+Bitwidth) // which by a happy accident means that they can always be represented using // Bitwidth bits. // TODO: Reduce the weight by exploiting nsw/nuw? (Could do much better than // the Carmichael number). if (Bitwidth > 3) { /// CM - The value of Carmichael's lambda function. APInt CM = APInt::getOneBitSet(Bitwidth, CarmichaelShift(Bitwidth)); // Any weight W >= Threshold can be replaced with W - CM. APInt Threshold = CM + Bitwidth; assert(LHS.ult(Threshold) && RHS.ult(Threshold) && "Weights not reduced!"); // For Bitwidth 4 or more the following sum does not overflow. LHS += RHS; while (LHS.uge(Threshold)) LHS -= CM; } else { // To avoid problems with overflow do everything the same as above but using // a larger type. unsigned CM = 1U << CarmichaelShift(Bitwidth); unsigned Threshold = CM + Bitwidth; assert(LHS.getZExtValue() < Threshold && RHS.getZExtValue() < Threshold && "Weights not reduced!"); unsigned Total = LHS.getZExtValue() + RHS.getZExtValue(); while (Total >= Threshold) Total -= CM; LHS = Total; } } typedef std::pair<Value*, APInt> RepeatedValue; /// Given an associative binary expression, return the leaf /// nodes in Ops along with their weights (how many times the leaf occurs). The /// original expression is the same as /// (Ops[0].first op Ops[0].first op ... Ops[0].first) <- Ops[0].second times /// op /// (Ops[1].first op Ops[1].first op ... Ops[1].first) <- Ops[1].second times /// op /// ... /// op /// (Ops[N].first op Ops[N].first op ... Ops[N].first) <- Ops[N].second times /// /// Note that the values Ops[0].first, ..., Ops[N].first are all distinct. /// /// This routine may modify the function, in which case it returns 'true'. The /// changes it makes may well be destructive, changing the value computed by 'I' /// to something completely different. 
Thus if the routine returns 'true' then /// you MUST either replace I with a new expression computed from the Ops array, /// or use RewriteExprTree to put the values back in. /// /// A leaf node is either not a binary operation of the same kind as the root /// node 'I' (i.e. is not a binary operator at all, or is, but with a different /// opcode), or is the same kind of binary operator but has a use which either /// does not belong to the expression, or does belong to the expression but is /// a leaf node. Every leaf node has at least one use that is a non-leaf node /// of the expression, while for non-leaf nodes (except for the root 'I') every /// use is a non-leaf node of the expression. /// /// For example: /// expression graph node names /// /// + | I /// / \ | /// + + | A, B /// / \ / \ | /// * + * | C, D, E /// / \ / \ / \ | /// + * | F, G /// /// The leaf nodes are C, E, F and G. The Ops array will contain (maybe not in /// that order) (C, 1), (E, 1), (F, 2), (G, 2). /// /// The expression is maximal: if some instruction is a binary operator of the /// same kind as 'I', and all of its uses are non-leaf nodes of the expression, /// then the instruction also belongs to the expression, is not a leaf node of /// it, and its operands also belong to the expression (but may be leaf nodes). /// /// NOTE: This routine will set operands of non-leaf non-root nodes to undef in /// order to ensure that every non-root node in the expression has *exactly one* /// use by a non-leaf node of the expression. This destruction means that the /// caller MUST either replace 'I' with a new expression or use something like /// RewriteExprTree to put the values back in if the routine indicates that it /// made a change by returning 'true'. /// /// In the above example either the right operand of A or the left operand of B /// will be replaced by undef. If it is B's operand then this gives: /// /// + | I /// / \ | /// + + | A, B - operand of B replaced with undef /// / \ \ | /// * + * | C, D, E /// / \ / \ / \ | /// + * | F, G /// /// Note that such undef operands can only be reached by passing through 'I'. /// For example, if you visit operands recursively starting from a leaf node /// then you will never see such an undef operand unless you get back to 'I', /// which requires passing through a phi node. /// /// Note that this routine may also mutate binary operators of the wrong type /// that have all uses inside the expression (i.e. only used by non-leaf nodes /// of the expression) if it can turn them into binary operators of the right /// type and thus make the expression bigger. static bool LinearizeExprTree(BinaryOperator *I, SmallVectorImpl<RepeatedValue> &Ops) { DEBUG(dbgs() << "LINEARIZE: " << *I << '\n'); unsigned Bitwidth = I->getType()->getScalarType()->getPrimitiveSizeInBits(); unsigned Opcode = I->getOpcode(); assert(I->isAssociative() && I->isCommutative() && "Expected an associative and commutative operation!"); // Visit all operands of the expression, keeping track of their weight (the // number of paths from the expression root to the operand, or if you like // the number of times that operand occurs in the linearized expression). // For example, if I = X + A, where X = A + B, then I, X and B have weight 1 // while A has weight two. // Worklist of non-leaf nodes (their operands are in the expression too) along // with their weights, representing a certain number of paths to the operator. // If an operator occurs in the worklist multiple times then we found multiple // ways to get to it. 
SmallVector<std::pair<BinaryOperator*, APInt>, 8> Worklist; // (Op, Weight) Worklist.push_back(std::make_pair(I, APInt(Bitwidth, 1))); bool Changed = false; // Leaves of the expression are values that either aren't the right kind of // operation (eg: a constant, or a multiply in an add tree), or are, but have // some uses that are not inside the expression. For example, in I = X + X, // X = A + B, the value X has two uses (by I) that are in the expression. If // X has any other uses, for example in a return instruction, then we consider // X to be a leaf, and won't analyze it further. When we first visit a value, // if it has more than one use then at first we conservatively consider it to // be a leaf. Later, as the expression is explored, we may discover some more // uses of the value from inside the expression. If all uses turn out to be // from within the expression (and the value is a binary operator of the right // kind) then the value is no longer considered to be a leaf, and its operands // are explored. // Leaves - Keeps track of the set of putative leaves as well as the number of // paths to each leaf seen so far. typedef DenseMap<Value*, APInt> LeafMap; LeafMap Leaves; // Leaf -> Total weight so far. SmallVector<Value*, 8> LeafOrder; // Ensure deterministic leaf output order. #ifndef NDEBUG SmallPtrSet<Value*, 8> Visited; // For sanity checking the iteration scheme. #endif while (!Worklist.empty()) { std::pair<BinaryOperator*, APInt> P = Worklist.pop_back_val(); I = P.first; // We examine the operands of this binary operator. for (unsigned OpIdx = 0; OpIdx < 2; ++OpIdx) { // Visit operands. Value *Op = I->getOperand(OpIdx); APInt Weight = P.second; // Number of paths to this operand. DEBUG(dbgs() << "OPERAND: " << *Op << " (" << Weight << ")\n"); assert(!Op->use_empty() && "No uses, so how did we get to it?!"); // If this is a binary operation of the right kind with only one use then // add its operands to the expression. if (BinaryOperator *BO = isReassociableOp(Op, Opcode)) { assert(Visited.insert(Op).second && "Not first visit!"); DEBUG(dbgs() << "DIRECT ADD: " << *Op << " (" << Weight << ")\n"); Worklist.push_back(std::make_pair(BO, Weight)); continue; } // Appears to be a leaf. Is the operand already in the set of leaves? LeafMap::iterator It = Leaves.find(Op); if (It == Leaves.end()) { // Not in the leaf map. Must be the first time we saw this operand. assert(Visited.insert(Op).second && "Not first visit!"); if (!Op->hasOneUse()) { // This value has uses not accounted for by the expression, so it is // not safe to modify. Mark it as being a leaf. DEBUG(dbgs() << "ADD USES LEAF: " << *Op << " (" << Weight << ")\n"); LeafOrder.push_back(Op); Leaves[Op] = Weight; continue; } // No uses outside the expression, try morphing it. } else if (It != Leaves.end()) { // Already in the leaf map. assert(Visited.count(Op) && "In leaf map but not visited!"); // Update the number of paths to the leaf. IncorporateWeight(It->second, Weight, Opcode); #if 0 // TODO: Re-enable once PR13021 is fixed. // The leaf already has one use from inside the expression. As we want // exactly one such use, drop this new use of the leaf. 
assert(!Op->hasOneUse() && "Only one use, but we got here twice!"); I->setOperand(OpIdx, UndefValue::get(I->getType())); Changed = true; // If the leaf is a binary operation of the right kind and we now see // that its multiple original uses were in fact all by nodes belonging // to the expression, then no longer consider it to be a leaf and add // its operands to the expression. if (BinaryOperator *BO = isReassociableOp(Op, Opcode)) { DEBUG(dbgs() << "UNLEAF: " << *Op << " (" << It->second << ")\n"); Worklist.push_back(std::make_pair(BO, It->second)); Leaves.erase(It); continue; } #endif // If we still have uses that are not accounted for by the expression // then it is not safe to modify the value. if (!Op->hasOneUse()) continue; // No uses outside the expression, try morphing it. Weight = It->second; Leaves.erase(It); // Since the value may be morphed below. } // At this point we have a value which, first of all, is not a binary // expression of the right kind, and secondly, is only used inside the // expression. This means that it can safely be modified. See if we // can usefully morph it into an expression of the right kind. assert((!isa<Instruction>(Op) || cast<Instruction>(Op)->getOpcode() != Opcode || (isa<FPMathOperator>(Op) && !cast<Instruction>(Op)->hasUnsafeAlgebra())) && "Should have been handled above!"); assert(Op->hasOneUse() && "Has uses outside the expression tree!"); // If this is a multiply expression, turn any internal negations into // multiplies by -1 so they can be reassociated. if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op)) if ((Opcode == Instruction::Mul && BinaryOperator::isNeg(BO)) || (Opcode == Instruction::FMul && BinaryOperator::isFNeg(BO))) { DEBUG(dbgs() << "MORPH LEAF: " << *Op << " (" << Weight << ") TO "); BO = LowerNegateToMultiply(BO); DEBUG(dbgs() << *BO << '\n'); Worklist.push_back(std::make_pair(BO, Weight)); Changed = true; continue; } // Failed to morph into an expression of the right type. This really is // a leaf. DEBUG(dbgs() << "ADD LEAF: " << *Op << " (" << Weight << ")\n"); assert(!isReassociableOp(Op, Opcode) && "Value was morphed?"); LeafOrder.push_back(Op); Leaves[Op] = Weight; } } // The leaves, repeated according to their weights, represent the linearized // form of the expression. for (unsigned i = 0, e = LeafOrder.size(); i != e; ++i) { Value *V = LeafOrder[i]; LeafMap::iterator It = Leaves.find(V); if (It == Leaves.end()) // Node initially thought to be a leaf wasn't. continue; assert(!isReassociableOp(V, Opcode) && "Shouldn't be a leaf!"); APInt Weight = It->second; if (Weight.isMinValue()) // Leaf already output or weight reduction eliminated it. continue; // Ensure the leaf is only output once. It->second = 0; Ops.push_back(std::make_pair(V, Weight)); } // For nilpotent operations or addition there may be no operands, for example // because the expression was "X xor X" or consisted of 2^Bitwidth additions: // in both cases the weight reduces to 0 causing the value to be skipped. if (Ops.empty()) { Constant *Identity = ConstantExpr::getBinOpIdentity(Opcode, I->getType()); assert(Identity && "Associative operation without identity!"); Ops.emplace_back(Identity, APInt(Bitwidth, 1)); } return Changed; } /// Now that the operands for this expression tree are /// linearized and optimized, emit them in-order. 
void Reassociate::RewriteExprTree(BinaryOperator *I, SmallVectorImpl<ValueEntry> &Ops) { assert(Ops.size() > 1 && "Single values should be used directly!"); // Since our optimizations should never increase the number of operations, the // new expression can usually be written reusing the existing binary operators // from the original expression tree, without creating any new instructions, // though the rewritten expression may have a completely different topology. // We take care to not change anything if the new expression will be the same // as the original. If more than trivial changes (like commuting operands) // were made then we are obliged to clear out any optional subclass data like // nsw flags. /// NodesToRewrite - Nodes from the original expression available for writing /// the new expression into. SmallVector<BinaryOperator*, 8> NodesToRewrite; unsigned Opcode = I->getOpcode(); BinaryOperator *Op = I; /// NotRewritable - The operands being written will be the leaves of the new /// expression and must not be used as inner nodes (via NodesToRewrite) by /// mistake. Inner nodes are always reassociable, and usually leaves are not /// (if they were they would have been incorporated into the expression and so /// would not be leaves), so most of the time there is no danger of this. But /// in rare cases a leaf may become reassociable if an optimization kills uses /// of it, or it may momentarily become reassociable during rewriting (below) /// due it being removed as an operand of one of its uses. Ensure that misuse /// of leaf nodes as inner nodes cannot occur by remembering all of the future /// leaves and refusing to reuse any of them as inner nodes. SmallPtrSet<Value*, 8> NotRewritable; for (unsigned i = 0, e = Ops.size(); i != e; ++i) NotRewritable.insert(Ops[i].Op); // ExpressionChanged - Non-null if the rewritten expression differs from the // original in some non-trivial way, requiring the clearing of optional flags. // Flags are cleared from the operator in ExpressionChanged up to I inclusive. BinaryOperator *ExpressionChanged = nullptr; for (unsigned i = 0; ; ++i) { // The last operation (which comes earliest in the IR) is special as both // operands will come from Ops, rather than just one with the other being // a subexpression. if (i+2 == Ops.size()) { Value *NewLHS = Ops[i].Op; Value *NewRHS = Ops[i+1].Op; Value *OldLHS = Op->getOperand(0); Value *OldRHS = Op->getOperand(1); if (NewLHS == OldLHS && NewRHS == OldRHS) // Nothing changed, leave it alone. break; if (NewLHS == OldRHS && NewRHS == OldLHS) { // The order of the operands was reversed. Swap them. DEBUG(dbgs() << "RA: " << *Op << '\n'); Op->swapOperands(); DEBUG(dbgs() << "TO: " << *Op << '\n'); MadeChange = true; ++NumChanged; break; } // The new operation differs non-trivially from the original. Overwrite // the old operands with the new ones. DEBUG(dbgs() << "RA: " << *Op << '\n'); if (NewLHS != OldLHS) { BinaryOperator *BO = isReassociableOp(OldLHS, Opcode); if (BO && !NotRewritable.count(BO)) NodesToRewrite.push_back(BO); Op->setOperand(0, NewLHS); } if (NewRHS != OldRHS) { BinaryOperator *BO = isReassociableOp(OldRHS, Opcode); if (BO && !NotRewritable.count(BO)) NodesToRewrite.push_back(BO); Op->setOperand(1, NewRHS); } DEBUG(dbgs() << "TO: " << *Op << '\n'); ExpressionChanged = Op; MadeChange = true; ++NumChanged; break; } // Not the last operation. The left-hand side will be a sub-expression // while the right-hand side will be the current element of Ops. 
Value *NewRHS = Ops[i].Op; if (NewRHS != Op->getOperand(1)) { DEBUG(dbgs() << "RA: " << *Op << '\n'); if (NewRHS == Op->getOperand(0)) { // The new right-hand side was already present as the left operand. If // we are lucky then swapping the operands will sort out both of them. Op->swapOperands(); } else { // Overwrite with the new right-hand side. BinaryOperator *BO = isReassociableOp(Op->getOperand(1), Opcode); if (BO && !NotRewritable.count(BO)) NodesToRewrite.push_back(BO); Op->setOperand(1, NewRHS); ExpressionChanged = Op; } DEBUG(dbgs() << "TO: " << *Op << '\n'); MadeChange = true; ++NumChanged; } // Now deal with the left-hand side. If this is already an operation node // from the original expression then just rewrite the rest of the expression // into it. BinaryOperator *BO = isReassociableOp(Op->getOperand(0), Opcode); if (BO && !NotRewritable.count(BO)) { Op = BO; continue; } // Otherwise, grab a spare node from the original expression and use that as // the left-hand side. If there are no nodes left then the optimizers made // an expression with more nodes than the original! This usually means that // they did something stupid but it might mean that the problem was just too // hard (finding the mimimal number of multiplications needed to realize a // multiplication expression is NP-complete). Whatever the reason, smart or // stupid, create a new node if there are none left. BinaryOperator *NewOp; if (NodesToRewrite.empty()) { Constant *Undef = UndefValue::get(I->getType()); NewOp = BinaryOperator::Create(Instruction::BinaryOps(Opcode), Undef, Undef, "", I); if (NewOp->getType()->isFPOrFPVectorTy()) NewOp->setFastMathFlags(I->getFastMathFlags()); } else { NewOp = NodesToRewrite.pop_back_val(); } DEBUG(dbgs() << "RA: " << *Op << '\n'); Op->setOperand(0, NewOp); DEBUG(dbgs() << "TO: " << *Op << '\n'); ExpressionChanged = Op; MadeChange = true; ++NumChanged; Op = NewOp; } // If the expression changed non-trivially then clear out all subclass data // starting from the operator specified in ExpressionChanged, and compactify // the operators to just before the expression root to guarantee that the // expression tree is dominated by all of Ops. if (ExpressionChanged) do { // Preserve FastMathFlags. if (isa<FPMathOperator>(I)) { FastMathFlags Flags = I->getFastMathFlags(); ExpressionChanged->clearSubclassOptionalData(); ExpressionChanged->setFastMathFlags(Flags); } else ExpressionChanged->clearSubclassOptionalData(); if (ExpressionChanged == I) break; ExpressionChanged->moveBefore(I); ExpressionChanged = cast<BinaryOperator>(*ExpressionChanged->user_begin()); } while (1); // Throw away any left over nodes from the original expression. for (unsigned i = 0, e = NodesToRewrite.size(); i != e; ++i) RedoInsts.insert(NodesToRewrite[i]); } /// Insert instructions before the instruction pointed to by BI, /// that computes the negative version of the value specified. The negative /// version of the value is returned, and BI is left pointing at the instruction /// that should be processed next by the reassociation pass. static Value *NegateValue(Value *V, Instruction *BI) { if (Constant *C = dyn_cast<Constant>(V)) { if (C->getType()->isFPOrFPVectorTy()) { return ConstantExpr::getFNeg(C); } return ConstantExpr::getNeg(C); } // We are trying to expose opportunity for reassociation. One of the things // that we want to do to achieve this is to push a negation as deep into an // expression chain as possible, to expose the add instructions. 
In practice, // this means that we turn this: // X = -(A+12+C+D) into X = -A + -12 + -C + -D = -12 + -A + -C + -D // so that later, a: Y = 12+X could get reassociated with the -12 to eliminate // the constants. We assume that instcombine will clean up the mess later if // we introduce tons of unnecessary negation instructions. // if (BinaryOperator *I = isReassociableOp(V, Instruction::Add, Instruction::FAdd)) { // Push the negates through the add. I->setOperand(0, NegateValue(I->getOperand(0), BI)); I->setOperand(1, NegateValue(I->getOperand(1), BI)); if (I->getOpcode() == Instruction::Add) { I->setHasNoUnsignedWrap(false); I->setHasNoSignedWrap(false); } // We must move the add instruction here, because the neg instructions do // not dominate the old add instruction in general. By moving it, we are // assured that the neg instructions we just inserted dominate the // instruction we are about to insert after them. // I->moveBefore(BI); I->setName(I->getName()+".neg"); return I; } // Okay, we need to materialize a negated version of V with an instruction. // Scan the use lists of V to see if we have one already. for (User *U : V->users()) { if (!BinaryOperator::isNeg(U) && !BinaryOperator::isFNeg(U)) continue; // We found one! Now we have to make sure that the definition dominates // this use. We do this by moving it to the entry block (if it is a // non-instruction value) or right after the definition. These negates will // be zapped by reassociate later, so we don't need much finesse here. BinaryOperator *TheNeg = cast<BinaryOperator>(U); // Verify that the negate is in this function, V might be a constant expr. if (TheNeg->getParent()->getParent() != BI->getParent()->getParent()) continue; BasicBlock::iterator InsertPt; if (Instruction *InstInput = dyn_cast<Instruction>(V)) { if (InvokeInst *II = dyn_cast<InvokeInst>(InstInput)) { InsertPt = II->getNormalDest()->begin(); } else { InsertPt = InstInput; ++InsertPt; } while (isa<PHINode>(InsertPt)) ++InsertPt; } else { InsertPt = TheNeg->getParent()->getParent()->getEntryBlock().begin(); } TheNeg->moveBefore(InsertPt); if (TheNeg->getOpcode() == Instruction::Sub) { TheNeg->setHasNoUnsignedWrap(false); TheNeg->setHasNoSignedWrap(false); } else { TheNeg->andIRFlags(BI); } return TheNeg; } // Insert a 'neg' instruction that subtracts the value from zero to get the // negation. return CreateNeg(V, V->getName() + ".neg", BI, BI); } /// Return true if we should break up this subtract of X-Y into (X + -Y). static bool ShouldBreakUpSubtract(Instruction *Sub) { // If this is a negation, we can't split it up! if (BinaryOperator::isNeg(Sub) || BinaryOperator::isFNeg(Sub)) return false; // Don't breakup X - undef. if (isa<UndefValue>(Sub->getOperand(1))) return false; // Don't bother to break this up unless either the LHS is an associable add or // subtract or if this is only used by one. 
Value *V0 = Sub->getOperand(0); if (isReassociableOp(V0, Instruction::Add, Instruction::FAdd) || isReassociableOp(V0, Instruction::Sub, Instruction::FSub)) return true; Value *V1 = Sub->getOperand(1); if (isReassociableOp(V1, Instruction::Add, Instruction::FAdd) || isReassociableOp(V1, Instruction::Sub, Instruction::FSub)) return true; Value *VB = Sub->user_back(); if (Sub->hasOneUse() && (isReassociableOp(VB, Instruction::Add, Instruction::FAdd) || isReassociableOp(VB, Instruction::Sub, Instruction::FSub))) return true; return false; } /// If we have (X-Y), and if either X is an add, or if this is only used by an /// add, transform this into (X+(0-Y)) to promote better reassociation. static BinaryOperator *BreakUpSubtract(Instruction *Sub) { // Convert a subtract into an add and a neg instruction. This allows sub // instructions to be commuted with other add instructions. // // Calculate the negative value of Operand 1 of the sub instruction, // and set it as the RHS of the add instruction we just made. // Value *NegVal = NegateValue(Sub->getOperand(1), Sub); BinaryOperator *New = CreateAdd(Sub->getOperand(0), NegVal, "", Sub, Sub); Sub->setOperand(0, Constant::getNullValue(Sub->getType())); // Drop use of op. Sub->setOperand(1, Constant::getNullValue(Sub->getType())); // Drop use of op. New->takeName(Sub); // Everyone now refers to the add instruction. Sub->replaceAllUsesWith(New); New->setDebugLoc(Sub->getDebugLoc()); DEBUG(dbgs() << "Negated: " << *New << '\n'); return New; } /// If this is a shift of a reassociable multiply or is used by one, change /// this into a multiply by a constant to assist with further reassociation. static BinaryOperator *ConvertShiftToMul(Instruction *Shl) { Constant *MulCst = ConstantInt::get(Shl->getType(), 1); MulCst = ConstantExpr::getShl(MulCst, cast<Constant>(Shl->getOperand(1))); BinaryOperator *Mul = BinaryOperator::CreateMul(Shl->getOperand(0), MulCst, "", Shl); Shl->setOperand(0, UndefValue::get(Shl->getType())); // Drop use of op. Mul->takeName(Shl); // Everyone now refers to the mul instruction. Shl->replaceAllUsesWith(Mul); Mul->setDebugLoc(Shl->getDebugLoc()); // We can safely preserve the nuw flag in all cases. It's also safe to turn a // nuw nsw shl into a nuw nsw mul. However, nsw in isolation requires special // handling. bool NSW = cast<BinaryOperator>(Shl)->hasNoSignedWrap(); bool NUW = cast<BinaryOperator>(Shl)->hasNoUnsignedWrap(); if (NSW && NUW) Mul->setHasNoSignedWrap(true); Mul->setHasNoUnsignedWrap(NUW); return Mul; } /// Scan backwards and forwards among values with the same rank as element i /// to see if X exists. If X does not exist, return i. This is useful when /// scanning for 'x' when we see '-x' because they both get the same rank. static unsigned FindInOperandList(SmallVectorImpl<ValueEntry> &Ops, unsigned i, Value *X) { unsigned XRank = Ops[i].Rank; unsigned e = Ops.size(); for (unsigned j = i+1; j != e && Ops[j].Rank == XRank; ++j) { if (Ops[j].Op == X) return j; if (Instruction *I1 = dyn_cast<Instruction>(Ops[j].Op)) if (Instruction *I2 = dyn_cast<Instruction>(X)) if (I1->isIdenticalTo(I2)) return j; } // Scan backwards. for (unsigned j = i-1; j != ~0U && Ops[j].Rank == XRank; --j) { if (Ops[j].Op == X) return j; if (Instruction *I1 = dyn_cast<Instruction>(Ops[j].Op)) if (Instruction *I2 = dyn_cast<Instruction>(X)) if (I1->isIdenticalTo(I2)) return j; } return i; } /// Emit a tree of add instructions, summing Ops together /// and returning the result. Insert the tree before I. 
static Value *EmitAddTreeOfValues(Instruction *I, SmallVectorImpl<WeakTrackingVH> &Ops) { if (Ops.size() == 1) return Ops.back(); Value *V1 = Ops.back(); Ops.pop_back(); Value *V2 = EmitAddTreeOfValues(I, Ops); return CreateAdd(V2, V1, "tmp", I, I); } /// If V is an expression tree that is a multiplication sequence, /// and if this sequence contains a multiply by Factor, /// remove Factor from the tree and return the new tree. Value *Reassociate::RemoveFactorFromExpression(Value *V, Value *Factor) { BinaryOperator *BO = isReassociableOp(V, Instruction::Mul, Instruction::FMul); if (!BO) return nullptr; SmallVector<RepeatedValue, 8> Tree; MadeChange |= LinearizeExprTree(BO, Tree); SmallVector<ValueEntry, 8> Factors; Factors.reserve(Tree.size()); for (unsigned i = 0, e = Tree.size(); i != e; ++i) { RepeatedValue E = Tree[i]; Factors.append(E.second.getZExtValue(), ValueEntry(getRank(E.first), E.first)); } bool FoundFactor = false; bool NeedsNegate = false; for (unsigned i = 0, e = Factors.size(); i != e; ++i) { if (Factors[i].Op == Factor) { FoundFactor = true; Factors.erase(Factors.begin()+i); break; } // If this is a negative version of this factor, remove it. if (ConstantInt *FC1 = dyn_cast<ConstantInt>(Factor)) { if (ConstantInt *FC2 = dyn_cast<ConstantInt>(Factors[i].Op)) if (FC1->getValue() == -FC2->getValue()) { FoundFactor = NeedsNegate = true; Factors.erase(Factors.begin()+i); break; } } else if (ConstantFP *FC1 = dyn_cast<ConstantFP>(Factor)) { if (ConstantFP *FC2 = dyn_cast<ConstantFP>(Factors[i].Op)) { APFloat F1(FC1->getValueAPF()); APFloat F2(FC2->getValueAPF()); F2.changeSign(); if (F1.compare(F2) == APFloat::cmpEqual) { FoundFactor = NeedsNegate = true; Factors.erase(Factors.begin() + i); break; } } } } if (!FoundFactor) { // Make sure to restore the operands to the expression tree. RewriteExprTree(BO, Factors); return nullptr; } BasicBlock::iterator InsertPt = BO; ++InsertPt; // If this was just a single multiply, remove the multiply and return the only // remaining operand. if (Factors.size() == 1) { RedoInsts.insert(BO); V = Factors[0].Op; } else { RewriteExprTree(BO, Factors); V = BO; } if (NeedsNegate) V = CreateNeg(V, "neg", InsertPt, BO); return V; } /// If V is a single-use multiply, recursively add its operands as factors, /// otherwise add V to the list of factors. /// /// Ops is the top-level list of add operands we're trying to factor. static void FindSingleUseMultiplyFactors(Value *V, SmallVectorImpl<Value*> &Factors, const SmallVectorImpl<ValueEntry> &Ops) { BinaryOperator *BO = isReassociableOp(V, Instruction::Mul, Instruction::FMul); if (!BO) { Factors.push_back(V); return; } // Otherwise, add the LHS and RHS to the list of factors. FindSingleUseMultiplyFactors(BO->getOperand(1), Factors, Ops); FindSingleUseMultiplyFactors(BO->getOperand(0), Factors, Ops); } /// Optimize a series of operands to an 'and', 'or', or 'xor' instruction. /// This optimizes based on identities. If it can be reduced to a single Value, /// it is returned, otherwise the Ops list is mutated as necessary. static Value *OptimizeAndOrXor(unsigned Opcode, SmallVectorImpl<ValueEntry> &Ops) { // Scan the operand lists looking for X and ~X pairs, along with X,X pairs. // If we find any, we can simplify the expression. X&~X == 0, X|~X == -1. for (unsigned i = 0, e = Ops.size(); i != e; ++i) { // First, check for X and ~X in the operand list. assert(i < Ops.size()); if (BinaryOperator::isNot(Ops[i].Op)) { // Cannot occur for ^. 
Value *X = BinaryOperator::getNotArgument(Ops[i].Op); unsigned FoundX = FindInOperandList(Ops, i, X); if (FoundX != i) { if (Opcode == Instruction::And) // ...&X&~X = 0 return Constant::getNullValue(X->getType()); if (Opcode == Instruction::Or) // ...|X|~X = -1 return Constant::getAllOnesValue(X->getType()); } } // Next, check for duplicate pairs of values, which we assume are next to // each other, due to our sorting criteria. assert(i < Ops.size()); if (i+1 != Ops.size() && Ops[i+1].Op == Ops[i].Op) { if (Opcode == Instruction::And || Opcode == Instruction::Or) { // Drop duplicate values for And and Or. Ops.erase(Ops.begin()+i); --i; --e; ++NumAnnihil; continue; } // Drop pairs of values for Xor. assert(Opcode == Instruction::Xor); if (e == 2) return Constant::getNullValue(Ops[0].Op->getType()); // Y ^ X^X -> Y Ops.erase(Ops.begin()+i, Ops.begin()+i+2); i -= 1; e -= 2; ++NumAnnihil; } } return nullptr; } /// Helper funciton of CombineXorOpnd(). It creates a bitwise-and /// instruction with the given two operands, and return the resulting /// instruction. There are two special cases: 1) if the constant operand is 0, /// it will return NULL. 2) if the constant is ~0, the symbolic operand will /// be returned. static Value *createAndInstr(Instruction *InsertBefore, Value *Opnd, const APInt &ConstOpnd) { if (ConstOpnd != 0) { if (!ConstOpnd.isAllOnesValue()) { LLVMContext &Ctx = Opnd->getType()->getContext(); Instruction *I; I = BinaryOperator::CreateAnd(Opnd, ConstantInt::get(Ctx, ConstOpnd), "and.ra", InsertBefore); I->setDebugLoc(InsertBefore->getDebugLoc()); return I; } return Opnd; } return nullptr; } // Helper function of OptimizeXor(). It tries to simplify "Opnd1 ^ ConstOpnd" // into "R ^ C", where C would be 0, and R is a symbolic value. // // If it was successful, true is returned, and the "R" and "C" is returned // via "Res" and "ConstOpnd", respectively; otherwise, false is returned, // and both "Res" and "ConstOpnd" remain unchanged. // bool Reassociate::CombineXorOpnd(Instruction *I, XorOpnd *Opnd1, APInt &ConstOpnd, Value *&Res) { // Xor-Rule 1: (x | c1) ^ c2 = (x | c1) ^ (c1 ^ c1) ^ c2 // = ((x | c1) ^ c1) ^ (c1 ^ c2) // = (x & ~c1) ^ (c1 ^ c2) // It is useful only when c1 == c2. if (Opnd1->isOrExpr() && Opnd1->getConstPart() != 0) { if (!Opnd1->getValue()->hasOneUse()) return false; const APInt &C1 = Opnd1->getConstPart(); if (C1 != ConstOpnd) return false; Value *X = Opnd1->getSymbolicPart(); Res = createAndInstr(I, X, ~C1); // ConstOpnd was C2, now C1 ^ C2. ConstOpnd ^= C1; if (Instruction *T = dyn_cast<Instruction>(Opnd1->getValue())) RedoInsts.insert(T); return true; } return false; } // Helper function of OptimizeXor(). It tries to simplify // "Opnd1 ^ Opnd2 ^ ConstOpnd" into "R ^ C", where C would be 0, and R is a // symbolic value. // // If it was successful, true is returned, and the "R" and "C" is returned // via "Res" and "ConstOpnd", respectively (If the entire expression is // evaluated to a constant, the Res is set to NULL); otherwise, false is // returned, and both "Res" and "ConstOpnd" remain unchanged. bool Reassociate::CombineXorOpnd(Instruction *I, XorOpnd *Opnd1, XorOpnd *Opnd2, APInt &ConstOpnd, Value *&Res) { Value *X = Opnd1->getSymbolicPart(); if (X != Opnd2->getSymbolicPart()) return false; // This many instruction become dead.(At least "Opnd1 ^ Opnd2" will die.) 
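  // (At least the combined "Opnd1 ^ Opnd2" dies, and each operand value with
  // no other use dies with it. The rules below are only applied when they do
  // not create more new instructions -- the "and", plus possibly a new
  // constant xor -- than they kill.)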
int DeadInstNum = 1; if (Opnd1->getValue()->hasOneUse()) DeadInstNum++; if (Opnd2->getValue()->hasOneUse()) DeadInstNum++; // Xor-Rule 2: // (x | c1) ^ (x & c2) // = (x|c1) ^ (x&c2) ^ (c1 ^ c1) = ((x|c1) ^ c1) ^ (x & c2) ^ c1 // = (x & ~c1) ^ (x & c2) ^ c1 // Xor-Rule 1 // = (x & c3) ^ c1, where c3 = ~c1 ^ c2 // Xor-rule 3 // if (Opnd1->isOrExpr() != Opnd2->isOrExpr()) { if (Opnd2->isOrExpr()) std::swap(Opnd1, Opnd2); const APInt &C1 = Opnd1->getConstPart(); const APInt &C2 = Opnd2->getConstPart(); APInt C3((~C1) ^ C2); // Do not increase code size! if (C3 != 0 && !C3.isAllOnesValue()) { int NewInstNum = ConstOpnd != 0 ? 1 : 2; if (NewInstNum > DeadInstNum) return false; } Res = createAndInstr(I, X, C3); ConstOpnd ^= C1; } else if (Opnd1->isOrExpr()) { // Xor-Rule 3: (x | c1) ^ (x | c2) = (x & c3) ^ c3 where c3 = c1 ^ c2 // const APInt &C1 = Opnd1->getConstPart(); const APInt &C2 = Opnd2->getConstPart(); APInt C3 = C1 ^ C2; // Do not increase code size if (C3 != 0 && !C3.isAllOnesValue()) { int NewInstNum = ConstOpnd != 0 ? 1 : 2; if (NewInstNum > DeadInstNum) return false; } Res = createAndInstr(I, X, C3); ConstOpnd ^= C3; } else { // Xor-Rule 4: (x & c1) ^ (x & c2) = (x & (c1^c2)) // const APInt &C1 = Opnd1->getConstPart(); const APInt &C2 = Opnd2->getConstPart(); APInt C3 = C1 ^ C2; Res = createAndInstr(I, X, C3); } // Put the original operands in the Redo list; hope they will be deleted // as dead code. if (Instruction *T = dyn_cast<Instruction>(Opnd1->getValue())) RedoInsts.insert(T); if (Instruction *T = dyn_cast<Instruction>(Opnd2->getValue())) RedoInsts.insert(T); return true; } /// Optimize a series of operands to an 'xor' instruction. If it can be reduced /// to a single Value, it is returned, otherwise the Ops list is mutated as /// necessary. Value *Reassociate::OptimizeXor(Instruction *I, SmallVectorImpl<ValueEntry> &Ops) { if (Value *V = OptimizeAndOrXor(Instruction::Xor, Ops)) return V; if (Ops.size() == 1) return nullptr; SmallVector<XorOpnd, 8> Opnds; SmallVector<XorOpnd*, 8> OpndPtrs; Type *Ty = Ops[0].Op->getType(); APInt ConstOpnd(Ty->getIntegerBitWidth(), 0); // Step 1: Convert ValueEntry to XorOpnd for (unsigned i = 0, e = Ops.size(); i != e; ++i) { Value *V = Ops[i].Op; if (!isa<ConstantInt>(V)) { XorOpnd O(V); O.setSymbolicRank(getRank(O.getSymbolicPart())); Opnds.push_back(O); } else ConstOpnd ^= cast<ConstantInt>(V)->getValue(); } // NOTE: From this point on, do *NOT* add/delete element to/from "Opnds". // It would otherwise invalidate the "Opnds"'s iterator, and hence invalidate // the "OpndPtrs" as well. For the similar reason, do not fuse this loop // with the previous loop --- the iterator of the "Opnds" may be invalidated // when new elements are added to the vector. for (unsigned i = 0, e = Opnds.size(); i != e; ++i) OpndPtrs.push_back(&Opnds[i]); // Step 2: Sort the Xor-Operands in a way such that the operands containing // the same symbolic value cluster together. For instance, the input operand // sequence ("x | 123", "y & 456", "x & 789") will be sorted into: // ("x | 123", "x & 789", "y & 456"). 
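  // After that sort, Step 3 below can combine adjacent operands that share a
  // symbolic value. Illustrative instance (8-bit constants chosen purely for
  // exposition): (x | 0xF0) ^ (x & 0x0F) matches Xor-Rule 2 with
  // c3 = ~0xF0 ^ 0x0F = 0, so the whole pair folds away and only the constant
  // 0xF0 remains, absorbed into ConstOpnd.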
std::stable_sort(OpndPtrs.begin(), OpndPtrs.end(), XorOpnd::PtrSortFunctor()); // Step 3: Combine adjacent operands XorOpnd *PrevOpnd = nullptr; bool Changed = false; for (unsigned i = 0, e = Opnds.size(); i < e; i++) { XorOpnd *CurrOpnd = OpndPtrs[i]; // The combined value Value *CV; // Step 3.1: Try simplifying "CurrOpnd ^ ConstOpnd" if (ConstOpnd != 0 && CombineXorOpnd(I, CurrOpnd, ConstOpnd, CV)) { Changed = true; if (CV) *CurrOpnd = XorOpnd(CV); else { CurrOpnd->Invalidate(); continue; } } if (!PrevOpnd || CurrOpnd->getSymbolicPart() != PrevOpnd->getSymbolicPart()) { PrevOpnd = CurrOpnd; continue; } // step 3.2: When previous and current operands share the same symbolic // value, try to simplify "PrevOpnd ^ CurrOpnd ^ ConstOpnd" // if (CombineXorOpnd(I, CurrOpnd, PrevOpnd, ConstOpnd, CV)) { // Remove previous operand PrevOpnd->Invalidate(); if (CV) { *CurrOpnd = XorOpnd(CV); PrevOpnd = CurrOpnd; } else { CurrOpnd->Invalidate(); PrevOpnd = nullptr; } Changed = true; } } // Step 4: Reassemble the Ops if (Changed) { Ops.clear(); for (unsigned int i = 0, e = Opnds.size(); i < e; i++) { XorOpnd &O = Opnds[i]; if (O.isInvalid()) continue; ValueEntry VE(getRank(O.getValue()), O.getValue()); Ops.push_back(VE); } if (ConstOpnd != 0) { Value *C = ConstantInt::get(Ty->getContext(), ConstOpnd); ValueEntry VE(getRank(C), C); Ops.push_back(VE); } int Sz = Ops.size(); if (Sz == 1) return Ops.back().Op; else if (Sz == 0) { assert(ConstOpnd == 0); return ConstantInt::get(Ty->getContext(), ConstOpnd); } } return nullptr; } /// Optimize a series of operands to an 'add' instruction. This /// optimizes based on identities. If it can be reduced to a single Value, it /// is returned, otherwise the Ops list is mutated as necessary. Value *Reassociate::OptimizeAdd(Instruction *I, SmallVectorImpl<ValueEntry> &Ops) { // Scan the operand lists looking for X and -X pairs. If we find any, we // can simplify expressions like X+-X == 0 and X+~X ==-1. While we're at it, // scan for any // duplicates. We want to canonicalize Y+Y+Y+Z -> 3*Y+Z. for (unsigned i = 0, e = Ops.size(); i != e; ++i) { Value *TheOp = Ops[i].Op; // Check to see if we've seen this operand before. If so, we factor all // instances of the operand together. Due to our sorting criteria, we know // that these need to be next to each other in the vector. if (i+1 != Ops.size() && Ops[i+1].Op == TheOp) { // Rescan the list, remove all instances of this operand from the expr. unsigned NumFound = 0; do { Ops.erase(Ops.begin()+i); ++NumFound; } while (i != Ops.size() && Ops[i].Op == TheOp); DEBUG(dbgs() << "\nFACTORING [" << NumFound << "]: " << *TheOp << '\n'); ++NumFactor; // Insert a new multiply. Type *Ty = TheOp->getType(); Constant *C = Ty->isIntOrIntVectorTy() ? ConstantInt::get(Ty, NumFound) : ConstantFP::get(Ty, NumFound); Instruction *Mul = CreateMul(TheOp, C, "factor", I, I); // Now that we have inserted a multiply, optimize it. This allows us to // handle cases that require multiple factoring steps, such as this: // (X*2) + (X*2) + (X*2) -> (X*2)*3 -> X*6 RedoInsts.insert(Mul); // If every add operand was a duplicate, return the multiply. if (Ops.empty()) return Mul; // Otherwise, we had some input that didn't have the dupe, such as // "A + A + B" -> "A*2 + B". Add the new multiply to the list of // things being added by this operation. Ops.insert(Ops.begin(), ValueEntry(getRank(Mul), Mul)); --i; e = Ops.size(); continue; } // Check for X and -X or X and ~X in the operand list. 
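    // (Recall that in two's complement ~X == -X - 1, so X + ~X == -1. That is
    // why an X/~X pair below is replaced by the all-ones constant, while an
    // X/-X pair simply cancels to zero.)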
if (!BinaryOperator::isNeg(TheOp) && !BinaryOperator::isFNeg(TheOp) && !BinaryOperator::isNot(TheOp)) continue; Value *X = nullptr; if (BinaryOperator::isNeg(TheOp) || BinaryOperator::isFNeg(TheOp)) X = BinaryOperator::getNegArgument(TheOp); else if (BinaryOperator::isNot(TheOp)) X = BinaryOperator::getNotArgument(TheOp); unsigned FoundX = FindInOperandList(Ops, i, X); if (FoundX == i) continue; // Remove X and -X from the operand list. if (Ops.size() == 2 && (BinaryOperator::isNeg(TheOp) || BinaryOperator::isFNeg(TheOp))) return Constant::getNullValue(X->getType()); // Remove X and ~X from the operand list. if (Ops.size() == 2 && BinaryOperator::isNot(TheOp)) return Constant::getAllOnesValue(X->getType()); Ops.erase(Ops.begin()+i); if (i < FoundX) --FoundX; else --i; // Need to back up an extra one. Ops.erase(Ops.begin()+FoundX); ++NumAnnihil; --i; // Revisit element. e -= 2; // Removed two elements. // if X and ~X we append -1 to the operand list. if (BinaryOperator::isNot(TheOp)) { Value *V = Constant::getAllOnesValue(X->getType()); Ops.insert(Ops.end(), ValueEntry(getRank(V), V)); e += 1; } } // Scan the operand list, checking to see if there are any common factors // between operands. Consider something like A*A+A*B*C+D. We would like to // reassociate this to A*(A+B*C)+D, which reduces the number of multiplies. // To efficiently find this, we count the number of times a factor occurs // for any ADD operands that are MULs. DenseMap<Value*, unsigned> FactorOccurrences; // Keep track of each multiply we see, to avoid triggering on (X*4)+(X*4) // where they are actually the same multiply. unsigned MaxOcc = 0; Value *MaxOccVal = nullptr; for (unsigned i = 0, e = Ops.size(); i != e; ++i) { BinaryOperator *BOp = isReassociableOp(Ops[i].Op, Instruction::Mul, Instruction::FMul); if (!BOp) continue; // Compute all of the factors of this added value. SmallVector<Value*, 8> Factors; FindSingleUseMultiplyFactors(BOp, Factors, Ops); assert(Factors.size() > 1 && "Bad linearize!"); // Add one to FactorOccurrences for each unique factor in this op. SmallPtrSet<Value*, 8> Duplicates; for (unsigned i = 0, e = Factors.size(); i != e; ++i) { Value *Factor = Factors[i]; if (!Duplicates.insert(Factor).second) continue; unsigned Occ = ++FactorOccurrences[Factor]; if (Occ > MaxOcc) { MaxOcc = Occ; MaxOccVal = Factor; } // If Factor is a negative constant, add the negated value as a factor // because we can percolate the negate out. Watch for minint, which // cannot be positivified. if (ConstantInt *CI = dyn_cast<ConstantInt>(Factor)) { if (CI->isNegative() && !CI->isMinValue(true)) { Factor = ConstantInt::get(CI->getContext(), -CI->getValue()); // It might have been added on an earlier pass, so don't double count. if (!Duplicates.insert(Factor).second) continue; unsigned Occ = ++FactorOccurrences[Factor]; if (Occ > MaxOcc) { MaxOcc = Occ; MaxOccVal = Factor; } } } else if (ConstantFP *CF = dyn_cast<ConstantFP>(Factor)) { if (CF->isNegative()) { APFloat F(CF->getValueAPF()); F.changeSign(); Factor = ConstantFP::get(CF->getContext(), F); // It might have been added on an earlier pass, so don't double count. if (!Duplicates.insert(Factor).second) continue; unsigned Occ = ++FactorOccurrences[Factor]; if (Occ > MaxOcc) { MaxOcc = Occ; MaxOccVal = Factor; } } } } } // If any factor occurred more than one time, we can pull it out. if (MaxOcc > 1) { DEBUG(dbgs() << "\nFACTORING [" << MaxOcc << "]: " << *MaxOccVal << '\n'); ++NumFactor; // Create a new instruction that uses the MaxOccVal twice. 
If we don't do // this, we could otherwise run into situations where removing a factor // from an expression will drop a use of maxocc, and this can cause // RemoveFactorFromExpression on successive values to behave differently. Instruction *DummyInst = I->getType()->isIntOrIntVectorTy() ? BinaryOperator::CreateAdd(MaxOccVal, MaxOccVal) : BinaryOperator::CreateFAdd(MaxOccVal, MaxOccVal); SmallVector<WeakTrackingVH, 4> NewMulOps; for (unsigned i = 0; i != Ops.size(); ++i) { // Only try to remove factors from expressions we're allowed to. BinaryOperator *BOp = isReassociableOp(Ops[i].Op, Instruction::Mul, Instruction::FMul); if (!BOp) continue; if (Value *V = RemoveFactorFromExpression(Ops[i].Op, MaxOccVal)) { // The factorized operand may occur several times. Convert them all in // one fell swoop. for (unsigned j = Ops.size(); j != i;) { --j; if (Ops[j].Op == Ops[i].Op) { NewMulOps.push_back(V); Ops.erase(Ops.begin()+j); } } --i; } } // No need for extra uses anymore. delete DummyInst; unsigned NumAddedValues = NewMulOps.size(); Value *V = EmitAddTreeOfValues(I, NewMulOps); // Now that we have inserted the add tree, optimize it. This allows us to // handle cases that require multiple factoring steps, such as this: // A*A*B + A*A*C --> A*(A*B+A*C) --> A*(A*(B+C)) assert(NumAddedValues > 1 && "Each occurrence should contribute a value"); (void)NumAddedValues; if (Instruction *VI = dyn_cast<Instruction>(V)) RedoInsts.insert(VI); // Create the multiply. Instruction *V2 = CreateMul(V, MaxOccVal, "tmp", I, I); // Rerun associate on the multiply in case the inner expression turned into // a multiply. We want to make sure that we keep things in canonical form. RedoInsts.insert(V2); // If every add operand included the factor (e.g. "A*B + A*C"), then the // entire result expression is just the multiply "A*(B+C)". if (Ops.empty()) return V2; // Otherwise, we had some input that didn't have the factor, such as // "A*B + A*C + D" -> "A*(B+C) + D". Add the new multiply to the list of // things being added by this operation. Ops.insert(Ops.begin(), ValueEntry(getRank(V2), V2)); } return nullptr; } /// \brief Build up a vector of value/power pairs factoring a product. /// /// Given a series of multiplication operands, build a vector of factors and /// the powers each is raised to when forming the final product. Sort them in /// the order of descending power. /// /// (x*x) -> [(x, 2)] /// ((x*x)*x) -> [(x, 3)] /// ((((x*y)*x)*y)*x) -> [(x, 3), (y, 2)] /// /// \returns Whether any factors have a power greater than one. bool Reassociate::collectMultiplyFactors(SmallVectorImpl<ValueEntry> &Ops, SmallVectorImpl<Factor> &Factors) { // FIXME: Have Ops be (ValueEntry, Multiplicity) pairs, simplifying this. // Compute the sum of powers of simplifiable factors. unsigned FactorPowerSum = 0; for (unsigned Idx = 1, Size = Ops.size(); Idx < Size; ++Idx) { Value *Op = Ops[Idx-1].Op; // Count the number of occurrences of this value. unsigned Count = 1; for (; Idx < Size && Ops[Idx].Op == Op; ++Idx) ++Count; // Track for simplification all factors which occur 2 or more times. if (Count > 1) FactorPowerSum += Count; } // We can only simplify factors if the sum of the powers of our simplifiable // factors is 4 or higher. When that is the case, we will *always* have // a simplification. This is an important invariant to prevent cyclicly // trying to simplify already minimal formations. if (FactorPowerSum < 4) return false; // Now gather the simplifiable factors, removing them from Ops. 
FactorPowerSum = 0; for (unsigned Idx = 1; Idx < Ops.size(); ++Idx) { Value *Op = Ops[Idx-1].Op; // Count the number of occurrences of this value. unsigned Count = 1; for (; Idx < Ops.size() && Ops[Idx].Op == Op; ++Idx) ++Count; if (Count == 1) continue; // Move an even number of occurrences to Factors. Count &= ~1U; Idx -= Count; FactorPowerSum += Count; Factors.push_back(Factor(Op, Count)); Ops.erase(Ops.begin()+Idx, Ops.begin()+Idx+Count); } // None of the adjustments above should have reduced the sum of factor powers // below our mininum of '4'. assert(FactorPowerSum >= 4); std::stable_sort(Factors.begin(), Factors.end(), Factor::PowerDescendingSorter()); return true; } /// \brief Build a tree of multiplies, computing the product of Ops. static Value *buildMultiplyTree(IRBuilder<> &Builder, SmallVectorImpl<Value*> &Ops) { if (Ops.size() == 1) return Ops.back(); Value *LHS = Ops.pop_back_val(); do { if (LHS->getType()->isIntOrIntVectorTy()) LHS = Builder.CreateMul(LHS, Ops.pop_back_val()); else LHS = Builder.CreateFMul(LHS, Ops.pop_back_val()); } while (!Ops.empty()); return LHS; } /// \brief Build a minimal multiplication DAG for (a^x)*(b^y)*(c^z)*... /// /// Given a vector of values raised to various powers, where no two values are /// equal and the powers are sorted in decreasing order, compute the minimal /// DAG of multiplies to compute the final product, and return that product /// value. Value *Reassociate::buildMinimalMultiplyDAG(IRBuilder<> &Builder, SmallVectorImpl<Factor> &Factors) { assert(Factors[0].Power); SmallVector<Value *, 4> OuterProduct; for (unsigned LastIdx = 0, Idx = 1, Size = Factors.size(); Idx < Size && Factors[Idx].Power > 0; ++Idx) { if (Factors[Idx].Power != Factors[LastIdx].Power) { LastIdx = Idx; continue; } // We want to multiply across all the factors with the same power so that // we can raise them to that power as a single entity. Build a mini tree // for that. SmallVector<Value *, 4> InnerProduct; InnerProduct.push_back(Factors[LastIdx].Base); do { InnerProduct.push_back(Factors[Idx].Base); ++Idx; } while (Idx < Size && Factors[Idx].Power == Factors[LastIdx].Power); // Reset the base value of the first factor to the new expression tree. // We'll remove all the factors with the same power in a second pass. Value *M = Factors[LastIdx].Base = buildMultiplyTree(Builder, InnerProduct); if (Instruction *MI = dyn_cast<Instruction>(M)) RedoInsts.insert(MI); LastIdx = Idx; } // Unique factors with equal powers -- we've folded them into the first one's // base. Factors.erase(std::unique(Factors.begin(), Factors.end(), Factor::PowerEqual()), Factors.end()); // Iteratively collect the base of each factor with an add power into the // outer product, and halve each power in preparation for squaring the // expression. for (unsigned Idx = 0, Size = Factors.size(); Idx != Size; ++Idx) { if (Factors[Idx].Power & 1) OuterProduct.push_back(Factors[Idx].Base); Factors[Idx].Power >>= 1; } if (Factors[0].Power) { Value *SquareRoot = buildMinimalMultiplyDAG(Builder, Factors); OuterProduct.push_back(SquareRoot); OuterProduct.push_back(SquareRoot); } if (OuterProduct.size() == 1) return OuterProduct.front(); Value *V = buildMultiplyTree(Builder, OuterProduct); return V; } Value *Reassociate::OptimizeMul(BinaryOperator *I, SmallVectorImpl<ValueEntry> &Ops) { // We can only optimize the multiplies when there is a chain of more than // three, such that a balanced tree might require fewer total multiplies. 
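  // Illustrative example (names chosen for exposition): for the linear chain
  // x*x*x*x*y*y, collectMultiplyFactors produces [(x, 4), (y, 2)] and
  // buildMinimalMultiplyDAG then emits t = (x*x)*y followed by t*t -- three
  // multiplies instead of the five in the original chain.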
if (Ops.size() < 4) return nullptr; // Try to turn linear trees of multiplies without other uses of the // intermediate stages into minimal multiply DAGs with perfect sub-expression // re-use. SmallVector<Factor, 4> Factors; if (!collectMultiplyFactors(Ops, Factors)) return nullptr; // All distinct factors, so nothing left for us to do. IRBuilder<> Builder(I); Value *V = buildMinimalMultiplyDAG(Builder, Factors); if (Ops.empty()) return V; ValueEntry NewEntry = ValueEntry(getRank(V), V); Ops.insert(std::lower_bound(Ops.begin(), Ops.end(), NewEntry), NewEntry); return nullptr; } Value *Reassociate::OptimizeExpression(BinaryOperator *I, SmallVectorImpl<ValueEntry> &Ops) { // Now that we have the linearized expression tree, try to optimize it. // Start by folding any constants that we found. Constant *Cst = nullptr; unsigned Opcode = I->getOpcode(); while (!Ops.empty() && isa<Constant>(Ops.back().Op)) { Constant *C = cast<Constant>(Ops.pop_back_val().Op); Cst = Cst ? ConstantExpr::get(Opcode, C, Cst) : C; } // If there was nothing but constants then we are done. if (Ops.empty()) return Cst; // Put the combined constant back at the end of the operand list, except if // there is no point. For example, an add of 0 gets dropped here, while a // multiplication by zero turns the whole expression into zero. if (Cst && Cst != ConstantExpr::getBinOpIdentity(Opcode, I->getType())) { if (Cst == ConstantExpr::getBinOpAbsorber(Opcode, I->getType())) return Cst; Ops.push_back(ValueEntry(0, Cst)); } if (Ops.size() == 1) return Ops[0].Op; // Handle destructive annihilation due to identities between elements in the // argument list here. unsigned NumOps = Ops.size(); switch (Opcode) { default: break; case Instruction::And: case Instruction::Or: if (Value *Result = OptimizeAndOrXor(Opcode, Ops)) return Result; break; case Instruction::Xor: if (Value *Result = OptimizeXor(I, Ops)) return Result; break; case Instruction::Add: case Instruction::FAdd: if (Value *Result = OptimizeAdd(I, Ops)) return Result; break; case Instruction::Mul: case Instruction::FMul: if (Value *Result = OptimizeMul(I, Ops)) return Result; break; } if (Ops.size() != NumOps) return OptimizeExpression(I, Ops); return nullptr; } /// Zap the given instruction, adding interesting operands to the work list. void Reassociate::EraseInst(Instruction *I) { assert(isInstructionTriviallyDead(I) && "Trivially dead instructions only!"); SmallVector<Value*, 8> Ops(I->op_begin(), I->op_end()); // Erase the dead instruction. ValueRankMap.erase(I); RedoInsts.remove(I); I->eraseFromParent(); // Optimize its operands. SmallPtrSet<Instruction *, 8> Visited; // Detect self-referential nodes. for (unsigned i = 0, e = Ops.size(); i != e; ++i) if (Instruction *Op = dyn_cast<Instruction>(Ops[i])) { // If this is a node in an expression tree, climb to the expression root // and add that since that's where optimization actually happens. unsigned Opcode = Op->getOpcode(); while (Op->hasOneUse() && Op->user_back()->getOpcode() == Opcode && Visited.insert(Op).second) Op = Op->user_back(); RedoInsts.insert(Op); } } // Canonicalize expressions of the following form: // x + (-Constant * y) -> x - (Constant * y) // x - (-Constant * y) -> x + (Constant * y) Instruction *Reassociate::canonicalizeNegConstExpr(Instruction *I) { if (!I->hasOneUse() || I->getType()->isVectorTy()) return nullptr; // Must be a fmul or fdiv instruction. 
unsigned Opcode = I->getOpcode(); if (Opcode != Instruction::FMul && Opcode != Instruction::FDiv) return nullptr; auto *C0 = dyn_cast<ConstantFP>(I->getOperand(0)); auto *C1 = dyn_cast<ConstantFP>(I->getOperand(1)); // Both operands are constant, let it get constant folded away. if (C0 && C1) return nullptr; ConstantFP *CF = C0 ? C0 : C1; // Must have one constant operand. if (!CF) return nullptr; // Must be a negative ConstantFP. if (!CF->isNegative()) return nullptr; // User must be a binary operator with one or more uses. Instruction *User = I->user_back(); if (!isa<BinaryOperator>(User) || !User->hasNUsesOrMore(1)) return nullptr; unsigned UserOpcode = User->getOpcode(); if (UserOpcode != Instruction::FAdd && UserOpcode != Instruction::FSub) return nullptr; // Subtraction is not commutative. Explicitly, the following transform is // not valid: (-Constant * y) - x -> x + (Constant * y) if (!User->isCommutative() && User->getOperand(1) != I) return nullptr; // Change the sign of the constant. APFloat Val = CF->getValueAPF(); Val.changeSign(); I->setOperand(C0 ? 0 : 1, ConstantFP::get(CF->getContext(), Val)); // Canonicalize I to RHS to simplify the next bit of logic. E.g., // ((-Const*y) + x) -> (x + (-Const*y)). if (User->getOperand(0) == I && User->isCommutative()) cast<BinaryOperator>(User)->swapOperands(); Value *Op0 = User->getOperand(0); Value *Op1 = User->getOperand(1); BinaryOperator *NI; switch (UserOpcode) { default: llvm_unreachable("Unexpected Opcode!"); case Instruction::FAdd: NI = BinaryOperator::CreateFSub(Op0, Op1); NI->setFastMathFlags(cast<FPMathOperator>(User)->getFastMathFlags()); break; case Instruction::FSub: NI = BinaryOperator::CreateFAdd(Op0, Op1); NI->setFastMathFlags(cast<FPMathOperator>(User)->getFastMathFlags()); break; } NI->insertBefore(User); NI->setName(User->getName()); User->replaceAllUsesWith(NI); NI->setDebugLoc(I->getDebugLoc()); RedoInsts.insert(I); MadeChange = true; return NI; } /// Inspect and optimize the given instruction. Note that erasing /// instructions is not allowed. void Reassociate::OptimizeInst(Instruction *I) { // Only consider operations that we understand. if (!isa<BinaryOperator>(I)) return; if (I->getOpcode() == Instruction::Shl && isa<ConstantInt>(I->getOperand(1))) // If an operand of this shift is a reassociable multiply, or if the shift // is used by a reassociable multiply or add, turn into a multiply. if (isReassociableOp(I->getOperand(0), Instruction::Mul) || (I->hasOneUse() && (isReassociableOp(I->user_back(), Instruction::Mul) || isReassociableOp(I->user_back(), Instruction::Add)))) { Instruction *NI = ConvertShiftToMul(I); RedoInsts.insert(I); MadeChange = true; I = NI; } // Canonicalize negative constants out of expressions. if (Instruction *Res = canonicalizeNegConstExpr(I)) I = Res; // Commute binary operators, to canonicalize the order of their operands. // This can potentially expose more CSE opportunities, and makes writing other // transformations simpler. if (I->isCommutative()) canonicalizeOperands(I); // TODO: We should optimize vector Xor instructions, but they are // currently unsupported. if (I->getType()->isVectorTy() && I->getOpcode() == Instruction::Xor) return; // Don't optimize floating point instructions that don't have unsafe algebra. if (I->getType()->isFloatingPointTy() && !I->hasUnsafeAlgebra()) return; // Do not reassociate boolean (i1) expressions. We want to preserve the // original order of evaluation for short-circuited comparisons that // SimplifyCFG has folded to AND/OR expressions. 
If the expression // is not further optimized, it is likely to be transformed back to a // short-circuited form for code gen, and the source order may have been // optimized for the most likely conditions. if (I->getType()->isIntegerTy(1)) return; // If this is a subtract instruction which is not already in negate form, // see if we can convert it to X+-Y. if (I->getOpcode() == Instruction::Sub) { if (ShouldBreakUpSubtract(I)) { Instruction *NI = BreakUpSubtract(I); RedoInsts.insert(I); MadeChange = true; I = NI; } else if (BinaryOperator::isNeg(I)) { // Otherwise, this is a negation. See if the operand is a multiply tree // and if this is not an inner node of a multiply tree. if (isReassociableOp(I->getOperand(1), Instruction::Mul) && (!I->hasOneUse() || !isReassociableOp(I->user_back(), Instruction::Mul))) { Instruction *NI = LowerNegateToMultiply(I); RedoInsts.insert(I); MadeChange = true; I = NI; } } } else if (I->getOpcode() == Instruction::FSub) { if (ShouldBreakUpSubtract(I)) { Instruction *NI = BreakUpSubtract(I); RedoInsts.insert(I); MadeChange = true; I = NI; } else if (BinaryOperator::isFNeg(I)) { // Otherwise, this is a negation. See if the operand is a multiply tree // and if this is not an inner node of a multiply tree. if (isReassociableOp(I->getOperand(1), Instruction::FMul) && (!I->hasOneUse() || !isReassociableOp(I->user_back(), Instruction::FMul))) { Instruction *NI = LowerNegateToMultiply(I); RedoInsts.insert(I); MadeChange = true; I = NI; } } } // If this instruction is an associative binary operator, process it. if (!I->isAssociative()) return; BinaryOperator *BO = cast<BinaryOperator>(I); // If this is an interior node of a reassociable tree, ignore it until we // get to the root of the tree, to avoid N^2 analysis. unsigned Opcode = BO->getOpcode(); if (BO->hasOneUse() && BO->user_back()->getOpcode() == Opcode) return; // If this is an add tree that is used by a sub instruction, ignore it // until we process the subtract. if (BO->hasOneUse() && BO->getOpcode() == Instruction::Add && cast<Instruction>(BO->user_back())->getOpcode() == Instruction::Sub) return; if (BO->hasOneUse() && BO->getOpcode() == Instruction::FAdd && cast<Instruction>(BO->user_back())->getOpcode() == Instruction::FSub) return; ReassociateExpression(BO); } void Reassociate::ReassociateExpression(BinaryOperator *I) { // First, walk the expression tree, linearizing the tree, collecting the // operand information. SmallVector<RepeatedValue, 8> Tree; MadeChange |= LinearizeExprTree(I, Tree); SmallVector<ValueEntry, 8> Ops; Ops.reserve(Tree.size()); for (unsigned i = 0, e = Tree.size(); i != e; ++i) { RepeatedValue E = Tree[i]; Ops.append(E.second.getZExtValue(), ValueEntry(getRank(E.first), E.first)); } DEBUG(dbgs() << "RAIn:\t"; PrintOps(I, Ops); dbgs() << '\n'); // Now that we have linearized the tree to a list and have gathered all of // the operands and their ranks, sort the operands by their rank. Use a // stable_sort so that values with equal ranks will have their relative // positions maintained (and so the compiler is deterministic). Note that // this sorts so that the highest ranking values end up at the beginning of // the vector. std::stable_sort(Ops.begin(), Ops.end()); // Now that we have the expression tree in a convenient // sorted form, optimize it globally if possible. if (Value *V = OptimizeExpression(I, Ops)) { if (V == I) // Self-referential expression in unreachable code. return; // This expression tree simplified to something that isn't a tree, // eliminate it. 
DEBUG(dbgs() << "Reassoc to scalar: " << *V << '\n'); I->replaceAllUsesWith(V); if (Instruction *VI = dyn_cast<Instruction>(V)) VI->setDebugLoc(I->getDebugLoc()); RedoInsts.insert(I); ++NumAnnihil; return; } // We want to sink immediates as deeply as possible except in the case where // this is a multiply tree used only by an add, and the immediate is a -1. // In this case we reassociate to put the negation on the outside so that we // can fold the negation into the add: (-X)*Y + Z -> Z-X*Y if (I->hasOneUse()) { if (I->getOpcode() == Instruction::Mul && cast<Instruction>(I->user_back())->getOpcode() == Instruction::Add && isa<ConstantInt>(Ops.back().Op) && cast<ConstantInt>(Ops.back().Op)->isAllOnesValue()) { ValueEntry Tmp = Ops.pop_back_val(); Ops.insert(Ops.begin(), Tmp); } else if (I->getOpcode() == Instruction::FMul && cast<Instruction>(I->user_back())->getOpcode() == Instruction::FAdd && isa<ConstantFP>(Ops.back().Op) && cast<ConstantFP>(Ops.back().Op)->isExactlyValue(-1.0)) { ValueEntry Tmp = Ops.pop_back_val(); Ops.insert(Ops.begin(), Tmp); } } DEBUG(dbgs() << "RAOut:\t"; PrintOps(I, Ops); dbgs() << '\n'); if (Ops.size() == 1) { if (Ops[0].Op == I) // Self-referential expression in unreachable code. return; // This expression tree simplified to something that isn't a tree, // eliminate it. I->replaceAllUsesWith(Ops[0].Op); if (Instruction *OI = dyn_cast<Instruction>(Ops[0].Op)) OI->setDebugLoc(I->getDebugLoc()); RedoInsts.insert(I); return; } if (HLSLEnableAggressiveReassociation && // HLSL Change (Ops.size() > 2 && Ops.size() <= GlobalReassociateLimit)) { // Find the pair with the highest count in the pairmap and move it to the // back of the list so that it can later be CSE'd. // example: // a*b*c*d*e // if c*e is the most "popular" pair, we can express this as // (((c*e)*d)*b)*a unsigned Max = 1; unsigned BestRank = 0; std::pair<unsigned, unsigned> BestPair; unsigned Idx = I->getOpcode() - Instruction::BinaryOpsBegin; for (unsigned i = 0; i < Ops.size() - 1; ++i) for (unsigned j = i + 1; j < Ops.size(); ++j) { unsigned Score = 0; Value *Op0 = Ops[i].Op; Value *Op1 = Ops[j].Op; if (std::less<Value *>()(Op1, Op0)) std::swap(Op0, Op1); auto it = PairMap[Idx].find({Op0, Op1}); if (it != PairMap[Idx].end()) { // Functions like BreakUpSubtract() can erase the Values we're using // as keys and create new Values after we built the PairMap. There's a // small chance that the new nodes can have the same address as // something already in the table. We shouldn't accumulate the stored // score in that case as it refers to the wrong Value. if (it->second.isValid()) { Score += it->second.Score; } } unsigned MaxRank = std::max(Ops[i].Rank, Ops[j].Rank); if (Score > Max || (Score == Max && MaxRank < BestRank)) { BestPair = {i, j}; Max = Score; BestRank = MaxRank; } } if (Max > 1) { auto Op0 = Ops[BestPair.first]; auto Op1 = Ops[BestPair.second]; Ops.erase(&Ops[BestPair.second]); Ops.erase(&Ops[BestPair.first]); Ops.push_back(Op0); Ops.push_back(Op1); } } // Now that we ordered and optimized the expressions, splat them back into // the expression tree, removing any unneeded nodes. RewriteExprTree(I, Ops); } void Reassociate::BuildPairMap(ReversePostOrderTraversal<Function *> &RPOT) { // Make a "pairmap" of how often each operand pair occurs. for (BasicBlock *BI : RPOT) { for (Instruction &I : *BI) { if (!I.isAssociative()) continue; // Ignore nodes that aren't at the root of trees. 
if (I.hasOneUse() && I.user_back()->getOpcode() == I.getOpcode()) continue; // Collect all operands in a single reassociable expression. // Since Reassociate has already been run once, we can assume things // are already canonical according to Reassociation's regime. SmallVector<Value *, 8> Worklist = {I.getOperand(0), I.getOperand(1)}; SmallVector<Value *, 8> Ops; while (!Worklist.empty() && Ops.size() <= GlobalReassociateLimit) { Value *Op = Worklist.pop_back_val(); Instruction *OpI = dyn_cast<Instruction>(Op); if (!OpI || OpI->getOpcode() != I.getOpcode() || !OpI->hasOneUse()) { Ops.push_back(Op); continue; } // Be paranoid about self-referencing expressions in unreachable code. if (OpI->getOperand(0) != OpI) Worklist.push_back(OpI->getOperand(0)); if (OpI->getOperand(1) != OpI) Worklist.push_back(OpI->getOperand(1)); } // Skip extremely long expressions. if (Ops.size() > GlobalReassociateLimit) continue; // Add all pairwise combinations of operands to the pair map. unsigned BinaryIdx = I.getOpcode() - Instruction::BinaryOpsBegin; SmallSet<std::pair<Value *, Value *>, 32> Visited; for (unsigned i = 0; i < Ops.size() - 1; ++i) { for (unsigned j = i + 1; j < Ops.size(); ++j) { // Canonicalize operand orderings. Value *Op0 = Ops[i]; Value *Op1 = Ops[j]; if (std::less<Value *>()(Op1, Op0)) std::swap(Op0, Op1); if (!Visited.insert({Op0, Op1}).second) continue; auto res = PairMap[BinaryIdx].insert({{Op0, Op1}, {Op0, Op1, 1}}); if (!res.second) { // If either key value has been erased then we've got the same // address by coincidence. That can't happen here because nothing is // erasing values but it can happen by the time we're querying the // map. assert(res.first->second.isValid() && "WeakVH invalidated"); ++res.first->second.Score; } } } } } } bool Reassociate::runOnFunction(Function &F) { if (skipOptnoneFunction(F)) return false; // Calculate the rank map for F BuildRankMap(F); if (HLSLEnableAggressiveReassociation) { // HLSL Change // Build the pair map before running reassociate. // Technically this would be more accurate if we did it after one round // of reassociation, but in practice it doesn't seem to help much on // real-world code, so don't waste the compile time running reassociate // twice. // If a user wants, they could expicitly run reassociate twice in their // pass pipeline for further potential gains. // It might also be possible to update the pair map during runtime, but the // overhead of that may be large if there's many reassociable chains. // TODO: RPOT // Get the functions basic blocks in Reverse Post Order. This order is used // by BuildRankMap to pre calculate ranks correctly. It also excludes dead // basic blocks (it has been seen that the analysis in this pass could hang // when analysing dead basic blocks). ReversePostOrderTraversal<Function *> RPOT(&F); BuildPairMap(RPOT); } // HLSL Change MadeChange = false; for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) { // Optimize every instruction in the basic block. for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE; ) if (isInstructionTriviallyDead(II)) { EraseInst(II++); } else { OptimizeInst(II); assert(II->getParent() == BI && "Moved to a different block!"); ++II; } // If this produced extra instructions to optimize, handle them now. while (!RedoInsts.empty()) { Instruction *I = RedoInsts.pop_back_val(); if (isInstructionTriviallyDead(I)) EraseInst(I); else OptimizeInst(I); } } // We are done with the rank map and pair map. 
RankMap.clear(); ValueRankMap.clear(); if (HLSLEnableAggressiveReassociation) { // HLSL Change for (auto &Entry : PairMap) Entry.clear(); } // HLSL Change return MadeChange; }
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/LLVMBuild.txt
;===- ./lib/Transforms/Scalar/LLVMBuild.txt --------------------*- Conf -*--===; ; ; The LLVM Compiler Infrastructure ; ; This file is distributed under the University of Illinois Open Source ; License. See LICENSE.TXT for details. ; ;===------------------------------------------------------------------------===; ; ; This is an LLVMBuild description file for the components in this subdirectory. ; ; For more information on the LLVMBuild system, please see: ; ; http://llvm.org/docs/LLVMBuild.html ; ;===------------------------------------------------------------------------===; [component_0] type = Library name = Scalar parent = Transforms library_name = ScalarOpts required_libraries = Analysis Core InstCombine ProfileData Support TransformUtils
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/DxilRemoveUnstructuredLoopExits.cpp
//===- DxilRemoveUnstructuredLoopExits.cpp - Make unrolled loops structured //---===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===// // // Loops that look like the following when unrolled becomes unstructured: // // for(;;) { // if (a) { // if (b) { // exit_code_0; // break; // Unstructured loop exit // } // // code_0; // // if (c) { // if (d) { // exit_code_1; // break; // Unstructured loop exit // } // code_1; // } // // code_2; // // ... // } // // code_3; // // if (exit) // break; // } // // // This pass transforms the loop into the following form: // // bool broke_0 = false; // bool broke_1 = false; // // for(;;) { // if (a) { // if (b) { // broke_0 = true; // Break flag // } // // if (!broke_0) { // code_0; // } // // if (!broke_0) { // if (c) { // if (d) { // broke_1 = true; // Break flag // } // if (!broke_1) { // code_1; // } // } // // if (!broke_1) { // code_2; // } // } // // ... // } // // if (broke_0) { // break; // } // // if (broke_1) { // break; // } // // code_3; // // if (exit) // break; // } // // if (broke_0) { // exit_code_0; // } // // if (broke_1) { // exit_code_1; // } // // Essentially it hoists the exit branch out of the loop: // - That is, any exiting block must dominate the latch block. // - All exits go through a single latch-exit block. // - The action of the exiting blocks are deferred and conditionally // executed after reaching the latch-exit block. // // This function should be called any time before a function is unrolled to // avoid generating unstructured code. // // There are several limitations at the moment: // // - if code_0, code_1, etc has any loops in there, this transform // does not take place. Since the values that flow out of the conditions // are phi of undef, I do not want to risk the loops not exiting. // // - code_0, code_1, etc, become conditional only when there are // side effects in there. This doesn't impact code correctness, // but the code will execute for one iteration even if the exit condition // is met. // // - If any exiting block uses a switch statement to conditionally exit the // loop, we currently do not handle that case. // // These limitations can be fixed in the future as needed. 
// //===----------------------------------------------------------------------===// #include "dxc/HLSL/DxilNoops.h" #include "llvm/ADT/SetVector.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/LoopPass.h" #include "llvm/IR/Constant.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Verifier.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/LoopUtils.h" #include <unordered_map> #include <unordered_set> #include "DxilRemoveUnstructuredLoopExits.h" using namespace llvm; #define DEBUG_TYPE "dxil-remove-unstructured-loop-exits" namespace { bool IsNoop(Instruction *inst) { if (CallInst *ci = dyn_cast<CallInst>(inst)) { if (Function *f = ci->getCalledFunction()) { return f->getName() == hlsl::kNoopName; } } return false; } bool HasSideEffects(BasicBlock *bb) { for (Instruction &I : *bb) { if (I.mayReadOrWriteMemory() && !IsNoop(&I)) { return true; } } return false; } // Captures information about a value which is propagated from the exiting block // to the exit block. A special 'exiting-condition' case occurs when the // value is the condition on the exiting branch; by prior arrangement the // exit path is taken when the value is 'true'. struct Value_Info { // The value from the exiting block. Value *val; // The False value, if 'val' is the exiting-condition value. // Otherwise, a default value for the 'val's type. Value *false_val; // nullptr if 'val' is the exiting-condition and not otherwise propagated into // the exit block. Otherwise, this is the single-input phi that carries 'val' // into the exit block. The LCSSA form guarantees this exits for any value // carried out of the loop via this exit path. PHINode *exit_phi; }; // A Propagator does the following: // - Let EB be an exiting block for a loop. // - Let exit_values be the values that EB sends out of the loop to its // corresponding exit block. // - The Run method: // - Traverses blocks all the loop reachable from EB, stopping at // a block that dominates the loop latch. // (The stopping block is unique, by a contradiction argument.) // - Modifies traversed blocks to add phi nodes to propagate values // from exit_values // - Remembers which phi node is used to propagate each exit value // to each traversed block. // - The Get method is used to look up those phi nodes. // // The Run method can fail, in which case it cleans up after itself by // removing the phi nodes it may have added in the meantime. struct Propagator { // Maps a {block B, value V} to the phi that is used to get V in B. DenseMap<std::pair<BasicBlock *, Value *>, PHINode *> cached_phis; // The set of blocks visited. Traversals start at an exiting block, then // follow successors that are in the same loop. Stop when we reach a block // that dominates the latch block. That block is unique: otherwise there // would be different such blocks X and Y that dominate the latch, but not // each other. That's a contradiction.) // The algorithm stops early (and fails) if any traversed blocks are part // of an *inner* loop differnt from L. std::unordered_set<BasicBlock *> seen; // Get propagated value for val. It's guaranteed to be safe to use in bb. 
Value *Get(Value *val, BasicBlock *bb) { auto it = cached_phis.find({bb, val}); if (it == cached_phis.end()) return nullptr; return it->second; } // Erase any phis that may have been created, and forget them. void DeleteAllNewValues() { for (auto &pair : cached_phis) { pair.second->dropAllReferences(); } for (auto &pair : cached_phis) { pair.second->eraseFromParent(); } cached_phis.clear(); } // Given loop L, and exiting block EB, take all exit values from EB // and try to propagate them into other blocks in L reachable from EB, // stopping at a block that dominates the loop latch. (Only one // such block dominates the loop latch; otherwise there would be // different such blocks X and Y that dominate the latch, but not // each other. That's a contradiction.) // Assumes EB does not dominate the latch. // Exit values are propagated using phis. // Also collect the traversed blocks that have side effects, other // than the initial exiting block. // Fail if the traversal finds a block in L that is also in an (inner) // loop contained inside L. Failure is signaled by returning null. // On success return the found block that dominates the latch. BasicBlock *Run(const SmallVector<Value_Info, 8> &exit_values, BasicBlock *exiting_block, BasicBlock *latch, DominatorTree *DT, Loop *L, LoopInfo *LI, std::vector<BasicBlock *> &blocks_with_side_effect) { BasicBlock *ret = RunImpl(exit_values, exiting_block, latch, DT, L, LI, blocks_with_side_effect); // If we failed, remove all the values we added. if (!ret) { DeleteAllNewValues(); } return ret; } BasicBlock *RunImpl(const SmallVector<Value_Info, 8> &exit_values, BasicBlock *exiting_block, BasicBlock *latch, DominatorTree *DT, Loop *L, LoopInfo *LI, std::vector<BasicBlock *> &blocks_with_side_effect) { struct Edge { BasicBlock *prev; BasicBlock *bb; }; BasicBlock *new_exiting_block = nullptr; SmallVector<Edge, 4> work_list; work_list.push_back({nullptr, exiting_block}); seen.insert(exiting_block); for (unsigned i = 0; i < work_list.size(); i++) { auto &edge = work_list[i]; BasicBlock *prev = edge.prev; BasicBlock *bb = edge.bb; // Don't continue to propagate when we hit the latch or dominate it. if (DT->dominates(bb, latch)) { new_exiting_block = bb; continue; } // Do not include the exiting block itself in this calculation if (prev != nullptr) { // If this block is part of an inner loop... Give up for now. if (LI->getLoopFor(bb) != L) { return nullptr; } // Otherwise remember the blocks with side effects (including the // latch) if (HasSideEffects(bb)) { blocks_with_side_effect.push_back(bb); } } for (BasicBlock *succ : llvm::successors(bb)) { // Don't propagate if block is not part of this loop. if (!L->contains(succ)) continue; for (const auto &ev : exit_values) { // Find or create phi for the value in the successor block PHINode *phi = cached_phis[{succ, ev.val}]; if (!phi) { // Create a phi node with all dummy values for now. 
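          // (Illustrative sketch, value names invented for exposition: if the
          // exit condition in the exiting block is %c, each visited successor
          // S ends up with a phi such as
          //   %p = phi i1 [ %c, %pred.on.exit.path ], [ false, %other.pred ]
          // so blocks reached after S can test whether this exit was taken.)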
phi = PHINode::Create(ev.false_val->getType(), 0, "dx.struct_exit.prop", &*succ->begin()); for (BasicBlock *pred : llvm::predecessors(succ)) { phi->addIncoming(ev.false_val, pred); } cached_phis[{succ, ev.val}] = phi; } // Find the incoming value for successor block Value *incoming = nullptr; if (!prev) { incoming = ev.val; } else { incoming = cached_phis[{bb, ev.val}]; } // Set incoming value for our phi for (unsigned i = 0; i < phi->getNumIncomingValues(); i++) { if (phi->getIncomingBlock(i) == bb) { phi->setIncomingValue(i, incoming); } } // Add to worklist if (!seen.count(succ)) { work_list.push_back({bb, succ}); seen.insert(succ); } } } // for each succ } // for each in worklist if (new_exiting_block == exiting_block) { return nullptr; } return new_exiting_block; } }; // struct Propagator } // Unnamed namespace static Value *GetDefaultValue(Type *type) { if (type->isIntegerTy()) { return ConstantInt::get(type, 0); } else if (type->isFloatingPointTy()) { return ConstantFP::get(type, 0); } return UndefValue::get(type); } static BasicBlock *GetExitBlockForExitingBlock(Loop *L, BasicBlock *exiting_block) { BranchInst *br = dyn_cast<BranchInst>(exiting_block->getTerminator()); assert(L->contains(exiting_block)); assert(br->isConditional()); BasicBlock *result = L->contains(br->getSuccessor(0)) ? br->getSuccessor(1) : br->getSuccessor(0); assert(!L->contains(result)); return result; } // Branch over the block's content when skip_cond is true. // All values used outside the block are replaced by a phi. static void SkipBlockWithBranch(BasicBlock *bb, Value *skip_cond, Loop *L, LoopInfo *LI) { BasicBlock *body = bb->splitBasicBlock(bb->getFirstNonPHI()); body->setName("dx.struct_exit.cond_body"); BasicBlock *end = body->splitBasicBlock(body->getTerminator()); end->setName("dx.struct_exit.cond_end"); bb->getTerminator()->eraseFromParent(); BranchInst::Create(end, body, skip_cond, bb); for (Instruction &inst : *body) { // For each user that's outside of 'body', replace its use of 'inst' with a // phi created in 'end' SmallPtrSet<Instruction *, 8> users_in_other_blocks; for (auto *user : inst.users()) { Instruction *user_inst = cast<Instruction>(user); if (user_inst->getParent() != body) { users_in_other_blocks.insert(user_inst); } } if (users_in_other_blocks.size() > 0) { auto *phi = PHINode::Create(inst.getType(), 2, "", &*end->begin()); phi->addIncoming(GetDefaultValue(inst.getType()), bb); phi->addIncoming(&inst, body); for (auto *user_inst : users_in_other_blocks) { user_inst->replaceUsesOfWith(&inst, phi); } } } // For each inst in body L->addBasicBlockToLoop(body, *LI); L->addBasicBlockToLoop(end, *LI); } static unsigned GetNumPredecessors(BasicBlock *bb) { unsigned ret = 0; for (BasicBlock *pred : llvm::predecessors(bb)) { (void)pred; ret++; } return ret; } // Returns a vector of Value_Info: // - one for each value carried from the loop into the exit block via the // exiting block. // - one for the new exit condition (the one that will be used to exit the // loop from a block later in the loop body) static SmallVector<Value_Info, 8> CollectExitValues(Value *new_exit_cond, BasicBlock *exiting_block, BasicBlock *exit_block) { SmallVector<Value_Info, 8> exit_values; // Look at the lcssa phi's in the exit block. 
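  // (In LCSSA form every value that is defined inside the loop and used
  // outside of it is routed through a phi in the exit block; with a single
  // exiting edge such a phi has exactly one incoming value, e.g.
  //   %v.lcssa = phi i32 [ %v, %exiting_block ]
  // Names and type here are illustrative.)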
bool exit_cond_has_phi = false; for (Instruction &I : *exit_block) { if (PHINode *phi = dyn_cast<PHINode>(&I)) { // If there are values flowing out of the loop into the exit_block, // add them to the list to be propagated Value *value = phi->getIncomingValueForBlock(exiting_block); Value *false_value = nullptr; if (value == new_exit_cond) { false_value = Constant::getNullValue(value->getType()); exit_cond_has_phi = true; } else { false_value = GetDefaultValue(value->getType()); } exit_values.push_back({value, false_value, phi}); } else { break; } } // If the new exit condition is not among the exit phi's, add it. if (!exit_cond_has_phi) { exit_values.push_back({new_exit_cond, Constant::getNullValue(new_exit_cond->getType()), nullptr}); } return exit_values; } // Restructures exiting_block so its work, including its exit branch, is moved // to a block B that dominates the latch block. Let's call B the // newly-exiting-block. // Assumes the loop has a single latch block, and the terminator on that // latch block is a conditional branch. static bool RemoveUnstructuredLoopExitsIteration(BasicBlock *exiting_block, Loop *L, LoopInfo *LI, DominatorTree *DT) { BasicBlock *latch = L->getLoopLatch(); BasicBlock *latch_exit = GetExitBlockForExitingBlock(L, latch); // Ensure the latch-exit is "dedicated": no block outside the loop // branches to it. // // Suppose this iteration successfully moves an exit block X until // after the latch block. It will do so by rewiring the CFG so // the latch *exit* block will branch to X. If the latch exit // block is already reachable from X, then the rewiring will // create an unwanted loop. // So prevent this from happening by ensuring the latch exit is // "dedicated": the only branches to it come from inside the // loop, and hence not from X. // // The latch_exit block could have *multiple* branches to it from // outside the loop. // // When the edge from latch to latch_exit is split, the local picture is: // // latch --> middle --> tail // // where: // - Branches that used to go to latch_exit, from outside the loop, now // point to 'tail'. // - 'middle' is now an exit block for the loop, and its only incoming // edge is from latch. for (auto *pred : predecessors(latch_exit)) { if (!L->contains(pred)) { SplitEdge(latch, latch_exit, DT, LI); // Quit early and recalculate exit blocks. return true; } } BasicBlock *exit_block = GetExitBlockForExitingBlock(L, exiting_block); // If exiting block already dominates latch, then no need to do anything. if (DT->dominates(exiting_block, latch)) { return false; } Propagator prop; // The newly-exiting-block B will end in a conditional branch, with // the true branch exiting the loop, and the false branch falling through // (staying in the loop). // Compute the exit condition for B. BranchInst *exiting_br = cast<BranchInst>(exiting_block->getTerminator()); Value *exit_if_true = exiting_br->getCondition(); // When the original exit_block is the false block, use the negate the // condition. if (exiting_br->getSuccessor(1) == exit_block) { IRBuilder<> B(exiting_br); exit_if_true = B.CreateNot(exit_if_true); } // Collect relevant information about values that flow from this loop // into the exit block. const auto exit_values = CollectExitValues(exit_if_true, exiting_block, exit_block); // // Propagate those values we just found to a block that dominates the latch, // and return that final block. // Also, remember the blocks along the traversal that have side effects. // This can fail, signaled by returning null. 
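  // (For each block with side effects collected below, SkipBlockWithBranch
  // later guards the block body so that it only runs when the propagated exit
  // flag is false; sketched in IR with invented value names:
  //   br i1 %exit_flag, label %dx.struct_exit.cond_end,
  //                     label %dx.struct_exit.cond_body
  // Values defined in the body and used later are merged back through phis
  // that supply a default value on the skipping edge.)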
std::vector<BasicBlock *> blocks_with_side_effect; BasicBlock *new_exiting_block = prop.Run(exit_values, exiting_block, latch, DT, L, LI, blocks_with_side_effect); // Stop now if we failed. if (!new_exiting_block) return false; // If any blocks on the traversal have side effects, skip them when the loop // should be exiting. for (BasicBlock *bb : blocks_with_side_effect) { Value *exit_cond_for_block = prop.Get(exit_if_true, bb); SkipBlockWithBranch(bb, exit_cond_for_block, L, LI); } // Make the exiting block not exit. { BasicBlock *non_exiting_block = exiting_br->getSuccessor( exiting_br->getSuccessor(0) == exit_block ? 1 : 0); BranchInst::Create(non_exiting_block, exiting_block); exiting_br->eraseFromParent(); exiting_br = nullptr; } Value *new_exit_cond = prop.Get(exit_if_true, new_exiting_block); assert(new_exit_cond); // Split the block where we're now exiting from, and branch to latch exit std::string old_name = new_exiting_block->getName().str(); BasicBlock *new_not_exiting_block = new_exiting_block->splitBasicBlock(new_exiting_block->getFirstNonPHI()); new_exiting_block->setName("dx.struct_exit.new_exiting"); new_not_exiting_block->setName(old_name); // Query for new_exiting_block's own loop to add new_not_exiting_block to. // It's possible that new_exiting_block is part of another inner loop // separate from L. If added directly to L, the inner loop(s) will not // contain new_not_exiting_block, making them malformed. Loop *inner_loop_of_exiting_block = LI->getLoopFor(new_exiting_block); inner_loop_of_exiting_block->addBasicBlockToLoop(new_not_exiting_block, *LI); // Branch to latch_exit new_exiting_block->getTerminator()->eraseFromParent(); BranchInst::Create(latch_exit, new_not_exiting_block, new_exit_cond, new_exiting_block); // If the exit block and the latch exit are the same, then we're already good. // just update the phi nodes in the exit block. Use the values that were // propagated down to the newly exiting node. // This can't happen if the loop is in LoopSimplifyForm, because that requires // 'dedicated exits', and we already know that exiting_block is not the same // as the latch block. if (latch_exit == exit_block) { for (const Value_Info &info : exit_values) { // Take the phi node in the exit block and reset incoming block and value // from latch_exit PHINode *exit_phi = info.exit_phi; if (exit_phi) { for (unsigned i = 0; i < exit_phi->getNumIncomingValues(); i++) { if (exit_phi->getIncomingBlock(i) == exiting_block) { exit_phi->setIncomingBlock(i, new_exiting_block); exit_phi->setIncomingValue(i, prop.Get(info.val, new_exiting_block)); } } } } } // Otherwise... else { // 1. Split the latch exit, since it's going to branch to the real exit // block BasicBlock *post_exit_location = latch_exit->splitBasicBlock(latch_exit->getFirstNonPHI()); { // If latch exit is part of an outer loop, add its split in there too. if (Loop *outer_loop = LI->getLoopFor(latch_exit)) { outer_loop->addBasicBlockToLoop(post_exit_location, *LI); } // If the original exit block is part of an outer loop, then latch exit // (which is the new exit block) must be part of it, since all blocks that // branch to within a loop must be part of that loop structure. else if (Loop *outer_loop = LI->getLoopFor(exit_block)) { outer_loop->addBasicBlockToLoop(latch_exit, *LI); } } // 2. Add incoming values to latch_exit's phi nodes. // Since now new exiting block is branching to latch exit, its phis need to // be updated. 
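// The default values added here are only placeholders for the new edge; the
// values that actually matter along this path are carried by the lcssa phis
// created in step 3 below.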
for (Instruction &inst : *latch_exit) { PHINode *phi = dyn_cast<PHINode>(&inst); if (!phi) break; // We don't care about the values for these old phis when taking the // newly constructed exit path. phi->addIncoming(GetDefaultValue(phi->getType()), new_exiting_block); } unsigned latch_exit_num_predecessors = GetNumPredecessors(latch_exit); PHINode *exit_cond_lcssa = nullptr; for (const Value_Info &info : exit_values) { // 3. Create lcssa phi's for all the propagated values at latch_exit. // Make exit values visible in the latch_exit PHINode *val_lcssa = PHINode::Create(info.val->getType(), latch_exit_num_predecessors, "dx.struct_exit.val_lcssa", latch_exit->begin()); if (info.val == exit_if_true) { // Record the phi for the exit condition exit_cond_lcssa = val_lcssa; exit_cond_lcssa->setName("dx.struct_exit.exit_cond_lcssa"); } for (BasicBlock *pred : llvm::predecessors(latch_exit)) { if (pred == new_exiting_block) { Value *incoming = prop.Get(info.val, new_exiting_block); assert(incoming); val_lcssa->addIncoming(incoming, pred); } else { val_lcssa->addIncoming(info.false_val, pred); } } // 4. Update the phis in the exit_block to use the lcssa phi's we just // created. PHINode *exit_phi = info.exit_phi; if (exit_phi) { for (unsigned i = 0; i < exit_phi->getNumIncomingValues(); i++) { if (exit_phi->getIncomingBlock(i) == exiting_block) { exit_phi->setIncomingBlock(i, latch_exit); exit_phi->setIncomingValue(i, val_lcssa); } } } } // 5. Take the first half of latch_exit and branch it to the exit_block // based on the propagated exit condition. // (Currently the latch_exit unconditionally branches to // post_exit_location.) latch_exit->getTerminator()->eraseFromParent(); BranchInst::Create(exit_block, post_exit_location, exit_cond_lcssa, latch_exit); } DT->recalculate(*L->getHeader()->getParent()); assert(L->isLCSSAForm(*DT)); return true; } bool hlsl::RemoveUnstructuredLoopExits( llvm::Loop *L, llvm::LoopInfo *LI, llvm::DominatorTree *DT, std::unordered_set<llvm::BasicBlock *> *exclude_set) { bool changed = false; if (!L->isLCSSAForm(*DT)) return false; // Check that the exiting blocks in the loop have BranchInst terminators (as // opposed to SwitchInst). At the moment we only handle BranchInst case. { llvm::SmallVector<BasicBlock *, 4> exiting_blocks; L->getExitingBlocks(exiting_blocks); for (BasicBlock *BB : exiting_blocks) { if (!isa<BranchInst>(BB->getTerminator())) return false; } } // Give up if loop is not rotated somehow. // This condition is ensured by DxilLoopUnrollPass. if (BasicBlock *latch = L->getLoopLatch()) { if (!cast<BranchInst>(latch->getTerminator())->isConditional()) return false; } // Give up if there's not a single latch else { return false; } // The loop might not be in LoopSimplifyForm. // Therefore exit blocks might not be dominated by the exiting block. for (;;) { // Recompute exiting block every time, since they could change between // iterations llvm::SmallVector<BasicBlock *, 4> exiting_blocks; L->getExitingBlocks(exiting_blocks); bool local_changed = false; for (BasicBlock *exiting_block : exiting_blocks) { if (exclude_set && exclude_set->count(GetExitBlockForExitingBlock(L, exiting_block))) continue; // As soon as we got a success, break and start a new iteration, since // exiting blocks could have changed. local_changed = RemoveUnstructuredLoopExitsIteration(exiting_block, L, LI, DT); if (local_changed) { break; } } changed |= local_changed; if (!local_changed) { break; } } return changed; } // This pass runs hlsl::RemoveUnstructuredLoopExits. 
// It is used for testing, and can be run from `opt` like this: // opt -dxil-remove-unstructured-loop-exits module.ll namespace { class DxilRemoveUnstructuredLoopExits : public LoopPass { public: static char ID; DxilRemoveUnstructuredLoopExits() : LoopPass(ID) { initializeDxilRemoveUnstructuredLoopExitsPass( *PassRegistry::getPassRegistry()); } StringRef getPassName() const override { return "Dxil Remove Unstructured Loop Exits"; } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<LoopInfoWrapperPass>(); AU.addRequired<DominatorTreeWrapperPass>(); AU.addRequiredID(&LCSSAID); // Don't assume it's in LoopSimplifyForm. That is not guaranteed // by the usual callers. } bool runOnLoop(Loop *L, LPPassManager &LPM) override { LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); return hlsl::RemoveUnstructuredLoopExits(L, LI, DT); } }; } // namespace char DxilRemoveUnstructuredLoopExits::ID; Pass *llvm::createDxilRemoveUnstructuredLoopExitsPass() { return new DxilRemoveUnstructuredLoopExits(); } INITIALIZE_PASS_BEGIN(DxilRemoveUnstructuredLoopExits, "dxil-remove-unstructured-loop-exits", "DXIL Remove Unstructured Loop Exits", false, false) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_END(DxilRemoveUnstructuredLoopExits, "dxil-remove-unstructured-loop-exits", "DXIL Remove Unstructured Loop Exits", false, false)
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/ScalarReplAggregates.cpp
//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This transformation implements the well known scalar replacement of // aggregates transformation. This xform breaks up alloca instructions of // aggregate type (structure or array) into individual alloca instructions for // each member (if possible). Then, if possible, it transforms the individual // alloca instructions into nice clean scalar SSA form. // // This combines a simple SRoA algorithm with the Mem2Reg algorithm because they // often interact, especially for C++ programs. As such, iterating between // SRoA, then Mem2Reg until we run out of things to promote works well. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/Loads.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DIBuilder.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" #include "llvm/IR/GetElementPtrTypeIterator.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/IR/Operator.h" #include "llvm/Pass.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/PromoteMemToReg.h" #include "llvm/Transforms/Utils/SSAUpdater.h" using namespace llvm; #define DEBUG_TYPE "scalarrepl" STATISTIC(NumReplaced, "Number of allocas broken up"); STATISTIC(NumPromoted, "Number of allocas promoted"); STATISTIC(NumAdjusted, "Number of scalar allocas adjusted to allow promotion"); STATISTIC(NumConverted, "Number of aggregates converted to scalar"); namespace { struct SROA : public FunctionPass { SROA(int T, bool hasDT, char &ID, int ST, int AT, int SLT) : FunctionPass(ID), HasDomTree(hasDT) { if (T == -1) SRThreshold = 128; else SRThreshold = T; if (ST == -1) StructMemberThreshold = 32; else StructMemberThreshold = ST; if (AT == -1) ArrayElementThreshold = 8; else ArrayElementThreshold = AT; if (SLT == -1) // Do not limit the scalar integer load size if no threshold is given. ScalarLoadThreshold = -1; else ScalarLoadThreshold = SLT; } bool runOnFunction(Function &F) override; bool performScalarRepl(Function &F); bool performPromotion(Function &F); private: bool HasDomTree; /// DeadInsts - Keep track of instructions we have made dead, so that /// we can remove them after we are done working. SmallVector<Value*, 32> DeadInsts; /// AllocaInfo - When analyzing uses of an alloca instruction, this captures /// information about the uses. All these fields are initialized to false /// and set to true when something is learned. struct AllocaInfo { /// The alloca to promote. 
AllocaInst *AI; /// CheckedPHIs - This is a set of verified PHI nodes, to prevent infinite /// looping and avoid redundant work. SmallPtrSet<PHINode*, 8> CheckedPHIs; /// isUnsafe - This is set to true if the alloca cannot be SROA'd. bool isUnsafe : 1; /// isMemCpySrc - This is true if this aggregate is memcpy'd from. bool isMemCpySrc : 1; /// isMemCpyDst - This is true if this aggregate is memcpy'd into. bool isMemCpyDst : 1; /// hasSubelementAccess - This is true if a subelement of the alloca is /// ever accessed, or false if the alloca is only accessed with mem /// intrinsics or load/store that only access the entire alloca at once. bool hasSubelementAccess : 1; /// hasALoadOrStore - This is true if there are any loads or stores to it. /// The alloca may just be accessed with memcpy, for example, which would /// not set this. bool hasALoadOrStore : 1; explicit AllocaInfo(AllocaInst *ai) : AI(ai), isUnsafe(false), isMemCpySrc(false), isMemCpyDst(false), hasSubelementAccess(false), hasALoadOrStore(false) {} }; /// SRThreshold - The maximum alloca size to considered for SROA. unsigned SRThreshold; /// StructMemberThreshold - The maximum number of members a struct can /// contain to be considered for SROA. unsigned StructMemberThreshold; /// ArrayElementThreshold - The maximum number of elements an array can /// have to be considered for SROA. unsigned ArrayElementThreshold; /// ScalarLoadThreshold - The maximum size in bits of scalars to load when /// converting to scalar unsigned ScalarLoadThreshold; void MarkUnsafe(AllocaInfo &I, Instruction *User) { I.isUnsafe = true; DEBUG(dbgs() << " Transformation preventing inst: " << *User << '\n'); } bool isSafeAllocaToScalarRepl(AllocaInst *AI); void isSafeForScalarRepl(Instruction *I, uint64_t Offset, AllocaInfo &Info); void isSafePHISelectUseForScalarRepl(Instruction *User, uint64_t Offset, AllocaInfo &Info); void isSafeGEP(GetElementPtrInst *GEPI, uint64_t &Offset, AllocaInfo &Info); void isSafeMemAccess(uint64_t Offset, uint64_t MemSize, Type *MemOpType, bool isStore, AllocaInfo &Info, Instruction *TheAccess, bool AllowWholeAccess); bool TypeHasComponent(Type *T, uint64_t Offset, uint64_t Size, const DataLayout &DL); uint64_t FindElementAndOffset(Type *&T, uint64_t &Offset, Type *&IdxTy, const DataLayout &DL); void DoScalarReplacement(AllocaInst *AI, std::vector<AllocaInst*> &WorkList); void DeleteDeadInstructions(); void RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset, SmallVectorImpl<AllocaInst *> &NewElts); void RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset, SmallVectorImpl<AllocaInst *> &NewElts); void RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset, SmallVectorImpl<AllocaInst *> &NewElts); void RewriteLifetimeIntrinsic(IntrinsicInst *II, AllocaInst *AI, uint64_t Offset, SmallVectorImpl<AllocaInst *> &NewElts); void RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst, AllocaInst *AI, SmallVectorImpl<AllocaInst *> &NewElts); void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI, SmallVectorImpl<AllocaInst *> &NewElts); void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI, SmallVectorImpl<AllocaInst *> &NewElts); bool ShouldAttemptScalarRepl(AllocaInst *AI); }; // SROA_DT - SROA that uses DominatorTree. 
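// Both subclasses share all of the analysis and rewriting logic in SROA; they
// differ only in how performPromotion turns the promotable allocas into SSA
// form (PromoteMemToReg with a DominatorTree versus the SSAUpdater-based
// AllocaPromoter).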
struct SROA_DT : public SROA { static char ID; public: SROA_DT(int T = -1, int ST = -1, int AT = -1, int SLT = -1) : SROA(T, true, ID, ST, AT, SLT) { initializeSROA_DTPass(*PassRegistry::getPassRegistry()); } // getAnalysisUsage - This pass does not require any passes, but we know it // will not alter the CFG, so say so. void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<AssumptionCacheTracker>(); AU.addRequired<DominatorTreeWrapperPass>(); AU.setPreservesCFG(); } }; // SROA_SSAUp - SROA that uses SSAUpdater. struct SROA_SSAUp : public SROA { static char ID; public: SROA_SSAUp(int T = -1, int ST = -1, int AT = -1, int SLT = -1) : SROA(T, false, ID, ST, AT, SLT) { initializeSROA_SSAUpPass(*PassRegistry::getPassRegistry()); } // getAnalysisUsage - This pass does not require any passes, but we know it // will not alter the CFG, so say so. void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<AssumptionCacheTracker>(); AU.setPreservesCFG(); } }; } char SROA_DT::ID = 0; char SROA_SSAUp::ID = 0; INITIALIZE_PASS_BEGIN(SROA_DT, "scalarrepl", "Scalar Replacement of Aggregates (DT)", false, false) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_END(SROA_DT, "scalarrepl", "Scalar Replacement of Aggregates (DT)", false, false) INITIALIZE_PASS_BEGIN(SROA_SSAUp, "scalarrepl-ssa", "Scalar Replacement of Aggregates (SSAUp)", false, false) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_END(SROA_SSAUp, "scalarrepl-ssa", "Scalar Replacement of Aggregates (SSAUp)", false, false) // Public interface to the ScalarReplAggregates pass FunctionPass *llvm::createScalarReplAggregatesPass(int Threshold, bool UseDomTree, int StructMemberThreshold, int ArrayElementThreshold, int ScalarLoadThreshold) { if (UseDomTree) return new SROA_DT(Threshold, StructMemberThreshold, ArrayElementThreshold, ScalarLoadThreshold); return new SROA_SSAUp(Threshold, StructMemberThreshold, ArrayElementThreshold, ScalarLoadThreshold); } //===----------------------------------------------------------------------===// // Convert To Scalar Optimization. //===----------------------------------------------------------------------===// namespace { /// ConvertToScalarInfo - This class implements the "Convert To Scalar" /// optimization, which scans the uses of an alloca and determines if it can /// rewrite it in terms of a single new alloca that can be mem2reg'd. class ConvertToScalarInfo { /// AllocaSize - The size of the alloca being considered in bytes. unsigned AllocaSize; const DataLayout &DL; unsigned ScalarLoadThreshold; /// IsNotTrivial - This is set to true if there is some access to the object /// which means that mem2reg can't promote it. bool IsNotTrivial; /// ScalarKind - Tracks the kind of alloca being considered for promotion, /// computed based on the uses of the alloca rather than the LLVM type system. enum { Unknown, // Accesses via GEPs that are consistent with element access of a vector // type. This will not be converted into a vector unless there is a later // access using an actual vector type. ImplicitVector, // Accesses via vector operations and GEPs that are consistent with the // layout of a vector type. Vector, // An integer bag-of-bits with bitwise operations for insertion and // extraction. Any combination of types can be converted into this kind // of scalar. 
Integer } ScalarKind; /// VectorTy - This tracks the type that we should promote the vector to if /// it is possible to turn it into a vector. This starts out null, and if it /// isn't possible to turn into a vector type, it gets set to VoidTy. VectorType *VectorTy; /// HadNonMemTransferAccess - True if there is at least one access to the /// alloca that is not a MemTransferInst. We don't want to turn structs into /// large integers unless there is some potential for optimization. bool HadNonMemTransferAccess; /// HadDynamicAccess - True if some element of this alloca was dynamic. /// We don't yet have support for turning a dynamic access into a large /// integer. bool HadDynamicAccess; public: explicit ConvertToScalarInfo(unsigned Size, const DataLayout &DL, unsigned SLT) : AllocaSize(Size), DL(DL), ScalarLoadThreshold(SLT), IsNotTrivial(false), ScalarKind(Unknown), VectorTy(nullptr), HadNonMemTransferAccess(false), HadDynamicAccess(false) { } AllocaInst *TryConvert(AllocaInst *AI); private: bool CanConvertToScalar(Value *V, uint64_t Offset, Value* NonConstantIdx); void MergeInTypeForLoadOrStore(Type *In, uint64_t Offset); bool MergeInVectorType(VectorType *VInTy, uint64_t Offset); void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset, Value *NonConstantIdx); Value *ConvertScalar_ExtractValue(Value *NV, Type *ToType, uint64_t Offset, Value* NonConstantIdx, IRBuilder<> &Builder); Value *ConvertScalar_InsertValue(Value *StoredVal, Value *ExistingVal, uint64_t Offset, Value* NonConstantIdx, IRBuilder<> &Builder); }; } // end anonymous namespace. /// TryConvert - Analyze the specified alloca, and if it is safe to do so, /// rewrite it to be a new alloca which is mem2reg'able. This returns the new /// alloca if possible or null if not. AllocaInst *ConvertToScalarInfo::TryConvert(AllocaInst *AI) { // If we can't convert this scalar, or if mem2reg can trivially do it, bail // out. if (!CanConvertToScalar(AI, 0, nullptr) || !IsNotTrivial) return nullptr; // If an alloca has only memset / memcpy uses, it may still have an Unknown // ScalarKind. Treat it as an Integer below. if (ScalarKind == Unknown) ScalarKind = Integer; if (ScalarKind == Vector && VectorTy->getBitWidth() != AllocaSize * 8) ScalarKind = Integer; // If we were able to find a vector type that can handle this with // insert/extract elements, and if there was at least one use that had // a vector type, promote this to a vector. We don't want to promote // random stuff that doesn't use vectors (e.g. <9 x double>) because then // we just get a lot of insert/extracts. If at least one vector is // involved, then we probably really do have a union of vector/array. Type *NewTy; if (ScalarKind == Vector) { assert(VectorTy && "Missing type for vector scalar."); DEBUG(dbgs() << "CONVERT TO VECTOR: " << *AI << "\n TYPE = " << *VectorTy << '\n'); NewTy = VectorTy; // Use the vector type. } else { unsigned BitWidth = AllocaSize * 8; // Do not convert to scalar integer if the alloca size exceeds the // scalar load threshold. if (BitWidth > ScalarLoadThreshold) return nullptr; if ((ScalarKind == ImplicitVector || ScalarKind == Integer) && !HadNonMemTransferAccess && !DL.fitsInLegalInteger(BitWidth)) return nullptr; // Dynamic accesses on integers aren't yet supported. They need us to shift // by a dynamic amount which could be difficult to work out as we might not // know whether to use a left or right shift. 
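// (A "dynamic access" here is a GEP whose trailing index is a non-constant
// i32; CanConvertToScalar only accepts that form when it indexes into a
// vector, and it sets HadDynamicAccess for it.)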
if (ScalarKind == Integer && HadDynamicAccess) return nullptr; DEBUG(dbgs() << "CONVERT TO SCALAR INTEGER: " << *AI << "\n"); // Create and insert the integer alloca. NewTy = IntegerType::get(AI->getContext(), BitWidth); } AllocaInst *NewAI = new AllocaInst(NewTy, nullptr, "", AI->getParent()->begin()); ConvertUsesToScalar(AI, NewAI, 0, nullptr); return NewAI; } /// MergeInTypeForLoadOrStore - Add the 'In' type to the accumulated vector type /// (VectorTy) so far at the offset specified by Offset (which is specified in /// bytes). /// /// There are two cases we handle here: /// 1) A union of vector types of the same size and potentially its elements. /// Here we turn element accesses into insert/extract element operations. /// This promotes a <4 x float> with a store of float to the third element /// into a <4 x float> that uses insert element. /// 2) A fully general blob of memory, which we turn into some (potentially /// large) integer type with extract and insert operations where the loads /// and stores would mutate the memory. We mark this by setting VectorTy /// to VoidTy. void ConvertToScalarInfo::MergeInTypeForLoadOrStore(Type *In, uint64_t Offset) { // If we already decided to turn this into a blob of integer memory, there is // nothing to be done. if (ScalarKind == Integer) return; // If this could be contributing to a vector, analyze it. // If the In type is a vector that is the same size as the alloca, see if it // matches the existing VecTy. if (VectorType *VInTy = dyn_cast<VectorType>(In)) { if (MergeInVectorType(VInTy, Offset)) return; } else if (In->isFloatTy() || In->isDoubleTy() || (In->isIntegerTy() && In->getPrimitiveSizeInBits() >= 8 && isPowerOf2_32(In->getPrimitiveSizeInBits()))) { // Full width accesses can be ignored, because they can always be turned // into bitcasts. unsigned EltSize = In->getPrimitiveSizeInBits()/8; if (EltSize == AllocaSize) return; // If we're accessing something that could be an element of a vector, see // if the implied vector agrees with what we already have and if Offset is // compatible with it. if (Offset % EltSize == 0 && AllocaSize % EltSize == 0 && (!VectorTy || EltSize == VectorTy->getElementType() ->getPrimitiveSizeInBits()/8)) { if (!VectorTy) { ScalarKind = ImplicitVector; VectorTy = VectorType::get(In, AllocaSize/EltSize); } return; } } // Otherwise, we have a case that we can't handle with an optimized vector // form. We can still turn this into a large integer. ScalarKind = Integer; } /// MergeInVectorType - Handles the vector case of MergeInTypeForLoadOrStore, /// returning true if the type was successfully merged and false otherwise. bool ConvertToScalarInfo::MergeInVectorType(VectorType *VInTy, uint64_t Offset) { if (VInTy->getBitWidth()/8 == AllocaSize && Offset == 0) { // If we're storing/loading a vector of the right size, allow it as a // vector. If this the first vector we see, remember the type so that // we know the element size. If this is a subsequent access, ignore it // even if it is a differing type but the same size. Worst case we can // bitcast the resultant vectors. if (!VectorTy) VectorTy = VInTy; ScalarKind = Vector; return true; } return false; } /// CanConvertToScalar - V is a pointer. If we can convert the pointee and all /// its accesses to a single vector type, return true and set VecTy to /// the new type. If we could convert the alloca into a single promotable /// integer, return true but set VecTy to VoidTy. 
Further, if the use is not a /// completely trivial use that mem2reg could promote, set IsNotTrivial. Offset /// is the current offset from the base of the alloca being analyzed. /// /// If we see at least one access to the value that is as a vector type, set the /// SawVec flag. bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset, Value* NonConstantIdx) { for (User *U : V->users()) { Instruction *UI = cast<Instruction>(U); if (LoadInst *LI = dyn_cast<LoadInst>(UI)) { // Don't break volatile loads. if (!LI->isSimple()) return false; // Don't touch MMX operations. if (LI->getType()->isX86_MMXTy()) return false; HadNonMemTransferAccess = true; MergeInTypeForLoadOrStore(LI->getType(), Offset); continue; } if (StoreInst *SI = dyn_cast<StoreInst>(UI)) { // Storing the pointer, not into the value? if (SI->getOperand(0) == V || !SI->isSimple()) return false; // Don't touch MMX operations. if (SI->getOperand(0)->getType()->isX86_MMXTy()) return false; HadNonMemTransferAccess = true; MergeInTypeForLoadOrStore(SI->getOperand(0)->getType(), Offset); continue; } if (BitCastInst *BCI = dyn_cast<BitCastInst>(UI)) { if (!onlyUsedByLifetimeMarkers(BCI)) IsNotTrivial = true; // Can't be mem2reg'd. if (!CanConvertToScalar(BCI, Offset, NonConstantIdx)) return false; continue; } if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UI)) { // If this is a GEP with a variable indices, we can't handle it. PointerType* PtrTy = dyn_cast<PointerType>(GEP->getPointerOperandType()); if (!PtrTy) return false; // Compute the offset that this GEP adds to the pointer. SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end()); Value *GEPNonConstantIdx = nullptr; if (!GEP->hasAllConstantIndices()) { if (!isa<VectorType>(PtrTy->getElementType())) return false; if (NonConstantIdx) return false; GEPNonConstantIdx = Indices.pop_back_val(); if (!GEPNonConstantIdx->getType()->isIntegerTy(32)) return false; HadDynamicAccess = true; } else GEPNonConstantIdx = NonConstantIdx; uint64_t GEPOffset = DL.getIndexedOffset(PtrTy, Indices); // See if all uses can be converted. if (!CanConvertToScalar(GEP, Offset+GEPOffset, GEPNonConstantIdx)) return false; IsNotTrivial = true; // Can't be mem2reg'd. HadNonMemTransferAccess = true; continue; } // If this is a constant sized memset of a constant value (e.g. 0) we can // handle it. if (MemSetInst *MSI = dyn_cast<MemSetInst>(UI)) { // Store to dynamic index. if (NonConstantIdx) return false; // Store of constant value. if (!isa<ConstantInt>(MSI->getValue())) return false; // Store of constant size. ConstantInt *Len = dyn_cast<ConstantInt>(MSI->getLength()); if (!Len) return false; // If the size differs from the alloca, we can only convert the alloca to // an integer bag-of-bits. // FIXME: This should handle all of the cases that are currently accepted // as vector element insertions. if (Len->getZExtValue() != AllocaSize || Offset != 0) ScalarKind = Integer; IsNotTrivial = true; // Can't be mem2reg'd. HadNonMemTransferAccess = true; continue; } // If this is a memcpy or memmove into or out of the whole allocation, we // can handle it like a load or store of the scalar type. if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(UI)) { // Store to dynamic index. if (NonConstantIdx) return false; ConstantInt *Len = dyn_cast<ConstantInt>(MTI->getLength()); if (!Len || Len->getZExtValue() != AllocaSize || Offset != 0) return false; IsNotTrivial = true; // Can't be mem2reg'd. continue; } // If this is a lifetime intrinsic, we can handle it. 
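// lifetime.start/end only delimit when the underlying memory is considered
// live; they place no constraint on the scalar type chosen here, and
// ConvertUsesToScalar simply erases them later.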
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(UI)) { if (II->getIntrinsicID() == Intrinsic::lifetime_start || II->getIntrinsicID() == Intrinsic::lifetime_end) { continue; } } // Otherwise, we cannot handle this! return false; } return true; } /// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca /// directly. This happens when we are converting an "integer union" to a /// single integer scalar, or when we are converting a "vector union" to a /// vector with insert/extractelement instructions. /// /// Offset is an offset from the original alloca, in bits that need to be /// shifted to the right. By the end of this, there should be no uses of Ptr. void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset, Value* NonConstantIdx) { while (!Ptr->use_empty()) { Instruction *User = cast<Instruction>(Ptr->user_back()); if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) { ConvertUsesToScalar(CI, NewAI, Offset, NonConstantIdx); CI->eraseFromParent(); continue; } if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) { // Compute the offset that this GEP adds to the pointer. SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end()); Value* GEPNonConstantIdx = nullptr; if (!GEP->hasAllConstantIndices()) { assert(!NonConstantIdx && "Dynamic GEP reading from dynamic GEP unsupported"); GEPNonConstantIdx = Indices.pop_back_val(); } else GEPNonConstantIdx = NonConstantIdx; uint64_t GEPOffset = DL.getIndexedOffset(GEP->getPointerOperandType(), Indices); ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8, GEPNonConstantIdx); GEP->eraseFromParent(); continue; } IRBuilder<> Builder(User); if (LoadInst *LI = dyn_cast<LoadInst>(User)) { // The load is a bit extract from NewAI shifted right by Offset bits. Value *LoadedVal = Builder.CreateLoad(NewAI); Value *NewLoadVal = ConvertScalar_ExtractValue(LoadedVal, LI->getType(), Offset, NonConstantIdx, Builder); LI->replaceAllUsesWith(NewLoadVal); LI->eraseFromParent(); continue; } if (StoreInst *SI = dyn_cast<StoreInst>(User)) { assert(SI->getOperand(0) != Ptr && "Consistency error!"); Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in"); Value *New = ConvertScalar_InsertValue(SI->getOperand(0), Old, Offset, NonConstantIdx, Builder); Builder.CreateStore(New, NewAI); SI->eraseFromParent(); // If the load we just inserted is now dead, then the inserted store // overwrote the entire thing. if (Old->use_empty()) Old->eraseFromParent(); continue; } // If this is a constant sized memset of a constant value (e.g. 0) we can // transform it into a store of the expanded constant value. if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) { assert(MSI->getRawDest() == Ptr && "Consistency error!"); assert(!NonConstantIdx && "Cannot replace dynamic memset with insert"); int64_t SNumBytes = cast<ConstantInt>(MSI->getLength())->getSExtValue(); if (SNumBytes > 0 && (SNumBytes >> 32) == 0) { unsigned NumBytes = static_cast<unsigned>(SNumBytes); unsigned Val = cast<ConstantInt>(MSI->getValue())->getZExtValue(); // Compute the value replicated the right number of times. APInt APVal(NumBytes*8, Val); // Splat the value if non-zero. 
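// e.g. a 4-byte memset of the value 0xAB becomes the i32 constant 0xABABABAB.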
if (Val) for (unsigned i = 1; i != NumBytes; ++i) APVal |= APVal << 8; Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in"); Value *New = ConvertScalar_InsertValue( ConstantInt::get(User->getContext(), APVal), Old, Offset, nullptr, Builder); Builder.CreateStore(New, NewAI); // If the load we just inserted is now dead, then the memset overwrote // the entire thing. if (Old->use_empty()) Old->eraseFromParent(); } MSI->eraseFromParent(); continue; } // If this is a memcpy or memmove into or out of the whole allocation, we // can handle it like a load or store of the scalar type. if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) { assert(Offset == 0 && "must be store to start of alloca"); assert(!NonConstantIdx && "Cannot replace dynamic transfer with insert"); // If the source and destination are both to the same alloca, then this is // a noop copy-to-self, just delete it. Otherwise, emit a load and store // as appropriate. AllocaInst *OrigAI = cast<AllocaInst>(GetUnderlyingObject(Ptr, DL, 0)); if (GetUnderlyingObject(MTI->getSource(), DL, 0) != OrigAI) { // Dest must be OrigAI, change this to be a load from the original // pointer (bitcasted), then a store to our new alloca. assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?"); Value *SrcPtr = MTI->getSource(); PointerType* SPTy = cast<PointerType>(SrcPtr->getType()); PointerType* AIPTy = cast<PointerType>(NewAI->getType()); if (SPTy->getAddressSpace() != AIPTy->getAddressSpace()) { AIPTy = PointerType::get(AIPTy->getElementType(), SPTy->getAddressSpace()); } SrcPtr = Builder.CreateBitCast(SrcPtr, AIPTy); LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval"); SrcVal->setAlignment(MTI->getAlignment()); Builder.CreateStore(SrcVal, NewAI); } else if (GetUnderlyingObject(MTI->getDest(), DL, 0) != OrigAI) { // Src must be OrigAI, change this to be a load from NewAI then a store // through the original dest pointer (bitcasted). assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?"); LoadInst *SrcVal = Builder.CreateLoad(NewAI, "srcval"); PointerType* DPTy = cast<PointerType>(MTI->getDest()->getType()); PointerType* AIPTy = cast<PointerType>(NewAI->getType()); if (DPTy->getAddressSpace() != AIPTy->getAddressSpace()) { AIPTy = PointerType::get(AIPTy->getElementType(), DPTy->getAddressSpace()); } Value *DstPtr = Builder.CreateBitCast(MTI->getDest(), AIPTy); StoreInst *NewStore = Builder.CreateStore(SrcVal, DstPtr); NewStore->setAlignment(MTI->getAlignment()); } else { // Noop transfer. Src == Dst } MTI->eraseFromParent(); continue; } if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(User)) { if (II->getIntrinsicID() == Intrinsic::lifetime_start || II->getIntrinsicID() == Intrinsic::lifetime_end) { // There's no need to preserve these, as the resulting alloca will be // converted to a register anyways. II->eraseFromParent(); continue; } } llvm_unreachable("Unsupported operation!"); } } /// ConvertScalar_ExtractValue - Extract a value of type ToType from an integer /// or vector value FromVal, extracting the bits from the offset specified by /// Offset. This returns the value, which is of type ToType. /// /// This happens when we are converting an "integer union" to a single /// integer scalar, or when we are converting a "vector union" to a vector with /// insert/extractelement instructions. /// /// Offset is an offset from the original alloca, in bits that need to be /// shifted to the right. 
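/// NonConstantIdx, if non-null, is the dynamic element index coming from a GEP
/// with a non-constant index; it is only used here when extracting from a
/// vector value.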
Value *ConvertToScalarInfo:: ConvertScalar_ExtractValue(Value *FromVal, Type *ToType, uint64_t Offset, Value* NonConstantIdx, IRBuilder<> &Builder) { // If the load is of the whole new alloca, no conversion is needed. Type *FromType = FromVal->getType(); if (FromType == ToType && Offset == 0) return FromVal; // If the result alloca is a vector type, this is either an element // access or a bitcast to another vector type of the same size. if (VectorType *VTy = dyn_cast<VectorType>(FromType)) { unsigned FromTypeSize = DL.getTypeAllocSize(FromType); unsigned ToTypeSize = DL.getTypeAllocSize(ToType); if (FromTypeSize == ToTypeSize) return Builder.CreateBitCast(FromVal, ToType); // Otherwise it must be an element access. unsigned Elt = 0; if (Offset) { unsigned EltSize = DL.getTypeAllocSizeInBits(VTy->getElementType()); Elt = Offset/EltSize; assert(EltSize*Elt == Offset && "Invalid modulus in validity checking"); } // Return the element extracted out of it. Value *Idx; if (NonConstantIdx) { if (Elt) Idx = Builder.CreateAdd(NonConstantIdx, Builder.getInt32(Elt), "dyn.offset"); else Idx = NonConstantIdx; } else Idx = Builder.getInt32(Elt); Value *V = Builder.CreateExtractElement(FromVal, Idx); if (V->getType() != ToType) V = Builder.CreateBitCast(V, ToType); return V; } // If ToType is a first class aggregate, extract out each of the pieces and // use insertvalue's to form the FCA. if (StructType *ST = dyn_cast<StructType>(ToType)) { assert(!NonConstantIdx && "Dynamic indexing into struct types not supported"); const StructLayout &Layout = *DL.getStructLayout(ST); Value *Res = UndefValue::get(ST); for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) { Value *Elt = ConvertScalar_ExtractValue(FromVal, ST->getElementType(i), Offset+Layout.getElementOffsetInBits(i), nullptr, Builder); Res = Builder.CreateInsertValue(Res, Elt, i); } return Res; } if (ArrayType *AT = dyn_cast<ArrayType>(ToType)) { assert(!NonConstantIdx && "Dynamic indexing into array types not supported"); uint64_t EltSize = DL.getTypeAllocSizeInBits(AT->getElementType()); Value *Res = UndefValue::get(AT); for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) { Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(), Offset+i*EltSize, nullptr, Builder); Res = Builder.CreateInsertValue(Res, Elt, i); } return Res; } // Otherwise, this must be a union that was converted to an integer value. IntegerType *NTy = cast<IntegerType>(FromVal->getType()); // If this is a big-endian system and the load is narrower than the // full alloca type, we need to do a shift to get the right bits. int ShAmt = 0; if (DL.isBigEndian()) { // On big-endian machines, the lowest bit is stored at the bit offset // from the pointer given by getTypeStoreSizeInBits. This matters for // integers with a bitwidth that is not a multiple of 8. ShAmt = DL.getTypeStoreSizeInBits(NTy) - DL.getTypeStoreSizeInBits(ToType) - Offset; } else { ShAmt = Offset; } // Note: we support negative bitwidths (with shl) which are not defined. // We do this to support (f.e.) loads off the end of a structure where // only some bits are used. if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth()) FromVal = Builder.CreateLShr(FromVal, ConstantInt::get(FromVal->getType(), ShAmt)); else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth()) FromVal = Builder.CreateShl(FromVal, ConstantInt::get(FromVal->getType(), -ShAmt)); // Finally, unconditionally truncate the integer to the right width. 
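// (Or zero-extend it, in the less common case where the type being loaded is
// wider than the integer the alloca was converted to.)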
unsigned LIBitWidth = DL.getTypeSizeInBits(ToType); if (LIBitWidth < NTy->getBitWidth()) FromVal = Builder.CreateTrunc(FromVal, IntegerType::get(FromVal->getContext(), LIBitWidth)); else if (LIBitWidth > NTy->getBitWidth()) FromVal = Builder.CreateZExt(FromVal, IntegerType::get(FromVal->getContext(), LIBitWidth)); // If the result is an integer, this is a trunc or bitcast. if (ToType->isIntegerTy()) { // Should be done. } else if (ToType->isFloatingPointTy() || ToType->isVectorTy()) { // Just do a bitcast, we know the sizes match up. FromVal = Builder.CreateBitCast(FromVal, ToType); } else { // Otherwise must be a pointer. FromVal = Builder.CreateIntToPtr(FromVal, ToType); } assert(FromVal->getType() == ToType && "Didn't convert right?"); return FromVal; } /// ConvertScalar_InsertValue - Insert the value "SV" into the existing integer /// or vector value "Old" at the offset specified by Offset. /// /// This happens when we are converting an "integer union" to a /// single integer scalar, or when we are converting a "vector union" to a /// vector with insert/extractelement instructions. /// /// Offset is an offset from the original alloca, in bits that need to be /// shifted to the right. /// /// NonConstantIdx is an index value if there was a GEP with a non-constant /// index value. If this is 0 then all GEPs used to find this insert address /// are constant. Value *ConvertToScalarInfo:: ConvertScalar_InsertValue(Value *SV, Value *Old, uint64_t Offset, Value* NonConstantIdx, IRBuilder<> &Builder) { // Convert the stored type to the actual type, shift it left to insert // then 'or' into place. Type *AllocaType = Old->getType(); LLVMContext &Context = Old->getContext(); if (VectorType *VTy = dyn_cast<VectorType>(AllocaType)) { uint64_t VecSize = DL.getTypeAllocSizeInBits(VTy); uint64_t ValSize = DL.getTypeAllocSizeInBits(SV->getType()); // Changing the whole vector with memset or with an access of a different // vector type? if (ValSize == VecSize) return Builder.CreateBitCast(SV, AllocaType); // Must be an element insertion. Type *EltTy = VTy->getElementType(); if (SV->getType() != EltTy) SV = Builder.CreateBitCast(SV, EltTy); uint64_t EltSize = DL.getTypeAllocSizeInBits(EltTy); unsigned Elt = Offset/EltSize; Value *Idx; if (NonConstantIdx) { if (Elt) Idx = Builder.CreateAdd(NonConstantIdx, Builder.getInt32(Elt), "dyn.offset"); else Idx = NonConstantIdx; } else Idx = Builder.getInt32(Elt); return Builder.CreateInsertElement(Old, SV, Idx); } // If SV is a first-class aggregate value, insert each value recursively. if (StructType *ST = dyn_cast<StructType>(SV->getType())) { assert(!NonConstantIdx && "Dynamic indexing into struct types not supported"); const StructLayout &Layout = *DL.getStructLayout(ST); for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) { Value *Elt = Builder.CreateExtractValue(SV, i); Old = ConvertScalar_InsertValue(Elt, Old, Offset+Layout.getElementOffsetInBits(i), nullptr, Builder); } return Old; } if (ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) { assert(!NonConstantIdx && "Dynamic indexing into array types not supported"); uint64_t EltSize = DL.getTypeAllocSizeInBits(AT->getElementType()); for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) { Value *Elt = Builder.CreateExtractValue(SV, i); Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, nullptr, Builder); } return Old; } // If SV is a float, convert it to the appropriate integer type. // If it is a pointer, do the same. 
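// For example, on a little-endian target, storing an f32 into bits [32, 64)
// of an i64 alloca becomes: bitcast the f32 to i32, zext to i64, shl by 32,
// then mask the old value and 'or' the shifted bits in.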
unsigned SrcWidth = DL.getTypeSizeInBits(SV->getType()); unsigned DestWidth = DL.getTypeSizeInBits(AllocaType); unsigned SrcStoreWidth = DL.getTypeStoreSizeInBits(SV->getType()); unsigned DestStoreWidth = DL.getTypeStoreSizeInBits(AllocaType); if (SV->getType()->isFloatingPointTy() || SV->getType()->isVectorTy()) SV = Builder.CreateBitCast(SV, IntegerType::get(SV->getContext(),SrcWidth)); else if (SV->getType()->isPointerTy()) SV = Builder.CreatePtrToInt(SV, DL.getIntPtrType(SV->getType())); // Zero extend or truncate the value if needed. if (SV->getType() != AllocaType) { if (SV->getType()->getPrimitiveSizeInBits() < AllocaType->getPrimitiveSizeInBits()) SV = Builder.CreateZExt(SV, AllocaType); else { // Truncation may be needed if storing more than the alloca can hold // (undefined behavior). SV = Builder.CreateTrunc(SV, AllocaType); SrcWidth = DestWidth; SrcStoreWidth = DestStoreWidth; } } // If this is a big-endian system and the store is narrower than the // full alloca type, we need to do a shift to get the right bits. int ShAmt = 0; if (DL.isBigEndian()) { // On big-endian machines, the lowest bit is stored at the bit offset // from the pointer given by getTypeStoreSizeInBits. This matters for // integers with a bitwidth that is not a multiple of 8. ShAmt = DestStoreWidth - SrcStoreWidth - Offset; } else { ShAmt = Offset; } // Note: we support negative bitwidths (with shr) which are not defined. // We do this to support (f.e.) stores off the end of a structure where // only some bits in the structure are set. APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth)); if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) { SV = Builder.CreateShl(SV, ConstantInt::get(SV->getType(), ShAmt)); Mask <<= ShAmt; } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) { SV = Builder.CreateLShr(SV, ConstantInt::get(SV->getType(), -ShAmt)); Mask = Mask.lshr(-ShAmt); } // Mask out the bits we are about to insert from the old value, and or // in the new bits. if (SrcWidth != DestWidth) { assert(DestWidth > SrcWidth); Old = Builder.CreateAnd(Old, ConstantInt::get(Context, ~Mask), "mask"); SV = Builder.CreateOr(Old, SV, "ins"); } return SV; } //===----------------------------------------------------------------------===// // SRoA Driver //===----------------------------------------------------------------------===// bool SROA::runOnFunction(Function &F) { if (skipOptnoneFunction(F)) return false; bool Changed = performPromotion(F); while (1) { bool LocalChange = performScalarRepl(F); if (!LocalChange) break; // No need to repromote if no scalarrepl Changed = true; LocalChange = performPromotion(F); if (!LocalChange) break; // No need to re-scalarrepl if no promotion } return Changed; } namespace { class AllocaPromoter : public LoadAndStorePromoter { AllocaInst *AI; DIBuilder *DIB; SmallVector<DbgDeclareInst *, 4> DDIs; SmallVector<DbgValueInst *, 4> DVIs; public: AllocaPromoter(ArrayRef<Instruction*> Insts, SSAUpdater &S, DIBuilder *DB) : LoadAndStorePromoter(Insts, S), AI(nullptr), DIB(DB) {} void run(AllocaInst *AI, const SmallVectorImpl<Instruction*> &Insts) { // Remember which alloca we're promoting (for isInstInList). 
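// Also collect any dbg.declare/dbg.value users of the alloca up front, so the
// debug intrinsics can be erased once the alloca itself has been rewritten
// and removed.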
this->AI = AI; if (auto *L = LocalAsMetadata::getIfExists(AI)) { if (auto *DINode = MetadataAsValue::getIfExists(AI->getContext(), L)) { for (User *U : DINode->users()) if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U)) DDIs.push_back(DDI); else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U)) DVIs.push_back(DVI); } } LoadAndStorePromoter::run(Insts); AI->eraseFromParent(); for (SmallVectorImpl<DbgDeclareInst *>::iterator I = DDIs.begin(), E = DDIs.end(); I != E; ++I) { DbgDeclareInst *DDI = *I; DDI->eraseFromParent(); } for (SmallVectorImpl<DbgValueInst *>::iterator I = DVIs.begin(), E = DVIs.end(); I != E; ++I) { DbgValueInst *DVI = *I; DVI->eraseFromParent(); } } bool isInstInList(Instruction *I, const SmallVectorImpl<Instruction*> &Insts) const override { if (LoadInst *LI = dyn_cast<LoadInst>(I)) return LI->getOperand(0) == AI; return cast<StoreInst>(I)->getPointerOperand() == AI; } void updateDebugInfo(Instruction *Inst) const override { for (SmallVectorImpl<DbgDeclareInst *>::const_iterator I = DDIs.begin(), E = DDIs.end(); I != E; ++I) { DbgDeclareInst *DDI = *I; if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) ConvertDebugDeclareToDebugValue(DDI, SI, *DIB); else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) ConvertDebugDeclareToDebugValue(DDI, LI, *DIB); } for (SmallVectorImpl<DbgValueInst *>::const_iterator I = DVIs.begin(), E = DVIs.end(); I != E; ++I) { DbgValueInst *DVI = *I; Value *Arg = nullptr; if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) { // If an argument is zero extended then use argument directly. The ZExt // may be zapped by an optimization pass in future. if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0))) Arg = dyn_cast<Argument>(ZExt->getOperand(0)); if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0))) Arg = dyn_cast<Argument>(SExt->getOperand(0)); if (!Arg) Arg = SI->getOperand(0); } else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) { Arg = LI->getOperand(0); } else { continue; } DIB->insertDbgValueIntrinsic(Arg, 0, DVI->getVariable(), DVI->getExpression(), DVI->getDebugLoc(), Inst); } } }; } // end anon namespace /// isSafeSelectToSpeculate - Select instructions that use an alloca and are /// subsequently loaded can be rewritten to load both input pointers and then /// select between the result, allowing the load of the alloca to be promoted. /// From this: /// %P2 = select i1 %cond, i32* %Alloca, i32* %Other /// %V = load i32* %P2 /// to: /// %V1 = load i32* %Alloca -> will be mem2reg'd /// %V2 = load i32* %Other /// %V = select i1 %cond, i32 %V1, i32 %V2 /// /// We can do this to a select if its only uses are loads and if the operand to /// the select can be loaded unconditionally. static bool isSafeSelectToSpeculate(SelectInst *SI) { const DataLayout &DL = SI->getModule()->getDataLayout(); bool TDerefable = isDereferenceablePointer(SI->getTrueValue(), DL); bool FDerefable = isDereferenceablePointer(SI->getFalseValue(), DL); for (User *U : SI->users()) { LoadInst *LI = dyn_cast<LoadInst>(U); if (!LI || !LI->isSimple()) return false; // Both operands to the select need to be dereferencable, either absolutely // (e.g. allocas) or at this point because we can see other accesses to it. 
if (!TDerefable && !isSafeToLoadUnconditionally(SI->getTrueValue(), LI, LI->getAlignment())) return false; if (!FDerefable && !isSafeToLoadUnconditionally(SI->getFalseValue(), LI, LI->getAlignment())) return false; } return true; } /// isSafePHIToSpeculate - PHI instructions that use an alloca and are /// subsequently loaded can be rewritten to load both input pointers in the pred /// blocks and then PHI the results, allowing the load of the alloca to be /// promoted. /// From this: /// %P2 = phi [i32* %Alloca, i32* %Other] /// %V = load i32* %P2 /// to: /// %V1 = load i32* %Alloca -> will be mem2reg'd /// ... /// %V2 = load i32* %Other /// ... /// %V = phi [i32 %V1, i32 %V2] /// /// We can do this to a select if its only uses are loads and if the operand to /// the select can be loaded unconditionally. static bool isSafePHIToSpeculate(PHINode *PN) { // For now, we can only do this promotion if the load is in the same block as // the PHI, and if there are no stores between the phi and load. // TODO: Allow recursive phi users. // TODO: Allow stores. BasicBlock *BB = PN->getParent(); unsigned MaxAlign = 0; for (User *U : PN->users()) { LoadInst *LI = dyn_cast<LoadInst>(U); if (!LI || !LI->isSimple()) return false; // For now we only allow loads in the same block as the PHI. This is a // common case that happens when instcombine merges two loads through a PHI. if (LI->getParent() != BB) return false; // Ensure that there are no instructions between the PHI and the load that // could store. for (BasicBlock::iterator BBI = PN; &*BBI != LI; ++BBI) if (BBI->mayWriteToMemory()) return false; MaxAlign = std::max(MaxAlign, LI->getAlignment()); } const DataLayout &DL = PN->getModule()->getDataLayout(); // Okay, we know that we have one or more loads in the same block as the PHI. // We can transform this if it is safe to push the loads into the predecessor // blocks. The only thing to watch out for is that we can't put a possibly // trapping load in the predecessor if it is a critical edge. for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { BasicBlock *Pred = PN->getIncomingBlock(i); Value *InVal = PN->getIncomingValue(i); // If the terminator of the predecessor has side-effects (an invoke), // there is no safe place to put a load in the predecessor. if (Pred->getTerminator()->mayHaveSideEffects()) return false; // If the value is produced by the terminator of the predecessor // (an invoke), there is no valid place to put a load in the predecessor. if (Pred->getTerminator() == InVal) return false; // If the predecessor has a single successor, then the edge isn't critical. if (Pred->getTerminator()->getNumSuccessors() == 1) continue; // If this pointer is always safe to load, or if we can prove that there is // already a load in the block, then we can move the load to the pred block. if (isDereferenceablePointer(InVal, DL) || isSafeToLoadUnconditionally(InVal, Pred->getTerminator(), MaxAlign)) continue; return false; } return true; } /// tryToMakeAllocaBePromotable - This returns true if the alloca only has /// direct (non-volatile) loads and stores to it. If the alloca is close but /// not quite there, this will transform the code to allow promotion. As such, /// it is a non-pure predicate. 
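/// The rewrites it may perform are: folding a select whose condition is a
/// constant, turning a load of a select or phi of pointers into a select or
/// phi of loaded values (guarded by isSafeSelectToSpeculate and
/// isSafePHIToSpeculate above), dropping dead phis, and erasing bitcasts that
/// are only used by lifetime markers.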
static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout &DL) { SetVector<Instruction*, SmallVector<Instruction*, 4>, SmallPtrSet<Instruction*, 4> > InstsToRewrite; for (User *U : AI->users()) { if (LoadInst *LI = dyn_cast<LoadInst>(U)) { if (!LI->isSimple()) return false; continue; } if (StoreInst *SI = dyn_cast<StoreInst>(U)) { if (SI->getOperand(0) == AI || !SI->isSimple()) return false; // Don't allow a store OF the AI, only INTO the AI. continue; } if (SelectInst *SI = dyn_cast<SelectInst>(U)) { // If the condition being selected on is a constant, fold the select, yes // this does (rarely) happen early on. if (ConstantInt *CI = dyn_cast<ConstantInt>(SI->getCondition())) { Value *Result = SI->getOperand(1+CI->isZero()); SI->replaceAllUsesWith(Result); SI->eraseFromParent(); // This is very rare and we just scrambled the use list of AI, start // over completely. return tryToMakeAllocaBePromotable(AI, DL); } // If it is safe to turn "load (select c, AI, ptr)" into a select of two // loads, then we can transform this by rewriting the select. if (!isSafeSelectToSpeculate(SI)) return false; InstsToRewrite.insert(SI); continue; } if (PHINode *PN = dyn_cast<PHINode>(U)) { if (PN->use_empty()) { // Dead PHIs can be stripped. InstsToRewrite.insert(PN); continue; } // If it is safe to turn "load (phi [AI, ptr, ...])" into a PHI of loads // in the pred blocks, then we can transform this by rewriting the PHI. if (!isSafePHIToSpeculate(PN)) return false; InstsToRewrite.insert(PN); continue; } if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) { if (onlyUsedByLifetimeMarkers(BCI)) { InstsToRewrite.insert(BCI); continue; } } return false; } // If there are no instructions to rewrite, then all uses are load/stores and // we're done! if (InstsToRewrite.empty()) return true; // If we have instructions that need to be rewritten for this to be promotable // take care of it now. for (unsigned i = 0, e = InstsToRewrite.size(); i != e; ++i) { if (BitCastInst *BCI = dyn_cast<BitCastInst>(InstsToRewrite[i])) { // This could only be a bitcast used by nothing but lifetime intrinsics. for (BitCastInst::user_iterator I = BCI->user_begin(), E = BCI->user_end(); I != E;) cast<Instruction>(*I++)->eraseFromParent(); BCI->eraseFromParent(); continue; } if (SelectInst *SI = dyn_cast<SelectInst>(InstsToRewrite[i])) { // Selects in InstsToRewrite only have load uses. Rewrite each as two // loads with a new select. while (!SI->use_empty()) { LoadInst *LI = cast<LoadInst>(SI->user_back()); IRBuilder<> Builder(LI); LoadInst *TrueLoad = Builder.CreateLoad(SI->getTrueValue(), LI->getName()+".t"); LoadInst *FalseLoad = Builder.CreateLoad(SI->getFalseValue(), LI->getName()+".f"); // Transfer alignment and AA info if present. TrueLoad->setAlignment(LI->getAlignment()); FalseLoad->setAlignment(LI->getAlignment()); AAMDNodes Tags; LI->getAAMetadata(Tags); if (Tags) { TrueLoad->setAAMetadata(Tags); FalseLoad->setAAMetadata(Tags); } Value *V = Builder.CreateSelect(SI->getCondition(), TrueLoad, FalseLoad); V->takeName(LI); LI->replaceAllUsesWith(V); LI->eraseFromParent(); } // Now that all the loads are gone, the select is gone too. SI->eraseFromParent(); continue; } // Otherwise, we have a PHI node which allows us to push the loads into the // predecessors. 
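// The rewrite proceeds in three steps: build a new phi of the loaded type,
// redirect every load of the old pointer phi to it, then materialize one load
// per predecessor block to feed the new phi.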
PHINode *PN = cast<PHINode>(InstsToRewrite[i]); if (PN->use_empty()) { PN->eraseFromParent(); continue; } Type *LoadTy = cast<PointerType>(PN->getType())->getElementType(); PHINode *NewPN = PHINode::Create(LoadTy, PN->getNumIncomingValues(), PN->getName()+".ld", PN); // Get the AA tags and alignment to use from one of the loads. It doesn't // matter which one we get and if any differ, it doesn't matter. LoadInst *SomeLoad = cast<LoadInst>(PN->user_back()); AAMDNodes AATags; SomeLoad->getAAMetadata(AATags); unsigned Align = SomeLoad->getAlignment(); // Rewrite all loads of the PN to use the new PHI. while (!PN->use_empty()) { LoadInst *LI = cast<LoadInst>(PN->user_back()); LI->replaceAllUsesWith(NewPN); LI->eraseFromParent(); } // Inject loads into all of the pred blocks. Keep track of which blocks we // insert them into in case we have multiple edges from the same block. DenseMap<BasicBlock*, LoadInst*> InsertedLoads; for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { BasicBlock *Pred = PN->getIncomingBlock(i); LoadInst *&Load = InsertedLoads[Pred]; if (!Load) { Load = new LoadInst(PN->getIncomingValue(i), PN->getName() + "." + Pred->getName(), Pred->getTerminator()); Load->setAlignment(Align); if (AATags) Load->setAAMetadata(AATags); } NewPN->addIncoming(Load, Pred); } PN->eraseFromParent(); } ++NumAdjusted; return true; } bool SROA::performPromotion(Function &F) { std::vector<AllocaInst*> Allocas; const DataLayout &DL = F.getParent()->getDataLayout(); DominatorTree *DT = nullptr; if (HasDomTree) DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); AssumptionCache &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); BasicBlock &BB = F.getEntryBlock(); // Get the entry node for the function DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false); bool Changed = false; SmallVector<Instruction*, 64> Insts; while (1) { Allocas.clear(); // Find allocas that are safe to promote, by looking at all instructions in // the entry node for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I) if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) // Is it an alloca? if (tryToMakeAllocaBePromotable(AI, DL)) Allocas.push_back(AI); if (Allocas.empty()) break; if (HasDomTree) PromoteMemToReg(Allocas, *DT, nullptr, &AC); else { SSAUpdater SSA; for (unsigned i = 0, e = Allocas.size(); i != e; ++i) { AllocaInst *AI = Allocas[i]; // Build list of instructions to promote. for (User *U : AI->users()) Insts.push_back(cast<Instruction>(U)); AllocaPromoter(Insts, SSA, &DIB).run(AI, Insts); Insts.clear(); } } NumPromoted += Allocas.size(); Changed = true; } return Changed; } /// ShouldAttemptScalarRepl - Decide if an alloca is a good candidate for /// SROA. It must be a struct or array type with a small number of elements. bool SROA::ShouldAttemptScalarRepl(AllocaInst *AI) { Type *T = AI->getAllocatedType(); // Do not promote any struct that has too many members. if (StructType *ST = dyn_cast<StructType>(T)) return ST->getNumElements() <= StructMemberThreshold; // Do not promote any array that has too many elements. if (ArrayType *AT = dyn_cast<ArrayType>(T)) return AT->getNumElements() <= ArrayElementThreshold; return false; } // performScalarRepl - This algorithm is a simple worklist driven algorithm, // which runs on all of the alloca instructions in the entry block, removing // them if they are only used by getelementptr instructions. 
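// The worklist starts with the allocas in the entry block, and
// DoScalarReplacement pushes each element alloca it creates back onto it, so
// nested aggregates are split recursively.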
// bool SROA::performScalarRepl(Function &F) { std::vector<AllocaInst*> WorkList; const DataLayout &DL = F.getParent()->getDataLayout(); // Scan the entry basic block, adding allocas to the worklist. BasicBlock &BB = F.getEntryBlock(); for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I) if (AllocaInst *A = dyn_cast<AllocaInst>(I)) WorkList.push_back(A); // Process the worklist bool Changed = false; while (!WorkList.empty()) { AllocaInst *AI = WorkList.back(); WorkList.pop_back(); // Handle dead allocas trivially. These can be formed by SROA'ing arrays // with unused elements. if (AI->use_empty()) { AI->eraseFromParent(); Changed = true; continue; } // If this alloca is impossible for us to promote, reject it early. if (AI->isArrayAllocation() || !AI->getAllocatedType()->isSized()) continue; // Check to see if we can perform the core SROA transformation. We cannot // transform the allocation instruction if it is an array allocation // (allocations OF arrays are ok though), and an allocation of a scalar // value cannot be decomposed at all. uint64_t AllocaSize = DL.getTypeAllocSize(AI->getAllocatedType()); // Do not promote [0 x %struct]. if (AllocaSize == 0) continue; // Do not promote any struct whose size is too big. if (AllocaSize > SRThreshold) continue; // If the alloca looks like a good candidate for scalar replacement, and if // all its users can be transformed, then split up the aggregate into its // separate elements. if (ShouldAttemptScalarRepl(AI) && isSafeAllocaToScalarRepl(AI)) { DoScalarReplacement(AI, WorkList); Changed = true; continue; } // If we can turn this aggregate value (potentially with casts) into a // simple scalar value that can be mem2reg'd into a register value. // IsNotTrivial tracks whether this is something that mem2reg could have // promoted itself. If so, we don't want to transform it needlessly. Note // that we can't just check based on the type: the alloca may be of an i32 // but that has pointer arithmetic to set byte 3 of it or something. if (AllocaInst *NewAI = ConvertToScalarInfo((unsigned)AllocaSize, DL, ScalarLoadThreshold) .TryConvert(AI)) { NewAI->takeName(AI); AI->eraseFromParent(); ++NumConverted; Changed = true; continue; } // Otherwise, couldn't process this alloca. } return Changed; } /// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl /// predicate, do SROA now. void SROA::DoScalarReplacement(AllocaInst *AI, std::vector<AllocaInst*> &WorkList) { DEBUG(dbgs() << "Found inst to SROA: " << *AI << '\n'); SmallVector<AllocaInst*, 32> ElementAllocas; if (StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) { ElementAllocas.reserve(ST->getNumContainedTypes()); for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) { AllocaInst *NA = new AllocaInst(ST->getContainedType(i), nullptr, AI->getAlignment(), AI->getName() + "." + Twine(i), AI); ElementAllocas.push_back(NA); WorkList.push_back(NA); // Add to worklist for recursive processing } } else { ArrayType *AT = cast<ArrayType>(AI->getAllocatedType()); ElementAllocas.reserve(AT->getNumElements()); Type *ElTy = AT->getElementType(); for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) { AllocaInst *NA = new AllocaInst(ElTy, nullptr, AI->getAlignment(), AI->getName() + "." + Twine(i), AI); ElementAllocas.push_back(NA); WorkList.push_back(NA); // Add to worklist for recursive processing } } // Now that we have created the new alloca instructions, rewrite all the // uses of the old alloca. 
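  // Illustrative sketch (hypothetical names): after the split above, a use like
  //   %f = getelementptr { i32, float }* %agg, i32 0, i32 1
  //   %v = load float* %f
  // is rewritten by RewriteForScalarRepl so the GEP simply becomes the second
  // element alloca:
  //   %v = load float* %agg.1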
RewriteForScalarRepl(AI, AI, 0, ElementAllocas); // Now erase any instructions that were made dead while rewriting the alloca. DeleteDeadInstructions(); AI->eraseFromParent(); ++NumReplaced; } /// DeleteDeadInstructions - Erase instructions on the DeadInstrs list, /// recursively including all their operands that become trivially dead. void SROA::DeleteDeadInstructions() { while (!DeadInsts.empty()) { Instruction *I = cast<Instruction>(DeadInsts.pop_back_val()); for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) if (Instruction *U = dyn_cast<Instruction>(*OI)) { // Zero out the operand and see if it becomes trivially dead. // (But, don't add allocas to the dead instruction list -- they are // already on the worklist and will be deleted separately.) *OI = nullptr; if (isInstructionTriviallyDead(U) && !isa<AllocaInst>(U)) DeadInsts.push_back(U); } I->eraseFromParent(); } } /// isSafeForScalarRepl - Check if instruction I is a safe use with regard to /// performing scalar replacement of alloca AI. The results are flagged in /// the Info parameter. Offset indicates the position within AI that is /// referenced by this instruction. void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset, AllocaInfo &Info) { const DataLayout &DL = I->getModule()->getDataLayout(); for (Use &U : I->uses()) { Instruction *User = cast<Instruction>(U.getUser()); if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) { isSafeForScalarRepl(BC, Offset, Info); } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) { uint64_t GEPOffset = Offset; isSafeGEP(GEPI, GEPOffset, Info); if (!Info.isUnsafe) isSafeForScalarRepl(GEPI, GEPOffset, Info); } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) { ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength()); if (!Length || Length->isNegative()) return MarkUnsafe(Info, User); isSafeMemAccess(Offset, Length->getZExtValue(), nullptr, U.getOperandNo() == 0, Info, MI, true /*AllowWholeAccess*/); } else if (LoadInst *LI = dyn_cast<LoadInst>(User)) { if (!LI->isSimple()) return MarkUnsafe(Info, User); Type *LIType = LI->getType(); isSafeMemAccess(Offset, DL.getTypeAllocSize(LIType), LIType, false, Info, LI, true /*AllowWholeAccess*/); Info.hasALoadOrStore = true; } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) { // Store is ok if storing INTO the pointer, not storing the pointer if (!SI->isSimple() || SI->getOperand(0) == I) return MarkUnsafe(Info, User); Type *SIType = SI->getOperand(0)->getType(); isSafeMemAccess(Offset, DL.getTypeAllocSize(SIType), SIType, true, Info, SI, true /*AllowWholeAccess*/); Info.hasALoadOrStore = true; } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(User)) { if (II->getIntrinsicID() != Intrinsic::lifetime_start && II->getIntrinsicID() != Intrinsic::lifetime_end) return MarkUnsafe(Info, User); } else if (isa<PHINode>(User) || isa<SelectInst>(User)) { isSafePHISelectUseForScalarRepl(User, Offset, Info); } else { return MarkUnsafe(Info, User); } if (Info.isUnsafe) return; } } /// isSafePHIUseForScalarRepl - If we see a PHI node or select using a pointer /// derived from the alloca, we can often still split the alloca into elements. /// This is useful if we have a large alloca where one element is phi'd /// together somewhere: we can SRoA and promote all the other elements even if /// we end up not being able to promote this one. /// /// All we require is that the uses of the PHI do not index into other parts of /// the alloca. 
The most important use case for this is single load and stores /// that are PHI'd together, which can happen due to code sinking. void SROA::isSafePHISelectUseForScalarRepl(Instruction *I, uint64_t Offset, AllocaInfo &Info) { // If we've already checked this PHI, don't do it again. if (PHINode *PN = dyn_cast<PHINode>(I)) if (!Info.CheckedPHIs.insert(PN).second) return; const DataLayout &DL = I->getModule()->getDataLayout(); for (User *U : I->users()) { Instruction *UI = cast<Instruction>(U); if (BitCastInst *BC = dyn_cast<BitCastInst>(UI)) { isSafePHISelectUseForScalarRepl(BC, Offset, Info); } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(UI)) { // Only allow "bitcast" GEPs for simplicity. We could generalize this, // but would have to prove that we're staying inside of an element being // promoted. if (!GEPI->hasAllZeroIndices()) return MarkUnsafe(Info, UI); isSafePHISelectUseForScalarRepl(GEPI, Offset, Info); } else if (LoadInst *LI = dyn_cast<LoadInst>(UI)) { if (!LI->isSimple()) return MarkUnsafe(Info, UI); Type *LIType = LI->getType(); isSafeMemAccess(Offset, DL.getTypeAllocSize(LIType), LIType, false, Info, LI, false /*AllowWholeAccess*/); Info.hasALoadOrStore = true; } else if (StoreInst *SI = dyn_cast<StoreInst>(UI)) { // Store is ok if storing INTO the pointer, not storing the pointer if (!SI->isSimple() || SI->getOperand(0) == I) return MarkUnsafe(Info, UI); Type *SIType = SI->getOperand(0)->getType(); isSafeMemAccess(Offset, DL.getTypeAllocSize(SIType), SIType, true, Info, SI, false /*AllowWholeAccess*/); Info.hasALoadOrStore = true; } else if (isa<PHINode>(UI) || isa<SelectInst>(UI)) { isSafePHISelectUseForScalarRepl(UI, Offset, Info); } else { return MarkUnsafe(Info, UI); } if (Info.isUnsafe) return; } } /// isSafeGEP - Check if a GEP instruction can be handled for scalar /// replacement. It is safe when all the indices are constant, in-bounds /// references, and when the resulting offset corresponds to an element within /// the alloca type. The results are flagged in the Info parameter. Upon /// return, Offset is adjusted as specified by the GEP indices. void SROA::isSafeGEP(GetElementPtrInst *GEPI, uint64_t &Offset, AllocaInfo &Info) { gep_type_iterator GEPIt = gep_type_begin(GEPI), E = gep_type_end(GEPI); if (GEPIt == E) return; bool NonConstant = false; unsigned NonConstantIdxSize = 0; // Walk through the GEP type indices, checking the types that this indexes // into. for (; GEPIt != E; ++GEPIt) { // Ignore struct elements, no extra checking needed for these. if ((*GEPIt)->isStructTy()) continue; ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPIt.getOperand()); if (!IdxVal) return MarkUnsafe(Info, GEPI); } // Compute the offset due to this GEP and check if the alloca has a // component element at that offset. SmallVector<Value*, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end()); // If this GEP is non-constant then the last operand must have been a // dynamic index into a vector. Pop this now as it has no impact on the // constant part of the offset. if (NonConstant) Indices.pop_back(); const DataLayout &DL = GEPI->getModule()->getDataLayout(); Offset += DL.getIndexedOffset(GEPI->getPointerOperandType(), Indices); if (!TypeHasComponent(Info.AI->getAllocatedType(), Offset, NonConstantIdxSize, DL)) MarkUnsafe(Info, GEPI); } /// isHomogeneousAggregate - Check if type T is a struct or array containing /// elements of the same type (which is always true for arrays). 
If so, /// return true with NumElts and EltTy set to the number of elements and the /// element type, respectively. static bool isHomogeneousAggregate(Type *T, unsigned &NumElts, Type *&EltTy) { if (ArrayType *AT = dyn_cast<ArrayType>(T)) { NumElts = AT->getNumElements(); EltTy = (NumElts == 0 ? nullptr : AT->getElementType()); return true; } if (StructType *ST = dyn_cast<StructType>(T)) { NumElts = ST->getNumContainedTypes(); EltTy = (NumElts == 0 ? nullptr : ST->getContainedType(0)); for (unsigned n = 1; n < NumElts; ++n) { if (ST->getContainedType(n) != EltTy) return false; } return true; } return false; } /// isCompatibleAggregate - Check if T1 and T2 are either the same type or are /// "homogeneous" aggregates with the same element type and number of elements. static bool isCompatibleAggregate(Type *T1, Type *T2) { if (T1 == T2) return true; unsigned NumElts1, NumElts2; Type *EltTy1, *EltTy2; if (isHomogeneousAggregate(T1, NumElts1, EltTy1) && isHomogeneousAggregate(T2, NumElts2, EltTy2) && NumElts1 == NumElts2 && EltTy1 == EltTy2) return true; return false; } /// isSafeMemAccess - Check if a load/store/memcpy operates on the entire AI /// alloca or has an offset and size that corresponds to a component element /// within it. The offset checked here may have been formed from a GEP with a /// pointer bitcasted to a different type. /// /// If AllowWholeAccess is true, then this allows uses of the entire alloca as a /// unit. If false, it only allows accesses known to be in a single element. void SROA::isSafeMemAccess(uint64_t Offset, uint64_t MemSize, Type *MemOpType, bool isStore, AllocaInfo &Info, Instruction *TheAccess, bool AllowWholeAccess) { const DataLayout &DL = TheAccess->getModule()->getDataLayout(); // Check if this is a load/store of the entire alloca. if (Offset == 0 && AllowWholeAccess && MemSize == DL.getTypeAllocSize(Info.AI->getAllocatedType())) { // This can be safe for MemIntrinsics (where MemOpType is 0) and integer // loads/stores (which are essentially the same as the MemIntrinsics with // regard to copying padding between elements). But, if an alloca is // flagged as both a source and destination of such operations, we'll need // to check later for padding between elements. if (!MemOpType || MemOpType->isIntegerTy()) { if (isStore) Info.isMemCpyDst = true; else Info.isMemCpySrc = true; return; } // This is also safe for references using a type that is compatible with // the type of the alloca, so that loads/stores can be rewritten using // insertvalue/extractvalue. if (isCompatibleAggregate(MemOpType, Info.AI->getAllocatedType())) { Info.hasSubelementAccess = true; return; } } // Check if the offset/size correspond to a component within the alloca type. Type *T = Info.AI->getAllocatedType(); if (TypeHasComponent(T, Offset, MemSize, DL)) { Info.hasSubelementAccess = true; return; } return MarkUnsafe(Info, TheAccess); } /// TypeHasComponent - Return true if T has a component type with the /// specified offset and size. If Size is zero, do not check the size. 
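/// For example (assuming the usual layout with 4-byte i32/float and no
/// packing), for T = { i32, [4 x float] } an access at Offset 8 with Size 4
/// lands exactly on the second float of the array and is accepted, while an
/// access at Offset 2 with Size 4 straddles the i32/array boundary and is
/// rejected.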
bool SROA::TypeHasComponent(Type *T, uint64_t Offset, uint64_t Size, const DataLayout &DL) { Type *EltTy; uint64_t EltSize; if (StructType *ST = dyn_cast<StructType>(T)) { const StructLayout *Layout = DL.getStructLayout(ST); unsigned EltIdx = Layout->getElementContainingOffset(Offset); EltTy = ST->getContainedType(EltIdx); EltSize = DL.getTypeAllocSize(EltTy); Offset -= Layout->getElementOffset(EltIdx); } else if (ArrayType *AT = dyn_cast<ArrayType>(T)) { EltTy = AT->getElementType(); EltSize = DL.getTypeAllocSize(EltTy); if (Offset >= AT->getNumElements() * EltSize) return false; Offset %= EltSize; } else if (VectorType *VT = dyn_cast<VectorType>(T)) { EltTy = VT->getElementType(); EltSize = DL.getTypeAllocSize(EltTy); if (Offset >= VT->getNumElements() * EltSize) return false; Offset %= EltSize; } else { return false; } if (Offset == 0 && (Size == 0 || EltSize == Size)) return true; // Check if the component spans multiple elements. if (Offset + Size > EltSize) return false; return TypeHasComponent(EltTy, Offset, Size, DL); } /// RewriteForScalarRepl - Alloca AI is being split into NewElts, so rewrite /// the instruction I, which references it, to use the separate elements. /// Offset indicates the position within AI that is referenced by this /// instruction. void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset, SmallVectorImpl<AllocaInst *> &NewElts) { const DataLayout &DL = I->getModule()->getDataLayout(); for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E;) { Use &TheUse = *UI++; Instruction *User = cast<Instruction>(TheUse.getUser()); if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) { RewriteBitCast(BC, AI, Offset, NewElts); continue; } if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) { RewriteGEP(GEPI, AI, Offset, NewElts); continue; } if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) { ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength()); uint64_t MemSize = Length->getZExtValue(); if (Offset == 0 && MemSize == DL.getTypeAllocSize(AI->getAllocatedType())) RewriteMemIntrinUserOfAlloca(MI, I, AI, NewElts); // Otherwise the intrinsic can only touch a single element and the // address operand will be updated, so nothing else needs to be done. continue; } if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(User)) { if (II->getIntrinsicID() == Intrinsic::lifetime_start || II->getIntrinsicID() == Intrinsic::lifetime_end) { RewriteLifetimeIntrinsic(II, AI, Offset, NewElts); } continue; } if (LoadInst *LI = dyn_cast<LoadInst>(User)) { Type *LIType = LI->getType(); if (isCompatibleAggregate(LIType, AI->getAllocatedType())) { // Replace: // %res = load { i32, i32 }* %alloc // with: // %load.0 = load i32* %alloc.0 // %insert.0 insertvalue { i32, i32 } zeroinitializer, i32 %load.0, 0 // %load.1 = load i32* %alloc.1 // %insert = insertvalue { i32, i32 } %insert.0, i32 %load.1, 1 // (Also works for arrays instead of structs) Value *Insert = UndefValue::get(LIType); IRBuilder<> Builder(LI); for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { Value *Load = Builder.CreateLoad(NewElts[i], "load"); Insert = Builder.CreateInsertValue(Insert, Load, i, "insert"); } LI->replaceAllUsesWith(Insert); DeadInsts.push_back(LI); } else if (LIType->isIntegerTy() && DL.getTypeAllocSize(LIType) == DL.getTypeAllocSize(AI->getAllocatedType())) { // If this is a load of the entire alloca to an integer, rewrite it. 
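        // Illustrative sketch (hypothetical IR names): for
        //   %agg = alloca { i32, i32 }
        // split into %agg.0 and %agg.1, a load of the whole alloca as an i64 is
        // reassembled from the element allocas, roughly (little-endian case):
        //   %lo   = load i32* %agg.0
        //   %hi   = load i32* %agg.1
        //   %lo.z = zext i32 %lo to i64
        //   %hi.z = zext i32 %hi to i64
        //   %hi.s = shl i64 %hi.z, 32
        //   %x    = or i64 %hi.s, %lo.z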
RewriteLoadUserOfWholeAlloca(LI, AI, NewElts); } continue; } if (StoreInst *SI = dyn_cast<StoreInst>(User)) { Value *Val = SI->getOperand(0); Type *SIType = Val->getType(); if (isCompatibleAggregate(SIType, AI->getAllocatedType())) { // Replace: // store { i32, i32 } %val, { i32, i32 }* %alloc // with: // %val.0 = extractvalue { i32, i32 } %val, 0 // store i32 %val.0, i32* %alloc.0 // %val.1 = extractvalue { i32, i32 } %val, 1 // store i32 %val.1, i32* %alloc.1 // (Also works for arrays instead of structs) IRBuilder<> Builder(SI); for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { Value *Extract = Builder.CreateExtractValue(Val, i, Val->getName()); Builder.CreateStore(Extract, NewElts[i]); } DeadInsts.push_back(SI); } else if (SIType->isIntegerTy() && DL.getTypeAllocSize(SIType) == DL.getTypeAllocSize(AI->getAllocatedType())) { // If this is a store of the entire alloca from an integer, rewrite it. RewriteStoreUserOfWholeAlloca(SI, AI, NewElts); } continue; } if (isa<SelectInst>(User) || isa<PHINode>(User)) { // If we have a PHI user of the alloca itself (as opposed to a GEP or // bitcast) we have to rewrite it. GEP and bitcast uses will be RAUW'd to // the new pointer. if (!isa<AllocaInst>(I)) continue; assert(Offset == 0 && NewElts[0] && "Direct alloca use should have a zero offset"); // If we have a use of the alloca, we know the derived uses will be // utilizing just the first element of the scalarized result. Insert a // bitcast of the first alloca before the user as required. AllocaInst *NewAI = NewElts[0]; BitCastInst *BCI = new BitCastInst(NewAI, AI->getType(), "", NewAI); NewAI->moveBefore(BCI); TheUse = BCI; continue; } } } /// RewriteBitCast - Update a bitcast reference to the alloca being replaced /// and recursively continue updating all of its uses. void SROA::RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset, SmallVectorImpl<AllocaInst *> &NewElts) { RewriteForScalarRepl(BC, AI, Offset, NewElts); if (BC->getOperand(0) != AI) return; // The bitcast references the original alloca. Replace its uses with // references to the alloca containing offset zero (which is normally at // index zero, but might not be in cases involving structs with elements // of size zero). Type *T = AI->getAllocatedType(); uint64_t EltOffset = 0; Type *IdxTy; uint64_t Idx = FindElementAndOffset(T, EltOffset, IdxTy, BC->getModule()->getDataLayout()); Instruction *Val = NewElts[Idx]; if (Val->getType() != BC->getDestTy()) { Val = new BitCastInst(Val, BC->getDestTy(), "", BC); Val->takeName(BC); } BC->replaceAllUsesWith(Val); DeadInsts.push_back(BC); } /// FindElementAndOffset - Return the index of the element containing Offset /// within the specified type, which must be either a struct or an array. /// Sets T to the type of the element and Offset to the offset within that /// element. IdxTy is set to the type of the index result to be used in a /// GEP instruction. 
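/// For example (a hypothetical query): with T = [10 x i32] and Offset = 26,
/// the element size is 4, so this returns Idx = 6, sets T to i32, reduces
/// Offset to 2, and sets IdxTy to i64 (array indices are emitted as i64,
/// struct field indices as i32).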
uint64_t SROA::FindElementAndOffset(Type *&T, uint64_t &Offset, Type *&IdxTy, const DataLayout &DL) { uint64_t Idx = 0; if (StructType *ST = dyn_cast<StructType>(T)) { const StructLayout *Layout = DL.getStructLayout(ST); Idx = Layout->getElementContainingOffset(Offset); T = ST->getContainedType(Idx); Offset -= Layout->getElementOffset(Idx); IdxTy = Type::getInt32Ty(T->getContext()); return Idx; } else if (ArrayType *AT = dyn_cast<ArrayType>(T)) { T = AT->getElementType(); uint64_t EltSize = DL.getTypeAllocSize(T); Idx = Offset / EltSize; Offset -= Idx * EltSize; IdxTy = Type::getInt64Ty(T->getContext()); return Idx; } VectorType *VT = cast<VectorType>(T); T = VT->getElementType(); uint64_t EltSize = DL.getTypeAllocSize(T); Idx = Offset / EltSize; Offset -= Idx * EltSize; IdxTy = Type::getInt64Ty(T->getContext()); return Idx; } /// RewriteGEP - Check if this GEP instruction moves the pointer across /// elements of the alloca that are being split apart, and if so, rewrite /// the GEP to be relative to the new element. void SROA::RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset, SmallVectorImpl<AllocaInst *> &NewElts) { uint64_t OldOffset = Offset; const DataLayout &DL = GEPI->getModule()->getDataLayout(); SmallVector<Value*, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end()); // If the GEP was dynamic then it must have been a dynamic vector lookup. // In this case, it must be the last GEP operand which is dynamic so keep that // aside until we've found the constant GEP offset then add it back in at the // end. Value* NonConstantIdx = nullptr; if (!GEPI->hasAllConstantIndices()) NonConstantIdx = Indices.pop_back_val(); Offset += DL.getIndexedOffset(GEPI->getPointerOperandType(), Indices); RewriteForScalarRepl(GEPI, AI, Offset, NewElts); Type *T = AI->getAllocatedType(); Type *IdxTy; uint64_t OldIdx = FindElementAndOffset(T, OldOffset, IdxTy, DL); if (GEPI->getOperand(0) == AI) OldIdx = ~0ULL; // Force the GEP to be rewritten. T = AI->getAllocatedType(); uint64_t EltOffset = Offset; uint64_t Idx = FindElementAndOffset(T, EltOffset, IdxTy, DL); // If this GEP does not move the pointer across elements of the alloca // being split, then it does not needs to be rewritten. if (Idx == OldIdx) return; Type *i32Ty = Type::getInt32Ty(AI->getContext()); SmallVector<Value*, 8> NewArgs; NewArgs.push_back(Constant::getNullValue(i32Ty)); while (EltOffset != 0) { uint64_t EltIdx = FindElementAndOffset(T, EltOffset, IdxTy, DL); NewArgs.push_back(ConstantInt::get(IdxTy, EltIdx)); } if (NonConstantIdx) { Type* GepTy = T; // This GEP has a dynamic index. We need to add "i32 0" to index through // any structs or arrays in the original type until we get to the vector // to index. while (!isa<VectorType>(GepTy)) { NewArgs.push_back(Constant::getNullValue(i32Ty)); GepTy = cast<CompositeType>(GepTy)->getTypeAtIndex(0U); } NewArgs.push_back(NonConstantIdx); } Instruction *Val = NewElts[Idx]; if (NewArgs.size() > 1) { Val = GetElementPtrInst::CreateInBounds(Val, NewArgs, "", GEPI); Val->takeName(GEPI); } if (Val->getType() != GEPI->getType()) Val = new BitCastInst(Val, GEPI->getType(), Val->getName(), GEPI); GEPI->replaceAllUsesWith(Val); DeadInsts.push_back(GEPI); } /// RewriteLifetimeIntrinsic - II is a lifetime.start/lifetime.end. Rewrite it /// to mark the lifetime of the scalarized memory. 
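/// For instance (illustrative only): a single
///   call void @llvm.lifetime.start(i64 8, i8* %agg.cast)
/// covering a { i32, i32 } alloca that has been split into %agg.0 and %agg.1
/// is replaced by two 4-byte markers, one per element alloca.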
void SROA::RewriteLifetimeIntrinsic(IntrinsicInst *II, AllocaInst *AI, uint64_t Offset, SmallVectorImpl<AllocaInst *> &NewElts) { ConstantInt *OldSize = cast<ConstantInt>(II->getArgOperand(0)); // Put matching lifetime markers on everything from Offset up to // Offset+OldSize. Type *AIType = AI->getAllocatedType(); const DataLayout &DL = II->getModule()->getDataLayout(); uint64_t NewOffset = Offset; Type *IdxTy; uint64_t Idx = FindElementAndOffset(AIType, NewOffset, IdxTy, DL); IRBuilder<> Builder(II); uint64_t Size = OldSize->getLimitedValue(); if (NewOffset) { // Splice the first element and index 'NewOffset' bytes in. SROA will // split the alloca again later. unsigned AS = AI->getType()->getAddressSpace(); Value *V = Builder.CreateBitCast(NewElts[Idx], Builder.getInt8PtrTy(AS)); V = Builder.CreateGEP(Builder.getInt8Ty(), V, Builder.getInt64(NewOffset)); IdxTy = NewElts[Idx]->getAllocatedType(); uint64_t EltSize = DL.getTypeAllocSize(IdxTy) - NewOffset; if (EltSize > Size) { EltSize = Size; Size = 0; } else { Size -= EltSize; } if (II->getIntrinsicID() == Intrinsic::lifetime_start) Builder.CreateLifetimeStart(V, Builder.getInt64(EltSize)); else Builder.CreateLifetimeEnd(V, Builder.getInt64(EltSize)); ++Idx; } for (; Idx != NewElts.size() && Size; ++Idx) { IdxTy = NewElts[Idx]->getAllocatedType(); uint64_t EltSize = DL.getTypeAllocSize(IdxTy); if (EltSize > Size) { EltSize = Size; Size = 0; } else { Size -= EltSize; } if (II->getIntrinsicID() == Intrinsic::lifetime_start) Builder.CreateLifetimeStart(NewElts[Idx], Builder.getInt64(EltSize)); else Builder.CreateLifetimeEnd(NewElts[Idx], Builder.getInt64(EltSize)); } DeadInsts.push_back(II); } /// RewriteMemIntrinUserOfAlloca - MI is a memcpy/memset/memmove from or to AI. /// Rewrite it to copy or set the elements of the scalarized memory. void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst, AllocaInst *AI, SmallVectorImpl<AllocaInst *> &NewElts) { // If this is a memcpy/memmove, construct the other pointer as the // appropriate type. The "Other" pointer is the pointer that goes to memory // that doesn't have anything to do with the alloca that we are promoting. For // memset, this Value* stays null. Value *OtherPtr = nullptr; unsigned MemAlignment = MI->getAlignment(); if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { // memmove/memcopy if (Inst == MTI->getRawDest()) OtherPtr = MTI->getRawSource(); else { assert(Inst == MTI->getRawSource()); OtherPtr = MTI->getRawDest(); } } // If there is an other pointer, we want to convert it to the same pointer // type as AI has, so we can GEP through it safely. if (OtherPtr) { unsigned AddrSpace = cast<PointerType>(OtherPtr->getType())->getAddressSpace(); // Remove bitcasts and all-zero GEPs from OtherPtr. This is an // optimization, but it's also required to detect the corner case where // both pointer operands are referencing the same memory, and where // OtherPtr may be a bitcast or GEP that currently being rewritten. (This // function is only called for mem intrinsics that access the whole // aggregate, so non-zero GEPs are not an issue here.) OtherPtr = OtherPtr->stripPointerCasts(); // Copying the alloca to itself is a no-op: just delete it. if (OtherPtr == AI || OtherPtr == NewElts[0]) { // This code will run twice for a no-op memcpy -- once for each operand. // Put only one reference to MI on the DeadInsts list. 
for (SmallVectorImpl<Value *>::const_iterator I = DeadInsts.begin(), E = DeadInsts.end(); I != E; ++I) if (*I == MI) return; DeadInsts.push_back(MI); return; } // If the pointer is not the right type, insert a bitcast to the right // type. Type *NewTy = PointerType::get(AI->getType()->getElementType(), AddrSpace); if (OtherPtr->getType() != NewTy) OtherPtr = new BitCastInst(OtherPtr, NewTy, OtherPtr->getName(), MI); } // Process each element of the aggregate. bool SROADest = MI->getRawDest() == Inst; Constant *Zero = Constant::getNullValue(Type::getInt32Ty(MI->getContext())); const DataLayout &DL = MI->getModule()->getDataLayout(); for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { // If this is a memcpy/memmove, emit a GEP of the other element address. Value *OtherElt = nullptr; unsigned OtherEltAlign = MemAlignment; if (OtherPtr) { Value *Idx[2] = { Zero, ConstantInt::get(Type::getInt32Ty(MI->getContext()), i) }; OtherElt = GetElementPtrInst::CreateInBounds(OtherPtr, Idx, OtherPtr->getName()+"."+Twine(i), MI); uint64_t EltOffset; PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType()); Type *OtherTy = OtherPtrTy->getElementType(); if (StructType *ST = dyn_cast<StructType>(OtherTy)) { EltOffset = DL.getStructLayout(ST)->getElementOffset(i); } else { Type *EltTy = cast<SequentialType>(OtherTy)->getElementType(); EltOffset = DL.getTypeAllocSize(EltTy) * i; } // The alignment of the other pointer is the guaranteed alignment of the // element, which is affected by both the known alignment of the whole // mem intrinsic and the alignment of the element. If the alignment of // the memcpy (f.e.) is 32 but the element is at a 4-byte offset, then the // known alignment is just 4 bytes. OtherEltAlign = (unsigned)MinAlign(OtherEltAlign, EltOffset); } Value *EltPtr = NewElts[i]; Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType(); // If we got down to a scalar, insert a load or store as appropriate. if (EltTy->isSingleValueType()) { if (isa<MemTransferInst>(MI)) { if (SROADest) { // From Other to Alloca. Value *Elt = new LoadInst(OtherElt, "tmp", false, OtherEltAlign, MI); new StoreInst(Elt, EltPtr, MI); } else { // From Alloca to Other. Value *Elt = new LoadInst(EltPtr, "tmp", MI); new StoreInst(Elt, OtherElt, false, OtherEltAlign, MI); } continue; } assert(isa<MemSetInst>(MI)); // If the stored element is zero (common case), just store a null // constant. Constant *StoreVal; if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getArgOperand(1))) { if (CI->isZero()) { StoreVal = Constant::getNullValue(EltTy); // 0.0, null, 0, <0,0> } else { // If EltTy is a vector type, get the element type. Type *ValTy = EltTy->getScalarType(); // Construct an integer with the right value. unsigned EltSize = DL.getTypeSizeInBits(ValTy); APInt OneVal(EltSize, CI->getZExtValue()); APInt TotalVal(OneVal); // Set each byte. for (unsigned i = 0; 8*i < EltSize; ++i) { TotalVal = TotalVal.shl(8); TotalVal |= OneVal; } // Convert the integer value to the appropriate type. StoreVal = ConstantInt::get(CI->getContext(), TotalVal); if (ValTy->isPointerTy()) StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy); else if (ValTy->isFloatingPointTy()) StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy); assert(StoreVal->getType() == ValTy && "Type mismatch!"); // If the requested value was a vector constant, create it. 
if (EltTy->isVectorTy()) { unsigned NumElts = cast<VectorType>(EltTy)->getNumElements(); StoreVal = ConstantVector::getSplat(NumElts, StoreVal); } } new StoreInst(StoreVal, EltPtr, MI); continue; } // Otherwise, if we're storing a byte variable, use a memset call for // this element. } unsigned EltSize = DL.getTypeAllocSize(EltTy); if (!EltSize) continue; IRBuilder<> Builder(MI); // Finally, insert the meminst for this element. if (isa<MemSetInst>(MI)) { Builder.CreateMemSet(EltPtr, MI->getArgOperand(1), EltSize, MI->isVolatile()); } else { assert(isa<MemTransferInst>(MI)); Value *Dst = SROADest ? EltPtr : OtherElt; // Dest ptr Value *Src = SROADest ? OtherElt : EltPtr; // Src ptr if (isa<MemCpyInst>(MI)) Builder.CreateMemCpy(Dst, Src, EltSize, OtherEltAlign,MI->isVolatile()); else Builder.CreateMemMove(Dst, Src, EltSize,OtherEltAlign,MI->isVolatile()); } } DeadInsts.push_back(MI); } /// RewriteStoreUserOfWholeAlloca - We found a store of an integer that /// overwrites the entire allocation. Extract out the pieces of the stored /// integer and store them individually. void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI, SmallVectorImpl<AllocaInst *> &NewElts) { // Extract each element out of the integer according to its structure offset // and store the element value to the individual alloca. Value *SrcVal = SI->getOperand(0); Type *AllocaEltTy = AI->getAllocatedType(); const DataLayout &DL = SI->getModule()->getDataLayout(); uint64_t AllocaSizeBits = DL.getTypeAllocSizeInBits(AllocaEltTy); IRBuilder<> Builder(SI); // Handle tail padding by extending the operand if (DL.getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits) SrcVal = Builder.CreateZExt(SrcVal, IntegerType::get(SI->getContext(), AllocaSizeBits)); DEBUG(dbgs() << "PROMOTING STORE TO WHOLE ALLOCA: " << *AI << '\n' << *SI << '\n'); // There are two forms here: AI could be an array or struct. Both cases // have different ways to compute the element offset. if (StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) { const StructLayout *Layout = DL.getStructLayout(EltSTy); for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { // Get the number of bits to shift SrcVal to get the value. Type *FieldTy = EltSTy->getElementType(i); uint64_t Shift = Layout->getElementOffsetInBits(i); if (DL.isBigEndian()) Shift = AllocaSizeBits - Shift - DL.getTypeAllocSizeInBits(FieldTy); Value *EltVal = SrcVal; if (Shift) { Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift); EltVal = Builder.CreateLShr(EltVal, ShiftVal, "sroa.store.elt"); } // Truncate down to an integer of the right size. uint64_t FieldSizeBits = DL.getTypeSizeInBits(FieldTy); // Ignore zero sized fields like {}, they obviously contain no data. if (FieldSizeBits == 0) continue; if (FieldSizeBits != AllocaSizeBits) EltVal = Builder.CreateTrunc(EltVal, IntegerType::get(SI->getContext(), FieldSizeBits)); Value *DestField = NewElts[i]; if (EltVal->getType() == FieldTy) { // Storing to an integer field of this size, just do it. } else if (FieldTy->isFloatingPointTy() || FieldTy->isVectorTy()) { // Bitcast to the right element type (for fp/vector values). EltVal = Builder.CreateBitCast(EltVal, FieldTy); } else { // Otherwise, bitcast the dest pointer (for aggregates). 
DestField = Builder.CreateBitCast(DestField, PointerType::getUnqual(EltVal->getType())); } new StoreInst(EltVal, DestField, SI); } } else { ArrayType *ATy = cast<ArrayType>(AllocaEltTy); Type *ArrayEltTy = ATy->getElementType(); uint64_t ElementOffset = DL.getTypeAllocSizeInBits(ArrayEltTy); uint64_t ElementSizeBits = DL.getTypeSizeInBits(ArrayEltTy); uint64_t Shift; if (DL.isBigEndian()) Shift = AllocaSizeBits-ElementOffset; else Shift = 0; for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { // Ignore zero sized fields like {}, they obviously contain no data. if (ElementSizeBits == 0) continue; Value *EltVal = SrcVal; if (Shift) { Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift); EltVal = Builder.CreateLShr(EltVal, ShiftVal, "sroa.store.elt"); } // Truncate down to an integer of the right size. if (ElementSizeBits != AllocaSizeBits) EltVal = Builder.CreateTrunc(EltVal, IntegerType::get(SI->getContext(), ElementSizeBits)); Value *DestField = NewElts[i]; if (EltVal->getType() == ArrayEltTy) { // Storing to an integer field of this size, just do it. } else if (ArrayEltTy->isFloatingPointTy() || ArrayEltTy->isVectorTy()) { // Bitcast to the right element type (for fp/vector values). EltVal = Builder.CreateBitCast(EltVal, ArrayEltTy); } else { // Otherwise, bitcast the dest pointer (for aggregates). DestField = Builder.CreateBitCast(DestField, PointerType::getUnqual(EltVal->getType())); } new StoreInst(EltVal, DestField, SI); if (DL.isBigEndian()) Shift -= ElementOffset; else Shift += ElementOffset; } } DeadInsts.push_back(SI); } /// RewriteLoadUserOfWholeAlloca - We found a load of the entire allocation to /// an integer. Load the individual pieces to form the aggregate value. void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI, SmallVectorImpl<AllocaInst *> &NewElts) { // Extract each element out of the NewElts according to its structure offset // and form the result value. Type *AllocaEltTy = AI->getAllocatedType(); const DataLayout &DL = LI->getModule()->getDataLayout(); uint64_t AllocaSizeBits = DL.getTypeAllocSizeInBits(AllocaEltTy); DEBUG(dbgs() << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << '\n' << *LI << '\n'); // There are two forms here: AI could be an array or struct. Both cases // have different ways to compute the element offset. const StructLayout *Layout = nullptr; uint64_t ArrayEltBitOffset = 0; if (StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) { Layout = DL.getStructLayout(EltSTy); } else { Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType(); ArrayEltBitOffset = DL.getTypeAllocSizeInBits(ArrayEltTy); } Value *ResultVal = Constant::getNullValue(IntegerType::get(LI->getContext(), AllocaSizeBits)); for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { // Load the value from the alloca. If the NewElt is an aggregate, cast // the pointer to an integer of the same size before doing the load. Value *SrcField = NewElts[i]; Type *FieldTy = cast<PointerType>(SrcField->getType())->getElementType(); uint64_t FieldSizeBits = DL.getTypeSizeInBits(FieldTy); // Ignore zero sized fields like {}, they obviously contain no data. 
if (FieldSizeBits == 0) continue; IntegerType *FieldIntTy = IntegerType::get(LI->getContext(), FieldSizeBits); if (!FieldTy->isIntegerTy() && !FieldTy->isFloatingPointTy() && !FieldTy->isVectorTy()) SrcField = new BitCastInst(SrcField, PointerType::getUnqual(FieldIntTy), "", LI); SrcField = new LoadInst(SrcField, "sroa.load.elt", LI); // If SrcField is a fp or vector of the right size but that isn't an // integer type, bitcast to an integer so we can shift it. if (SrcField->getType() != FieldIntTy) SrcField = new BitCastInst(SrcField, FieldIntTy, "", LI); // Zero extend the field to be the same size as the final alloca so that // we can shift and insert it. if (SrcField->getType() != ResultVal->getType()) SrcField = new ZExtInst(SrcField, ResultVal->getType(), "", LI); // Determine the number of bits to shift SrcField. uint64_t Shift; if (Layout) // Struct case. Shift = Layout->getElementOffsetInBits(i); else // Array case. Shift = i*ArrayEltBitOffset; if (DL.isBigEndian()) Shift = AllocaSizeBits-Shift-FieldIntTy->getBitWidth(); if (Shift) { Value *ShiftVal = ConstantInt::get(SrcField->getType(), Shift); SrcField = BinaryOperator::CreateShl(SrcField, ShiftVal, "", LI); } // Don't create an 'or x, 0' on the first iteration. if (!isa<Constant>(ResultVal) || !cast<Constant>(ResultVal)->isNullValue()) ResultVal = BinaryOperator::CreateOr(SrcField, ResultVal, "", LI); else ResultVal = SrcField; } // Handle tail padding by truncating the result if (DL.getTypeSizeInBits(LI->getType()) != AllocaSizeBits) ResultVal = new TruncInst(ResultVal, LI->getType(), "", LI); LI->replaceAllUsesWith(ResultVal); DeadInsts.push_back(LI); } /// HasPadding - Return true if the specified type has any structure or /// alignment padding in between the elements that would be split apart /// by SROA; return false otherwise. static bool HasPadding(Type *Ty, const DataLayout &DL) { if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { Ty = ATy->getElementType(); return DL.getTypeSizeInBits(Ty) != DL.getTypeAllocSizeInBits(Ty); } // SROA currently handles only Arrays and Structs. StructType *STy = cast<StructType>(Ty); const StructLayout *SL = DL.getStructLayout(STy); unsigned PrevFieldBitOffset = 0; for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { unsigned FieldBitOffset = SL->getElementOffsetInBits(i); // Check to see if there is any padding between this element and the // previous one. if (i) { unsigned PrevFieldEnd = PrevFieldBitOffset+DL.getTypeSizeInBits(STy->getElementType(i-1)); if (PrevFieldEnd < FieldBitOffset) return true; } PrevFieldBitOffset = FieldBitOffset; } // Check for tail padding. if (unsigned EltCount = STy->getNumElements()) { unsigned PrevFieldEnd = PrevFieldBitOffset + DL.getTypeSizeInBits(STy->getElementType(EltCount-1)); if (PrevFieldEnd < SL->getSizeInBits()) return true; } return false; } /// isSafeStructAllocaToScalarRepl - Check to see if the specified allocation of /// an aggregate can be broken down into elements. Return 0 if not, 3 if safe, /// or 1 if safe after canonicalization has been performed. bool SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) { // Loop over the use list of the alloca. We can only transform it if all of // the users are safe to transform. AllocaInfo Info(AI); isSafeForScalarRepl(AI, 0, Info); if (Info.isUnsafe) { DEBUG(dbgs() << "Cannot transform: " << *AI << '\n'); return false; } const DataLayout &DL = AI->getModule()->getDataLayout(); // Okay, we know all the users are promotable. 
If the aggregate is a memcpy // source and destination, we have to be careful. In particular, the memcpy // could be moving around elements that live in structure padding of the LLVM // types, but may actually be used. In these cases, we refuse to promote the // struct. if (Info.isMemCpySrc && Info.isMemCpyDst && HasPadding(AI->getAllocatedType(), DL)) return false; // If the alloca never has an access to just *part* of it, but is accessed // via loads and stores, then we should use ConvertToScalarInfo to promote // the alloca instead of promoting each piece at a time and inserting fission // and fusion code. if (!Info.hasSubelementAccess && Info.hasALoadOrStore) { // If the struct/array just has one element, use basic SRoA. if (StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) { if (ST->getNumElements() > 1) return false; } else { if (cast<ArrayType>(AI->getAllocatedType())->getNumElements() > 1) return false; } } return true; }
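// End-to-end illustration (a sketch with hypothetical IR, not part of the
// original pass source): given
//   %p  = alloca { i32, i32 }
//   %f1 = getelementptr { i32, i32 }* %p, i32 0, i32 1
//   store i32 2, i32* %f1
//   %v  = load i32* %f1
// scalar replacement splits %p into element allocas %p.0 and %p.1, rewrites
// the GEP users to address them directly, and the promotion step above then
// turns %v into the SSA value 2 with no memory traffic left.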
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/IndVarSimplify.cpp
//===- IndVarSimplify.cpp - Induction Variable Elimination ----------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This transformation analyzes and transforms the induction variables (and // computations derived from them) into simpler forms suitable for subsequent // analysis and transformation. // // If the trip count of a loop is computable, this pass also makes the following // changes: // 1. The exit condition for the loop is canonicalized to compare the // induction value against the exit value. This turns loops like: // 'for (i = 7; i*i < 1000; ++i)' into 'for (i = 0; i != 25; ++i)' // 2. Any use outside of the loop of an expression derived from the indvar // is changed to compute the derived value outside of the loop, eliminating // the dependence on the exit value of the induction variable. If the only // purpose of the loop is to compute the exit value of some derived // expression, this transformation will make the loop dead. // //===----------------------------------------------------------------------===// #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/LoopPass.h" #include "llvm/Analysis/ScalarEvolutionExpander.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/CFG.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/IR/PatternMatch.h" #include "llvm/IR/Type.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/SimplifyIndVar.h" using namespace llvm; #define DEBUG_TYPE "indvars" STATISTIC(NumWidened , "Number of indvars widened"); STATISTIC(NumReplaced , "Number of exit values replaced"); STATISTIC(NumLFTR , "Number of loop exit tests replaced"); STATISTIC(NumElimExt , "Number of IV sign/zero extends eliminated"); STATISTIC(NumElimIV , "Number of congruent IVs eliminated"); #if 0 // HLSL Change Starts - option pending // Trip count verification can be enabled by default under NDEBUG if we // implement a strong expression equivalence checker in SCEV. Until then, we // use the verify-indvars flag, which may assert in some cases. 
static cl::opt<bool> VerifyIndvars( "verify-indvars", cl::Hidden, cl::desc("Verify the ScalarEvolution result after running indvars")); static cl::opt<bool> ReduceLiveIVs("liv-reduce", cl::Hidden, cl::desc("Reduce live induction variables.")); enum ReplaceExitVal { NeverRepl, OnlyCheapRepl, AlwaysRepl }; static cl::opt<ReplaceExitVal> ReplaceExitValue( "replexitval", cl::Hidden, cl::init(OnlyCheapRepl), cl::desc("Choose the strategy to replace exit value in IndVarSimplify"), cl::values(clEnumValN(NeverRepl, "never", "never replace exit value"), clEnumValN(OnlyCheapRepl, "cheap", "only replace exit value when the cost is cheap"), clEnumValN(AlwaysRepl, "always", "always replace exit value whenever possible"), clEnumValEnd)); #else static const bool ReduceLiveIVs = false; enum ReplaceExitVal { NeverRepl, OnlyCheapRepl, AlwaysRepl }; static const ReplaceExitVal ReplaceExitValue = OnlyCheapRepl; #endif // HLSL Change Ends - option pending namespace { struct RewritePhi; } namespace { class IndVarSimplify : public LoopPass { LoopInfo *LI; ScalarEvolution *SE; DominatorTree *DT; TargetLibraryInfo *TLI; const TargetTransformInfo *TTI; SmallVector<WeakTrackingVH, 16> DeadInsts; bool Changed; public: static char ID; // Pass identification, replacement for typeid IndVarSimplify() : LoopPass(ID), LI(nullptr), SE(nullptr), DT(nullptr), Changed(false) { initializeIndVarSimplifyPass(*PassRegistry::getPassRegistry()); } bool runOnLoop(Loop *L, LPPassManager &LPM) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<DominatorTreeWrapperPass>(); AU.addRequired<LoopInfoWrapperPass>(); AU.addRequired<ScalarEvolution>(); AU.addRequiredID(LoopSimplifyID); AU.addRequiredID(LCSSAID); AU.addPreserved<ScalarEvolution>(); AU.addPreservedID(LoopSimplifyID); AU.addPreservedID(LCSSAID); AU.setPreservesCFG(); } private: void releaseMemory() override { DeadInsts.clear(); } bool isValidRewrite(Value *FromVal, Value *ToVal); void HandleFloatingPointIV(Loop *L, PHINode *PH); void RewriteNonIntegerIVs(Loop *L); void SimplifyAndExtend(Loop *L, SCEVExpander &Rewriter, LPPassManager &LPM); bool CanLoopBeDeleted(Loop *L, SmallVector<RewritePhi, 8> &RewritePhiSet); void RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter); Value *LinearFunctionTestReplace(Loop *L, const SCEV *BackedgeTakenCount, PHINode *IndVar, SCEVExpander &Rewriter); void SinkUnusedInvariants(Loop *L); Value *ExpandSCEVIfNeeded(SCEVExpander &Rewriter, const SCEV *S, Loop *L, Instruction *InsertPt, Type *Ty, bool &IsHighCostExpansion); }; } char IndVarSimplify::ID = 0; INITIALIZE_PASS_BEGIN(IndVarSimplify, "indvars", "Induction Variable Simplification", false, false) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) INITIALIZE_PASS_DEPENDENCY(LoopSimplify) INITIALIZE_PASS_DEPENDENCY(LCSSA) INITIALIZE_PASS_END(IndVarSimplify, "indvars", "Induction Variable Simplification", false, false) Pass *llvm::createIndVarSimplifyPass() { return new IndVarSimplify(); } /// isValidRewrite - Return true if the SCEV expansion generated by the /// rewriter can replace the original value. SCEV guarantees that it /// produces the same value, but the way it is produced may be illegal IR. /// Ideally, this function will only be called for verification. bool IndVarSimplify::isValidRewrite(Value *FromVal, Value *ToVal) { // If an SCEV expression subsumed multiple pointers, its expansion could // reassociate the GEP changing the base pointer. 
This is illegal because the // final address produced by a GEP chain must be inbounds relative to its // underlying object. Otherwise basic alias analysis, among other things, // could fail in a dangerous way. Ultimately, SCEV will be improved to avoid // producing an expression involving multiple pointers. Until then, we must // bail out here. // // Retrieve the pointer operand of the GEP. Don't use GetUnderlyingObject // because it understands lcssa phis while SCEV does not. Value *FromPtr = FromVal; Value *ToPtr = ToVal; if (GEPOperator *GEP = dyn_cast<GEPOperator>(FromVal)) { FromPtr = GEP->getPointerOperand(); } if (GEPOperator *GEP = dyn_cast<GEPOperator>(ToVal)) { ToPtr = GEP->getPointerOperand(); } if (FromPtr != FromVal || ToPtr != ToVal) { // Quickly check the common case if (FromPtr == ToPtr) return true; // SCEV may have rewritten an expression that produces the GEP's pointer // operand. That's ok as long as the pointer operand has the same base // pointer. Unlike GetUnderlyingObject(), getPointerBase() will find the // base of a recurrence. This handles the case in which SCEV expansion // converts a pointer type recurrence into a nonrecurrent pointer base // indexed by an integer recurrence. // If the GEP base pointer is a vector of pointers, abort. if (!FromPtr->getType()->isPointerTy() || !ToPtr->getType()->isPointerTy()) return false; const SCEV *FromBase = SE->getPointerBase(SE->getSCEV(FromPtr)); const SCEV *ToBase = SE->getPointerBase(SE->getSCEV(ToPtr)); if (FromBase == ToBase) return true; DEBUG(dbgs() << "INDVARS: GEP rewrite bail out " << *FromBase << " != " << *ToBase << "\n"); return false; } return true; } /// Determine the insertion point for this user. By default, insert immediately /// before the user. SCEVExpander or LICM will hoist loop invariants out of the /// loop. For PHI nodes, there may be multiple uses, so compute the nearest /// common dominator for the incoming blocks. static Instruction *getInsertPointForUses(Instruction *User, Value *Def, DominatorTree *DT) { PHINode *PHI = dyn_cast<PHINode>(User); if (!PHI) return User; Instruction *InsertPt = nullptr; for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i) { if (PHI->getIncomingValue(i) != Def) continue; BasicBlock *InsertBB = PHI->getIncomingBlock(i); if (!InsertPt) { InsertPt = InsertBB->getTerminator(); continue; } InsertBB = DT->findNearestCommonDominator(InsertPt->getParent(), InsertBB); InsertPt = InsertBB->getTerminator(); } assert(InsertPt && "Missing phi operand"); assert((!isa<Instruction>(Def) || DT->dominates(cast<Instruction>(Def), InsertPt)) && "def does not dominate all uses"); return InsertPt; } //===----------------------------------------------------------------------===// // RewriteNonIntegerIVs and helpers. Prefer integer IVs. //===----------------------------------------------------------------------===// /// ConvertToSInt - Convert APF to an integer, if possible. static bool ConvertToSInt(const APFloat &APF, int64_t &IntVal) { bool isExact = false; // See if we can convert this to an int64_t uint64_t UIntVal; if (APF.convertToInteger(&UIntVal, 64, true, APFloat::rmTowardZero, &isExact) != APFloat::opOK || !isExact) return false; IntVal = UIntVal; return true; } /// HandleFloatingPointIV - If the loop has floating induction variable /// then insert corresponding integer induction variable if possible. 
/// For example, /// for(double i = 0; i < 10000; ++i) /// bar(i) /// is converted into /// for(int i = 0; i < 10000; ++i) /// bar((double)i); /// void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) { unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0)); unsigned BackEdge = IncomingEdge^1; // Check incoming value. ConstantFP *InitValueVal = dyn_cast<ConstantFP>(PN->getIncomingValue(IncomingEdge)); int64_t InitValue; if (!InitValueVal || !ConvertToSInt(InitValueVal->getValueAPF(), InitValue)) return; // Check IV increment. Reject this PN if increment operation is not // an add or increment value can not be represented by an integer. BinaryOperator *Incr = dyn_cast<BinaryOperator>(PN->getIncomingValue(BackEdge)); if (Incr == nullptr || Incr->getOpcode() != Instruction::FAdd) return; // If this is not an add of the PHI with a constantfp, or if the constant fp // is not an integer, bail out. ConstantFP *IncValueVal = dyn_cast<ConstantFP>(Incr->getOperand(1)); int64_t IncValue; if (IncValueVal == nullptr || Incr->getOperand(0) != PN || !ConvertToSInt(IncValueVal->getValueAPF(), IncValue)) return; // Check Incr uses. One user is PN and the other user is an exit condition // used by the conditional terminator. Value::user_iterator IncrUse = Incr->user_begin(); Instruction *U1 = cast<Instruction>(*IncrUse++); if (IncrUse == Incr->user_end()) return; Instruction *U2 = cast<Instruction>(*IncrUse++); if (IncrUse != Incr->user_end()) return; // Find exit condition, which is an fcmp. If it doesn't exist, or if it isn't // only used by a branch, we can't transform it. FCmpInst *Compare = dyn_cast<FCmpInst>(U1); if (!Compare) Compare = dyn_cast<FCmpInst>(U2); if (!Compare || !Compare->hasOneUse() || !isa<BranchInst>(Compare->user_back())) return; BranchInst *TheBr = cast<BranchInst>(Compare->user_back()); // We need to verify that the branch actually controls the iteration count // of the loop. If not, the new IV can overflow and no one will notice. // The branch block must be in the loop and one of the successors must be out // of the loop. assert(TheBr->isConditional() && "Can't use fcmp if not conditional"); if (!L->contains(TheBr->getParent()) || (L->contains(TheBr->getSuccessor(0)) && L->contains(TheBr->getSuccessor(1)))) return; // If it isn't a comparison with an integer-as-fp (the exit value), we can't // transform it. ConstantFP *ExitValueVal = dyn_cast<ConstantFP>(Compare->getOperand(1)); int64_t ExitValue; if (ExitValueVal == nullptr || !ConvertToSInt(ExitValueVal->getValueAPF(), ExitValue)) return; // Find new predicate for integer comparison. CmpInst::Predicate NewPred = CmpInst::BAD_ICMP_PREDICATE; switch (Compare->getPredicate()) { default: return; // Unknown comparison. case CmpInst::FCMP_OEQ: case CmpInst::FCMP_UEQ: NewPred = CmpInst::ICMP_EQ; break; case CmpInst::FCMP_ONE: case CmpInst::FCMP_UNE: NewPred = CmpInst::ICMP_NE; break; case CmpInst::FCMP_OGT: case CmpInst::FCMP_UGT: NewPred = CmpInst::ICMP_SGT; break; case CmpInst::FCMP_OGE: case CmpInst::FCMP_UGE: NewPred = CmpInst::ICMP_SGE; break; case CmpInst::FCMP_OLT: case CmpInst::FCMP_ULT: NewPred = CmpInst::ICMP_SLT; break; case CmpInst::FCMP_OLE: case CmpInst::FCMP_ULE: NewPred = CmpInst::ICMP_SLE; break; } // We convert the floating point induction variable to a signed i32 value if // we can. This is only safe if the comparison will not overflow in a way // that won't be trapped by the integer equivalent operations. Check for this // now. // TODO: We could use i64 if it is native and the range requires it. 
// The start/stride/exit values must all fit in signed i32. if (!isInt<32>(InitValue) || !isInt<32>(IncValue) || !isInt<32>(ExitValue)) return; // If not actually striding (add x, 0.0), avoid touching the code. if (IncValue == 0) return; // Positive and negative strides have different safety conditions. if (IncValue > 0) { // If we have a positive stride, we require the init to be less than the // exit value. if (InitValue >= ExitValue) return; uint32_t Range = uint32_t(ExitValue-InitValue); // Check for infinite loop, either: // while (i <= Exit) or until (i > Exit) if (NewPred == CmpInst::ICMP_SLE || NewPred == CmpInst::ICMP_SGT) { if (++Range == 0) return; // Range overflows. } unsigned Leftover = Range % uint32_t(IncValue); // If this is an equality comparison, we require that the strided value // exactly land on the exit value, otherwise the IV condition will wrap // around and do things the fp IV wouldn't. if ((NewPred == CmpInst::ICMP_EQ || NewPred == CmpInst::ICMP_NE) && Leftover != 0) return; // If the stride would wrap around the i32 before exiting, we can't // transform the IV. if (Leftover != 0 && int32_t(ExitValue+IncValue) < ExitValue) return; } else { // If we have a negative stride, we require the init to be greater than the // exit value. if (InitValue <= ExitValue) return; uint32_t Range = uint32_t(InitValue-ExitValue); // Check for infinite loop, either: // while (i >= Exit) or until (i < Exit) if (NewPred == CmpInst::ICMP_SGE || NewPred == CmpInst::ICMP_SLT) { if (++Range == 0) return; // Range overflows. } unsigned Leftover = Range % uint32_t(-IncValue); // If this is an equality comparison, we require that the strided value // exactly land on the exit value, otherwise the IV condition will wrap // around and do things the fp IV wouldn't. if ((NewPred == CmpInst::ICMP_EQ || NewPred == CmpInst::ICMP_NE) && Leftover != 0) return; // If the stride would wrap around the i32 before exiting, we can't // transform the IV. if (Leftover != 0 && int32_t(ExitValue+IncValue) > ExitValue) return; } IntegerType *Int32Ty = Type::getInt32Ty(PN->getContext()); // Insert new integer induction variable. PHINode *NewPHI = PHINode::Create(Int32Ty, 2, PN->getName()+".int", PN); NewPHI->addIncoming(ConstantInt::get(Int32Ty, InitValue), PN->getIncomingBlock(IncomingEdge)); Value *NewAdd = BinaryOperator::CreateAdd(NewPHI, ConstantInt::get(Int32Ty, IncValue), Incr->getName()+".int", Incr); NewPHI->addIncoming(NewAdd, PN->getIncomingBlock(BackEdge)); ICmpInst *NewCompare = new ICmpInst(TheBr, NewPred, NewAdd, ConstantInt::get(Int32Ty, ExitValue), Compare->getName()); // In the following deletions, PN may become dead and may be deleted. // Use a WeakTrackingVH to observe whether this happens. WeakTrackingVH WeakPH = PN; // Delete the old floating point exit comparison. The branch starts using the // new comparison. NewCompare->takeName(Compare); Compare->replaceAllUsesWith(NewCompare); RecursivelyDeleteTriviallyDeadInstructions(Compare, TLI); // Delete the old floating point increment. Incr->replaceAllUsesWith(UndefValue::get(Incr->getType())); RecursivelyDeleteTriviallyDeadInstructions(Incr, TLI); // If the FP induction variable still has uses, this is because something else // in the loop uses its value. In order to canonicalize the induction // variable, we chose to eliminate the IV and rewrite it in terms of an // int->fp cast. // // We give preference to sitofp over uitofp because it is faster on most // platforms. 
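  // Illustrative sketch (hypothetical source loop): after this rewrite a loop
  // such as
  //   for (double d = 1.0; d < 100.0; d += 1.0) use(d);
  // iterates on a new i32 induction variable with an integer exit compare, and
  // any remaining uses of the old FP phi are fed by a single conversion:
  //   %indvar.conv = sitofp i32 %d.int to double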
if (WeakPH) { Value *Conv = new SIToFPInst(NewPHI, PN->getType(), "indvar.conv", PN->getParent()->getFirstInsertionPt()); PN->replaceAllUsesWith(Conv); RecursivelyDeleteTriviallyDeadInstructions(PN, TLI); } Changed = true; } void IndVarSimplify::RewriteNonIntegerIVs(Loop *L) { // First step. Check to see if there are any floating-point recurrences. // If there are, change them into integer recurrences, permitting analysis by // the SCEV routines. // BasicBlock *Header = L->getHeader(); SmallVector<WeakTrackingVH, 8> PHIs; for (BasicBlock::iterator I = Header->begin(); PHINode *PN = dyn_cast<PHINode>(I); ++I) PHIs.push_back(PN); for (unsigned i = 0, e = PHIs.size(); i != e; ++i) if (PHINode *PN = dyn_cast_or_null<PHINode>(&*PHIs[i])) HandleFloatingPointIV(L, PN); // If the loop previously had floating-point IV, ScalarEvolution // may not have been able to compute a trip count. Now that we've done some // re-writing, the trip count may be computable. if (Changed) SE->forgetLoop(L); } namespace { // Collect information about PHI nodes which can be transformed in // RewriteLoopExitValues. struct RewritePhi { PHINode *PN; unsigned Ith; // Ith incoming value. Value *Val; // Exit value after expansion. bool HighCost; // High Cost when expansion. bool SafePhi; // LCSSASafePhiForRAUW. RewritePhi(PHINode *P, unsigned I, Value *V, bool H, bool S) : PN(P), Ith(I), Val(V), HighCost(H), SafePhi(S) {} }; } Value *IndVarSimplify::ExpandSCEVIfNeeded(SCEVExpander &Rewriter, const SCEV *S, Loop *L, Instruction *InsertPt, Type *ResultTy, bool &IsHighCostExpansion) { using namespace llvm::PatternMatch; if (!Rewriter.isHighCostExpansion(S, L)) { IsHighCostExpansion = false; return Rewriter.expandCodeFor(S, ResultTy, InsertPt); } // Before expanding S into an expensive LLVM expression, see if we can use an // already existing value as the expansion for S. There is potential to make // this significantly smarter, but this simple heuristic already gets some // interesting cases. SmallVector<BasicBlock *, 4> Latches; L->getLoopLatches(Latches); for (BasicBlock *BB : Latches) { ICmpInst::Predicate Pred; Instruction *LHS, *RHS; BasicBlock *TrueBB, *FalseBB; if (!match(BB->getTerminator(), m_Br(m_ICmp(Pred, m_Instruction(LHS), m_Instruction(RHS)), TrueBB, FalseBB))) continue; if (SE->getSCEV(LHS) == S && DT->dominates(LHS, InsertPt)) { IsHighCostExpansion = false; return LHS; } if (SE->getSCEV(RHS) == S && DT->dominates(RHS, InsertPt)) { IsHighCostExpansion = false; return RHS; } } // We didn't find anything, fall back to using SCEVExpander. assert(Rewriter.isHighCostExpansion(S, L) && "this should not have changed!"); IsHighCostExpansion = true; return Rewriter.expandCodeFor(S, ResultTy, InsertPt); } //===----------------------------------------------------------------------===// // RewriteLoopExitValues - Optimize IV users outside the loop. // As a side effect, reduces the amount of IV processing within the loop. //===----------------------------------------------------------------------===// /// RewriteLoopExitValues - Check to see if this loop has a computable /// loop-invariant execution count. If so, this means that we can compute the /// final value of any expressions that are recurrent in the loop, and /// substitute the exit values from the loop into any instructions outside of /// the loop that use the final values of the current expressions. 
/// /// This is mostly redundant with the regular IndVarSimplify activities that /// happen later, except that it's more powerful in some cases, because it's /// able to brute-force evaluate arbitrary instructions as long as they have /// constant operands at the beginning of the loop. void IndVarSimplify::RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) { // Verify the input to the pass in already in LCSSA form. assert(L->isLCSSAForm(*DT)); SmallVector<BasicBlock*, 8> ExitBlocks; L->getUniqueExitBlocks(ExitBlocks); SmallVector<RewritePhi, 8> RewritePhiSet; // Find all values that are computed inside the loop, but used outside of it. // Because of LCSSA, these values will only occur in LCSSA PHI Nodes. Scan // the exit blocks of the loop to find them. for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) { BasicBlock *ExitBB = ExitBlocks[i]; // If there are no PHI nodes in this exit block, then no values defined // inside the loop are used on this path, skip it. PHINode *PN = dyn_cast<PHINode>(ExitBB->begin()); if (!PN) continue; unsigned NumPreds = PN->getNumIncomingValues(); // We would like to be able to RAUW single-incoming value PHI nodes. We // have to be certain this is safe even when this is an LCSSA PHI node. // While the computed exit value is no longer varying in *this* loop, the // exit block may be an exit block for an outer containing loop as well, // the exit value may be varying in the outer loop, and thus it may still // require an LCSSA PHI node. The safe case is when this is // single-predecessor PHI node (LCSSA) and the exit block containing it is // part of the enclosing loop, or this is the outer most loop of the nest. // In either case the exit value could (at most) be varying in the same // loop body as the phi node itself. Thus if it is in turn used outside of // an enclosing loop it will only be via a separate LCSSA node. bool LCSSASafePhiForRAUW = NumPreds == 1 && (!L->getParentLoop() || L->getParentLoop() == LI->getLoopFor(ExitBB)); // Iterate over all of the PHI nodes. BasicBlock::iterator BBI = ExitBB->begin(); while ((PN = dyn_cast<PHINode>(BBI++))) { if (PN->use_empty()) continue; // dead use, don't replace it // SCEV only supports integer expressions for now. if (!PN->getType()->isIntegerTy() && !PN->getType()->isPointerTy()) continue; // It's necessary to tell ScalarEvolution about this explicitly so that // it can walk the def-use list and forget all SCEVs, as it may not be // watching the PHI itself. Once the new exit value is in place, there // may not be a def-use connection between the loop and every instruction // which got a SCEVAddRecExpr for that loop. SE->forgetValue(PN); // Iterate over all of the values in all the PHI nodes. for (unsigned i = 0; i != NumPreds; ++i) { // If the value being merged in is not integer or is not defined // in the loop, skip it. Value *InVal = PN->getIncomingValue(i); if (!isa<Instruction>(InVal)) continue; // If this pred is for a subloop, not L itself, skip it. if (LI->getLoopFor(PN->getIncomingBlock(i)) != L) continue; // The Block is in a subloop, skip it. // Check that InVal is defined in the loop. Instruction *Inst = cast<Instruction>(InVal); if (!L->contains(Inst)) continue; // Okay, this instruction has a user outside of the current loop // and varies predictably *inside* the loop. Evaluate the value it // contains when the loop exits, if possible. 
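        // For example, given
        //   for (i = 0; i != n; ++i) sum += 2;
        // a use of 'sum' after the loop can be rewritten to the
        // loop-invariant expression 2*n, which SCEV computes by evaluating
        // the recurrence at the scope of the parent loop.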
const SCEV *ExitValue = SE->getSCEVAtScope(Inst, L->getParentLoop()); if (!SE->isLoopInvariant(ExitValue, L) || !isSafeToExpand(ExitValue, *SE)) continue; // Computing the value outside of the loop brings no benefit if : // - it is definitely used inside the loop in a way which can not be // optimized away. // - no use outside of the loop can take advantage of hoisting the // computation out of the loop if (ExitValue->getSCEVType()>=scMulExpr) { unsigned NumHardInternalUses = 0; unsigned NumSoftExternalUses = 0; unsigned NumUses = 0; for (auto IB = Inst->user_begin(), IE = Inst->user_end(); IB != IE && NumUses <= 6; ++IB) { Instruction *UseInstr = cast<Instruction>(*IB); unsigned Opc = UseInstr->getOpcode(); NumUses++; if (L->contains(UseInstr)) { if (Opc == Instruction::Call || Opc == Instruction::Ret) NumHardInternalUses++; } else { if (Opc == Instruction::PHI) { // Do not count the Phi as a use. LCSSA may have inserted // plenty of trivial ones. NumUses--; for (auto PB = UseInstr->user_begin(), PE = UseInstr->user_end(); PB != PE && NumUses <= 6; ++PB, ++NumUses) { unsigned PhiOpc = cast<Instruction>(*PB)->getOpcode(); if (PhiOpc != Instruction::Call && PhiOpc != Instruction::Ret) NumSoftExternalUses++; } continue; } if (Opc != Instruction::Call && Opc != Instruction::Ret) NumSoftExternalUses++; } } if (NumUses <= 6 && NumHardInternalUses && !NumSoftExternalUses) continue; } bool HighCost = false; Value *ExitVal = ExpandSCEVIfNeeded(Rewriter, ExitValue, L, Inst, PN->getType(), HighCost); DEBUG(dbgs() << "INDVARS: RLEV: AfterLoopVal = " << *ExitVal << '\n' << " LoopVal = " << *Inst << "\n"); if (!isValidRewrite(Inst, ExitVal)) { DeadInsts.push_back(ExitVal); continue; } // HLSL Change Begin // Avoid breaking LCSSA: Don't replace the PHI if its replacement // is defined inside the loop. if (auto *ExitValInst = dyn_cast<Instruction>(ExitVal)) { if (L->contains(ExitValInst)) { continue; } } // HLSL Change End // Collect all the candidate PHINodes to be rewritten. RewritePhiSet.push_back( RewritePhi(PN, i, ExitVal, HighCost, LCSSASafePhiForRAUW)); } } } bool LoopCanBeDel = CanLoopBeDeleted(L, RewritePhiSet); // Transformation. for (const RewritePhi &Phi : RewritePhiSet) { PHINode *PN = Phi.PN; Value *ExitVal = Phi.Val; // Only do the rewrite when the ExitValue can be expanded cheaply. // If LoopCanBeDel is true, rewrite exit value aggressively. if (ReplaceExitValue == OnlyCheapRepl && !LoopCanBeDel && Phi.HighCost) { DeadInsts.push_back(ExitVal); continue; } Changed = true; ++NumReplaced; Instruction *Inst = cast<Instruction>(PN->getIncomingValue(Phi.Ith)); PN->setIncomingValue(Phi.Ith, ExitVal); // If this instruction is dead now, delete it. Don't do it now to avoid // invalidating iterators. if (isInstructionTriviallyDead(Inst, TLI)) DeadInsts.push_back(Inst); // If we determined that this PHI is safe to replace even if an LCSSA // PHI, do so. if (Phi.SafePhi) { PN->replaceAllUsesWith(ExitVal); PN->eraseFromParent(); } } // The insertion point instruction may have been deleted; clear it out // so that the rewriter doesn't trip over it later. Rewriter.clearInsertPoint(); } /// CanLoopBeDeleted - Check whether it is possible to delete the loop after /// rewriting exit value. If it is possible, ignore ReplaceExitValue and /// do rewriting aggressively. bool IndVarSimplify::CanLoopBeDeleted( Loop *L, SmallVector<RewritePhi, 8> &RewritePhiSet) { BasicBlock *Preheader = L->getLoopPreheader(); // If there is no preheader, the loop will not be deleted. 
if (!Preheader) return false; // In LoopDeletion pass Loop can be deleted when ExitingBlocks.size() > 1. // We obviate multiple ExitingBlocks case for simplicity. // TODO: If we see testcase with multiple ExitingBlocks can be deleted // after exit value rewriting, we can enhance the logic here. SmallVector<BasicBlock *, 4> ExitingBlocks; L->getExitingBlocks(ExitingBlocks); SmallVector<BasicBlock *, 8> ExitBlocks; L->getUniqueExitBlocks(ExitBlocks); if (ExitBlocks.size() > 1 || ExitingBlocks.size() > 1) return false; BasicBlock *ExitBlock = ExitBlocks[0]; BasicBlock::iterator BI = ExitBlock->begin(); while (PHINode *P = dyn_cast<PHINode>(BI)) { Value *Incoming = P->getIncomingValueForBlock(ExitingBlocks[0]); // If the Incoming value of P is found in RewritePhiSet, we know it // could be rewritten to use a loop invariant value in transformation // phase later. Skip it in the loop invariant check below. bool found = false; for (const RewritePhi &Phi : RewritePhiSet) { unsigned i = Phi.Ith; if (Phi.PN == P && (Phi.PN)->getIncomingValue(i) == Incoming) { found = true; break; } } Instruction *I; if (!found && (I = dyn_cast<Instruction>(Incoming))) if (!L->hasLoopInvariantOperands(I)) return false; ++BI; } for (Loop::block_iterator LI = L->block_begin(), LE = L->block_end(); LI != LE; ++LI) { for (BasicBlock::iterator BI = (*LI)->begin(), BE = (*LI)->end(); BI != BE; ++BI) { if (BI->mayHaveSideEffects()) return false; } } return true; } //===----------------------------------------------------------------------===// // IV Widening - Extend the width of an IV to cover its widest uses. //===----------------------------------------------------------------------===// namespace { // Collect information about induction variables that are used by sign/zero // extend operations. This information is recorded by CollectExtend and // provides the input to WidenIV. struct WideIVInfo { PHINode *NarrowIV; Type *WidestNativeType; // Widest integer type created [sz]ext bool IsSigned; // Was a sext user seen before a zext? WideIVInfo() : NarrowIV(nullptr), WidestNativeType(nullptr), IsSigned(false) {} }; } /// visitCast - Update information about the induction variable that is /// extended by this sign or zero extend operation. This is used to determine /// the final width of the IV before actually widening it. static void visitIVCast(CastInst *Cast, WideIVInfo &WI, ScalarEvolution *SE, const TargetTransformInfo *TTI) { bool IsSigned = Cast->getOpcode() == Instruction::SExt; if (!IsSigned && Cast->getOpcode() != Instruction::ZExt) return; Type *Ty = Cast->getType(); uint64_t Width = SE->getTypeSizeInBits(Ty); if (!Cast->getModule()->getDataLayout().isLegalInteger(Width)) return; // Cast is either an sext or zext up to this point. // We should not widen an indvar if arithmetics on the wider indvar are more // expensive than those on the narrower indvar. We check only the cost of ADD // because at least an ADD is required to increment the induction variable. We // could compute more comprehensively the cost of all instructions on the // induction variable when necessary. if (TTI && TTI->getArithmeticInstrCost(Instruction::Add, Ty) > TTI->getArithmeticInstrCost(Instruction::Add, Cast->getOperand(0)->getType())) { return; } if (!WI.WidestNativeType) { WI.WidestNativeType = SE->getEffectiveSCEVType(Ty); WI.IsSigned = IsSigned; return; } // We extend the IV to satisfy the sign of its first user, arbitrarily. 
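  // E.g. if a sext user was recorded first, a later zext of the same IV does
  // not change the chosen extension kind; it is simply ignored here.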
if (WI.IsSigned != IsSigned) return; if (Width > SE->getTypeSizeInBits(WI.WidestNativeType)) WI.WidestNativeType = SE->getEffectiveSCEVType(Ty); } namespace { /// NarrowIVDefUse - Record a link in the Narrow IV def-use chain along with the /// WideIV that computes the same value as the Narrow IV def. This avoids /// caching Use* pointers. struct NarrowIVDefUse { Instruction *NarrowDef; Instruction *NarrowUse; Instruction *WideDef; NarrowIVDefUse(): NarrowDef(nullptr), NarrowUse(nullptr), WideDef(nullptr) {} NarrowIVDefUse(Instruction *ND, Instruction *NU, Instruction *WD): NarrowDef(ND), NarrowUse(NU), WideDef(WD) {} }; /// WidenIV - The goal of this transform is to remove sign and zero extends /// without creating any new induction variables. To do this, it creates a new /// phi of the wider type and redirects all users, either removing extends or /// inserting truncs whenever we stop propagating the type. /// class WidenIV { // Parameters PHINode *OrigPhi; Type *WideType; bool IsSigned; // Context LoopInfo *LI; Loop *L; ScalarEvolution *SE; DominatorTree *DT; // Result PHINode *WidePhi; Instruction *WideInc; const SCEV *WideIncExpr; SmallVectorImpl<WeakTrackingVH> &DeadInsts; SmallPtrSet<Instruction*,16> Widened; SmallVector<NarrowIVDefUse, 8> NarrowIVUsers; public: WidenIV(const WideIVInfo &WI, LoopInfo *LInfo, ScalarEvolution *SEv, DominatorTree *DTree, SmallVectorImpl<WeakTrackingVH> &DI) : OrigPhi(WI.NarrowIV), WideType(WI.WidestNativeType), IsSigned(WI.IsSigned), LI(LInfo), L(LI->getLoopFor(OrigPhi->getParent())), SE(SEv), DT(DTree), WidePhi(nullptr), WideInc(nullptr), WideIncExpr(nullptr), DeadInsts(DI) { assert(L->getHeader() == OrigPhi->getParent() && "Phi must be an IV"); } PHINode *CreateWideIV(SCEVExpander &Rewriter); protected: Value *getExtend(Value *NarrowOper, Type *WideType, bool IsSigned, Instruction *Use); Instruction *CloneIVUser(NarrowIVDefUse DU); const SCEVAddRecExpr *GetWideRecurrence(Instruction *NarrowUse); const SCEVAddRecExpr* GetExtendedOperandRecurrence(NarrowIVDefUse DU); const SCEV *GetSCEVByOpCode(const SCEV *LHS, const SCEV *RHS, unsigned OpCode) const; Instruction *WidenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter); bool WidenLoopCompare(NarrowIVDefUse DU); void pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef); }; } // anonymous namespace /// isLoopInvariant - Perform a quick domtree based check for loop invariance /// assuming that V is used within the loop. LoopInfo::isLoopInvariant() seems /// gratuitous for this purpose. static bool isLoopInvariant(Value *V, const Loop *L, const DominatorTree *DT) { Instruction *Inst = dyn_cast<Instruction>(V); if (!Inst) return true; return DT->properlyDominates(Inst->getParent(), L->getHeader()); } Value *WidenIV::getExtend(Value *NarrowOper, Type *WideType, bool IsSigned, Instruction *Use) { // Set the debug location and conservative insertion point. IRBuilder<> Builder(Use); // Hoist the insertion point into loop preheaders as far as possible. for (const Loop *L = LI->getLoopFor(Use->getParent()); L && L->getLoopPreheader() && isLoopInvariant(NarrowOper, L, DT); L = L->getParentLoop()) Builder.SetInsertPoint(L->getLoopPreheader()->getTerminator()); return IsSigned ? Builder.CreateSExt(NarrowOper, WideType) : Builder.CreateZExt(NarrowOper, WideType); } /// CloneIVUser - Instantiate a wide operation to replace a narrow /// operation. This only needs to handle operations that can evaluation to /// SCEVAddRec. It can safely return 0 for any operation we decide not to clone. 
Instruction *WidenIV::CloneIVUser(NarrowIVDefUse DU) { unsigned Opcode = DU.NarrowUse->getOpcode(); switch (Opcode) { default: return nullptr; case Instruction::Add: case Instruction::Mul: case Instruction::UDiv: case Instruction::Sub: case Instruction::And: case Instruction::Or: case Instruction::Xor: case Instruction::Shl: case Instruction::LShr: case Instruction::AShr: DEBUG(dbgs() << "Cloning IVUser: " << *DU.NarrowUse << "\n"); // Replace NarrowDef operands with WideDef. Otherwise, we don't know // anything about the narrow operand yet so must insert a [sz]ext. It is // probably loop invariant and will be folded or hoisted. If it actually // comes from a widened IV, it should be removed during a future call to // WidenIVUse. Value *LHS = (DU.NarrowUse->getOperand(0) == DU.NarrowDef) ? DU.WideDef : getExtend(DU.NarrowUse->getOperand(0), WideType, IsSigned, DU.NarrowUse); Value *RHS = (DU.NarrowUse->getOperand(1) == DU.NarrowDef) ? DU.WideDef : getExtend(DU.NarrowUse->getOperand(1), WideType, IsSigned, DU.NarrowUse); BinaryOperator *NarrowBO = cast<BinaryOperator>(DU.NarrowUse); BinaryOperator *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS, RHS, NarrowBO->getName()); IRBuilder<> Builder(DU.NarrowUse); Builder.Insert(WideBO); if (const OverflowingBinaryOperator *OBO = dyn_cast<OverflowingBinaryOperator>(NarrowBO)) { if (OBO->hasNoUnsignedWrap()) WideBO->setHasNoUnsignedWrap(); if (OBO->hasNoSignedWrap()) WideBO->setHasNoSignedWrap(); } return WideBO; } } const SCEV *WidenIV::GetSCEVByOpCode(const SCEV *LHS, const SCEV *RHS, unsigned OpCode) const { if (OpCode == Instruction::Add) return SE->getAddExpr(LHS, RHS); if (OpCode == Instruction::Sub) return SE->getMinusSCEV(LHS, RHS); if (OpCode == Instruction::Mul) return SE->getMulExpr(LHS, RHS); llvm_unreachable("Unsupported opcode."); } /// No-wrap operations can transfer sign extension of their result to their /// operands. Generate the SCEV value for the widened operation without /// actually modifying the IR yet. If the expression after extending the /// operands is an AddRec for this loop, return it. const SCEVAddRecExpr* WidenIV::GetExtendedOperandRecurrence(NarrowIVDefUse DU) { // Handle the common case of add<nsw/nuw> const unsigned OpCode = DU.NarrowUse->getOpcode(); // Only Add/Sub/Mul instructions supported yet. if (OpCode != Instruction::Add && OpCode != Instruction::Sub && OpCode != Instruction::Mul) return nullptr; // One operand (NarrowDef) has already been extended to WideDef. Now determine // if extending the other will lead to a recurrence. const unsigned ExtendOperIdx = DU.NarrowUse->getOperand(0) == DU.NarrowDef ? 1 : 0; assert(DU.NarrowUse->getOperand(1-ExtendOperIdx) == DU.NarrowDef && "bad DU"); const SCEV *ExtendOperExpr = nullptr; const OverflowingBinaryOperator *OBO = cast<OverflowingBinaryOperator>(DU.NarrowUse); if (IsSigned && OBO->hasNoSignedWrap()) ExtendOperExpr = SE->getSignExtendExpr( SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType); else if(!IsSigned && OBO->hasNoUnsignedWrap()) ExtendOperExpr = SE->getZeroExtendExpr( SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType); else return nullptr; // When creating this SCEV expr, don't apply the current operations NSW or NUW // flags. This instruction may be guarded by control flow that the no-wrap // behavior depends on. Non-control-equivalent instructions can be mapped to // the same SCEV expression, and it would be incorrect to transfer NSW/NUW // semantics to those operations. 
const SCEV *lhs = SE->getSCEV(DU.WideDef); const SCEV *rhs = ExtendOperExpr; // Let's swap operands to the initial order for the case of non-commutative // operations, like SUB. See PR21014. if (ExtendOperIdx == 0) std::swap(lhs, rhs); const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(GetSCEVByOpCode(lhs, rhs, OpCode)); if (!AddRec || AddRec->getLoop() != L) return nullptr; return AddRec; } /// GetWideRecurrence - Is this instruction potentially interesting for further /// simplification after widening it's type? In other words, can the /// extend be safely hoisted out of the loop with SCEV reducing the value to a /// recurrence on the same loop. If so, return the sign or zero extended /// recurrence. Otherwise return NULL. const SCEVAddRecExpr *WidenIV::GetWideRecurrence(Instruction *NarrowUse) { if (!SE->isSCEVable(NarrowUse->getType())) return nullptr; const SCEV *NarrowExpr = SE->getSCEV(NarrowUse); if (SE->getTypeSizeInBits(NarrowExpr->getType()) >= SE->getTypeSizeInBits(WideType)) { // NarrowUse implicitly widens its operand. e.g. a gep with a narrow // index. So don't follow this use. return nullptr; } const SCEV *WideExpr = IsSigned ? SE->getSignExtendExpr(NarrowExpr, WideType) : SE->getZeroExtendExpr(NarrowExpr, WideType); const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(WideExpr); if (!AddRec || AddRec->getLoop() != L) return nullptr; return AddRec; } /// This IV user cannot be widen. Replace this use of the original narrow IV /// with a truncation of the new wide IV to isolate and eliminate the narrow IV. static void truncateIVUse(NarrowIVDefUse DU, DominatorTree *DT) { DEBUG(dbgs() << "INDVARS: Truncate IV " << *DU.WideDef << " for user " << *DU.NarrowUse << "\n"); IRBuilder<> Builder(getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT)); Value *Trunc = Builder.CreateTrunc(DU.WideDef, DU.NarrowDef->getType()); DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, Trunc); } /// If the narrow use is a compare instruction, then widen the compare // (and possibly the other operand). The extend operation is hoisted into the // loop preheader as far as possible. bool WidenIV::WidenLoopCompare(NarrowIVDefUse DU) { ICmpInst *Cmp = dyn_cast<ICmpInst>(DU.NarrowUse); if (!Cmp) return false; // Sign of IV user and compare must match. if (IsSigned != CmpInst::isSigned(Cmp->getPredicate())) return false; Value *Op = Cmp->getOperand(Cmp->getOperand(0) == DU.NarrowDef ? 1 : 0); unsigned CastWidth = SE->getTypeSizeInBits(Op->getType()); unsigned IVWidth = SE->getTypeSizeInBits(WideType); assert (CastWidth <= IVWidth && "Unexpected width while widening compare."); // Widen the compare instruction. IRBuilder<> Builder(getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT)); DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef); // Widen the other operand of the compare, if necessary. if (CastWidth < IVWidth) { Value *ExtOp = getExtend(Op, WideType, IsSigned, Cmp); DU.NarrowUse->replaceUsesOfWith(Op, ExtOp); } return true; } /// WidenIVUse - Determine whether an individual user of the narrow IV can be /// widened. If so, return the wide clone of the user. Instruction *WidenIV::WidenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) { // Stop traversing the def-use chain at inner-loop phis or post-loop phis. if (PHINode *UsePhi = dyn_cast<PHINode>(DU.NarrowUse)) { if (LI->getLoopFor(UsePhi->getParent()) != L) { // For LCSSA phis, sink the truncate outside the loop. // After SimplifyCFG most loop exit targets have a single predecessor. // Otherwise fall back to a truncate within the loop. 
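      // E.g. an LCSSA phi such as
      //   %lcssa = phi i32 [ %narrow, %loop ]
      // becomes a single-operand phi of the wide type, followed by a trunc
      // back to the narrow type in the exit block that takes over all of the
      // original phi's uses.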
if (UsePhi->getNumOperands() != 1) truncateIVUse(DU, DT); else { PHINode *WidePhi = PHINode::Create(DU.WideDef->getType(), 1, UsePhi->getName() + ".wide", UsePhi); WidePhi->addIncoming(DU.WideDef, UsePhi->getIncomingBlock(0)); IRBuilder<> Builder(WidePhi->getParent()->getFirstInsertionPt()); Value *Trunc = Builder.CreateTrunc(WidePhi, DU.NarrowDef->getType()); UsePhi->replaceAllUsesWith(Trunc); DeadInsts.emplace_back(UsePhi); DEBUG(dbgs() << "INDVARS: Widen lcssa phi " << *UsePhi << " to " << *WidePhi << "\n"); } return nullptr; } } // Our raison d'etre! Eliminate sign and zero extension. if (IsSigned ? isa<SExtInst>(DU.NarrowUse) : isa<ZExtInst>(DU.NarrowUse)) { Value *NewDef = DU.WideDef; if (DU.NarrowUse->getType() != WideType) { unsigned CastWidth = SE->getTypeSizeInBits(DU.NarrowUse->getType()); unsigned IVWidth = SE->getTypeSizeInBits(WideType); if (CastWidth < IVWidth) { // The cast isn't as wide as the IV, so insert a Trunc. IRBuilder<> Builder(DU.NarrowUse); NewDef = Builder.CreateTrunc(DU.WideDef, DU.NarrowUse->getType()); } else { // A wider extend was hidden behind a narrower one. This may induce // another round of IV widening in which the intermediate IV becomes // dead. It should be very rare. DEBUG(dbgs() << "INDVARS: New IV " << *WidePhi << " not wide enough to subsume " << *DU.NarrowUse << "\n"); DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef); NewDef = DU.NarrowUse; } } if (NewDef != DU.NarrowUse) { DEBUG(dbgs() << "INDVARS: eliminating " << *DU.NarrowUse << " replaced by " << *DU.WideDef << "\n"); ++NumElimExt; DU.NarrowUse->replaceAllUsesWith(NewDef); DeadInsts.emplace_back(DU.NarrowUse); } // Now that the extend is gone, we want to expose it's uses for potential // further simplification. We don't need to directly inform SimplifyIVUsers // of the new users, because their parent IV will be processed later as a // new loop phi. If we preserved IVUsers analysis, we would also want to // push the uses of WideDef here. // No further widening is needed. The deceased [sz]ext had done it for us. return nullptr; } // Does this user itself evaluate to a recurrence after widening? const SCEVAddRecExpr *WideAddRec = GetWideRecurrence(DU.NarrowUse); if (!WideAddRec) WideAddRec = GetExtendedOperandRecurrence(DU); if (!WideAddRec) { // If use is a loop condition, try to promote the condition instead of // truncating the IV first. if (WidenLoopCompare(DU)) return nullptr; // This user does not evaluate to a recurence after widening, so don't // follow it. Instead insert a Trunc to kill off the original use, // eventually isolating the original narrow IV so it can be removed. truncateIVUse(DU, DT); return nullptr; } // Assume block terminators cannot evaluate to a recurrence. We can't to // insert a Trunc after a terminator if there happens to be a critical edge. assert(DU.NarrowUse != DU.NarrowUse->getParent()->getTerminator() && "SCEV is not expected to evaluate a block terminator"); // Reuse the IV increment that SCEVExpander created as long as it dominates // NarrowUse. Instruction *WideUse = nullptr; if (WideAddRec == WideIncExpr && Rewriter.hoistIVInc(WideInc, DU.NarrowUse)) WideUse = WideInc; else { WideUse = CloneIVUser(DU); if (!WideUse) return nullptr; } // Evaluation of WideAddRec ensured that the narrow expression could be // extended outside the loop without overflow. This suggests that the wide use // evaluates to the same expression as the extended narrow use, but doesn't // absolutely guarantee it. Hence the following failsafe check. 
In rare cases // where it fails, we simply throw away the newly created wide use. if (WideAddRec != SE->getSCEV(WideUse)) { DEBUG(dbgs() << "Wide use expression mismatch: " << *WideUse << ": " << *SE->getSCEV(WideUse) << " != " << *WideAddRec << "\n"); DeadInsts.emplace_back(WideUse); return nullptr; } // Returning WideUse pushes it on the worklist. return WideUse; } /// pushNarrowIVUsers - Add eligible users of NarrowDef to NarrowIVUsers. /// void WidenIV::pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef) { for (User *U : NarrowDef->users()) { Instruction *NarrowUser = cast<Instruction>(U); // Handle data flow merges and bizarre phi cycles. if (!Widened.insert(NarrowUser).second) continue; NarrowIVUsers.push_back(NarrowIVDefUse(NarrowDef, NarrowUser, WideDef)); } } /// CreateWideIV - Process a single induction variable. First use the /// SCEVExpander to create a wide induction variable that evaluates to the same /// recurrence as the original narrow IV. Then use a worklist to forward /// traverse the narrow IV's def-use chain. After WidenIVUse has processed all /// interesting IV users, the narrow IV will be isolated for removal by /// DeleteDeadPHIs. /// /// It would be simpler to delete uses as they are processed, but we must avoid /// invalidating SCEV expressions. /// PHINode *WidenIV::CreateWideIV(SCEVExpander &Rewriter) { // Is this phi an induction variable? const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(OrigPhi)); if (!AddRec) return nullptr; // Widen the induction variable expression. const SCEV *WideIVExpr = IsSigned ? SE->getSignExtendExpr(AddRec, WideType) : SE->getZeroExtendExpr(AddRec, WideType); assert(SE->getEffectiveSCEVType(WideIVExpr->getType()) == WideType && "Expect the new IV expression to preserve its type"); // Can the IV be extended outside the loop without overflow? AddRec = dyn_cast<SCEVAddRecExpr>(WideIVExpr); if (!AddRec || AddRec->getLoop() != L) return nullptr; // An AddRec must have loop-invariant operands. Since this AddRec is // materialized by a loop header phi, the expression cannot have any post-loop // operands, so they must dominate the loop header. assert(SE->properlyDominates(AddRec->getStart(), L->getHeader()) && SE->properlyDominates(AddRec->getStepRecurrence(*SE), L->getHeader()) && "Loop header phi recurrence inputs do not dominate the loop"); // The rewriter provides a value for the desired IV expression. This may // either find an existing phi or materialize a new one. Either way, we // expect a well-formed cyclic phi-with-increments. i.e. any operand not part // of the phi-SCC dominates the loop entry. Instruction *InsertPt = L->getHeader()->begin(); WidePhi = cast<PHINode>(Rewriter.expandCodeFor(AddRec, WideType, InsertPt)); // Remembering the WideIV increment generated by SCEVExpander allows // WidenIVUse to reuse it when widening the narrow IV's increment. We don't // employ a general reuse mechanism because the call above is the only call to // SCEVExpander. Henceforth, we produce 1-to-1 narrow to wide uses. if (BasicBlock *LatchBlock = L->getLoopLatch()) { WideInc = cast<Instruction>(WidePhi->getIncomingValueForBlock(LatchBlock)); WideIncExpr = SE->getSCEV(WideInc); } DEBUG(dbgs() << "Wide IV: " << *WidePhi << "\n"); ++NumWidened; // Traverse the def-use chain using a worklist starting at the original IV. 
assert(Widened.empty() && NarrowIVUsers.empty() && "expect initial state" ); Widened.insert(OrigPhi); pushNarrowIVUsers(OrigPhi, WidePhi); while (!NarrowIVUsers.empty()) { NarrowIVDefUse DU = NarrowIVUsers.pop_back_val(); // Process a def-use edge. This may replace the use, so don't hold a // use_iterator across it. Instruction *WideUse = WidenIVUse(DU, Rewriter); // Follow all def-use edges from the previous narrow use. if (WideUse) pushNarrowIVUsers(DU.NarrowUse, WideUse); // WidenIVUse may have removed the def-use edge. if (DU.NarrowDef->use_empty()) DeadInsts.emplace_back(DU.NarrowDef); } return WidePhi; } //===----------------------------------------------------------------------===// // Live IV Reduction - Minimize IVs live across the loop. //===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===// // Simplification of IV users based on SCEV evaluation. //===----------------------------------------------------------------------===// namespace { class IndVarSimplifyVisitor : public IVVisitor { ScalarEvolution *SE; const TargetTransformInfo *TTI; PHINode *IVPhi; public: WideIVInfo WI; IndVarSimplifyVisitor(PHINode *IV, ScalarEvolution *SCEV, const TargetTransformInfo *TTI, const DominatorTree *DTree) : SE(SCEV), TTI(TTI), IVPhi(IV) { DT = DTree; WI.NarrowIV = IVPhi; if (ReduceLiveIVs) setSplitOverflowIntrinsics(); } // Implement the interface used by simplifyUsersOfIV. void visitCast(CastInst *Cast) override { visitIVCast(Cast, WI, SE, TTI); } }; } /// SimplifyAndExtend - Iteratively perform simplification on a worklist of IV /// users. Each successive simplification may push more users which may /// themselves be candidates for simplification. /// /// Sign/Zero extend elimination is interleaved with IV simplification. /// void IndVarSimplify::SimplifyAndExtend(Loop *L, SCEVExpander &Rewriter, LPPassManager &LPM) { SmallVector<WideIVInfo, 8> WideIVs; SmallVector<PHINode*, 8> LoopPhis; for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) { LoopPhis.push_back(cast<PHINode>(I)); } // Each round of simplification iterates through the SimplifyIVUsers worklist // for all current phis, then determines whether any IVs can be // widened. Widening adds new phis to LoopPhis, inducing another round of // simplification on the wide IVs. while (!LoopPhis.empty()) { // Evaluate as many IV expressions as possible before widening any IVs. This // forces SCEV to set no-wrap flags before evaluating sign/zero // extension. The first time SCEV attempts to normalize sign/zero extension, // the result becomes final. So for the most predictable results, we delay // evaluation of sign/zero extend evaluation until needed, and avoid running // other SCEV based analysis prior to SimplifyAndExtend. do { PHINode *CurrIV = LoopPhis.pop_back_val(); // Information about sign/zero extensions of CurrIV. IndVarSimplifyVisitor Visitor(CurrIV, SE, TTI, DT); Changed |= simplifyUsersOfIV(CurrIV, SE, &LPM, DeadInsts, &Visitor); // HLSL change begin - don't widen type for hlsl. //if (Visitor.WI.WidestNativeType) { // WideIVs.push_back(Visitor.WI); //} // HLSL change end. 
} while(!LoopPhis.empty()); for (; !WideIVs.empty(); WideIVs.pop_back()) { WidenIV Widener(WideIVs.back(), LI, SE, DT, DeadInsts); if (PHINode *WidePhi = Widener.CreateWideIV(Rewriter)) { Changed = true; LoopPhis.push_back(WidePhi); } } } } //===----------------------------------------------------------------------===// // LinearFunctionTestReplace and its kin. Rewrite the loop exit condition. //===----------------------------------------------------------------------===// /// canExpandBackedgeTakenCount - Return true if this loop's backedge taken /// count expression can be safely and cheaply expanded into an instruction /// sequence that can be used by LinearFunctionTestReplace. /// /// TODO: This fails for pointer-type loop counters with greater than one byte /// strides, consequently preventing LFTR from running. For the purpose of LFTR /// we could skip this check in the case that the LFTR loop counter (chosen by /// FindLoopCounter) is also pointer type. Instead, we could directly convert /// the loop test to an inequality test by checking the target data's alignment /// of element types (given that the initial pointer value originates from or is /// used by ABI constrained operation, as opposed to inttoptr/ptrtoint). /// However, we don't yet have a strong motivation for converting loop tests /// into inequality tests. static bool canExpandBackedgeTakenCount(Loop *L, ScalarEvolution *SE, SCEVExpander &Rewriter) { const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L); if (isa<SCEVCouldNotCompute>(BackedgeTakenCount) || BackedgeTakenCount->isZero()) return false; if (!L->getExitingBlock()) return false; // Can't rewrite non-branch yet. if (!isa<BranchInst>(L->getExitingBlock()->getTerminator())) return false; if (Rewriter.isHighCostExpansion(BackedgeTakenCount, L)) return false; return true; } /// getLoopPhiForCounter - Return the loop header phi IFF IncV adds a loop /// invariant value to the phi. static PHINode *getLoopPhiForCounter(Value *IncV, Loop *L, DominatorTree *DT) { Instruction *IncI = dyn_cast<Instruction>(IncV); if (!IncI) return nullptr; switch (IncI->getOpcode()) { case Instruction::Add: case Instruction::Sub: break; case Instruction::GetElementPtr: // An IV counter must preserve its type. if (IncI->getNumOperands() == 2) break; LLVM_FALLTHROUGH; // HLSL Change default: return nullptr; } PHINode *Phi = dyn_cast<PHINode>(IncI->getOperand(0)); if (Phi && Phi->getParent() == L->getHeader()) { if (isLoopInvariant(IncI->getOperand(1), L, DT)) return Phi; return nullptr; } if (IncI->getOpcode() == Instruction::GetElementPtr) return nullptr; // Allow add/sub to be commuted. Phi = dyn_cast<PHINode>(IncI->getOperand(1)); if (Phi && Phi->getParent() == L->getHeader()) { if (isLoopInvariant(IncI->getOperand(0), L, DT)) return Phi; } return nullptr; } /// Return the compare guarding the loop latch, or NULL for unrecognized tests. static ICmpInst *getLoopTest(Loop *L) { assert(L->getExitingBlock() && "expected loop exit"); BasicBlock *LatchBlock = L->getLoopLatch(); // Don't bother with LFTR if the loop is not properly simplified. if (!LatchBlock) return nullptr; BranchInst *BI = dyn_cast<BranchInst>(L->getExitingBlock()->getTerminator()); assert(BI && "expected exit branch"); return dyn_cast<ICmpInst>(BI->getCondition()); } /// needsLFTR - LinearFunctionTestReplace policy. Return true unless we can show /// that the current exit test is already sufficiently canonical. static bool needsLFTR(Loop *L, DominatorTree *DT) { // Do LFTR to simplify the exit condition to an ICMP. 
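  // (E.g. the branch may be controlled by something other than an integer
  // compare, such as a condition combined from several tests.)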
ICmpInst *Cond = getLoopTest(L); if (!Cond) return true; // Do LFTR to simplify the exit ICMP to EQ/NE ICmpInst::Predicate Pred = Cond->getPredicate(); if (Pred != ICmpInst::ICMP_NE && Pred != ICmpInst::ICMP_EQ) return true; // Look for a loop invariant RHS Value *LHS = Cond->getOperand(0); Value *RHS = Cond->getOperand(1); if (!isLoopInvariant(RHS, L, DT)) { if (!isLoopInvariant(LHS, L, DT)) return true; std::swap(LHS, RHS); } // Look for a simple IV counter LHS PHINode *Phi = dyn_cast<PHINode>(LHS); if (!Phi) Phi = getLoopPhiForCounter(LHS, L, DT); if (!Phi) return true; // Do LFTR if PHI node is defined in the loop, but is *not* a counter. int Idx = Phi->getBasicBlockIndex(L->getLoopLatch()); if (Idx < 0) return true; // Do LFTR if the exit condition's IV is *not* a simple counter. Value *IncV = Phi->getIncomingValue(Idx); return Phi != getLoopPhiForCounter(IncV, L, DT); } /// Recursive helper for hasConcreteDef(). Unfortunately, this currently boils /// down to checking that all operands are constant and listing instructions /// that may hide undef. static bool hasConcreteDefImpl(Value *V, SmallPtrSetImpl<Value*> &Visited, unsigned Depth) { if (isa<Constant>(V)) return !isa<UndefValue>(V); if (Depth >= 6) return false; // Conservatively handle non-constant non-instructions. For example, Arguments // may be undef. Instruction *I = dyn_cast<Instruction>(V); if (!I) return false; // Load and return values may be undef. if(I->mayReadFromMemory() || isa<CallInst>(I) || isa<InvokeInst>(I)) return false; // Optimistically handle other instructions. for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) { if (!Visited.insert(*OI).second) continue; if (!hasConcreteDefImpl(*OI, Visited, Depth+1)) return false; } return true; } /// Return true if the given value is concrete. We must prove that undef can /// never reach it. /// /// TODO: If we decide that this is a good approach to checking for undef, we /// may factor it into a common location. static bool hasConcreteDef(Value *V) { SmallPtrSet<Value*, 8> Visited; Visited.insert(V); return hasConcreteDefImpl(V, Visited, 0); } /// AlmostDeadIV - Return true if this IV has any uses other than the (soon to /// be rewritten) loop exit test. static bool AlmostDeadIV(PHINode *Phi, BasicBlock *LatchBlock, Value *Cond) { int LatchIdx = Phi->getBasicBlockIndex(LatchBlock); Value *IncV = Phi->getIncomingValue(LatchIdx); for (User *U : Phi->users()) if (U != Cond && U != IncV) return false; for (User *U : IncV->users()) if (U != Cond && U != Phi) return false; return true; } /// FindLoopCounter - Find an affine IV in canonical form. /// /// BECount may be an i8* pointer type. The pointer difference is already /// valid count without scaling the address stride, so it remains a pointer /// expression as far as SCEV is concerned. /// /// Currently only valid for LFTR. See the comments on hasConcreteDef below. /// /// FIXME: Accept -1 stride and set IVLimit = IVInit - BECount /// /// FIXME: Accept non-unit stride as long as SCEV can reduce BECount * Stride. /// This is difficult in general for SCEV because of potential overflow. But we /// could at least handle constant BECounts. static PHINode *FindLoopCounter(Loop *L, const SCEV *BECount, ScalarEvolution *SE, DominatorTree *DT) { uint64_t BCWidth = SE->getTypeSizeInBits(BECount->getType()); Value *Cond = cast<BranchInst>(L->getExitingBlock()->getTerminator())->getCondition(); // Loop over all of the PHI nodes, looking for a simple counter. 
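  // A simple counter here means an affine AddRec with a unit step whose
  // latch increment feeds directly back into the header phi.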
PHINode *BestPhi = nullptr; const SCEV *BestInit = nullptr; BasicBlock *LatchBlock = L->getLoopLatch(); assert(LatchBlock && "needsLFTR should guarantee a loop latch"); for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) { PHINode *Phi = cast<PHINode>(I); if (!SE->isSCEVable(Phi->getType())) continue; // Avoid comparing an integer IV against a pointer Limit. if (BECount->getType()->isPointerTy() && !Phi->getType()->isPointerTy()) continue; const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Phi)); if (!AR || AR->getLoop() != L || !AR->isAffine()) continue; // AR may be a pointer type, while BECount is an integer type. // AR may be wider than BECount. With eq/ne tests overflow is immaterial. // AR may not be a narrower type, or we may never exit. uint64_t PhiWidth = SE->getTypeSizeInBits(AR->getType()); if (PhiWidth < BCWidth || !L->getHeader()->getModule()->getDataLayout().isLegalInteger(PhiWidth)) continue; const SCEV *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE)); if (!Step || !Step->isOne()) continue; int LatchIdx = Phi->getBasicBlockIndex(LatchBlock); Value *IncV = Phi->getIncomingValue(LatchIdx); if (getLoopPhiForCounter(IncV, L, DT) != Phi) continue; // Avoid reusing a potentially undef value to compute other values that may // have originally had a concrete definition. if (!hasConcreteDef(Phi)) { // We explicitly allow unknown phis as long as they are already used by // the loop test. In this case we assume that performing LFTR could not // increase the number of undef users. if (ICmpInst *Cond = getLoopTest(L)) { if (Phi != getLoopPhiForCounter(Cond->getOperand(0), L, DT) && Phi != getLoopPhiForCounter(Cond->getOperand(1), L, DT)) { continue; } } } const SCEV *Init = AR->getStart(); if (BestPhi && !AlmostDeadIV(BestPhi, LatchBlock, Cond)) { // Don't force a live loop counter if another IV can be used. if (AlmostDeadIV(Phi, LatchBlock, Cond)) continue; // Prefer to count-from-zero. This is a more "canonical" counter form. It // also prefers integer to pointer IVs. if (BestInit->isZero() != Init->isZero()) { if (BestInit->isZero()) continue; } // If two IVs both count from zero or both count from nonzero then the // narrower is likely a dead phi that has been widened. Use the wider phi // to allow the other to be eliminated. else if (PhiWidth <= SE->getTypeSizeInBits(BestPhi->getType())) continue; } BestPhi = Phi; BestInit = Init; } return BestPhi; } /// genLoopLimit - Help LinearFunctionTestReplace by generating a value that /// holds the RHS of the new loop test. static Value *genLoopLimit(PHINode *IndVar, const SCEV *IVCount, Loop *L, SCEVExpander &Rewriter, ScalarEvolution *SE) { const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(IndVar)); assert(AR && AR->getLoop() == L && AR->isAffine() && "bad loop counter"); const SCEV *IVInit = AR->getStart(); // IVInit may be a pointer while IVCount is an integer when FindLoopCounter // finds a valid pointer IV. Sign extend BECount in order to materialize a // GEP. Avoid running SCEVExpander on a new pointer value, instead reusing // the existing GEPs whenever possible. if (IndVar->getType()->isPointerTy() && !IVCount->getType()->isPointerTy()) { // IVOffset will be the new GEP offset that is interpreted by GEP as a // signed value. IVCount on the other hand represents the loop trip count, // which is an unsigned value. FindLoopCounter only allows induction // variables that have a positive unit stride of one. 
This means we don't // have to handle the case of negative offsets (yet) and just need to zero // extend IVCount. Type *OfsTy = SE->getEffectiveSCEVType(IVInit->getType()); const SCEV *IVOffset = SE->getTruncateOrZeroExtend(IVCount, OfsTy); // Expand the code for the iteration count. assert(SE->isLoopInvariant(IVOffset, L) && "Computed iteration count is not loop invariant!"); BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator()); Value *GEPOffset = Rewriter.expandCodeFor(IVOffset, OfsTy, BI); Value *GEPBase = IndVar->getIncomingValueForBlock(L->getLoopPreheader()); assert(AR->getStart() == SE->getSCEV(GEPBase) && "bad loop counter"); // We could handle pointer IVs other than i8*, but we need to compensate for // gep index scaling. See canExpandBackedgeTakenCount comments. assert(SE->getSizeOfExpr(IntegerType::getInt64Ty(IndVar->getContext()), cast<PointerType>(GEPBase->getType())->getElementType())->isOne() && "unit stride pointer IV must be i8*"); IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); return Builder.CreateGEP(nullptr, GEPBase, GEPOffset, "lftr.limit"); } else { // In any other case, convert both IVInit and IVCount to integers before // comparing. This may result in SCEV expension of pointers, but in practice // SCEV will fold the pointer arithmetic away as such: // BECount = (IVEnd - IVInit - 1) => IVLimit = IVInit (postinc). // // Valid Cases: (1) both integers is most common; (2) both may be pointers // for simple memset-style loops. // // IVInit integer and IVCount pointer would only occur if a canonical IV // were generated on top of case #2, which is not expected. const SCEV *IVLimit = nullptr; // For unit stride, IVCount = Start + BECount with 2's complement overflow. // For non-zero Start, compute IVCount here. if (AR->getStart()->isZero()) IVLimit = IVCount; else { assert(AR->getStepRecurrence(*SE)->isOne() && "only handles unit stride"); const SCEV *IVInit = AR->getStart(); // For integer IVs, truncate the IV before computing IVInit + BECount. if (SE->getTypeSizeInBits(IVInit->getType()) > SE->getTypeSizeInBits(IVCount->getType())) IVInit = SE->getTruncateExpr(IVInit, IVCount->getType()); IVLimit = SE->getAddExpr(IVInit, IVCount); } // Expand the code for the iteration count. BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator()); IRBuilder<> Builder(BI); assert(SE->isLoopInvariant(IVLimit, L) && "Computed iteration count is not loop invariant!"); // Ensure that we generate the same type as IndVar, or a smaller integer // type. In the presence of null pointer values, we have an integer type // SCEV expression (IVInit) for a pointer type IV value (IndVar). Type *LimitTy = IVCount->getType()->isPointerTy() ? IndVar->getType() : IVCount->getType(); return Rewriter.expandCodeFor(IVLimit, LimitTy, BI); } } /// LinearFunctionTestReplace - This method rewrites the exit condition of the /// loop to be a canonical != comparison against the incremented loop induction /// variable. This pass is able to rewrite the exit tests of any loop where the /// SCEV analysis can determine a loop-invariant trip count of the loop, which /// is actually a much broader range than just linear tests. Value *IndVarSimplify:: LinearFunctionTestReplace(Loop *L, const SCEV *BackedgeTakenCount, PHINode *IndVar, SCEVExpander &Rewriter) { assert(canExpandBackedgeTakenCount(L, SE, Rewriter) && "precondition"); // Initialize CmpIndVar and IVCount to their preincremented values. 
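  // The net effect for a typical counted loop is to replace an exit test
  // such as
  //   %cmp = icmp slt i32 %i, %n
  // with an equality test of the (possibly incremented) IV against the
  // materialized trip count, e.g.
  //   %exitcond = icmp ne i32 %i.next, %limit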
Value *CmpIndVar = IndVar; const SCEV *IVCount = BackedgeTakenCount; // If the exiting block is the same as the backedge block, we prefer to // compare against the post-incremented value, otherwise we must compare // against the preincremented value. if (L->getExitingBlock() == L->getLoopLatch()) { // Add one to the "backedge-taken" count to get the trip count. // This addition may overflow, which is valid as long as the comparison is // truncated to BackedgeTakenCount->getType(). IVCount = SE->getAddExpr(BackedgeTakenCount, SE->getConstant(BackedgeTakenCount->getType(), 1)); // The BackedgeTaken expression contains the number of times that the // backedge branches to the loop header. This is one less than the // number of times the loop executes, so use the incremented indvar. CmpIndVar = IndVar->getIncomingValueForBlock(L->getExitingBlock()); } Value *ExitCnt = genLoopLimit(IndVar, IVCount, L, Rewriter, SE); assert(ExitCnt->getType()->isPointerTy() == IndVar->getType()->isPointerTy() && "genLoopLimit missed a cast"); // Insert a new icmp_ne or icmp_eq instruction before the branch. BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator()); ICmpInst::Predicate P; if (L->contains(BI->getSuccessor(0))) P = ICmpInst::ICMP_NE; else P = ICmpInst::ICMP_EQ; DEBUG(dbgs() << "INDVARS: Rewriting loop exit condition to:\n" << " LHS:" << *CmpIndVar << '\n' << " op:\t" << (P == ICmpInst::ICMP_NE ? "!=" : "==") << "\n" << " RHS:\t" << *ExitCnt << "\n" << " IVCount:\t" << *IVCount << "\n"); IRBuilder<> Builder(BI); // LFTR can ignore IV overflow and truncate to the width of // BECount. This avoids materializing the add(zext(add)) expression. unsigned CmpIndVarSize = SE->getTypeSizeInBits(CmpIndVar->getType()); unsigned ExitCntSize = SE->getTypeSizeInBits(ExitCnt->getType()); if (CmpIndVarSize > ExitCntSize) { const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(SE->getSCEV(IndVar)); const SCEV *ARStart = AR->getStart(); const SCEV *ARStep = AR->getStepRecurrence(*SE); // For constant IVCount, avoid truncation. if (isa<SCEVConstant>(ARStart) && isa<SCEVConstant>(IVCount)) { const APInt &Start = cast<SCEVConstant>(ARStart)->getValue()->getValue(); APInt Count = cast<SCEVConstant>(IVCount)->getValue()->getValue(); // Note that the post-inc value of BackedgeTakenCount may have overflowed // above such that IVCount is now zero. if (IVCount != BackedgeTakenCount && Count == 0) { Count = APInt::getMaxValue(Count.getBitWidth()).zext(CmpIndVarSize); ++Count; } else Count = Count.zext(CmpIndVarSize); APInt NewLimit; if (cast<SCEVConstant>(ARStep)->getValue()->isNegative()) NewLimit = Start - Count; else NewLimit = Start + Count; ExitCnt = ConstantInt::get(CmpIndVar->getType(), NewLimit); DEBUG(dbgs() << " Widen RHS:\t" << *ExitCnt << "\n"); } else { CmpIndVar = Builder.CreateTrunc(CmpIndVar, ExitCnt->getType(), "lftr.wideiv"); } } Value *Cond = Builder.CreateICmp(P, CmpIndVar, ExitCnt, "exitcond"); Value *OrigCond = BI->getCondition(); // It's tempting to use replaceAllUsesWith here to fully replace the old // comparison, but that's not immediately safe, since users of the old // comparison may not be dominated by the new comparison. Instead, just // update the branch to use the new comparison; in the common case this // will make old comparison dead. BI->setCondition(Cond); DeadInsts.push_back(OrigCond); ++NumLFTR; Changed = true; return Cond; } //===----------------------------------------------------------------------===// // SinkUnusedInvariants. A late subpass to cleanup loop preheaders. 
//===----------------------------------------------------------------------===// /// If there's a single exit block, sink any loop-invariant values that /// were defined in the preheader but not used inside the loop into the /// exit block to reduce register pressure in the loop. void IndVarSimplify::SinkUnusedInvariants(Loop *L) { BasicBlock *ExitBlock = L->getExitBlock(); if (!ExitBlock) return; BasicBlock *Preheader = L->getLoopPreheader(); if (!Preheader) return; Instruction *InsertPt = ExitBlock->getFirstInsertionPt(); BasicBlock::iterator I = Preheader->getTerminator(); while (I != Preheader->begin()) { --I; // New instructions were inserted at the end of the preheader. if (isa<PHINode>(I)) break; // Don't move instructions which might have side effects, since the side // effects need to complete before instructions inside the loop. Also don't // move instructions which might read memory, since the loop may modify // memory. Note that it's okay if the instruction might have undefined // behavior: LoopSimplify guarantees that the preheader dominates the exit // block. if (I->mayHaveSideEffects() || I->mayReadFromMemory()) continue; // Skip debug info intrinsics. if (isa<DbgInfoIntrinsic>(I)) continue; // Skip landingpad instructions. if (isa<LandingPadInst>(I)) continue; // Don't sink alloca: we never want to sink static alloca's out of the // entry block, and correctly sinking dynamic alloca's requires // checks for stacksave/stackrestore intrinsics. // FIXME: Refactor this check somehow? if (isa<AllocaInst>(I)) continue; // Determine if there is a use in or before the loop (direct or // otherwise). bool UsedInLoop = false; for (Use &U : I->uses()) { Instruction *User = cast<Instruction>(U.getUser()); BasicBlock *UseBB = User->getParent(); if (PHINode *P = dyn_cast<PHINode>(User)) { unsigned i = PHINode::getIncomingValueNumForOperand(U.getOperandNo()); UseBB = P->getIncomingBlock(i); } if (UseBB == Preheader || L->contains(UseBB)) { UsedInLoop = true; break; } } // If there is, the def must remain in the preheader. if (UsedInLoop) continue; // Otherwise, sink it to the exit block. Instruction *ToMove = I; bool Done = false; if (I != Preheader->begin()) { // Skip debug info intrinsics. do { --I; } while (isa<DbgInfoIntrinsic>(I) && I != Preheader->begin()); if (isa<DbgInfoIntrinsic>(I) && I == Preheader->begin()) Done = true; } else { Done = true; } ToMove->moveBefore(InsertPt); if (Done) break; InsertPt = ToMove; } } //===----------------------------------------------------------------------===// // IndVarSimplify driver. Manage several subpasses of IV simplification. //===----------------------------------------------------------------------===// bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) { if (skipOptnoneFunction(L)) return false; // If LoopSimplify form is not available, stay out of trouble. Some notes: // - LSR currently only supports LoopSimplify-form loops. Indvars' // canonicalization can be a pessimization without LSR to "clean up" // afterwards. // - We depend on having a preheader; in particular, // Loop::getCanonicalInductionVariable only supports loops with preheaders, // and we're in trouble if we can't find the induction variable even when // we've manually inserted one. if (!L->isLoopSimplifyForm()) return false; LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); SE = &getAnalysis<ScalarEvolution>(); DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); TLI = TLIP ? 
&TLIP->getTLI() : nullptr; auto *TTIP = getAnalysisIfAvailable<TargetTransformInfoWrapperPass>(); TTI = TTIP ? &TTIP->getTTI(*L->getHeader()->getParent()) : nullptr; const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); DeadInsts.clear(); Changed = false; // If there are any floating-point recurrences, attempt to // transform them to use integer recurrences. RewriteNonIntegerIVs(L); const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L); // Create a rewriter object which we'll use to transform the code with. SCEVExpander Rewriter(*SE, DL, "indvars"); #ifndef NDEBUG Rewriter.setDebugType(DEBUG_TYPE); #endif // Eliminate redundant IV users. // // Simplification works best when run before other consumers of SCEV. We // attempt to avoid evaluating SCEVs for sign/zero extend operations until // other expressions involving loop IVs have been evaluated. This helps SCEV // set no-wrap flags before normalizing sign/zero extension. Rewriter.disableCanonicalMode(); SimplifyAndExtend(L, Rewriter, LPM); // Check to see if this loop has a computable loop-invariant execution count. // If so, this means that we can compute the final value of any expressions // that are recurrent in the loop, and substitute the exit values from the // loop into any instructions outside of the loop that use the final values of // the current expressions. // if (ReplaceExitValue != NeverRepl && !isa<SCEVCouldNotCompute>(BackedgeTakenCount)) RewriteLoopExitValues(L, Rewriter); // Eliminate redundant IV cycles. NumElimIV += Rewriter.replaceCongruentIVs(L, DT, DeadInsts); // If we have a trip count expression, rewrite the loop's exit condition // using it. We can currently only handle loops with a single exit. if (canExpandBackedgeTakenCount(L, SE, Rewriter) && needsLFTR(L, DT)) { PHINode *IndVar = FindLoopCounter(L, BackedgeTakenCount, SE, DT); if (IndVar) { // Check preconditions for proper SCEVExpander operation. SCEV does not // express SCEVExpander's dependencies, such as LoopSimplify. Instead any // pass that uses the SCEVExpander must do it. This does not work well for // loop passes because SCEVExpander makes assumptions about all loops, // while LoopPassManager only forces the current loop to be simplified. // // FIXME: SCEV expansion has no way to bail out, so the caller must // explicitly check any assumptions made by SCEV. Brittle. const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(BackedgeTakenCount); if (!AR || AR->getLoop()->getLoopPreheader()) (void)LinearFunctionTestReplace(L, BackedgeTakenCount, IndVar, Rewriter); } } // Clear the rewriter cache, because values that are in the rewriter's cache // can be deleted in the loop below, causing the AssertingVH in the cache to // trigger. Rewriter.clear(); // Now that we're done iterating through lists, clean up any instructions // which are now dead. while (!DeadInsts.empty()) if (Instruction *Inst = dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val())) RecursivelyDeleteTriviallyDeadInstructions(Inst, TLI); // The Rewriter may not be used from this point on. // Loop-invariant instructions in the preheader that aren't used in the // loop may be sunk below the loop to reduce register pressure. SinkUnusedInvariants(L); // Clean up dead instructions. Changed |= DeleteDeadPHIs(L->getHeader(), TLI); // Check a post-condition. 
assert(L->isLCSSAForm(*DT) && "Indvars did not leave the loop in lcssa form!"); #if 0 // HLSL Change Starts - option pending // Verify that LFTR, and any other change have not interfered with SCEV's // ability to compute trip count. #ifndef NDEBUG if (VerifyIndvars && !isa<SCEVCouldNotCompute>(BackedgeTakenCount)) { SE->forgetLoop(L); const SCEV *NewBECount = SE->getBackedgeTakenCount(L); if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) < SE->getTypeSizeInBits(NewBECount->getType())) NewBECount = SE->getTruncateOrNoop(NewBECount, BackedgeTakenCount->getType()); else BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, NewBECount->getType()); assert(BackedgeTakenCount == NewBECount && "indvars must preserve SCEV"); } #endif #endif // HLSL Change Ends - option pending return Changed; }
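// --- Hedged usage sketch (editorial addition, not part of this file) -------
// A minimal standalone driver showing how the indvars pass above could be
// scheduled through the legacy pass manager. The wrapper name
// runIndVarSimplify is illustrative; createIndVarSimplifyPass() is assumed to
// be the creator defined earlier in this file (as in stock LLVM), and the
// legacy pass manager is assumed to pull in the LoopSimplify/LCSSA and
// analysis dependencies the pass declares.
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Scalar.h"

static void runIndVarSimplify(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  // Loop passes are wrapped in an LPPassManager by the legacy pass manager,
  // so runOnLoop() above is invoked once per loop of each function.
  PM.add(llvm::createIndVarSimplifyPass());
  PM.run(M);
}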
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/DxilConditionalMem2Reg.cpp
//===- DxilConditionalMem2Reg.cpp - Mem2Reg that selectively promotes Allocas //----===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/AssumptionCache.h" #include "llvm/IR/DIBuilder.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/LegacyPassManager.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/PromoteMemToReg.h" #include "dxc/DXIL/DxilUtil.h" #include "dxc/HLSL/HLModule.h" #include "llvm/Analysis/DxilValueCache.h" #include "llvm/Analysis/ValueTracking.h" using namespace llvm; using namespace hlsl; static bool ContainsFloatingPointType(Type *Ty) { if (Ty->isFloatingPointTy()) { return true; } else if (Ty->isArrayTy()) { return ContainsFloatingPointType(Ty->getArrayElementType()); } else if (Ty->isVectorTy()) { return ContainsFloatingPointType(Ty->getVectorElementType()); } else if (Ty->isStructTy()) { for (unsigned i = 0, NumStructElms = Ty->getStructNumElements(); i < NumStructElms; i++) { if (ContainsFloatingPointType(Ty->getStructElementType(i))) return true; } } return false; } static bool Mem2Reg(Function &F, DominatorTree &DT, AssumptionCache &AC) { BasicBlock &BB = F.getEntryBlock(); // Get the entry node for the function bool Changed = false; std::vector<AllocaInst *> Allocas; while (1) { Allocas.clear(); // Find allocas that are safe to promote, by looking at all instructions in // the entry node for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I) if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) // Is it an alloca? if (isAllocaPromotable(AI) && (!HLModule::HasPreciseAttributeWithMetadata(AI) || !ContainsFloatingPointType(AI->getAllocatedType()))) Allocas.push_back(AI); if (Allocas.empty()) break; PromoteMemToReg(Allocas, DT, nullptr, &AC); Changed = true; } return Changed; } // // Special Mem2Reg pass that conditionally promotes or transforms Alloca's. // // Anything marked 'dx.precise', will not be promoted because precise markers // are not propagated to the dxil operations yet and will be lost if alloca // is removed right now. // // Precise Allocas of vectors get scalarized here. It's important we do that // before Scalarizer pass because promoting the allocas later than that will // produce vector phi's (disallowed by the validator), which need another // Scalarizer pass to clean up. // class DxilConditionalMem2Reg : public FunctionPass { public: static char ID; // Function overrides that resolve options when used for DxOpt void applyOptions(PassOptions O) override { GetPassOptionBool(O, "NoOpt", &NoOpt, false); } void dumpConfig(raw_ostream &OS) override { FunctionPass::dumpConfig(OS); OS << ",NoOpt=" << NoOpt; } bool NoOpt = false; explicit DxilConditionalMem2Reg(bool NoOpt = false) : FunctionPass(ID), NoOpt(NoOpt) { initializeDxilConditionalMem2RegPass(*PassRegistry::getPassRegistry()); } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<DominatorTreeWrapperPass>(); AU.addRequired<AssumptionCacheTracker>(); AU.setPreservesCFG(); } // Replace simple array allocas with individual scalar allocas. 
// Only handle if: // - All the alloca's users are geps // - The geps all have only constant indices // - The geps are indexing to just the scalar elements // bool SplitSimpleAllocas(llvm::Function &F) { llvm::SmallVector<AllocaInst *, 10> ScalarAllocas; if (F.empty()) return false; BasicBlock *Entry = &F.getEntryBlock(); bool Changed = false; LLVMContext &Ctx = F.getContext(); Module *M = F.getParent(); IRBuilder<> Builder(Ctx); DIBuilder DIB(*M); const DataLayout &DL = F.getParent()->getDataLayout(); for (Instruction *it = &Entry->back(); it != nullptr;) { Instruction *I = it; it = (it == &Entry->front()) ? nullptr : it->getPrevNode(); AllocaInst *AI = dyn_cast<AllocaInst>(I); if (!AI) continue; Type *AllocType = AI->getAllocatedType(); if (!AllocType->isArrayTy()) continue; Type *ArrayElemType = AllocType->getArrayElementType(); if (!ArrayElemType->isSingleValueType()) continue; unsigned MaxSize = 0; bool Giveup = false; for (User *U : AI->users()) { GEPOperator *Gep = dyn_cast<GEPOperator>(U); if (!Gep) { Giveup = true; break; } if (!Gep->hasAllConstantIndices()) { Giveup = true; break; } if (Gep->getNumIndices() != 2 || cast<ConstantInt>(Gep->getOperand(1))->getLimitedValue() != 0) { Giveup = true; break; } unsigned RequiredSize = 1 + cast<ConstantInt>(Gep->getOperand(2))->getLimitedValue(); if (RequiredSize > MaxSize) MaxSize = RequiredSize; } if (Giveup) continue; // Generate a scalar allocas for the corresponding GEPs. ScalarAllocas.clear(); ScalarAllocas.resize(MaxSize); for (auto it = AI->user_begin(); it != AI->user_end();) { User *U = *(it++); GetElementPtrInst *Gep = cast<GetElementPtrInst>(U); unsigned Index = cast<ConstantInt>(Gep->getOperand(2))->getLimitedValue(); AllocaInst *ScalarAlloca = ScalarAllocas[Index]; if (!ScalarAlloca) { Builder.SetInsertPoint(AI); ScalarAlloca = Builder.CreateAlloca(ArrayElemType); ScalarAlloca->setDebugLoc(AI->getDebugLoc()); hlsl::DxilMDHelper::CopyMetadata( *ScalarAlloca, *AI); // Propagate precise attributes, if any ScalarAllocas[Index] = ScalarAlloca; } Gep->replaceAllUsesWith(ScalarAlloca); Gep->eraseFromParent(); } // Rewrite any debug info insts. for (auto mdit = dxilutil::mdv_users_begin(AI); mdit != dxilutil::mdv_users_end(AI);) { User *U = *(mdit++); DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(U); if (!DI) continue; DIExpression *Expr = DI->getExpression(); unsigned ArrayLayoutOffsetInBits = 0; std::vector<DxilDIArrayDim> ArrayDims; const bool HasStrides = DxilMDHelper::GetVariableDebugLayout( DI, ArrayLayoutOffsetInBits, ArrayDims); const bool IsBitpiece = Expr->isBitPiece(); const uint64_t BaseBitpieceOffSet = IsBitpiece ? 
Expr->getBitPieceOffset() : 0; for (unsigned i = 0; i < ScalarAllocas.size(); i++) { AllocaInst *ScalarAlloca = ScalarAllocas[i]; if (!ScalarAlloca) { continue; } uint64_t BitpieceOffsetInBits = 0; const uint64_t BitpieceSizeInBits = DL.getTypeStoreSizeInBits(ArrayElemType); if (HasStrides) { BitpieceOffsetInBits = ArrayLayoutOffsetInBits; unsigned FragmentIndex = i; for (DxilDIArrayDim &ArrayDim : ArrayDims) { unsigned IndexIntoArray = FragmentIndex % ArrayDim.NumElements; BitpieceOffsetInBits += IndexIntoArray * ArrayDim.StrideInBits; FragmentIndex /= ArrayDim.NumElements; } } else { BitpieceOffsetInBits = BaseBitpieceOffSet + i * BitpieceSizeInBits; } uint64_t Operands[3] = {dwarf::DW_OP_bit_piece, BitpieceOffsetInBits, BitpieceSizeInBits}; DIB.insertDeclare(ScalarAlloca, DI->getVariable(), DIExpression::get(Ctx, Operands), DI->getDebugLoc(), DI); } // For each scalar alloca DI->eraseFromParent(); } // For each metadat user AI->eraseFromParent(); Changed = true; } // For each inst in the entry block return Changed; } // // Turns all allocas of vector types that are marked with 'dx.precise' // and turn them into scalars. For example: // // x = alloca <f32 x 4> !dx.precise // // becomes: // // x1 = alloca f32 !dx.precise // x2 = alloca f32 !dx.precise // x3 = alloca f32 !dx.precise // x4 = alloca f32 !dx.precise // // This function also replaces all stores and loads but leaves everything // else alone by generating insertelement and extractelement as appropriate. // static bool ScalarizePreciseVectorAlloca(Function &F) { BasicBlock *Entry = &*F.begin(); bool Changed = false; for (auto it = Entry->begin(); it != Entry->end();) { Instruction *I = &*(it++); AllocaInst *AI = dyn_cast<AllocaInst>(I); if (!AI || !AI->getAllocatedType()->isVectorTy()) continue; if (!HLModule::HasPreciseAttributeWithMetadata(AI)) continue; IRBuilder<> B(AI); VectorType *VTy = cast<VectorType>(AI->getAllocatedType()); Type *ScalarTy = VTy->getVectorElementType(); const unsigned VectorSize = VTy->getVectorNumElements(); SmallVector<AllocaInst *, 32> Elements; for (unsigned i = 0; i < VectorSize; i++) { AllocaInst *Elem = B.CreateAlloca(ScalarTy); hlsl::DxilMDHelper::CopyMetadata(*Elem, *AI); Elements.push_back(Elem); } for (auto it = AI->user_begin(); it != AI->user_end();) { User *U = *(it++); if (LoadInst *LI = dyn_cast<LoadInst>(U)) { B.SetInsertPoint(LI); Value *Vec = UndefValue::get(VTy); for (unsigned i = 0; i < VectorSize; i++) { LoadInst *Elem = B.CreateLoad(Elements[i]); hlsl::DxilMDHelper::CopyMetadata(*Elem, *LI); Vec = B.CreateInsertElement(Vec, Elem, i); } LI->replaceAllUsesWith(Vec); LI->eraseFromParent(); } else if (StoreInst *Store = dyn_cast<StoreInst>(U)) { B.SetInsertPoint(Store); Value *Vec = Store->getValueOperand(); for (unsigned i = 0; i < VectorSize; i++) { Value *Elem = B.CreateExtractElement(Vec, i); StoreInst *ElemStore = B.CreateStore(Elem, Elements[i]); hlsl::DxilMDHelper::CopyMetadata(*ElemStore, *Store); } Store->eraseFromParent(); } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) { DXASSERT(onlyUsedByLifetimeMarkers(BCI), "expected bitcast to only be used by lifetime intrinsics"); for (auto BCIU = BCI->user_begin(), BCIE = BCI->user_end(); BCIU != BCIE;) { IntrinsicInst *II = cast<IntrinsicInst>(*(BCIU++)); II->eraseFromParent(); } BCI->eraseFromParent(); } else { llvm_unreachable( "Cannot handle non-store/load on precise vector allocas"); } } AI->eraseFromParent(); Changed = true; } return Changed; } struct StoreInfo { Value *V; unsigned Offset; }; static bool 
FindAllStores(Module &M, Value *V, SmallVectorImpl<StoreInfo> *Stores) { SmallVector<StoreInfo, 8> Worklist; std::set<Value *> Seen; auto Add = [&](Value *V, unsigned OffsetInBits) { if (Seen.insert(V).second) Worklist.push_back({V, OffsetInBits}); }; Add(V, 0); const DataLayout &DL = M.getDataLayout(); while (Worklist.size()) { auto Info = Worklist.pop_back_val(); auto *Elem = Info.V; if (auto GEP = dyn_cast<GEPOperator>(Elem)) { if (GEP->getNumIndices() != 2) continue; unsigned ElemSize = 0; Type *GEPPtrType = GEP->getPointerOperand()->getType(); Type *PtrElemType = GEPPtrType->getPointerElementType(); if (ArrayType *ArrayTy = dyn_cast<ArrayType>(PtrElemType)) { ElemSize = DL.getTypeAllocSizeInBits(ArrayTy->getElementType()); } else if (VectorType *VectorTy = dyn_cast<VectorType>(PtrElemType)) { ElemSize = DL.getTypeAllocSizeInBits(VectorTy->getElementType()); } else { return false; } unsigned OffsetInBits = 0; for (unsigned i = 0; i < GEP->getNumIndices(); i++) { auto IdxOp = dyn_cast<ConstantInt>(GEP->getOperand(i + 1)); if (!IdxOp) { return false; } uint64_t Idx = IdxOp->getLimitedValue(); if (i == 0) { if (Idx != 0) return false; } else { OffsetInBits = Idx * ElemSize; } } for (User *U : Elem->users()) Add(U, Info.Offset + OffsetInBits); } else if (auto *Store = dyn_cast<StoreInst>(Elem)) { Stores->push_back({Store, Info.Offset}); } } return true; } // Function to rewrite debug info for output argument. // Sometimes, normal local variables that get returned from functions get // rewritten as a pointer argument. // // Right now, we generally have a single dbg.declare for the Argument, but as // we lower it to storeOutput, the dbg.declare and the Argument both get // removed, leavning no debug info for the local variable. // // Solution here is to rewrite the dbg.declare as dbg.value's by finding all // the stores and writing a dbg.value immediately before the store. Fairly // conservative at the moment about what cases to rewrite (only scalars and // vectors, and arrays of scalars and vectors). 
// bool RewriteOutputArgsDebugInfo(Function &F) { bool Changed = false; Module *M = F.getParent(); DIBuilder DIB(*M); SmallVector<StoreInfo, 4> Stores; LLVMContext &Ctx = F.getContext(); for (Argument &Arg : F.args()) { if (!Arg.getType()->isPointerTy()) continue; Type *Ty = Arg.getType()->getPointerElementType(); bool IsSimpleType = Ty->isSingleValueType() || Ty->isVectorTy() || (Ty->isArrayTy() && (Ty->getArrayElementType()->isVectorTy() || Ty->getArrayElementType()->isSingleValueType())); if (!IsSimpleType) continue; Stores.clear(); for (User *U : Arg.users()) { if (!FindAllStores(*M, U, &Stores)) { Stores.clear(); break; } } if (Stores.empty()) continue; DbgDeclareInst *Declare = nullptr; if (auto *L = LocalAsMetadata::getIfExists(&Arg)) { if (auto *DINode = MetadataAsValue::getIfExists(Ctx, L)) { if (!DINode->user_empty() && std::next(DINode->user_begin()) == DINode->user_end()) { Declare = dyn_cast<DbgDeclareInst>(*DINode->user_begin()); } } } if (Declare) { DITypeIdentifierMap EmptyMap; DILocalVariable *Var = Declare->getVariable(); DIExpression *Expr = Declare->getExpression(); DIType *VarTy = Var->getType().resolve(EmptyMap); uint64_t VarSize = VarTy->getSizeInBits(); uint64_t Offset = 0; if (Expr->isBitPiece()) Offset = Expr->getBitPieceOffset(); for (auto &Info : Stores) { auto *Store = cast<StoreInst>(Info.V); auto Val = Store->getValueOperand(); auto Loc = Store->getDebugLoc(); auto &M = *F.getParent(); unsigned ValSize = M.getDataLayout().getTypeAllocSizeInBits(Val->getType()); DIExpression *NewExpr = nullptr; if (Offset || VarSize > ValSize) { uint64_t Elems[] = {dwarf::DW_OP_bit_piece, Offset + Info.Offset, ValSize}; NewExpr = DIExpression::get(Ctx, Elems); } else { NewExpr = DIExpression::get(Ctx, {}); } if (Loc->getScope()->getSubprogram() == Var->getScope()->getSubprogram()) DIB.insertDbgValueIntrinsic(Val, 0, Var, NewExpr, Loc, Store); } Declare->eraseFromParent(); Changed = true; } } return Changed; } bool runOnFunction(Function &F) override { DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); AssumptionCache *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); bool Changed = false; Changed |= RewriteOutputArgsDebugInfo(F); Changed |= dxilutil::DeleteDeadAllocas(F); Changed |= SplitSimpleAllocas(F); Changed |= ScalarizePreciseVectorAlloca(F); Changed |= Mem2Reg(F, *DT, *AC); return Changed; } }; char DxilConditionalMem2Reg::ID; Pass *llvm::createDxilConditionalMem2RegPass(bool NoOpt) { return new DxilConditionalMem2Reg(NoOpt); } INITIALIZE_PASS_BEGIN(DxilConditionalMem2Reg, "dxil-cond-mem2reg", "Dxil Conditional Mem2Reg", false, false) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_END(DxilConditionalMem2Reg, "dxil-cond-mem2reg", "Dxil Conditional Mem2Reg", false, false)
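// --- Hedged usage sketch (editorial addition, not part of this pass) -------
// One way this pass might be added to a custom per-function pipeline. The
// helper name addDxilCondMem2Reg and the assumption that the creator is
// exposed via llvm/Transforms/Scalar.h in this tree are illustrative; only
// createDxilConditionalMem2RegPass itself is defined above. As the comments
// above explain, it should run before the Scalarizer pass so that precise
// vector allocas never end up producing vector phis.
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Transforms/Scalar.h"

static void addDxilCondMem2Reg(llvm::legacy::FunctionPassManager &FPM,
                               bool NoOpt) {
  // DominatorTreeWrapperPass and AssumptionCacheTracker are declared as
  // required by the pass; the legacy pass manager schedules them itself.
  FPM.add(llvm::createDxilConditionalMem2RegPass(NoOpt));
}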
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/EarlyCSE.cpp
//===- EarlyCSE.cpp - Simple and fast CSE pass ----------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass performs a simple dominator tree walk that eliminates trivially // redundant instructions. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar/EarlyCSE.h" #include "llvm/ADT/Hashing.h" #include "llvm/ADT/ScopedHashTable.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/PatternMatch.h" #include "llvm/Pass.h" #include "llvm/Support/Debug.h" #include "llvm/Support/RecyclingAllocator.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/Local.h" #include <deque> using namespace llvm; using namespace llvm::PatternMatch; #define DEBUG_TYPE "early-cse" STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd"); STATISTIC(NumCSE, "Number of instructions CSE'd"); STATISTIC(NumCSELoad, "Number of load instructions CSE'd"); STATISTIC(NumCSECall, "Number of call instructions CSE'd"); STATISTIC(NumDSE, "Number of trivial dead stores removed"); //===----------------------------------------------------------------------===// // SimpleValue //===----------------------------------------------------------------------===// namespace { /// \brief Struct representing the available values in the scoped hash table. struct SimpleValue { Instruction *Inst; SimpleValue(Instruction *I) : Inst(I) { assert((isSentinel() || canHandle(I)) && "Inst can't be handled!"); } bool isSentinel() const { return Inst == DenseMapInfo<Instruction *>::getEmptyKey() || Inst == DenseMapInfo<Instruction *>::getTombstoneKey(); } static bool canHandle(Instruction *Inst) { // This can only handle non-void readnone functions. if (CallInst *CI = dyn_cast<CallInst>(Inst)) return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy(); return isa<CastInst>(Inst) || isa<BinaryOperator>(Inst) || isa<GetElementPtrInst>(Inst) || isa<CmpInst>(Inst) || isa<SelectInst>(Inst) || isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst) || isa<ExtractValueInst>(Inst) || isa<InsertValueInst>(Inst); } }; } namespace llvm { template <> struct DenseMapInfo<SimpleValue> { static inline SimpleValue getEmptyKey() { return DenseMapInfo<Instruction *>::getEmptyKey(); } static inline SimpleValue getTombstoneKey() { return DenseMapInfo<Instruction *>::getTombstoneKey(); } static unsigned getHashValue(SimpleValue Val); static bool isEqual(SimpleValue LHS, SimpleValue RHS); }; } unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) { Instruction *Inst = Val.Inst; // Hash in all of the operands as pointers. 
if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst)) { Value *LHS = BinOp->getOperand(0); Value *RHS = BinOp->getOperand(1); if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1)) std::swap(LHS, RHS); if (isa<OverflowingBinaryOperator>(BinOp)) { // Hash the overflow behavior unsigned Overflow = BinOp->hasNoSignedWrap() * OverflowingBinaryOperator::NoSignedWrap | BinOp->hasNoUnsignedWrap() * OverflowingBinaryOperator::NoUnsignedWrap; return hash_combine(BinOp->getOpcode(), Overflow, LHS, RHS); } return hash_combine(BinOp->getOpcode(), LHS, RHS); } if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) { Value *LHS = CI->getOperand(0); Value *RHS = CI->getOperand(1); CmpInst::Predicate Pred = CI->getPredicate(); if (Inst->getOperand(0) > Inst->getOperand(1)) { std::swap(LHS, RHS); Pred = CI->getSwappedPredicate(); } return hash_combine(Inst->getOpcode(), Pred, LHS, RHS); } if (CastInst *CI = dyn_cast<CastInst>(Inst)) return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0)); if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst)) return hash_combine(EVI->getOpcode(), EVI->getOperand(0), hash_combine_range(EVI->idx_begin(), EVI->idx_end())); if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst)) return hash_combine(IVI->getOpcode(), IVI->getOperand(0), IVI->getOperand(1), hash_combine_range(IVI->idx_begin(), IVI->idx_end())); assert((isa<CallInst>(Inst) || isa<BinaryOperator>(Inst) || isa<GetElementPtrInst>(Inst) || isa<SelectInst>(Inst) || isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst)) && "Invalid/unknown instruction"); // Mix in the opcode. return hash_combine( Inst->getOpcode(), hash_combine_range(Inst->value_op_begin(), Inst->value_op_end())); } bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) { Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst; if (LHS.isSentinel() || RHS.isSentinel()) return LHSI == RHSI; if (LHSI->getOpcode() != RHSI->getOpcode()) return false; if (LHSI->isIdenticalTo(RHSI)) return true; // If we're not strictly identical, we still might be a commutable instruction if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) { if (!LHSBinOp->isCommutative()) return false; assert(isa<BinaryOperator>(RHSI) && "same opcode, but different instruction type?"); BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI); // Check overflow attributes if (isa<OverflowingBinaryOperator>(LHSBinOp)) { assert(isa<OverflowingBinaryOperator>(RHSBinOp) && "same opcode, but different operator type?"); if (LHSBinOp->hasNoUnsignedWrap() != RHSBinOp->hasNoUnsignedWrap() || LHSBinOp->hasNoSignedWrap() != RHSBinOp->hasNoSignedWrap()) return false; } // Commuted equality return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) && LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0); } if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) { assert(isa<CmpInst>(RHSI) && "same opcode, but different instruction type?"); CmpInst *RHSCmp = cast<CmpInst>(RHSI); // Commuted equality return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) && LHSCmp->getOperand(1) == RHSCmp->getOperand(0) && LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate(); } return false; } //===----------------------------------------------------------------------===// // CallValue //===----------------------------------------------------------------------===// namespace { /// \brief Struct representing the available call values in the scoped hash /// table. 
struct CallValue { Instruction *Inst; CallValue(Instruction *I) : Inst(I) { assert((isSentinel() || canHandle(I)) && "Inst can't be handled!"); } bool isSentinel() const { return Inst == DenseMapInfo<Instruction *>::getEmptyKey() || Inst == DenseMapInfo<Instruction *>::getTombstoneKey(); } static bool canHandle(Instruction *Inst) { // Don't value number anything that returns void. if (Inst->getType()->isVoidTy()) return false; CallInst *CI = dyn_cast<CallInst>(Inst); if (!CI || !CI->onlyReadsMemory()) return false; return true; } }; } namespace llvm { template <> struct DenseMapInfo<CallValue> { static inline CallValue getEmptyKey() { return DenseMapInfo<Instruction *>::getEmptyKey(); } static inline CallValue getTombstoneKey() { return DenseMapInfo<Instruction *>::getTombstoneKey(); } static unsigned getHashValue(CallValue Val); static bool isEqual(CallValue LHS, CallValue RHS); }; } unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) { Instruction *Inst = Val.Inst; // Hash all of the operands as pointers and mix in the opcode. return hash_combine( Inst->getOpcode(), hash_combine_range(Inst->value_op_begin(), Inst->value_op_end())); } bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) { Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst; if (LHS.isSentinel() || RHS.isSentinel()) return LHSI == RHSI; return LHSI->isIdenticalTo(RHSI); } //===----------------------------------------------------------------------===// // EarlyCSE implementation //===----------------------------------------------------------------------===// namespace { /// \brief A simple and fast domtree-based CSE pass. /// /// This pass does a simple depth-first walk over the dominator tree, /// eliminating trivially redundant instructions and using instsimplify to /// canonicalize things as it goes. It is intended to be fast and catch obvious /// cases so that instcombine and other passes are more effective. It is /// expected that a later pass of GVN will catch the interesting/hard cases. class EarlyCSE { public: Function &F; const TargetLibraryInfo &TLI; const TargetTransformInfo &TTI; DominatorTree &DT; AssumptionCache &AC; typedef RecyclingAllocator< BumpPtrAllocator, ScopedHashTableVal<SimpleValue, Value *>> AllocatorTy; typedef ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>, AllocatorTy> ScopedHTType; /// \brief A scoped hash table of the current values of all of our simple /// scalar expressions. /// /// As we walk down the domtree, we look to see if instructions are in this: /// if so, we replace them with what we find, otherwise we insert them so /// that dominated values can succeed in their lookup. ScopedHTType AvailableValues; /// \brief A scoped hash table of the current values of loads. /// /// This allows us to get efficient access to dominating loads when we have /// a fully redundant load. In addition to the most recent load, we keep /// track of a generation count of the read, which is compared against the /// current generation count. The current generation count is incremented /// after every possibly writing memory operation, which ensures that we only /// CSE loads with other loads that have no intervening store. typedef RecyclingAllocator< BumpPtrAllocator, ScopedHashTableVal<Value *, std::pair<Value *, unsigned>>> LoadMapAllocator; typedef ScopedHashTable<Value *, std::pair<Value *, unsigned>, DenseMapInfo<Value *>, LoadMapAllocator> LoadHTType; LoadHTType AvailableLoads; /// \brief A scoped hash table of the current values of read-only call /// values. 
/// /// It uses the same generation count as loads. typedef ScopedHashTable<CallValue, std::pair<Value *, unsigned>> CallHTType; CallHTType AvailableCalls; /// \brief This is the current generation of the memory value. unsigned CurrentGeneration; /// \brief Set up the EarlyCSE runner for a particular function. EarlyCSE(Function &F, const TargetLibraryInfo &TLI, const TargetTransformInfo &TTI, DominatorTree &DT, AssumptionCache &AC) : F(F), TLI(TLI), TTI(TTI), DT(DT), AC(AC), CurrentGeneration(0) {} bool run(); private: // Almost a POD, but needs to call the constructors for the scoped hash // tables so that a new scope gets pushed on. These are RAII so that the // scope gets popped when the NodeScope is destroyed. class NodeScope { public: NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads, CallHTType &AvailableCalls) : Scope(AvailableValues), LoadScope(AvailableLoads), CallScope(AvailableCalls) {} private: NodeScope(const NodeScope &) = delete; void operator=(const NodeScope &) = delete; ScopedHTType::ScopeTy Scope; LoadHTType::ScopeTy LoadScope; CallHTType::ScopeTy CallScope; }; // Contains all the needed information to create a stack for doing a depth // first tranversal of the tree. This includes scopes for values, loads, and // calls as well as the generation. There is a child iterator so that the // children do not need to be store spearately. class StackNode { public: StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads, CallHTType &AvailableCalls, unsigned cg, DomTreeNode *n, DomTreeNode::iterator child, DomTreeNode::iterator end) : CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child), EndIter(end), Scopes(AvailableValues, AvailableLoads, AvailableCalls), Processed(false) {} // Accessors. unsigned currentGeneration() { return CurrentGeneration; } unsigned childGeneration() { return ChildGeneration; } void childGeneration(unsigned generation) { ChildGeneration = generation; } DomTreeNode *node() { return Node; } DomTreeNode::iterator childIter() { return ChildIter; } DomTreeNode *nextChild() { DomTreeNode *child = *ChildIter; ++ChildIter; return child; } DomTreeNode::iterator end() { return EndIter; } bool isProcessed() { return Processed; } void process() { Processed = true; } private: StackNode(const StackNode &) = delete; void operator=(const StackNode &) = delete; // Members. unsigned CurrentGeneration; unsigned ChildGeneration; DomTreeNode *Node; DomTreeNode::iterator ChildIter; DomTreeNode::iterator EndIter; NodeScope Scopes; bool Processed; }; /// \brief Wrapper class to handle memory instructions, including loads, /// stores and intrinsic loads and stores defined by the target. 
class ParseMemoryInst { public: ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI) : Load(false), Store(false), Vol(false), MayReadFromMemory(false), MayWriteToMemory(false), MatchingId(-1), Ptr(nullptr) { MayReadFromMemory = Inst->mayReadFromMemory(); MayWriteToMemory = Inst->mayWriteToMemory(); if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) { MemIntrinsicInfo Info; if (!TTI.getTgtMemIntrinsic(II, Info)) return; if (Info.NumMemRefs == 1) { Store = Info.WriteMem; Load = Info.ReadMem; MatchingId = Info.MatchingId; MayReadFromMemory = Info.ReadMem; MayWriteToMemory = Info.WriteMem; Vol = Info.Vol; Ptr = Info.PtrVal; } } else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) { Load = true; Vol = !LI->isSimple(); Ptr = LI->getPointerOperand(); } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) { Store = true; Vol = !SI->isSimple(); Ptr = SI->getPointerOperand(); } } bool isLoad() { return Load; } bool isStore() { return Store; } bool isVolatile() { return Vol; } bool isMatchingMemLoc(const ParseMemoryInst &Inst) { return Ptr == Inst.Ptr && MatchingId == Inst.MatchingId; } bool isValid() { return Ptr != nullptr; } int getMatchingId() { return MatchingId; } Value *getPtr() { return Ptr; } bool mayReadFromMemory() { return MayReadFromMemory; } bool mayWriteToMemory() { return MayWriteToMemory; } private: bool Load; bool Store; bool Vol; bool MayReadFromMemory; bool MayWriteToMemory; // For regular (non-intrinsic) loads/stores, this is set to -1. For // intrinsic loads/stores, the id is retrieved from the corresponding // field in the MemIntrinsicInfo structure. That field contains // non-negative values only. int MatchingId; Value *Ptr; }; bool processNode(DomTreeNode *Node); Value *getOrCreateResult(Value *Inst, Type *ExpectedType) const { if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) return LI; else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) return SI->getValueOperand(); assert(isa<IntrinsicInst>(Inst) && "Instruction not supported"); return TTI.getOrCreateResultFromMemIntrinsic(cast<IntrinsicInst>(Inst), ExpectedType); } }; } bool EarlyCSE::processNode(DomTreeNode *Node) { BasicBlock *BB = Node->getBlock(); // If this block has a single predecessor, then the predecessor is the parent // of the domtree node and all of the live out memory values are still current // in this block. If this block has multiple predecessors, then they could // have invalidated the live-out memory values of our parent value. For now, // just be conservative and invalidate memory if this block has multiple // predecessors. if (!BB->getSinglePredecessor()) ++CurrentGeneration; // If this node has a single predecessor which ends in a conditional branch, // we can infer the value of the branch condition given that we took this // path. We need the single predeccesor to ensure there's not another path // which reaches this block where the condition might hold a different // value. Since we're adding this to the scoped hash table (like any other // def), it will have been popped if we encounter a future merge block. if (BasicBlock *Pred = BB->getSinglePredecessor()) if (auto *BI = dyn_cast<BranchInst>(Pred->getTerminator())) if (BI->isConditional()) if (auto *CondInst = dyn_cast<Instruction>(BI->getCondition())) if (SimpleValue::canHandle(CondInst)) { assert(BI->getSuccessor(0) == BB || BI->getSuccessor(1) == BB); auto *ConditionalConstant = (BI->getSuccessor(0) == BB) ? 
ConstantInt::getTrue(BB->getContext()) : ConstantInt::getFalse(BB->getContext()); AvailableValues.insert(CondInst, ConditionalConstant); DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '" << CondInst->getName() << "' as " << *ConditionalConstant << " in " << BB->getName() << "\n"); // Replace all dominated uses with the known value replaceDominatedUsesWith(CondInst, ConditionalConstant, DT, BasicBlockEdge(Pred, BB)); } /// LastStore - Keep track of the last non-volatile store that we saw... for /// as long as there in no instruction that reads memory. If we see a store /// to the same location, we delete the dead store. This zaps trivial dead /// stores which can occur in bitfield code among other things. Instruction *LastStore = nullptr; bool Changed = false; const DataLayout &DL = BB->getModule()->getDataLayout(); // See if any instructions in the block can be eliminated. If so, do it. If // not, add them to AvailableValues. for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { Instruction *Inst = I++; // Dead instructions should just be removed. if (isInstructionTriviallyDead(Inst, &TLI)) { DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n'); Inst->eraseFromParent(); Changed = true; ++NumSimplify; continue; } // Skip assume intrinsics, they don't really have side effects (although // they're marked as such to ensure preservation of control dependencies), // and this pass will not disturb any of the assumption's control // dependencies. if (match(Inst, m_Intrinsic<Intrinsic::assume>())) { DEBUG(dbgs() << "EarlyCSE skipping assumption: " << *Inst << '\n'); continue; } // If the instruction can be simplified (e.g. X+0 = X) then replace it with // its simpler value. if (Value *V = SimplifyInstruction(Inst, DL, &TLI, &DT, &AC)) { DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << " to: " << *V << '\n'); Inst->replaceAllUsesWith(V); Inst->eraseFromParent(); Changed = true; ++NumSimplify; continue; } // If this is a simple instruction that we can value number, process it. if (SimpleValue::canHandle(Inst)) { // See if the instruction has an available value. If so, use it. if (Value *V = AvailableValues.lookup(Inst)) { DEBUG(dbgs() << "EarlyCSE CSE: " << *Inst << " to: " << *V << '\n'); Inst->replaceAllUsesWith(V); Inst->eraseFromParent(); Changed = true; ++NumCSE; continue; } // Otherwise, just remember that this value is available. AvailableValues.insert(Inst, Inst); continue; } ParseMemoryInst MemInst(Inst, TTI); // If this is a non-volatile load, process it. if (MemInst.isValid() && MemInst.isLoad()) { // Ignore volatile loads. if (MemInst.isVolatile()) { LastStore = nullptr; // Don't CSE across synchronization boundaries. if (Inst->mayWriteToMemory()) ++CurrentGeneration; continue; } // If we have an available version of this load, and if it is the right // generation, replace this instruction. std::pair<Value *, unsigned> InVal = AvailableLoads.lookup(MemInst.getPtr()); if (InVal.first != nullptr && InVal.second == CurrentGeneration) { Value *Op = getOrCreateResult(InVal.first, Inst->getType()); if (Op != nullptr) { DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst << " to: " << *InVal.first << '\n'); if (!Inst->use_empty()) Inst->replaceAllUsesWith(Op); Inst->eraseFromParent(); Changed = true; ++NumCSELoad; continue; } } // Otherwise, remember that we have this instruction. AvailableLoads.insert(MemInst.getPtr(), std::pair<Value *, unsigned>( Inst, CurrentGeneration)); LastStore = nullptr; continue; } // If this instruction may read from memory, forget LastStore. 
// Load/store intrinsics will indicate both a read and a write to // memory. The target may override this (e.g. so that a store intrinsic // does not read from memory, and thus will be treated the same as a // regular store for commoning purposes). if (Inst->mayReadFromMemory() && !(MemInst.isValid() && !MemInst.mayReadFromMemory())) LastStore = nullptr; // If this is a read-only call, process it. if (CallValue::canHandle(Inst)) { // If we have an available version of this call, and if it is the right // generation, replace this instruction. std::pair<Value *, unsigned> InVal = AvailableCalls.lookup(Inst); if (InVal.first != nullptr && InVal.second == CurrentGeneration) { DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst << " to: " << *InVal.first << '\n'); if (!Inst->use_empty()) Inst->replaceAllUsesWith(InVal.first); Inst->eraseFromParent(); Changed = true; ++NumCSECall; continue; } // Otherwise, remember that we have this instruction. AvailableCalls.insert( Inst, std::pair<Value *, unsigned>(Inst, CurrentGeneration)); continue; } // Okay, this isn't something we can CSE at all. Check to see if it is // something that could modify memory. If so, our available memory values // cannot be used so bump the generation count. if (Inst->mayWriteToMemory()) { ++CurrentGeneration; if (MemInst.isValid() && MemInst.isStore()) { // We do a trivial form of DSE if there are two stores to the same // location with no intervening loads. Delete the earlier store. if (LastStore) { ParseMemoryInst LastStoreMemInst(LastStore, TTI); if (LastStoreMemInst.isMatchingMemLoc(MemInst)) { DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore << " due to: " << *Inst << '\n'); LastStore->eraseFromParent(); Changed = true; ++NumDSE; LastStore = nullptr; } // fallthrough - we can exploit information about this store } // Okay, we just invalidated anything we knew about loaded values. Try // to salvage *something* by remembering that the stored value is a live // version of the pointer. It is safe to forward from volatile stores // to non-volatile loads, so we don't have to check for volatility of // the store. AvailableLoads.insert(MemInst.getPtr(), std::pair<Value *, unsigned>( Inst, CurrentGeneration)); // Remember that this was the last store we saw for DSE. if (!MemInst.isVolatile()) LastStore = Inst; } } } return Changed; } bool EarlyCSE::run() { // Note, deque is being used here because there is significant performance // gains over vector when the container becomes very large due to the // specific access patterns. For more information see the mailing list // discussion on this: // http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20120116/135228.html std::deque<StackNode *> nodesToProcess; bool Changed = false; // Process the root node. nodesToProcess.push_back(new StackNode( AvailableValues, AvailableLoads, AvailableCalls, CurrentGeneration, DT.getRootNode(), DT.getRootNode()->begin(), DT.getRootNode()->end())); // Save the current generation. unsigned LiveOutGeneration = CurrentGeneration; // Process the stack. while (!nodesToProcess.empty()) { // Grab the first item off the stack. Set the current generation, remove // the node from the stack, and process it. StackNode *NodeToProcess = nodesToProcess.back(); // Initialize class members. CurrentGeneration = NodeToProcess->currentGeneration(); // Check if the node needs to be processed. if (!NodeToProcess->isProcessed()) { // Process the node. 
Changed |= processNode(NodeToProcess->node()); NodeToProcess->childGeneration(CurrentGeneration); NodeToProcess->process(); } else if (NodeToProcess->childIter() != NodeToProcess->end()) { // Push the next child onto the stack. DomTreeNode *child = NodeToProcess->nextChild(); nodesToProcess.push_back( new StackNode(AvailableValues, AvailableLoads, AvailableCalls, NodeToProcess->childGeneration(), child, child->begin(), child->end())); } else { // It has been processed, and there are no more children to process, // so delete it and pop it off the stack. delete NodeToProcess; nodesToProcess.pop_back(); } } // while (!nodes...) // Reset the current generation. CurrentGeneration = LiveOutGeneration; return Changed; } PreservedAnalyses EarlyCSEPass::run(Function &F, AnalysisManager<Function> *AM) { auto &TLI = AM->getResult<TargetLibraryAnalysis>(F); auto &TTI = AM->getResult<TargetIRAnalysis>(F); auto &DT = AM->getResult<DominatorTreeAnalysis>(F); auto &AC = AM->getResult<AssumptionAnalysis>(F); EarlyCSE CSE(F, TLI, TTI, DT, AC); if (!CSE.run()) return PreservedAnalyses::all(); // CSE preserves the dominator tree because it doesn't mutate the CFG. // FIXME: Bundle this with other CFG-preservation. PreservedAnalyses PA; PA.preserve<DominatorTreeAnalysis>(); return PA; } namespace { /// \brief A simple and fast domtree-based CSE pass. /// /// This pass does a simple depth-first walk over the dominator tree, /// eliminating trivially redundant instructions and using instsimplify to /// canonicalize things as it goes. It is intended to be fast and catch obvious /// cases so that instcombine and other passes are more effective. It is /// expected that a later pass of GVN will catch the interesting/hard cases. class EarlyCSELegacyPass : public FunctionPass { public: static char ID; EarlyCSELegacyPass() : FunctionPass(ID) { initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function &F) override { if (skipOptnoneFunction(F)) return false; auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); EarlyCSE CSE(F, TLI, TTI, DT, AC); return CSE.run(); } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<AssumptionCacheTracker>(); AU.addRequired<DominatorTreeWrapperPass>(); AU.addRequired<TargetLibraryInfoWrapperPass>(); AU.addRequired<TargetTransformInfoWrapperPass>(); AU.setPreservesCFG(); } }; } char EarlyCSELegacyPass::ID = 0; FunctionPass *llvm::createEarlyCSEPass() { return new EarlyCSELegacyPass(); } INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false) INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) INITIALIZE_PASS_END(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false)
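// --- Hedged illustration (editorial addition, not part of EarlyCSE) --------
// A small, self-contained demo of the ScopedHashTable idiom the pass is built
// on: bindings inserted while a ScopeTy object is live are visible to nested
// scopes and disappear when that scope is destroyed, which is how
// AvailableValues/AvailableLoads/AvailableCalls track the dominator-tree walk
// performed by run() above. The demo function name is illustrative only.
#include "llvm/ADT/ScopedHashTable.h"
#include <cassert>

static void scopedHashTableDemo() {
  llvm::ScopedHashTable<int, int> Table;
  llvm::ScopedHashTable<int, int>::ScopeTy Outer(Table);
  Table.insert(1, 10); // visible here and in every nested scope
  {
    llvm::ScopedHashTable<int, int>::ScopeTy Inner(Table);
    Table.insert(2, 20);
    assert(Table.lookup(1) == 10 && Table.lookup(2) == 20);
  } // Inner is popped: the (2, 20) binding is gone, (1, 10) survives
  assert(Table.count(2) == 0 && Table.lookup(1) == 10);
}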
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/LowerTypePasses.cpp
//===- LowerTypePasses.cpp ------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "dxc/DXIL/DxilConstants.h" #include "dxc/DXIL/DxilOperations.h" #include "dxc/DXIL/DxilUtil.h" #include "dxc/HLSL/HLModule.h" #include "dxc/HLSL/HLOperations.h" #include "dxc/HlslIntrinsicOp.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/Constant.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DIBuilder.h" #include "llvm/IR/Function.h" #include "llvm/IR/GetElementPtrTypeIterator.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/Local.h" #include <vector> using namespace llvm; using namespace hlsl; static ArrayType *CreateNestArrayTy(Type *FinalEltTy, ArrayRef<ArrayType *> nestArrayTys) { Type *newAT = FinalEltTy; for (auto ArrayTy = nestArrayTys.rbegin(), E = nestArrayTys.rend(); ArrayTy != E; ++ArrayTy) newAT = ArrayType::get(newAT, (*ArrayTy)->getNumElements()); return cast<ArrayType>(newAT); } //===----------------------------------------------------------------------===// // Lower one type to another type. //===----------------------------------------------------------------------===// namespace { class LowerTypePass : public ModulePass { public: explicit LowerTypePass(char &ID) : ModulePass(ID) {} bool runOnModule(Module &M) override; private: bool runOnFunction(Function &F, bool HasDbgInfo); AllocaInst *lowerAlloca(AllocaInst *A); GlobalVariable *lowerInternalGlobal(GlobalVariable *GV); protected: virtual bool needToLower(Value *V) = 0; virtual void lowerUseWithNewValue(Value *V, Value *NewV) = 0; virtual Type *lowerType(Type *Ty) = 0; virtual Constant *lowerInitVal(Constant *InitVal, Type *NewTy) = 0; virtual StringRef getGlobalPrefix() = 0; virtual void initialize(Module &M){}; }; AllocaInst *LowerTypePass::lowerAlloca(AllocaInst *A) { IRBuilder<> AllocaBuilder(A); Type *NewTy = lowerType(A->getAllocatedType()); AllocaInst *NewA = AllocaBuilder.CreateAlloca(NewTy); NewA->setAlignment(A->getAlignment()); return NewA; } GlobalVariable *LowerTypePass::lowerInternalGlobal(GlobalVariable *GV) { Type *NewTy = lowerType(GV->getType()->getPointerElementType()); // So set init val to undef. Constant *InitVal = UndefValue::get(NewTy); if (GV->hasInitializer()) { Constant *OldInitVal = GV->getInitializer(); if (isa<ConstantAggregateZero>(OldInitVal)) InitVal = ConstantAggregateZero::get(NewTy); else if (!isa<UndefValue>(OldInitVal)) { InitVal = lowerInitVal(OldInitVal, NewTy); } } bool isConst = GV->isConstant(); GlobalVariable::ThreadLocalMode TLMode = GV->getThreadLocalMode(); unsigned AddressSpace = GV->getType()->getAddressSpace(); GlobalValue::LinkageTypes linkage = GV->getLinkage(); Module *M = GV->getParent(); GlobalVariable *NewGV = new llvm::GlobalVariable( *M, NewTy, /*IsConstant*/ isConst, linkage, /*InitVal*/ InitVal, GV->getName() + getGlobalPrefix(), /*InsertBefore*/ nullptr, TLMode, AddressSpace); NewGV->setAlignment(GV->getAlignment()); return NewGV; } bool LowerTypePass::runOnFunction(Function &F, bool HasDbgInfo) { std::vector<AllocaInst *> workList; // Scan the entry basic block, adding allocas to the worklist. 
BasicBlock &BB = F.getEntryBlock(); for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I) { if (!isa<AllocaInst>(I)) continue; AllocaInst *A = cast<AllocaInst>(I); if (needToLower(A)) workList.emplace_back(A); } LLVMContext &Context = F.getContext(); for (AllocaInst *A : workList) { AllocaInst *NewA = lowerAlloca(A); if (HasDbgInfo) { // Migrate debug info. DbgDeclareInst *DDI = llvm::FindAllocaDbgDeclare(A); if (DDI) DDI->setOperand( 0, MetadataAsValue::get(Context, LocalAsMetadata::get(NewA))); } // Replace users. lowerUseWithNewValue(A, NewA); // Remove alloca. A->eraseFromParent(); } return true; } bool LowerTypePass::runOnModule(Module &M) { initialize(M); // Load up debug information, to cross-reference values and the instructions // used to load them. bool HasDbgInfo = llvm::hasDebugInfo(M); llvm::DebugInfoFinder Finder; if (HasDbgInfo) { Finder.processModule(M); } for (Function &F : M.functions()) { if (F.isDeclaration()) continue; runOnFunction(F, HasDbgInfo); } // Work on internal global. std::vector<GlobalVariable *> vecGVs; for (GlobalVariable &GV : M.globals()) { if (dxilutil::IsStaticGlobal(&GV) || dxilutil::IsSharedMemoryGlobal(&GV)) { if (needToLower(&GV) && !GV.user_empty()) vecGVs.emplace_back(&GV); } } for (GlobalVariable *GV : vecGVs) { GlobalVariable *NewGV = lowerInternalGlobal(GV); // Add debug info. if (HasDbgInfo) { HLModule::UpdateGlobalVariableDebugInfo(GV, Finder, NewGV); } // Replace users. GV->removeDeadConstantUsers(); lowerUseWithNewValue(GV, NewGV); // Remove GV. GV->removeDeadConstantUsers(); GV->eraseFromParent(); } return true; } } // namespace //===----------------------------------------------------------------------===// // DynamicIndexingVector to Array. //===----------------------------------------------------------------------===// namespace { class DynamicIndexingVectorToArray : public LowerTypePass { bool ReplaceAllVectors; public: explicit DynamicIndexingVectorToArray(bool ReplaceAll = false) : LowerTypePass(ID), ReplaceAllVectors(ReplaceAll) {} static char ID; // Pass identification, replacement for typeid void applyOptions(PassOptions O) override; void dumpConfig(raw_ostream &OS) override; protected: bool needToLower(Value *V) override; void lowerUseWithNewValue(Value *V, Value *NewV) override; Type *lowerType(Type *Ty) override; Constant *lowerInitVal(Constant *InitVal, Type *NewTy) override; StringRef getGlobalPrefix() override { return ".v"; } private: bool HasVectorDynamicIndexing(Value *V); void ReplaceVecGEP(Value *GEP, ArrayRef<Value *> idxList, Value *A, IRBuilder<> &Builder); void ReplaceVecArrayGEP(Value *GEP, ArrayRef<Value *> idxList, Value *A, IRBuilder<> &Builder); void ReplaceVectorWithArray(Value *Vec, Value *Array); void ReplaceVectorArrayWithArray(Value *VecArray, Value *Array); void ReplaceStaticIndexingOnVector(Value *V); void ReplaceAddrSpaceCast(ConstantExpr *CE, Value *A, IRBuilder<> &Builder); }; void DynamicIndexingVectorToArray::applyOptions(PassOptions O) { GetPassOptionBool(O, "ReplaceAllVectors", &ReplaceAllVectors, ReplaceAllVectors); } void DynamicIndexingVectorToArray::dumpConfig(raw_ostream &OS) { ModulePass::dumpConfig(OS); OS << ",ReplaceAllVectors=" << ReplaceAllVectors; } void DynamicIndexingVectorToArray::ReplaceStaticIndexingOnVector(Value *V) { for (auto U = V->user_begin(), E = V->user_end(); U != E;) { Value *User = *(U++); if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) { // Only work on element access for vector. 
if (GEP->getNumOperands() == 3) { auto Idx = GEP->idx_begin(); // Skip the pointer idx. Idx++; ConstantInt *constIdx = cast<ConstantInt>(Idx); // AllocaInst for Call user. AllocaInst *TmpAI = nullptr; for (auto GEPU = GEP->user_begin(), GEPE = GEP->user_end(); GEPU != GEPE;) { Instruction *GEPUser = cast<Instruction>(*(GEPU++)); IRBuilder<> Builder(GEPUser); if (LoadInst *ldInst = dyn_cast<LoadInst>(GEPUser)) { // Change // ld a->x // into // b = ld a // b.x Value *ldVal = Builder.CreateLoad(V); Value *Elt = Builder.CreateExtractElement(ldVal, constIdx); ldInst->replaceAllUsesWith(Elt); ldInst->eraseFromParent(); } else if (CallInst *CI = dyn_cast<CallInst>(GEPUser)) { // Change // call a->x // into // tmp = alloca // b = ld a // st b.x, tmp // call tmp // b = ld a // b.x = ld tmp // st b, a if (TmpAI == nullptr) { Type *Ty = GEP->getType()->getPointerElementType(); IRBuilder<> AllocaB(CI->getParent() ->getParent() ->getEntryBlock() .getFirstInsertionPt()); TmpAI = AllocaB.CreateAlloca(Ty); } Value *ldVal = Builder.CreateLoad(V); Value *Elt = Builder.CreateExtractElement(ldVal, constIdx); Builder.CreateStore(Elt, TmpAI); CI->replaceUsesOfWith(GEP, TmpAI); Builder.SetInsertPoint(CI->getNextNode()); Elt = Builder.CreateLoad(TmpAI); ldVal = Builder.CreateLoad(V); ldVal = Builder.CreateInsertElement(ldVal, Elt, constIdx); Builder.CreateStore(ldVal, V); } else { // Change // st val, a->x // into // tmp = ld a // tmp.x = val // st tmp, a // Must be store inst here. StoreInst *stInst = cast<StoreInst>(GEPUser); Value *val = stInst->getValueOperand(); Value *ldVal = Builder.CreateLoad(V); ldVal = Builder.CreateInsertElement(ldVal, val, constIdx); Builder.CreateStore(ldVal, V); stInst->eraseFromParent(); } } GEP->eraseFromParent(); } else if (GEP->getNumIndices() == 1) { Value *Idx = *GEP->idx_begin(); if (ConstantInt *C = dyn_cast<ConstantInt>(Idx)) { if (C->getLimitedValue() == 0) { GEP->replaceAllUsesWith(V); GEP->eraseFromParent(); } } } } } } bool DynamicIndexingVectorToArray::needToLower(Value *V) { Type *Ty = V->getType()->getPointerElementType(); if (dyn_cast<VectorType>(Ty)) { if (isa<GlobalVariable>(V) || ReplaceAllVectors) { return true; } // Don't lower local vector which only static indexing. if (HasVectorDynamicIndexing(V)) { return true; } else { // Change vector indexing with ld st. ReplaceStaticIndexingOnVector(V); return false; } } else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) { // Array must be replaced even without dynamic indexing to remove vector // type in dxil. // TODO: optimize static array index in later pass. Type *EltTy = dxilutil::GetArrayEltTy(AT); return isa<VectorType>(EltTy); } return false; } void DynamicIndexingVectorToArray::ReplaceVecGEP(Value *GEP, ArrayRef<Value *> idxList, Value *A, IRBuilder<> &Builder) { Value *newGEP = Builder.CreateGEP(A, idxList); if (GEP->getType()->getPointerElementType()->isVectorTy()) { ReplaceVectorWithArray(GEP, newGEP); } else { GEP->replaceAllUsesWith(newGEP); } } void DynamicIndexingVectorToArray::ReplaceAddrSpaceCast(ConstantExpr *CE, Value *A, IRBuilder<> &Builder) { // create new AddrSpaceCast. 
Value *NewAddrSpaceCast = Builder.CreateAddrSpaceCast( A, PointerType::get(A->getType()->getPointerElementType(), CE->getType()->getPointerAddressSpace())); ReplaceVectorWithArray(CE, NewAddrSpaceCast); } void DynamicIndexingVectorToArray::ReplaceVectorWithArray(Value *Vec, Value *A) { unsigned size = Vec->getType()->getPointerElementType()->getVectorNumElements(); for (auto U = Vec->user_begin(); U != Vec->user_end();) { User *User = (*U++); // GlobalVariable user. if (ConstantExpr *CE = dyn_cast<ConstantExpr>(User)) { if (User->user_empty()) continue; if (GEPOperator *GEP = dyn_cast<GEPOperator>(User)) { IRBuilder<> Builder(Vec->getContext()); SmallVector<Value *, 4> idxList(GEP->idx_begin(), GEP->idx_end()); ReplaceVecGEP(GEP, idxList, A, Builder); continue; } else if (CE->getOpcode() == Instruction::AddrSpaceCast) { IRBuilder<> Builder(Vec->getContext()); ReplaceAddrSpaceCast(CE, A, Builder); continue; } DXASSERT(0, "not implemented yet"); } // Instrution user. Instruction *UserInst = cast<Instruction>(User); IRBuilder<> Builder(UserInst); if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) { SmallVector<Value *, 4> idxList(GEP->idx_begin(), GEP->idx_end()); ReplaceVecGEP(cast<GEPOperator>(GEP), idxList, A, Builder); GEP->eraseFromParent(); } else if (LoadInst *ldInst = dyn_cast<LoadInst>(User)) { // If ld whole struct, need to split the load. Value *newLd = UndefValue::get(ldInst->getType()); Value *zero = Builder.getInt32(0); unsigned align = ldInst->getAlignment(); for (unsigned i = 0; i < size; i++) { Value *idx = Builder.getInt32(i); Value *GEP = Builder.CreateInBoundsGEP(A, {zero, idx}); LoadInst *Elt = Builder.CreateLoad(GEP); Elt->setAlignment(align); newLd = Builder.CreateInsertElement(newLd, Elt, i); } ldInst->replaceAllUsesWith(newLd); ldInst->eraseFromParent(); } else if (StoreInst *stInst = dyn_cast<StoreInst>(User)) { Value *val = stInst->getValueOperand(); Value *zero = Builder.getInt32(0); unsigned align = stInst->getAlignment(); for (unsigned i = 0; i < size; i++) { Value *Elt = Builder.CreateExtractElement(val, i); Value *idx = Builder.getInt32(i); Value *GEP = Builder.CreateInBoundsGEP(A, {zero, idx}); StoreInst *EltSt = Builder.CreateStore(Elt, GEP); EltSt->setAlignment(align); } stInst->eraseFromParent(); } else if (BitCastInst *castInst = dyn_cast<BitCastInst>(User)) { DXASSERT(onlyUsedByLifetimeMarkers(castInst), "expected bitcast to only be used by lifetime intrinsics"); castInst->setOperand(0, A); } else { // Vector parameter should be lowered. // No function call should use vector. 
DXASSERT(0, "not implement yet"); } } } void DynamicIndexingVectorToArray::ReplaceVecArrayGEP(Value *GEP, ArrayRef<Value *> idxList, Value *A, IRBuilder<> &Builder) { Value *newGEP = Builder.CreateGEP(A, idxList); Type *Ty = GEP->getType()->getPointerElementType(); if (Ty->isVectorTy()) { ReplaceVectorWithArray(GEP, newGEP); } else if (Ty->isArrayTy()) { ReplaceVectorArrayWithArray(GEP, newGEP); } else { DXASSERT(Ty->isSingleValueType(), "must be vector subscript here"); GEP->replaceAllUsesWith(newGEP); } } void DynamicIndexingVectorToArray::ReplaceVectorArrayWithArray(Value *VA, Value *A) { for (auto U = VA->user_begin(); U != VA->user_end();) { User *User = *(U++); if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) { IRBuilder<> Builder(GEP); SmallVector<Value *, 4> idxList(GEP->idx_begin(), GEP->idx_end()); ReplaceVecArrayGEP(GEP, idxList, A, Builder); GEP->eraseFromParent(); } else if (GEPOperator *GEPOp = dyn_cast<GEPOperator>(User)) { IRBuilder<> Builder(GEPOp->getContext()); SmallVector<Value *, 4> idxList(GEPOp->idx_begin(), GEPOp->idx_end()); ReplaceVecArrayGEP(GEPOp, idxList, A, Builder); } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) { BCI->setOperand(0, A); } else if (auto *CI = dyn_cast<CallInst>(User)) { IRBuilder<> B(CI); auto *Cast = B.CreateBitCast(A, VA->getType()); CI->replaceUsesOfWith(VA, Cast); } else { DXASSERT(0, "Array pointer should only used by GEP"); } } } void DynamicIndexingVectorToArray::lowerUseWithNewValue(Value *V, Value *NewV) { Type *Ty = V->getType()->getPointerElementType(); // Replace V with NewV. if (Ty->isVectorTy()) { ReplaceVectorWithArray(V, NewV); } else { ReplaceVectorArrayWithArray(V, NewV); } } Type *DynamicIndexingVectorToArray::lowerType(Type *Ty) { if (VectorType *VT = dyn_cast<VectorType>(Ty)) { return ArrayType::get(VT->getElementType(), VT->getNumElements()); } else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) { SmallVector<ArrayType *, 4> nestArrayTys; nestArrayTys.emplace_back(AT); Type *EltTy = AT->getElementType(); // support multi level of array while (EltTy->isArrayTy()) { ArrayType *ElAT = cast<ArrayType>(EltTy); nestArrayTys.emplace_back(ElAT); EltTy = ElAT->getElementType(); } if (EltTy->isVectorTy()) { Type *vecAT = ArrayType::get(EltTy->getVectorElementType(), EltTy->getVectorNumElements()); return CreateNestArrayTy(vecAT, nestArrayTys); } return nullptr; } return nullptr; } Constant *DynamicIndexingVectorToArray::lowerInitVal(Constant *InitVal, Type *NewTy) { Type *VecTy = InitVal->getType(); ArrayType *ArrayTy = cast<ArrayType>(NewTy); if (VecTy->isVectorTy()) { SmallVector<Constant *, 4> Elts; for (unsigned i = 0; i < VecTy->getVectorNumElements(); i++) { Elts.emplace_back(InitVal->getAggregateElement(i)); } return ConstantArray::get(ArrayTy, Elts); } else { ArrayType *AT = cast<ArrayType>(VecTy); ArrayType *EltArrayTy = cast<ArrayType>(ArrayTy->getElementType()); SmallVector<Constant *, 4> Elts; for (unsigned i = 0; i < AT->getNumElements(); i++) { Constant *Elt = lowerInitVal(InitVal->getAggregateElement(i), EltArrayTy); Elts.emplace_back(Elt); } return ConstantArray::get(ArrayTy, Elts); } } bool DynamicIndexingVectorToArray::HasVectorDynamicIndexing(Value *V) { return dxilutil::HasDynamicIndexing(V); } } // namespace char DynamicIndexingVectorToArray::ID = 0; INITIALIZE_PASS(DynamicIndexingVectorToArray, "dynamic-vector-to-array", "Replace dynamic indexing vector with array", false, false) // Public interface to the DynamicIndexingVectorToArray pass ModulePass * 
llvm::createDynamicIndexingVectorToArrayPass(bool ReplaceAllVector) { return new DynamicIndexingVectorToArray(ReplaceAllVector); } //===----------------------------------------------------------------------===// // Flatten multi dim array into 1 dim. //===----------------------------------------------------------------------===// namespace { class MultiDimArrayToOneDimArray : public LowerTypePass { public: explicit MultiDimArrayToOneDimArray() : LowerTypePass(ID) {} static char ID; // Pass identification, replacement for typeid protected: bool needToLower(Value *V) override; void lowerUseWithNewValue(Value *V, Value *NewV) override; Type *lowerType(Type *Ty) override; Constant *lowerInitVal(Constant *InitVal, Type *NewTy) override; StringRef getGlobalPrefix() override { return ".1dim"; } bool isSafeToLowerArray(Value *V); }; // Recurse users, looking for any direct users of array or sub-array type, // other than lifetime markers: bool MultiDimArrayToOneDimArray::isSafeToLowerArray(Value *V) { if (!V->getType()->getPointerElementType()->isArrayTy()) return true; for (auto it = V->user_begin(); it != V->user_end();) { User *U = *it++; if (isa<BitCastOperator>(U)) { // Bitcast is ok because source type can be changed. continue; } else if (isa<GEPOperator>(U) || isa<AddrSpaceCastInst>(U) || isa<ConstantExpr>(U)) { if (!isSafeToLowerArray(U)) return false; } else { return false; } } return true; } bool MultiDimArrayToOneDimArray::needToLower(Value *V) { Type *Ty = V->getType()->getPointerElementType(); ArrayType *AT = dyn_cast<ArrayType>(Ty); if (!AT) return false; if (!isa<ArrayType>(AT->getElementType())) { return false; } else { // Merge all GEP. dxilutil::MergeGepUse(V); return isSafeToLowerArray(V); } } void ReplaceMultiDimGEP(User *GEP, Value *OneDim, IRBuilder<> &Builder) { gep_type_iterator GEPIt = gep_type_begin(GEP), E = gep_type_end(GEP); Value *PtrOffset = GEPIt.getOperand(); ++GEPIt; Value *ArrayIdx = GEPIt.getOperand(); ++GEPIt; Value *VecIdx = nullptr; SmallVector<Value *, 8> StructIdxs; for (; GEPIt != E; ++GEPIt) { if (GEPIt->isArrayTy()) { unsigned arraySize = GEPIt->getArrayNumElements(); Value *V = GEPIt.getOperand(); ArrayIdx = Builder.CreateMul(ArrayIdx, Builder.getInt32(arraySize)); ArrayIdx = Builder.CreateAdd(V, ArrayIdx); } else if (isa<StructType>(*GEPIt)) { // Replaces multi-dim array of struct, with single-dim array of struct StructIdxs.push_back(PtrOffset); StructIdxs.push_back(ArrayIdx); while (GEPIt != E) { StructIdxs.push_back(GEPIt.getOperand()); ++GEPIt; } break; } else { DXASSERT_NOMSG(isa<VectorType>(*GEPIt)); VecIdx = GEPIt.getOperand(); } } Value *NewGEP = nullptr; if (StructIdxs.size()) NewGEP = Builder.CreateGEP(OneDim, StructIdxs); else if (!VecIdx) NewGEP = Builder.CreateGEP(OneDim, {PtrOffset, ArrayIdx}); else NewGEP = Builder.CreateGEP(OneDim, {PtrOffset, ArrayIdx, VecIdx}); GEP->replaceAllUsesWith(NewGEP); } void MultiDimArrayToOneDimArray::lowerUseWithNewValue(Value *MultiDim, Value *OneDim) { LLVMContext &Context = MultiDim->getContext(); // All users should be element type. // Replace users of AI or GV. for (auto it = MultiDim->user_begin(); it != MultiDim->user_end();) { User *U = *(it++); if (U->user_empty()) continue; if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) { BCI->setOperand(0, OneDim); continue; } if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) { IRBuilder<> Builder(Context); if (GEPOperator *GEP = dyn_cast<GEPOperator>(U)) { // NewGEP must be GEPOperator too. // No instruction will be build. 
ReplaceMultiDimGEP(U, OneDim, Builder); } else if (CE->getOpcode() == Instruction::AddrSpaceCast) { Value *NewAddrSpaceCast = Builder.CreateAddrSpaceCast( OneDim, PointerType::get(OneDim->getType()->getPointerElementType(), CE->getType()->getPointerAddressSpace())); lowerUseWithNewValue(CE, NewAddrSpaceCast); } else { DXASSERT(0, "not implemented"); } } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) { IRBuilder<> Builder(GEP); ReplaceMultiDimGEP(U, OneDim, Builder); GEP->eraseFromParent(); } else { DXASSERT(0, "not implemented"); } } } Type *MultiDimArrayToOneDimArray::lowerType(Type *Ty) { ArrayType *AT = cast<ArrayType>(Ty); unsigned arraySize = AT->getNumElements(); Type *EltTy = AT->getElementType(); // support multi level of array while (EltTy->isArrayTy()) { ArrayType *ElAT = cast<ArrayType>(EltTy); arraySize *= ElAT->getNumElements(); EltTy = ElAT->getElementType(); } return ArrayType::get(EltTy, arraySize); } void FlattenMultiDimConstArray(Constant *V, std::vector<Constant *> &Elts) { if (!V->getType()->isArrayTy()) { Elts.emplace_back(V); } else { ArrayType *AT = cast<ArrayType>(V->getType()); for (unsigned i = 0; i < AT->getNumElements(); i++) { FlattenMultiDimConstArray(V->getAggregateElement(i), Elts); } } } Constant *MultiDimArrayToOneDimArray::lowerInitVal(Constant *InitVal, Type *NewTy) { if (InitVal) { // MultiDim array init should be done by store. if (isa<ConstantAggregateZero>(InitVal)) InitVal = ConstantAggregateZero::get(NewTy); else if (isa<UndefValue>(InitVal)) InitVal = UndefValue::get(NewTy); else { std::vector<Constant *> Elts; FlattenMultiDimConstArray(InitVal, Elts); InitVal = ConstantArray::get(cast<ArrayType>(NewTy), Elts); } } else { InitVal = UndefValue::get(NewTy); } return InitVal; } } // namespace char MultiDimArrayToOneDimArray::ID = 0; INITIALIZE_PASS(MultiDimArrayToOneDimArray, "multi-dim-one-dim", "Flatten multi-dim array into one-dim array", false, false) // Public interface to the SROA_Parameter_HLSL pass ModulePass *llvm::createMultiDimArrayToOneDimArrayPass() { return new MultiDimArrayToOneDimArray(); } //===----------------------------------------------------------------------===// // Lower resource into handle. //===----------------------------------------------------------------------===// namespace { class ResourceToHandle : public LowerTypePass { public: explicit ResourceToHandle() : LowerTypePass(ID) {} static char ID; // Pass identification, replacement for typeid protected: bool needToLower(Value *V) override; void lowerUseWithNewValue(Value *V, Value *NewV) override; Type *lowerType(Type *Ty) override; Constant *lowerInitVal(Constant *InitVal, Type *NewTy) override; StringRef getGlobalPrefix() override { return ".res"; } void initialize(Module &M) override; private: void ReplaceResourceWithHandle(Value *ResPtr, Value *HandlePtr); void ReplaceResourceGEPWithHandleGEP(Value *GEP, ArrayRef<Value *> idxList, Value *A, IRBuilder<> &Builder); void ReplaceResourceArrayWithHandleArray(Value *VA, Value *A); Type *m_HandleTy; HLModule *m_pHLM; bool m_bIsLib; }; void ResourceToHandle::initialize(Module &M) { DXASSERT(M.HasHLModule(), "require HLModule"); m_pHLM = &M.GetHLModule(); m_HandleTy = m_pHLM->GetOP()->GetHandleType(); m_bIsLib = m_pHLM->GetShaderModel()->IsLib(); } bool ResourceToHandle::needToLower(Value *V) { Type *Ty = V->getType()->getPointerElementType(); Ty = dxilutil::GetArrayEltTy(Ty); return (dxilutil::IsHLSLObjectType(Ty) && !HLModule::IsStreamOutputType(Ty)) && // Skip lib profile. 
!m_bIsLib; } Type *ResourceToHandle::lowerType(Type *Ty) { if ((dxilutil::IsHLSLObjectType(Ty) && !HLModule::IsStreamOutputType(Ty))) { return m_HandleTy; } ArrayType *AT = cast<ArrayType>(Ty); SmallVector<ArrayType *, 4> nestArrayTys; nestArrayTys.emplace_back(AT); Type *EltTy = AT->getElementType(); // support multi level of array while (EltTy->isArrayTy()) { ArrayType *ElAT = cast<ArrayType>(EltTy); nestArrayTys.emplace_back(ElAT); EltTy = ElAT->getElementType(); } return CreateNestArrayTy(m_HandleTy, nestArrayTys); } Constant *ResourceToHandle::lowerInitVal(Constant *InitVal, Type *NewTy) { DXASSERT(isa<UndefValue>(InitVal), "resource cannot have real init val"); return UndefValue::get(NewTy); } void ResourceToHandle::ReplaceResourceWithHandle(Value *ResPtr, Value *HandlePtr) { for (auto it = ResPtr->user_begin(); it != ResPtr->user_end();) { User *U = *(it++); if (LoadInst *LI = dyn_cast<LoadInst>(U)) { IRBuilder<> Builder(LI); Value *Handle = Builder.CreateLoad(HandlePtr); Type *ResTy = LI->getType(); // Used by createHandle or Store. for (auto ldIt = LI->user_begin(); ldIt != LI->user_end();) { User *ldU = *(ldIt++); if (StoreInst *SI = dyn_cast<StoreInst>(ldU)) { Value *TmpRes = HLModule::EmitHLOperationCall( Builder, HLOpcodeGroup::HLCast, (unsigned)HLCastOpcode::HandleToResCast, ResTy, {Handle}, *m_pHLM->GetModule()); SI->replaceUsesOfWith(LI, TmpRes); } else { CallInst *CI = cast<CallInst>(ldU); DXASSERT(hlsl::GetHLOpcodeGroupByName(CI->getCalledFunction()) == HLOpcodeGroup::HLCreateHandle, "must be createHandle"); CI->replaceAllUsesWith(Handle); CI->eraseFromParent(); } } LI->eraseFromParent(); } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) { Value *Res = SI->getValueOperand(); IRBuilder<> Builder(SI); // CreateHandle from Res. Value *Handle = HLModule::EmitHLOperationCall( Builder, HLOpcodeGroup::HLCreateHandle, /*opcode*/ 0, m_HandleTy, {Res}, *m_pHLM->GetModule()); // Store Handle to HandlePtr. Builder.CreateStore(Handle, HandlePtr); // Remove resource Store. SI->eraseFromParent(); } else if (U->user_empty() && isa<GEPOperator>(U)) { continue; } else { CallInst *CI = cast<CallInst>(U); IRBuilder<> Builder(CI); HLOpcodeGroup group = GetHLOpcodeGroupByName(CI->getCalledFunction()); // Allow user function to use res ptr as argument. 
if (group == HLOpcodeGroup::NotHL) { Value *TmpResPtr = Builder.CreateBitCast(HandlePtr, ResPtr->getType()); CI->replaceUsesOfWith(ResPtr, TmpResPtr); } else { DXASSERT(0, "invalid operation on resource"); } } } } void ResourceToHandle::ReplaceResourceGEPWithHandleGEP( Value *GEP, ArrayRef<Value *> idxList, Value *A, IRBuilder<> &Builder) { Value *newGEP = Builder.CreateGEP(A, idxList); Type *Ty = GEP->getType()->getPointerElementType(); if (Ty->isArrayTy()) { ReplaceResourceArrayWithHandleArray(GEP, newGEP); } else { DXASSERT(dxilutil::IsHLSLObjectType(Ty), "must be resource type here"); ReplaceResourceWithHandle(GEP, newGEP); } } void ResourceToHandle::ReplaceResourceArrayWithHandleArray(Value *VA, Value *A) { for (auto U = VA->user_begin(); U != VA->user_end();) { User *User = *(U++); if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) { IRBuilder<> Builder(GEP); SmallVector<Value *, 4> idxList(GEP->idx_begin(), GEP->idx_end()); ReplaceResourceGEPWithHandleGEP(GEP, idxList, A, Builder); GEP->eraseFromParent(); } else if (GEPOperator *GEPOp = dyn_cast<GEPOperator>(User)) { IRBuilder<> Builder(GEPOp->getContext()); SmallVector<Value *, 4> idxList(GEPOp->idx_begin(), GEPOp->idx_end()); ReplaceResourceGEPWithHandleGEP(GEPOp, idxList, A, Builder); } else { DXASSERT(0, "Array pointer should only used by GEP"); } } } void ResourceToHandle::lowerUseWithNewValue(Value *V, Value *NewV) { Type *Ty = V->getType()->getPointerElementType(); // Replace V with NewV. if (Ty->isArrayTy()) { ReplaceResourceArrayWithHandleArray(V, NewV); } else { ReplaceResourceWithHandle(V, NewV); } } } // namespace char ResourceToHandle::ID = 0; INITIALIZE_PASS(ResourceToHandle, "resource-handle", "Lower resource into handle", false, false) // Public interface to the ResourceToHandle pass ModulePass *llvm::createResourceToHandlePass() { return new ResourceToHandle(); }
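Editor's note (illustrative, not part of the file above): the lowering passes shown here ultimately rely on row-major index flattening — ReplaceMultiDimGEP rewrites each GEP level as ArrayIdx = ArrayIdx * dimSize + nextIdx, with each level's element count taken from the GEP type iterator. The following is a minimal standalone C++ sketch of just that arithmetic; the 2x3x4 shape and all names are made up for illustration and do not come from the DXC code base.

// Standalone sketch of the row-major flattening used by ReplaceMultiDimGEP.
// The 2x3x4 shape and all names here are illustrative only.
#include <cassert>
#include <cstddef>

int main() {
  int Multi[2][3][4] = {};
  int Flat[2 * 3 * 4] = {};

  for (std::size_t i = 0; i < 2; ++i)
    for (std::size_t j = 0; j < 3; ++j)
      for (std::size_t k = 0; k < 4; ++k) {
        int V = static_cast<int>(i * 100 + j * 10 + k);
        Multi[i][j][k] = V;
        // Same accumulation the pass emits at each array level:
        // ArrayIdx = ArrayIdx * dimSize + nextIdx, i.e. ((i * 3) + j) * 4 + k.
        std::size_t Linear = (i * 3 + j) * 4 + k;
        Flat[Linear] = V;
      }

  // Every multi-dimensional access maps to exactly one slot of the 1-D array.
  for (std::size_t i = 0; i < 2; ++i)
    for (std::size_t j = 0; j < 3; ++j)
      for (std::size_t k = 0; k < 4; ++k)
        assert(Multi[i][j][k] == Flat[(i * 3 + j) * 4 + k]);
  return 0;
}

Because the flattened index only depends on each level's element count, the pass needs no separate stride bookkeeping: the dimension sizes supplied by the GEP type iterator are sufficient.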
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/SampleProfile.cpp
//===- SampleProfile.cpp - Incorporate sample profiles into the IR --------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the SampleProfileLoader transformation. This pass // reads a profile file generated by a sampling profiler (e.g. Linux Perf - // http://perf.wiki.kernel.org/) and generates IR metadata to reflect the // profile information in the given profile. // // This pass generates branch weight annotations on the IR: // // - prof: Represents branch weights. This annotation is added to branches // to indicate the weights of each edge coming out of the branch. // The weight of each edge is the weight of the target block for // that edge. The weight of a block B is computed as the maximum // number of samples found in B. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/StringRef.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/PostDominators.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/DiagnosticInfo.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" #include "llvm/IR/InstIterator.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/MDBuilder.h" #include "llvm/IR/Metadata.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/ProfileData/SampleProfReader.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include <cctype> using namespace llvm; using namespace sampleprof; #define DEBUG_TYPE "sample-profile" #if 0 // HLSL Change Start // Command line option to specify the file to read samples from. This is // mainly used for debugging. static cl::opt<std::string> SampleProfileFile( "sample-profile-file", cl::init(""), cl::value_desc("filename"), cl::desc("Profile file loaded by -sample-profile"), cl::Hidden); static cl::opt<unsigned> SampleProfileMaxPropagateIterations( "sample-profile-max-propagate-iterations", cl::init(100), cl::desc("Maximum number of iterations to go through when propagating " "sample block/edge weights through the CFG.")); #else static const char SampleProfileFile[] = ""; static const unsigned SampleProfileMaxPropagateIterations = 100; #endif // HLSL Change Ends namespace { typedef DenseMap<BasicBlock *, unsigned> BlockWeightMap; typedef DenseMap<BasicBlock *, BasicBlock *> EquivalenceClassMap; typedef std::pair<BasicBlock *, BasicBlock *> Edge; typedef DenseMap<Edge, unsigned> EdgeWeightMap; typedef DenseMap<BasicBlock *, SmallVector<BasicBlock *, 8>> BlockEdgeMap; /// \brief Sample profile pass. /// /// This pass reads profile data from the file specified by /// -sample-profile-file and annotates every affected function with the /// profile information found in that file. 
class SampleProfileLoader : public FunctionPass { public: // Class identification, replacement for typeinfo static char ID; SampleProfileLoader(StringRef Name = SampleProfileFile) : FunctionPass(ID), DT(nullptr), PDT(nullptr), LI(nullptr), Ctx(nullptr), Reader(), Samples(nullptr), Filename(Name), ProfileIsValid(false) { initializeSampleProfileLoaderPass(*PassRegistry::getPassRegistry()); } bool doInitialization(Module &M) override; void dump() { Reader->dump(); } StringRef getPassName() const override { return "Sample profile pass"; } bool runOnFunction(Function &F) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesCFG(); AU.addRequired<LoopInfoWrapperPass>(); AU.addRequired<DominatorTreeWrapperPass>(); AU.addRequired<PostDominatorTree>(); } protected: unsigned getFunctionLoc(Function &F); bool emitAnnotations(Function &F); unsigned getInstWeight(Instruction &I); unsigned getBlockWeight(BasicBlock *BB); void printEdgeWeight(raw_ostream &OS, Edge E); void printBlockWeight(raw_ostream &OS, BasicBlock *BB); void printBlockEquivalence(raw_ostream &OS, BasicBlock *BB); bool computeBlockWeights(Function &F); void findEquivalenceClasses(Function &F); void findEquivalencesFor(BasicBlock *BB1, SmallVector<BasicBlock *, 8> Descendants, DominatorTreeBase<BasicBlock> *DomTree); void propagateWeights(Function &F); unsigned visitEdge(Edge E, unsigned *NumUnknownEdges, Edge *UnknownEdge); void buildEdges(Function &F); bool propagateThroughEdges(Function &F); /// \brief Line number for the function header. Used to compute absolute /// line numbers from the relative line numbers found in the profile. unsigned HeaderLineno; /// \brief Map basic blocks to their computed weights. /// /// The weight of a basic block is defined to be the maximum /// of all the instruction weights in that block. BlockWeightMap BlockWeights; /// \brief Map edges to their computed weights. /// /// Edge weights are computed by propagating basic block weights in /// SampleProfile::propagateWeights. EdgeWeightMap EdgeWeights; /// \brief Set of visited blocks during propagation. SmallPtrSet<BasicBlock *, 128> VisitedBlocks; /// \brief Set of visited edges during propagation. SmallSet<Edge, 128> VisitedEdges; /// \brief Equivalence classes for block weights. /// /// Two blocks BB1 and BB2 are in the same equivalence class if they /// dominate and post-dominate each other, and they are in the same loop /// nest. When this happens, the two blocks are guaranteed to execute /// the same number of times. EquivalenceClassMap EquivalenceClass; /// \brief Dominance, post-dominance and loop information. DominatorTree *DT; PostDominatorTree *PDT; LoopInfo *LI; /// \brief Predecessors for each basic block in the CFG. BlockEdgeMap Predecessors; /// \brief Successors for each basic block in the CFG. BlockEdgeMap Successors; /// \brief LLVM context holding the debug data we need. LLVMContext *Ctx; /// \brief Profile reader object. std::unique_ptr<SampleProfileReader> Reader; /// \brief Samples collected for the body of this function. FunctionSamples *Samples; /// \brief Name of the profile file to load. StringRef Filename; /// \brief Flag indicating whether the profile input loaded successfully. bool ProfileIsValid; }; } /// \brief Print the weight of edge \p E on stream \p OS. /// /// \param OS Stream to emit the output to. /// \param E Edge to print. 
void SampleProfileLoader::printEdgeWeight(raw_ostream &OS, Edge E) { OS << "weight[" << E.first->getName() << "->" << E.second->getName() << "]: " << EdgeWeights[E] << "\n"; } /// \brief Print the equivalence class of block \p BB on stream \p OS. /// /// \param OS Stream to emit the output to. /// \param BB Block to print. void SampleProfileLoader::printBlockEquivalence(raw_ostream &OS, BasicBlock *BB) { BasicBlock *Equiv = EquivalenceClass[BB]; OS << "equivalence[" << BB->getName() << "]: " << ((Equiv) ? EquivalenceClass[BB]->getName() : "NONE") << "\n"; } /// \brief Print the weight of block \p BB on stream \p OS. /// /// \param OS Stream to emit the output to. /// \param BB Block to print. void SampleProfileLoader::printBlockWeight(raw_ostream &OS, BasicBlock *BB) { OS << "weight[" << BB->getName() << "]: " << BlockWeights[BB] << "\n"; } /// \brief Get the weight for an instruction. /// /// The "weight" of an instruction \p Inst is the number of samples /// collected on that instruction at runtime. To retrieve it, we /// need to compute the line number of \p Inst relative to the start of its /// function. We use HeaderLineno to compute the offset. We then /// look up the samples collected for \p Inst using BodySamples. /// /// \param Inst Instruction to query. /// /// \returns The profiled weight of I. unsigned SampleProfileLoader::getInstWeight(Instruction &Inst) { DebugLoc DLoc = Inst.getDebugLoc(); if (!DLoc) return 0; unsigned Lineno = DLoc.getLine(); if (Lineno < HeaderLineno) return 0; const DILocation *DIL = DLoc; int LOffset = Lineno - HeaderLineno; unsigned Discriminator = DIL->getDiscriminator(); unsigned Weight = Samples->samplesAt(LOffset, Discriminator); DEBUG(dbgs() << " " << Lineno << "." << Discriminator << ":" << Inst << " (line offset: " << LOffset << "." << Discriminator << " - weight: " << Weight << ")\n"); return Weight; } /// \brief Compute the weight of a basic block. /// /// The weight of basic block \p BB is the maximum weight of all the /// instructions in BB. The weight of \p BB is computed and cached in /// the BlockWeights map. /// /// \param BB The basic block to query. /// /// \returns The computed weight of BB. unsigned SampleProfileLoader::getBlockWeight(BasicBlock *BB) { // If we've computed BB's weight before, return it. std::pair<BlockWeightMap::iterator, bool> Entry = BlockWeights.insert(std::make_pair(BB, 0)); if (!Entry.second) return Entry.first->second; // Otherwise, compute and cache BB's weight. unsigned Weight = 0; for (auto &I : BB->getInstList()) { unsigned InstWeight = getInstWeight(I); if (InstWeight > Weight) Weight = InstWeight; } Entry.first->second = Weight; return Weight; } /// \brief Compute and store the weights of every basic block. /// /// This populates the BlockWeights map by computing /// the weights of every basic block in the CFG. /// /// \param F The function to query. bool SampleProfileLoader::computeBlockWeights(Function &F) { bool Changed = false; DEBUG(dbgs() << "Block weights\n"); for (auto &BB : F) { unsigned Weight = getBlockWeight(&BB); Changed |= (Weight > 0); DEBUG(printBlockWeight(dbgs(), &BB)); } return Changed; } /// \brief Find equivalence classes for the given block. /// /// This finds all the blocks that are guaranteed to execute the same /// number of times as \p BB1. To do this, it traverses all the /// descendants of \p BB1 in the dominator or post-dominator tree. 
/// /// A block BB2 will be in the same equivalence class as \p BB1 if /// the following holds: /// /// 1- \p BB1 is a descendant of BB2 in the opposite tree. So, if BB2 /// is a descendant of \p BB1 in the dominator tree, then BB2 should /// dominate BB1 in the post-dominator tree. /// /// 2- Both BB2 and \p BB1 must be in the same loop. /// /// For every block BB2 that meets those two requirements, we set BB2's /// equivalence class to \p BB1. /// /// \param BB1 Block to check. /// \param Descendants Descendants of \p BB1 in either the dom or pdom tree. /// \param DomTree Opposite dominator tree. If \p Descendants is filled /// with blocks from \p BB1's dominator tree, then /// this is the post-dominator tree, and vice versa. void SampleProfileLoader::findEquivalencesFor( BasicBlock *BB1, SmallVector<BasicBlock *, 8> Descendants, DominatorTreeBase<BasicBlock> *DomTree) { for (auto *BB2 : Descendants) { bool IsDomParent = DomTree->dominates(BB2, BB1); bool IsInSameLoop = LI->getLoopFor(BB1) == LI->getLoopFor(BB2); if (BB1 != BB2 && VisitedBlocks.insert(BB2).second && IsDomParent && IsInSameLoop) { EquivalenceClass[BB2] = BB1; // If BB2 is heavier than BB1, make BB2 have the same weight // as BB1. // // Note that we don't worry about the opposite situation here // (when BB2 is lighter than BB1). We will deal with this // during the propagation phase. Right now, we just want to // make sure that BB1 has the largest weight of all the // members of its equivalence set. unsigned &BB1Weight = BlockWeights[BB1]; unsigned &BB2Weight = BlockWeights[BB2]; BB1Weight = std::max(BB1Weight, BB2Weight); } } } /// \brief Find equivalence classes. /// /// Since samples may be missing from blocks, we can fill in the gaps by setting /// the weights of all the blocks in the same equivalence class to the same /// weight. To compute the concept of equivalence, we use dominance and loop /// information. Two blocks B1 and B2 are in the same equivalence class if B1 /// dominates B2, B2 post-dominates B1 and both are in the same loop. /// /// \param F The function to query. void SampleProfileLoader::findEquivalenceClasses(Function &F) { SmallVector<BasicBlock *, 8> DominatedBBs; DEBUG(dbgs() << "\nBlock equivalence classes\n"); // Find equivalence sets based on dominance and post-dominance information. for (auto &BB : F) { BasicBlock *BB1 = &BB; // Compute BB1's equivalence class once. if (EquivalenceClass.count(BB1)) { DEBUG(printBlockEquivalence(dbgs(), BB1)); continue; } // By default, blocks are in their own equivalence class. EquivalenceClass[BB1] = BB1; // Traverse all the blocks dominated by BB1. We are looking for // every basic block BB2 such that: // // 1- BB1 dominates BB2. // 2- BB2 post-dominates BB1. // 3- BB1 and BB2 are in the same loop nest. // // If all those conditions hold, it means that BB2 is executed // as many times as BB1, so they are placed in the same equivalence // class by making BB2's equivalence class be BB1. DominatedBBs.clear(); DT->getDescendants(BB1, DominatedBBs); findEquivalencesFor(BB1, DominatedBBs, PDT->DT); // Repeat the same logic for all the blocks post-dominated by BB1. // We are looking for every basic block BB2 such that: // // 1- BB1 post-dominates BB2. // 2- BB2 dominates BB1. // 3- BB1 and BB2 are in the same loop nest. // // If all those conditions hold, BB2's equivalence class is BB1. 
DominatedBBs.clear(); PDT->getDescendants(BB1, DominatedBBs); findEquivalencesFor(BB1, DominatedBBs, DT); DEBUG(printBlockEquivalence(dbgs(), BB1)); } // Assign weights to equivalence classes. // // All the basic blocks in the same equivalence class will execute // the same number of times. Since we know that the head block in // each equivalence class has the largest weight, assign that weight // to all the blocks in that equivalence class. DEBUG(dbgs() << "\nAssign the same weight to all blocks in the same class\n"); for (auto &BI : F) { BasicBlock *BB = &BI; BasicBlock *EquivBB = EquivalenceClass[BB]; if (BB != EquivBB) BlockWeights[BB] = BlockWeights[EquivBB]; DEBUG(printBlockWeight(dbgs(), BB)); } } /// \brief Visit the given edge to decide if it has a valid weight. /// /// If \p E has not been visited before, we copy to \p UnknownEdge /// and increment the count of unknown edges. /// /// \param E Edge to visit. /// \param NumUnknownEdges Current number of unknown edges. /// \param UnknownEdge Set if E has not been visited before. /// /// \returns E's weight, if known. Otherwise, return 0. unsigned SampleProfileLoader::visitEdge(Edge E, unsigned *NumUnknownEdges, Edge *UnknownEdge) { if (!VisitedEdges.count(E)) { (*NumUnknownEdges)++; *UnknownEdge = E; return 0; } return EdgeWeights[E]; } /// \brief Propagate weights through incoming/outgoing edges. /// /// If the weight of a basic block is known, and there is only one edge /// with an unknown weight, we can calculate the weight of that edge. /// /// Similarly, if all the edges have a known count, we can calculate the /// count of the basic block, if needed. /// /// \param F Function to process. /// /// \returns True if new weights were assigned to edges or blocks. bool SampleProfileLoader::propagateThroughEdges(Function &F) { bool Changed = false; DEBUG(dbgs() << "\nPropagation through edges\n"); for (auto &BI : F) { BasicBlock *BB = &BI; // Visit all the predecessor and successor edges to determine // which ones have a weight assigned already. Note that it doesn't // matter that we only keep track of a single unknown edge. The // only case we are interested in handling is when only a single // edge is unknown (see setEdgeOrBlockWeight). for (unsigned i = 0; i < 2; i++) { unsigned TotalWeight = 0; unsigned NumUnknownEdges = 0; Edge UnknownEdge, SelfReferentialEdge; if (i == 0) { // First, visit all predecessor edges. for (auto *Pred : Predecessors[BB]) { Edge E = std::make_pair(Pred, BB); TotalWeight += visitEdge(E, &NumUnknownEdges, &UnknownEdge); if (E.first == E.second) SelfReferentialEdge = E; } } else { // On the second round, visit all successor edges. for (auto *Succ : Successors[BB]) { Edge E = std::make_pair(BB, Succ); TotalWeight += visitEdge(E, &NumUnknownEdges, &UnknownEdge); } } // After visiting all the edges, there are three cases that we // can handle immediately: // // - All the edge weights are known (i.e., NumUnknownEdges == 0). // In this case, we simply check that the sum of all the edges // is the same as BB's weight. If not, we change BB's weight // to match. Additionally, if BB had not been visited before, // we mark it visited. // // - Only one edge is unknown and BB has already been visited. // In this case, we can compute the weight of the edge by // subtracting the total block weight from all the known // edge weights. If the edges weight more than BB, then the // edge of the last remaining edge is set to zero. // // - There exists a self-referential edge and the weight of BB is // known. 
In this case, this edge can be based on BB's weight. // We add up all the other known edges and set the weight on // the self-referential edge as we did in the previous case. // // In any other case, we must continue iterating. Eventually, // all edges will get a weight, or iteration will stop when // it reaches SampleProfileMaxPropagateIterations. if (NumUnknownEdges <= 1) { unsigned &BBWeight = BlockWeights[BB]; if (NumUnknownEdges == 0) { // If we already know the weight of all edges, the weight of the // basic block can be computed. It should be no larger than the sum // of all edge weights. if (TotalWeight > BBWeight) { BBWeight = TotalWeight; Changed = true; DEBUG(dbgs() << "All edge weights for " << BB->getName() << " known. Set weight for block: "; printBlockWeight(dbgs(), BB);); } if (VisitedBlocks.insert(BB).second) Changed = true; } else if (NumUnknownEdges == 1 && VisitedBlocks.count(BB)) { // If there is a single unknown edge and the block has been // visited, then we can compute E's weight. if (BBWeight >= TotalWeight) EdgeWeights[UnknownEdge] = BBWeight - TotalWeight; else EdgeWeights[UnknownEdge] = 0; VisitedEdges.insert(UnknownEdge); Changed = true; DEBUG(dbgs() << "Set weight for edge: "; printEdgeWeight(dbgs(), UnknownEdge)); } } else if (SelfReferentialEdge.first && VisitedBlocks.count(BB)) { unsigned &BBWeight = BlockWeights[BB]; // We have a self-referential edge and the weight of BB is known. if (BBWeight >= TotalWeight) EdgeWeights[SelfReferentialEdge] = BBWeight - TotalWeight; else EdgeWeights[SelfReferentialEdge] = 0; VisitedEdges.insert(SelfReferentialEdge); Changed = true; DEBUG(dbgs() << "Set self-referential edge weight to: "; printEdgeWeight(dbgs(), SelfReferentialEdge)); } } } return Changed; } /// \brief Build in/out edge lists for each basic block in the CFG. /// /// We are interested in unique edges. If a block B1 has multiple /// edges to another block B2, we only add a single B1->B2 edge. void SampleProfileLoader::buildEdges(Function &F) { for (auto &BI : F) { BasicBlock *B1 = &BI; // Add predecessors for B1. SmallPtrSet<BasicBlock *, 16> Visited; if (!Predecessors[B1].empty()) llvm_unreachable("Found a stale predecessors list in a basic block."); for (pred_iterator PI = pred_begin(B1), PE = pred_end(B1); PI != PE; ++PI) { BasicBlock *B2 = *PI; if (Visited.insert(B2).second) Predecessors[B1].push_back(B2); } // Add successors for B1. Visited.clear(); if (!Successors[B1].empty()) llvm_unreachable("Found a stale successors list in a basic block."); for (succ_iterator SI = succ_begin(B1), SE = succ_end(B1); SI != SE; ++SI) { BasicBlock *B2 = *SI; if (Visited.insert(B2).second) Successors[B1].push_back(B2); } } } /// \brief Propagate weights into edges /// /// The following rules are applied to every block BB in the CFG: /// /// - If BB has a single predecessor/successor, then the weight /// of that edge is the weight of the block. /// /// - If all incoming or outgoing edges are known except one, and the /// weight of the block is already known, the weight of the unknown /// edge will be the weight of the block minus the sum of all the known /// edges. If the sum of all the known edges is larger than BB's weight, /// we set the unknown edge weight to zero. /// /// - If there is a self-referential edge, and the weight of the block is /// known, the weight for that edge is set to the weight of the block /// minus the weight of the other incoming edges to that block (if /// known). 
void SampleProfileLoader::propagateWeights(Function &F) { bool Changed = true; unsigned i = 0; // Add an entry count to the function using the samples gathered // at the function entry. F.setEntryCount(Samples->getHeadSamples()); // Before propagation starts, build, for each block, a list of // unique predecessors and successors. This is necessary to handle // identical edges in multiway branches. Since we visit all blocks and all // edges of the CFG, it is cleaner to build these lists once at the start // of the pass. buildEdges(F); // Propagate until we converge or we go past the iteration limit. while (Changed && i++ < SampleProfileMaxPropagateIterations) { Changed = propagateThroughEdges(F); } // Generate MD_prof metadata for every branch instruction using the // edge weights computed during propagation. DEBUG(dbgs() << "\nPropagation complete. Setting branch weights\n"); MDBuilder MDB(F.getContext()); for (auto &BI : F) { BasicBlock *BB = &BI; TerminatorInst *TI = BB->getTerminator(); if (TI->getNumSuccessors() == 1) continue; if (!isa<BranchInst>(TI) && !isa<SwitchInst>(TI)) continue; DEBUG(dbgs() << "\nGetting weights for branch at line " << TI->getDebugLoc().getLine() << ".\n"); SmallVector<unsigned, 4> Weights; bool AllWeightsZero = true; for (unsigned I = 0; I < TI->getNumSuccessors(); ++I) { BasicBlock *Succ = TI->getSuccessor(I); Edge E = std::make_pair(BB, Succ); unsigned Weight = EdgeWeights[E]; DEBUG(dbgs() << "\t"; printEdgeWeight(dbgs(), E)); Weights.push_back(Weight); if (Weight != 0) AllWeightsZero = false; } // Only set weights if there is at least one non-zero weight. // In any other case, let the analyzer set weights. if (!AllWeightsZero) { DEBUG(dbgs() << "SUCCESS. Found non-zero weights.\n"); TI->setMetadata(llvm::LLVMContext::MD_prof, MDB.createBranchWeights(Weights)); } else { DEBUG(dbgs() << "SKIPPED. All branch weights are zero.\n"); } } } /// \brief Get the line number for the function header. /// /// This looks up function \p F in the current compilation unit and /// retrieves the line number where the function is defined. This is /// line 0 for all the samples read from the profile file. Every line /// number is relative to this line. /// /// \param F Function object to query. /// /// \returns the line number where \p F is defined. If it returns 0, /// it means that there is no debug information available for \p F. unsigned SampleProfileLoader::getFunctionLoc(Function &F) { if (DISubprogram *S = getDISubprogram(&F)) return S->getLine(); // If could not find the start of \p F, emit a diagnostic to inform the user // about the missed opportunity. F.getContext().diagnose(DiagnosticInfoSampleProfile( "No debug information found in function " + F.getName() + ": Function profile not used", DS_Warning)); return 0; } /// \brief Generate branch weight metadata for all branches in \p F. /// /// Branch weights are computed out of instruction samples using a /// propagation heuristic. Propagation proceeds in 3 phases: /// /// 1- Assignment of block weights. All the basic blocks in the function /// are initial assigned the same weight as their most frequently /// executed instruction. /// /// 2- Creation of equivalence classes. Since samples may be missing from /// blocks, we can fill in the gaps by setting the weights of all the /// blocks in the same equivalence class to the same weight. To compute /// the concept of equivalence, we use dominance and loop information. 
/// Two blocks B1 and B2 are in the same equivalence class if B1 /// dominates B2, B2 post-dominates B1 and both are in the same loop. /// /// 3- Propagation of block weights into edges. This uses a simple /// propagation heuristic. The following rules are applied to every /// block BB in the CFG: /// /// - If BB has a single predecessor/successor, then the weight /// of that edge is the weight of the block. /// /// - If all the edges are known except one, and the weight of the /// block is already known, the weight of the unknown edge will /// be the weight of the block minus the sum of all the known /// edges. If the sum of all the known edges is larger than BB's weight, /// we set the unknown edge weight to zero. /// /// - If there is a self-referential edge, and the weight of the block is /// known, the weight for that edge is set to the weight of the block /// minus the weight of the other incoming edges to that block (if /// known). /// /// Since this propagation is not guaranteed to finalize for every CFG, we /// only allow it to proceed for a limited number of iterations (controlled /// by -sample-profile-max-propagate-iterations). /// /// FIXME: Try to replace this propagation heuristic with a scheme /// that is guaranteed to finalize. A work-list approach similar to /// the standard value propagation algorithm used by SSA-CCP might /// work here. /// /// Once all the branch weights are computed, we emit the MD_prof /// metadata on BB using the computed values for each of its branches. /// /// \param F The function to query. /// /// \returns true if \p F was modified. Returns false, otherwise. bool SampleProfileLoader::emitAnnotations(Function &F) { bool Changed = false; // Initialize invariants used during computation and propagation. HeaderLineno = getFunctionLoc(F); if (HeaderLineno == 0) return false; DEBUG(dbgs() << "Line number for the first instruction in " << F.getName() << ": " << HeaderLineno << "\n"); // Compute basic block weights. Changed |= computeBlockWeights(F); if (Changed) { // Find equivalence classes. findEquivalenceClasses(F); // Propagate weights to all edges. 
propagateWeights(F); } return Changed; } char SampleProfileLoader::ID = 0; INITIALIZE_PASS_BEGIN(SampleProfileLoader, "sample-profile", "Sample Profile loader", false, false) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(PostDominatorTree) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(AddDiscriminators) INITIALIZE_PASS_END(SampleProfileLoader, "sample-profile", "Sample Profile loader", false, false) bool SampleProfileLoader::doInitialization(Module &M) { auto ReaderOrErr = SampleProfileReader::create(Filename, M.getContext()); if (std::error_code EC = ReaderOrErr.getError()) { std::string Msg = "Could not open profile: " + EC.message(); M.getContext().diagnose(DiagnosticInfoSampleProfile(Filename.data(), Msg)); return false; } Reader = std::move(ReaderOrErr.get()); ProfileIsValid = (Reader->read() == sampleprof_error::success); return true; } FunctionPass *llvm::createSampleProfileLoaderPass() { return new SampleProfileLoader(SampleProfileFile); } FunctionPass *llvm::createSampleProfileLoaderPass(StringRef Name) { return new SampleProfileLoader(Name); } bool SampleProfileLoader::runOnFunction(Function &F) { if (!ProfileIsValid) return false; DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); PDT = &getAnalysis<PostDominatorTree>(); LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); Ctx = &F.getParent()->getContext(); Samples = Reader->getSamplesFor(F); if (!Samples->empty()) return emitAnnotations(F); return false; }
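Editor's note (illustrative, not part of SampleProfile.cpp): the core arithmetic in propagateThroughEdges is the single-unknown-edge rule — when a block's weight and all but one of its incident edge weights are known, the missing edge weight is the block weight minus the known sum, clamped at zero. Below is a minimal standalone C++ sketch of just that rule; the function name and the sample numbers are invented for illustration.

// Standalone sketch of the "single unknown edge" rule from
// SampleProfileLoader::propagateThroughEdges. Names and numbers are
// illustrative only.
#include <cassert>
#include <vector>

// Infer the weight of the one unvisited edge of a block whose own weight and
// remaining edge weights are already known. Mirrors the pass's
// "BBWeight >= TotalWeight ? BBWeight - TotalWeight : 0" computation.
static unsigned inferUnknownEdgeWeight(unsigned BlockWeight,
                                       const std::vector<unsigned> &KnownEdgeWeights) {
  unsigned KnownSum = 0;
  for (unsigned W : KnownEdgeWeights)
    KnownSum += W;
  return BlockWeight >= KnownSum ? BlockWeight - KnownSum : 0;
}

int main() {
  // A block sampled 100 times with known incoming edges of 30 and 50:
  // the remaining edge is inferred as 100 - 80 = 20.
  assert(inferUnknownEdgeWeight(100, {30, 50}) == 20);
  // If the known edges already exceed the block weight, clamp to zero rather
  // than producing a wrapped-around unsigned weight.
  assert(inferUnknownEdgeWeight(60, {40, 40}) == 0);
  return 0;
}

The clamp is what lets the fixed-point iteration terminate gracefully when sampled counts are inconsistent; without it, a noisy profile could drive edge weights to huge unsigned values and destabilize later branch-weight metadata.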
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/TailRecursionElimination.cpp
//===- TailRecursionElimination.cpp - Eliminate Tail Calls ----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file transforms calls of the current function (self recursion) followed
// by a return instruction with a branch to the entry of the function, creating
// a loop. This pass also implements the following extensions to the basic
// algorithm:
//
// 1. Trivial instructions between the call and return do not prevent the
//    transformation from taking place, though currently the analysis cannot
//    support moving any really useful instructions (only dead ones).
// 2. This pass transforms functions that are prevented from being tail
//    recursive by an associative and commutative expression to use an
//    accumulator variable, thus compiling the typical naive factorial or
//    'fib' implementation into efficient code.
// 3. TRE is performed if the function returns void, if the return
//    returns the result returned by the call, or if the function returns a
//    run-time constant on all exits from the function. It is possible, though
//    unlikely, that the return returns something else (like constant 0), and
//    can still be TRE'd. It can be TRE'd if ALL OTHER return instructions in
//    the function return the exact same value.
// 4. If it can prove that callees do not access their caller stack frame,
//    they are marked as eligible for tail call elimination (by the code
//    generator).
//
// There are several improvements that could be made:
//
// 1. If the function has any alloca instructions, these instructions will be
//    moved out of the entry block of the function, causing them to be
//    evaluated each time through the tail recursion. Safely keeping allocas
//    in the entry block requires analysis to prove that the tail-called
//    function does not read or write the stack object.
// 2. Tail recursion is only performed if the call immediately precedes the
//    return instruction. It's possible that there could be a jump between
//    the call and the return.
// 3. There can be intervening operations between the call and the return that
//    prevent the TRE from occurring. For example, there could be GEP's and
//    stores to memory that will not be read or written by the call. This
//    requires some substantial analysis (such as with DSA) to prove safe to
//    move ahead of the call, but doing so could allow many more TREs to be
//    performed, for example in TreeAdd/TreeAlloc from the treeadd benchmark.
// 4. The algorithm we use to detect if callees access their caller stack
//    frames is very primitive.
// //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/CFG.h" #include "llvm/Analysis/CaptureTracking.h" #include "llvm/Analysis/InlineCost.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/Loads.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/IR/CFG.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/DiagnosticInfo.h" #include "llvm/IR/Function.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/IR/ValueHandle.h" #include "llvm/Pass.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Local.h" using namespace llvm; #define DEBUG_TYPE "tailcallelim" STATISTIC(NumEliminated, "Number of tail calls removed"); STATISTIC(NumRetDuped, "Number of return duplicated"); STATISTIC(NumAccumAdded, "Number of accumulators introduced"); namespace { struct TailCallElim : public FunctionPass { const TargetTransformInfo *TTI; static char ID; // Pass identification, replacement for typeid TailCallElim() : FunctionPass(ID) { initializeTailCallElimPass(*PassRegistry::getPassRegistry()); } void getAnalysisUsage(AnalysisUsage &AU) const override; bool runOnFunction(Function &F) override; private: bool runTRE(Function &F); bool markTails(Function &F, bool &AllCallsAreTailCalls); CallInst *FindTRECandidate(Instruction *I, bool CannotTailCallElimCallsMarkedTail); bool EliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret, BasicBlock *&OldEntry, bool &TailCallsAreMarkedTail, SmallVectorImpl<PHINode *> &ArgumentPHIs, bool CannotTailCallElimCallsMarkedTail); bool FoldReturnAndProcessPred(BasicBlock *BB, ReturnInst *Ret, BasicBlock *&OldEntry, bool &TailCallsAreMarkedTail, SmallVectorImpl<PHINode *> &ArgumentPHIs, bool CannotTailCallElimCallsMarkedTail); bool ProcessReturningBlock(ReturnInst *RI, BasicBlock *&OldEntry, bool &TailCallsAreMarkedTail, SmallVectorImpl<PHINode *> &ArgumentPHIs, bool CannotTailCallElimCallsMarkedTail); bool CanMoveAboveCall(Instruction *I, CallInst *CI); Value *CanTransformAccumulatorRecursion(Instruction *I, CallInst *CI); }; } char TailCallElim::ID = 0; INITIALIZE_PASS_BEGIN(TailCallElim, "tailcallelim", "Tail Call Elimination", false, false) INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) INITIALIZE_PASS_END(TailCallElim, "tailcallelim", "Tail Call Elimination", false, false) // Public interface to the TailCallElimination pass FunctionPass *llvm::createTailCallEliminationPass() { return new TailCallElim(); } void TailCallElim::getAnalysisUsage(AnalysisUsage &AU) const { AU.addRequired<TargetTransformInfoWrapperPass>(); } /// \brief Scan the specified function for alloca instructions. /// If it contains any dynamic allocas, returns false. static bool CanTRE(Function &F) { // Because of PR962, we don't TRE dynamic allocas. 
for (auto &BB : F) { for (auto &I : BB) { if (AllocaInst *AI = dyn_cast<AllocaInst>(&I)) { if (!AI->isStaticAlloca()) return false; } } } return true; } bool TailCallElim::runOnFunction(Function &F) { if (skipOptnoneFunction(F)) return false; if (F.getFnAttribute("disable-tail-calls").getValueAsString() == "true") return false; bool AllCallsAreTailCalls = false; bool Modified = markTails(F, AllCallsAreTailCalls); if (AllCallsAreTailCalls) Modified |= runTRE(F); return Modified; } namespace { struct AllocaDerivedValueTracker { // Start at a root value and walk its use-def chain to mark calls that use the // value or a derived value in AllocaUsers, and places where it may escape in // EscapePoints. void walk(Value *Root) { SmallVector<Use *, 32> Worklist; SmallPtrSet<Use *, 32> Visited; auto AddUsesToWorklist = [&](Value *V) { for (auto &U : V->uses()) { if (!Visited.insert(&U).second) continue; Worklist.push_back(&U); } }; AddUsesToWorklist(Root); while (!Worklist.empty()) { Use *U = Worklist.pop_back_val(); Instruction *I = cast<Instruction>(U->getUser()); switch (I->getOpcode()) { case Instruction::Call: case Instruction::Invoke: { CallSite CS(I); bool IsNocapture = !CS.isCallee(U) && CS.doesNotCapture(CS.getArgumentNo(U)); callUsesLocalStack(CS, IsNocapture); if (IsNocapture) { // If the alloca-derived argument is passed in as nocapture, then it // can't propagate to the call's return. That would be capturing. continue; } break; } case Instruction::Load: { // The result of a load is not alloca-derived (unless an alloca has // otherwise escaped, but this is a local analysis). continue; } case Instruction::Store: { if (U->getOperandNo() == 0) EscapePoints.insert(I); continue; // Stores have no users to analyze. } case Instruction::BitCast: case Instruction::GetElementPtr: case Instruction::PHI: case Instruction::Select: case Instruction::AddrSpaceCast: break; default: EscapePoints.insert(I); break; } AddUsesToWorklist(I); } } void callUsesLocalStack(CallSite CS, bool IsNocapture) { // Add it to the list of alloca users. AllocaUsers.insert(CS.getInstruction()); // If it's nocapture then it can't capture this alloca. if (IsNocapture) return; // If it can write to memory, it can leak the alloca value. if (!CS.onlyReadsMemory()) EscapePoints.insert(CS.getInstruction()); } SmallPtrSet<Instruction *, 32> AllocaUsers; SmallPtrSet<Instruction *, 32> EscapePoints; }; } bool TailCallElim::markTails(Function &F, bool &AllCallsAreTailCalls) { if (F.callsFunctionThatReturnsTwice()) return false; AllCallsAreTailCalls = true; // The local stack holds all alloca instructions and all byval arguments. AllocaDerivedValueTracker Tracker; for (Argument &Arg : F.args()) { if (Arg.hasByValAttr()) Tracker.walk(&Arg); } for (auto &BB : F) { for (auto &I : BB) if (AllocaInst *AI = dyn_cast<AllocaInst>(&I)) Tracker.walk(AI); } bool Modified = false; // Track whether a block is reachable after an alloca has escaped. Blocks that // contain the escaping instruction will be marked as being visited without an // escaped alloca, since that is how the block began. enum VisitType { UNVISITED, UNESCAPED, ESCAPED }; DenseMap<BasicBlock *, VisitType> Visited; // We propagate the fact that an alloca has escaped from block to successor. // Visit the blocks that are propagating the escapedness first. To do this, we // maintain two worklists. 
SmallVector<BasicBlock *, 32> WorklistUnescaped, WorklistEscaped; // We may enter a block and visit it thinking that no alloca has escaped yet, // then see an escape point and go back around a loop edge and come back to // the same block twice. Because of this, we defer setting tail on calls when // we first encounter them in a block. Every entry in this list does not // statically use an alloca via use-def chain analysis, but may find an alloca // through other means if the block turns out to be reachable after an escape // point. SmallVector<CallInst *, 32> DeferredTails; BasicBlock *BB = &F.getEntryBlock(); VisitType Escaped = UNESCAPED; do { for (auto &I : *BB) { if (Tracker.EscapePoints.count(&I)) Escaped = ESCAPED; CallInst *CI = dyn_cast<CallInst>(&I); if (!CI || CI->isTailCall()) continue; if (CI->doesNotAccessMemory()) { // A call to a readnone function whose arguments are all things computed // outside this function can be marked tail. Even if you stored the // alloca address into a global, a readnone function can't load the // global anyhow. // // Note that this runs whether we know an alloca has escaped or not. If // it has, then we can't trust Tracker.AllocaUsers to be accurate. bool SafeToTail = true; for (auto &Arg : CI->arg_operands()) { if (isa<Constant>(Arg.getUser())) continue; if (Argument *A = dyn_cast<Argument>(Arg.getUser())) if (!A->hasByValAttr()) continue; SafeToTail = false; break; } if (SafeToTail) { emitOptimizationRemark( F.getContext(), "tailcallelim", F, CI->getDebugLoc(), "marked this readnone call a tail call candidate"); CI->setTailCall(); Modified = true; continue; } } if (Escaped == UNESCAPED && !Tracker.AllocaUsers.count(CI)) { DeferredTails.push_back(CI); } else { AllCallsAreTailCalls = false; } } for (auto *SuccBB : make_range(succ_begin(BB), succ_end(BB))) { auto &State = Visited[SuccBB]; if (State < Escaped) { State = Escaped; if (State == ESCAPED) WorklistEscaped.push_back(SuccBB); else WorklistUnescaped.push_back(SuccBB); } } if (!WorklistEscaped.empty()) { BB = WorklistEscaped.pop_back_val(); Escaped = ESCAPED; } else { BB = nullptr; while (!WorklistUnescaped.empty()) { auto *NextBB = WorklistUnescaped.pop_back_val(); if (Visited[NextBB] == UNESCAPED) { BB = NextBB; Escaped = UNESCAPED; break; } } } } while (BB); for (CallInst *CI : DeferredTails) { if (Visited[CI->getParent()] != ESCAPED) { // If the escape point was part way through the block, calls after the // escape point wouldn't have been put into DeferredTails. emitOptimizationRemark(F.getContext(), "tailcallelim", F, CI->getDebugLoc(), "marked this call a tail call candidate"); CI->setTailCall(); Modified = true; } else { AllCallsAreTailCalls = false; } } return Modified; } bool TailCallElim::runTRE(Function &F) { // If this function is a varargs function, we won't be able to PHI the args // right, so don't even try to convert it... if (F.getFunctionType()->isVarArg()) return false; TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); BasicBlock *OldEntry = nullptr; bool TailCallsAreMarkedTail = false; SmallVector<PHINode*, 8> ArgumentPHIs; bool MadeChange = false; // If false, we cannot perform TRE on tail calls marked with the 'tail' // attribute, because doing so would cause the stack size to increase (real // TRE would deallocate variable sized allocas, TRE doesn't). bool CanTRETailMarkedCall = CanTRE(F); // Change any tail recursive calls to loops. 
// // FIXME: The code generator produces really bad code when an 'escaping // alloca' is changed from being a static alloca to being a dynamic alloca. // Until this is resolved, disable this transformation if that would ever // happen. This bug is PR962. for (Function::iterator BBI = F.begin(), E = F.end(); BBI != E; /*in loop*/) { BasicBlock *BB = BBI++; // FoldReturnAndProcessPred may delete BB. if (ReturnInst *Ret = dyn_cast<ReturnInst>(BB->getTerminator())) { bool Change = ProcessReturningBlock(Ret, OldEntry, TailCallsAreMarkedTail, ArgumentPHIs, !CanTRETailMarkedCall); if (!Change && BB->getFirstNonPHIOrDbg() == Ret) Change = FoldReturnAndProcessPred(BB, Ret, OldEntry, TailCallsAreMarkedTail, ArgumentPHIs, !CanTRETailMarkedCall); MadeChange |= Change; } } // If we eliminated any tail recursions, it's possible that we inserted some // silly PHI nodes which just merge an initial value (the incoming operand) // with themselves. Check to see if we did and clean up our mess if so. This // occurs when a function passes an argument straight through to its tail // call. for (unsigned i = 0, e = ArgumentPHIs.size(); i != e; ++i) { PHINode *PN = ArgumentPHIs[i]; // If the PHI Node is a dynamic constant, replace it with the value it is. if (Value *PNV = SimplifyInstruction(PN, F.getParent()->getDataLayout())) { PN->replaceAllUsesWith(PNV); PN->eraseFromParent(); } } return MadeChange; } /// Return true if it is safe to move the specified /// instruction from after the call to before the call, assuming that all /// instructions between the call and this instruction are movable. /// bool TailCallElim::CanMoveAboveCall(Instruction *I, CallInst *CI) { // FIXME: We can move load/store/call/free instructions above the call if the // call does not mod/ref the memory location being processed. if (I->mayHaveSideEffects()) // This also handles volatile loads. return false; if (LoadInst *L = dyn_cast<LoadInst>(I)) { // Loads may always be moved above calls without side effects. if (CI->mayHaveSideEffects()) { // Non-volatile loads may be moved above a call with side effects if it // does not write to memory and the load provably won't trap. // FIXME: Writes to memory only matter if they may alias the pointer // being loaded from. if (CI->mayWriteToMemory() || !isSafeToLoadUnconditionally(L->getPointerOperand(), L, L->getAlignment())) return false; } } // Otherwise, if this is a side-effect free instruction, check to make sure // that it does not use the return value of the call. If it doesn't use the // return value of the call, it must only use things that are defined before // the call, or movable instructions between the call and the instruction // itself. for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) if (I->getOperand(i) == CI) return false; return true; } /// Return true if the specified value is the same when the return would exit /// as it was when the initial iteration of the recursive function was executed. /// /// We currently handle static constants and arguments that are not modified as /// part of the recursion. static bool isDynamicConstant(Value *V, CallInst *CI, ReturnInst *RI) { if (isa<Constant>(V)) return true; // Static constants are always dyn consts // Check to see if this is an immutable argument, if so, the value // will be available to initialize the accumulator. if (Argument *Arg = dyn_cast<Argument>(V)) { // Figure out which argument number this is... 
unsigned ArgNo = 0; Function *F = CI->getParent()->getParent(); for (Function::arg_iterator AI = F->arg_begin(); &*AI != Arg; ++AI) ++ArgNo; // If we are passing this argument into call as the corresponding // argument operand, then the argument is dynamically constant. // Otherwise, we cannot transform this function safely. if (CI->getArgOperand(ArgNo) == Arg) return true; } // Switch cases are always constant integers. If the value is being switched // on and the return is only reachable from one of its cases, it's // effectively constant. if (BasicBlock *UniquePred = RI->getParent()->getUniquePredecessor()) if (SwitchInst *SI = dyn_cast<SwitchInst>(UniquePred->getTerminator())) if (SI->getCondition() == V) return SI->getDefaultDest() != RI->getParent(); // Not a constant or immutable argument, we can't safely transform. return false; } /// Check to see if the function containing the specified tail call consistently /// returns the same runtime-constant value at all exit points except for /// IgnoreRI. If so, return the returned value. static Value *getCommonReturnValue(ReturnInst *IgnoreRI, CallInst *CI) { Function *F = CI->getParent()->getParent(); Value *ReturnedValue = nullptr; for (Function::iterator BBI = F->begin(), E = F->end(); BBI != E; ++BBI) { ReturnInst *RI = dyn_cast<ReturnInst>(BBI->getTerminator()); if (RI == nullptr || RI == IgnoreRI) continue; // We can only perform this transformation if the value returned is // evaluatable at the start of the initial invocation of the function, // instead of at the end of the evaluation. // Value *RetOp = RI->getOperand(0); if (!isDynamicConstant(RetOp, CI, RI)) return nullptr; if (ReturnedValue && RetOp != ReturnedValue) return nullptr; // Cannot transform if differing values are returned. ReturnedValue = RetOp; } return ReturnedValue; } /// If the specified instruction can be transformed using accumulator recursion /// elimination, return the constant which is the start of the accumulator /// value. Otherwise return null. Value *TailCallElim::CanTransformAccumulatorRecursion(Instruction *I, CallInst *CI) { if (!I->isAssociative() || !I->isCommutative()) return nullptr; assert(I->getNumOperands() == 2 && "Associative/commutative operations should have 2 args!"); // Exactly one operand should be the result of the call instruction. if ((I->getOperand(0) == CI && I->getOperand(1) == CI) || (I->getOperand(0) != CI && I->getOperand(1) != CI)) return nullptr; // The only user of this instruction we allow is a single return instruction. if (!I->hasOneUse() || !isa<ReturnInst>(I->user_back())) return nullptr; // Ok, now we have to check all of the other return instructions in this // function. If they return non-constants or differing values, then we cannot // transform the function safely. return getCommonReturnValue(cast<ReturnInst>(I->user_back()), CI); } static Instruction *FirstNonDbg(BasicBlock::iterator I) { while (isa<DbgInfoIntrinsic>(I)) ++I; return &*I; } CallInst* TailCallElim::FindTRECandidate(Instruction *TI, bool CannotTailCallElimCallsMarkedTail) { BasicBlock *BB = TI->getParent(); Function *F = BB->getParent(); if (&BB->front() == TI) // Make sure there is something before the terminator. return nullptr; // Scan backwards from the return, checking to see if there is a tail call in // this block. If so, set CI to it. 
CallInst *CI = nullptr; BasicBlock::iterator BBI = TI; while (true) { CI = dyn_cast<CallInst>(BBI); if (CI && CI->getCalledFunction() == F) break; if (BBI == BB->begin()) return nullptr; // Didn't find a potential tail call. --BBI; } // If this call is marked as a tail call, and if there are dynamic allocas in // the function, we cannot perform this optimization. if (CI->isTailCall() && CannotTailCallElimCallsMarkedTail) return nullptr; // As a special case, detect code like this: // double fabs(double f) { return __builtin_fabs(f); } // a 'fabs' call // and disable this xform in this case, because the code generator will // lower the call to fabs into inline code. if (BB == &F->getEntryBlock() && FirstNonDbg(BB->front()) == CI && FirstNonDbg(std::next(BB->begin())) == TI && CI->getCalledFunction() && !TTI->isLoweredToCall(CI->getCalledFunction())) { // A single-block function with just a call and a return. Check that // the arguments match. CallSite::arg_iterator I = CallSite(CI).arg_begin(), E = CallSite(CI).arg_end(); Function::arg_iterator FI = F->arg_begin(), FE = F->arg_end(); for (; I != E && FI != FE; ++I, ++FI) if (*I != &*FI) break; if (I == E && FI == FE) return nullptr; } return CI; } bool TailCallElim::EliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret, BasicBlock *&OldEntry, bool &TailCallsAreMarkedTail, SmallVectorImpl<PHINode *> &ArgumentPHIs, bool CannotTailCallElimCallsMarkedTail) { // If we are introducing accumulator recursion to eliminate operations after // the call instruction that are both associative and commutative, the initial // value for the accumulator is placed in this variable. If this value is set // then we actually perform accumulator recursion elimination instead of // simple tail recursion elimination. If the operation is an LLVM instruction // (eg: "add") then it is recorded in AccumulatorRecursionInstr. If not, then // we are handling the case when the return instruction returns a constant C // which is different to the constant returned by other return instructions // (which is recorded in AccumulatorRecursionEliminationInitVal). This is a // special case of accumulator recursion, the operation being "return C". Value *AccumulatorRecursionEliminationInitVal = nullptr; Instruction *AccumulatorRecursionInstr = nullptr; // Ok, we found a potential tail call. We can currently only transform the // tail call if all of the instructions between the call and the return are // movable to above the call itself, leaving the call next to the return. // Check that this is the case now. BasicBlock::iterator BBI = CI; for (++BBI; &*BBI != Ret; ++BBI) { if (CanMoveAboveCall(BBI, CI)) continue; // If we can't move the instruction above the call, it might be because it // is an associative and commutative operation that could be transformed // using accumulator recursion elimination. Check to see if this is the // case, and if so, remember the initial accumulator value for later. if ((AccumulatorRecursionEliminationInitVal = CanTransformAccumulatorRecursion(BBI, CI))) { // Yes, this is accumulator recursion. Remember which instruction // accumulates. AccumulatorRecursionInstr = BBI; } else { return false; // Otherwise, we cannot eliminate the tail recursion! } } // We can only transform call/return pairs that either ignore the return value // of the call and return void, ignore the value of the call and return a // constant, return the value returned by the tail call, or that are being // accumulator recursion variable eliminated. 
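  // Illustrative sketch (not from this file) of the return shapes accepted
  // below, assuming the recursive tail call is "%c = call i32 @f(...)":
  //
  //   ret void       ; call result ignored, nothing to reconcile
  //   ret i32 %c     ; returns exactly what the recursive call returned
  //   ret i32 undef  ; undef is always acceptable
  //   ret i32 %a     ; %a = add i32 %c, %x  -> accumulator recursion proper
  //   ret i32 7      ; constant differing from the other exits -> treated as
  //                  ; accumulator recursion with the pseudo-op "return 7",
  //                  ; provided the other exits agree on one value that is
  //                  ; already known at function entry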
if (Ret->getNumOperands() == 1 && Ret->getReturnValue() != CI && !isa<UndefValue>(Ret->getReturnValue()) && AccumulatorRecursionEliminationInitVal == nullptr && !getCommonReturnValue(nullptr, CI)) { // One case remains that we are able to handle: the current return // instruction returns a constant, and all other return instructions // return a different constant. if (!isDynamicConstant(Ret->getReturnValue(), CI, Ret)) return false; // Current return instruction does not return a constant. // Check that all other return instructions return a common constant. If // so, record it in AccumulatorRecursionEliminationInitVal. AccumulatorRecursionEliminationInitVal = getCommonReturnValue(Ret, CI); if (!AccumulatorRecursionEliminationInitVal) return false; } BasicBlock *BB = Ret->getParent(); Function *F = BB->getParent(); emitOptimizationRemark(F->getContext(), "tailcallelim", *F, CI->getDebugLoc(), "transforming tail recursion to loop"); // OK! We can transform this tail call. If this is the first one found, // create the new entry block, allowing us to branch back to the old entry. if (!OldEntry) { OldEntry = &F->getEntryBlock(); BasicBlock *NewEntry = BasicBlock::Create(F->getContext(), "", F, OldEntry); NewEntry->takeName(OldEntry); OldEntry->setName("tailrecurse"); BranchInst::Create(OldEntry, NewEntry); // If this tail call is marked 'tail' and if there are any allocas in the // entry block, move them up to the new entry block. TailCallsAreMarkedTail = CI->isTailCall(); if (TailCallsAreMarkedTail) // Move all fixed sized allocas from OldEntry to NewEntry. for (BasicBlock::iterator OEBI = OldEntry->begin(), E = OldEntry->end(), NEBI = NewEntry->begin(); OEBI != E; ) if (AllocaInst *AI = dyn_cast<AllocaInst>(OEBI++)) if (isa<ConstantInt>(AI->getArraySize())) AI->moveBefore(NEBI); // Now that we have created a new block, which jumps to the entry // block, insert a PHI node for each argument of the function. // For now, we initialize each PHI to only have the real arguments // which are passed in. Instruction *InsertPos = OldEntry->begin(); for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I) { PHINode *PN = PHINode::Create(I->getType(), 2, I->getName() + ".tr", InsertPos); I->replaceAllUsesWith(PN); // Everyone use the PHI node now! PN->addIncoming(I, NewEntry); ArgumentPHIs.push_back(PN); } } // If this function has self recursive calls in the tail position where some // are marked tail and some are not, only transform one flavor or another. We // have to choose whether we move allocas in the entry block to the new entry // block or not, so we can't make a good choice for both. NOTE: We could do // slightly better here in the case that the function has no entry block // allocas. if (TailCallsAreMarkedTail && !CI->isTailCall()) return false; // Ok, now that we know we have a pseudo-entry block WITH all of the // required PHI nodes, add entries into the PHI node for the actual // parameters passed into the tail-recursive call. for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) ArgumentPHIs[i]->addIncoming(CI->getArgOperand(i), BB); // If we are introducing an accumulator variable to eliminate the recursion, // do so now. Note that we _know_ that no subsequent tail recursion // eliminations will happen on this function because of the way the // accumulator recursion predicate is set up. // if (AccumulatorRecursionEliminationInitVal) { Instruction *AccRecInstr = AccumulatorRecursionInstr; // Start by inserting a new PHI node for the accumulator. 
pred_iterator PB = pred_begin(OldEntry), PE = pred_end(OldEntry); PHINode *AccPN = PHINode::Create(AccumulatorRecursionEliminationInitVal->getType(), std::distance(PB, PE) + 1, "accumulator.tr", OldEntry->begin()); // Loop over all of the predecessors of the tail recursion block. For the // real entry into the function we seed the PHI with the initial value, // computed earlier. For any other existing branches to this block (due to // other tail recursions eliminated) the accumulator is not modified. // Because we haven't added the branch in the current block to OldEntry yet, // it will not show up as a predecessor. for (pred_iterator PI = PB; PI != PE; ++PI) { BasicBlock *P = *PI; if (P == &F->getEntryBlock()) AccPN->addIncoming(AccumulatorRecursionEliminationInitVal, P); else AccPN->addIncoming(AccPN, P); } if (AccRecInstr) { // Add an incoming argument for the current block, which is computed by // our associative and commutative accumulator instruction. AccPN->addIncoming(AccRecInstr, BB); // Next, rewrite the accumulator recursion instruction so that it does not // use the result of the call anymore, instead, use the PHI node we just // inserted. AccRecInstr->setOperand(AccRecInstr->getOperand(0) != CI, AccPN); } else { // Add an incoming argument for the current block, which is just the // constant returned by the current return instruction. AccPN->addIncoming(Ret->getReturnValue(), BB); } // Finally, rewrite any return instructions in the program to return the PHI // node instead of the "initval" that they do currently. This loop will // actually rewrite the return value we are destroying, but that's ok. for (Function::iterator BBI = F->begin(), E = F->end(); BBI != E; ++BBI) if (ReturnInst *RI = dyn_cast<ReturnInst>(BBI->getTerminator())) RI->setOperand(0, AccPN); ++NumAccumAdded; } // Now that all of the PHI nodes are in place, remove the call and // ret instructions, replacing them with an unconditional branch. BranchInst *NewBI = BranchInst::Create(OldEntry, Ret); NewBI->setDebugLoc(CI->getDebugLoc()); BB->getInstList().erase(Ret); // Remove return. BB->getInstList().erase(CI); // Remove call. ++NumEliminated; return true; } bool TailCallElim::FoldReturnAndProcessPred(BasicBlock *BB, ReturnInst *Ret, BasicBlock *&OldEntry, bool &TailCallsAreMarkedTail, SmallVectorImpl<PHINode *> &ArgumentPHIs, bool CannotTailCallElimCallsMarkedTail) { bool Change = false; // If the return block contains nothing but the return and PHI's, // there might be an opportunity to duplicate the return in its // predecessors and perform TRC there. Look for predecessors that end // in unconditional branch and recursive call(s). SmallVector<BranchInst*, 8> UncondBranchPreds; for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) { BasicBlock *Pred = *PI; TerminatorInst *PTI = Pred->getTerminator(); if (BranchInst *BI = dyn_cast<BranchInst>(PTI)) if (BI->isUnconditional()) UncondBranchPreds.push_back(BI); } while (!UncondBranchPreds.empty()) { BranchInst *BI = UncondBranchPreds.pop_back_val(); BasicBlock *Pred = BI->getParent(); if (CallInst *CI = FindTRECandidate(BI, CannotTailCallElimCallsMarkedTail)){ DEBUG(dbgs() << "FOLDING: " << *BB << "INTO UNCOND BRANCH PRED: " << *Pred); ReturnInst *RI = FoldReturnIntoUncondBranch(Ret, BB, Pred); // Cleanup: if all predecessors of BB have been eliminated by // FoldReturnIntoUncondBranch, delete it. It is important to empty it, // because the ret instruction in there is still using a value which // EliminateRecursiveTailCall will attempt to remove. 
      if (!BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
        BB->eraseFromParent();

      EliminateRecursiveTailCall(CI, RI, OldEntry, TailCallsAreMarkedTail,
                                 ArgumentPHIs,
                                 CannotTailCallElimCallsMarkedTail);
      ++NumRetDuped;
      Change = true;
    }
  }

  return Change;
}

bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
                                         bool &TailCallsAreMarkedTail,
                                         SmallVectorImpl<PHINode *> &ArgumentPHIs,
                                         bool CannotTailCallElimCallsMarkedTail) {
  CallInst *CI = FindTRECandidate(Ret, CannotTailCallElimCallsMarkedTail);
  if (!CI)
    return false;

  return EliminateRecursiveTailCall(CI, Ret, OldEntry, TailCallsAreMarkedTail,
                                    ArgumentPHIs,
                                    CannotTailCallElimCallsMarkedTail);
}
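
// ---------------------------------------------------------------------------
// Illustrative sketch in plain C++ (not part of the pass above, names
// invented): the source-level effect of the accumulator-recursion handling.
// `factRec` is not a true tail call because of the pending multiply; the pass
// effectively rewrites it into the loop shape of `factLoop`, seeding the
// accumulator with the common return value of the non-recursive exit (1).
// ---------------------------------------------------------------------------
static unsigned long long factRec(unsigned long long N) {
  if (N == 0)
    return 1;                  // the common constant the other exit returns
  return N * factRec(N - 1);   // multiply is associative and commutative
}

static unsigned long long factLoop(unsigned long long N) {
  unsigned long long Acc = 1;  // accumulator PHI, seeded with the init value
  while (N != 0) {             // "tailrecurse": the old entry block
    Acc *= N;                  // the accumulator instruction, now feeding Acc
    N -= 1;                    // argument PHI fed by the recursive call's arg
  }
  return Acc;                  // every return now yields the accumulator
}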
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/JumpThreading.cpp
//===- JumpThreading.cpp - Thread control through conditional blocks ------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the Jump Threading pass. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/CFG.h" #include "llvm/Analysis/ConstantFolding.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/LazyValueInfo.h" #include "llvm/Analysis/Loads.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Metadata.h" #include "llvm/IR/ValueHandle.h" #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/SSAUpdater.h" using namespace llvm; #define DEBUG_TYPE "jump-threading" STATISTIC(NumThreads, "Number of jumps threaded"); STATISTIC(NumFolds, "Number of terminators folded"); STATISTIC(NumDupes, "Number of branch blocks duplicated to eliminate phi"); #if 0 // HLSL Change Starts - option pending static cl::opt<unsigned> BBDuplicateThreshold("jump-threading-threshold", cl::desc("Max block size to duplicate for jump threading"), cl::init(6), cl::Hidden); #else static const unsigned BBDuplicateThreshold = 6; #endif // HLSL Change Ends namespace { // These are at global scope so static functions can use them too. typedef SmallVectorImpl<std::pair<Constant*, BasicBlock*> > PredValueInfo; typedef SmallVector<std::pair<Constant*, BasicBlock*>, 8> PredValueInfoTy; // This is used to keep track of what kind of constant we're currently hoping // to find. enum ConstantPreference { WantInteger, WantBlockAddress }; /// This pass performs 'jump threading', which looks at blocks that have /// multiple predecessors and multiple successors. If one or more of the /// predecessors of the block can be proven to always jump to one of the /// successors, we forward the edge from the predecessor to the successor by /// duplicating the contents of this block. /// /// An example of when this can occur is code like this: /// /// if () { ... /// X = 4; /// } /// if (X < 3) { /// /// In this case, the unconditional branch at the end of the first if can be /// revectored to the false side of the second if. /// class JumpThreading : public FunctionPass { TargetLibraryInfo *TLI; LazyValueInfo *LVI; #ifdef NDEBUG SmallPtrSet<BasicBlock*, 16> LoopHeaders; #else SmallSet<AssertingVH<BasicBlock>, 16> LoopHeaders; #endif DenseSet<std::pair<Value*, BasicBlock*> > RecursionSet; unsigned BBDupThreshold; // RAII helper for updating the recursion stack. 
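  // Condensed sketch of how the helper below is used (see
  // ComputeValueKnownInPredecessors): the (value, block) pair is registered
  // before recursing and dropped again on every return path, so the use-def
  // walk cannot revisit a pair that is already being computed:
  //
  //   if (!RecursionSet.insert(std::make_pair(V, BB)).second)
  //     return false;                                   // already in progress
  //   RecursionSetRemover remover(RecursionSet, std::make_pair(V, BB));
  //   ... recurse into the operands of V ...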
struct RecursionSetRemover { DenseSet<std::pair<Value*, BasicBlock*> > &TheSet; std::pair<Value*, BasicBlock*> ThePair; RecursionSetRemover(DenseSet<std::pair<Value*, BasicBlock*> > &S, std::pair<Value*, BasicBlock*> P) : TheSet(S), ThePair(P) { } ~RecursionSetRemover() { TheSet.erase(ThePair); } }; public: static char ID; // Pass identification JumpThreading(int T = -1) : FunctionPass(ID) { BBDupThreshold = (T == -1) ? BBDuplicateThreshold : unsigned(T); initializeJumpThreadingPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function &F) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<LazyValueInfo>(); AU.addPreserved<LazyValueInfo>(); AU.addRequired<TargetLibraryInfoWrapperPass>(); } void FindLoopHeaders(Function &F); bool ProcessBlock(BasicBlock *BB); bool ThreadEdge(BasicBlock *BB, const SmallVectorImpl<BasicBlock*> &PredBBs, BasicBlock *SuccBB); bool DuplicateCondBranchOnPHIIntoPred(BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs); bool ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB, PredValueInfo &Result, ConstantPreference Preference, Instruction *CxtI = nullptr); bool ProcessThreadableEdges(Value *Cond, BasicBlock *BB, ConstantPreference Preference, Instruction *CxtI = nullptr); bool ProcessBranchOnPHI(PHINode *PN); bool ProcessBranchOnXOR(BinaryOperator *BO); bool SimplifyPartiallyRedundantLoad(LoadInst *LI); bool TryToUnfoldSelect(CmpInst *CondCmp, BasicBlock *BB); }; } char JumpThreading::ID = 0; INITIALIZE_PASS_BEGIN(JumpThreading, "jump-threading", "Jump Threading", false, false) INITIALIZE_PASS_DEPENDENCY(LazyValueInfo) INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) INITIALIZE_PASS_END(JumpThreading, "jump-threading", "Jump Threading", false, false) // Public interface to the Jump Threading pass FunctionPass *llvm::createJumpThreadingPass(int Threshold) { return new JumpThreading(Threshold); } /// runOnFunction - Top level algorithm. /// bool JumpThreading::runOnFunction(Function &F) { if (skipOptnoneFunction(F)) return false; DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n"); TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); LVI = &getAnalysis<LazyValueInfo>(); // Remove unreachable blocks from function as they may result in infinite // loop. We do threading if we found something profitable. Jump threading a // branch can create other opportunities. If these opportunities form a cycle // i.e. if any jump treading is undoing previous threading in the path, then // we will loop forever. We take care of this issue by not jump threading for // back edges. This works for normal cases but not for unreachable blocks as // they may have cycle with no back edge. removeUnreachableBlocks(F); FindLoopHeaders(F); bool Changed, EverChanged = false; do { Changed = false; for (Function::iterator I = F.begin(), E = F.end(); I != E;) { BasicBlock *BB = I; // Thread all of the branches we can over this block. while (ProcessBlock(BB)) Changed = true; ++I; // If the block is trivially dead, zap it. This eliminates the successor // edges which simplifies the CFG. if ((pred_empty(BB) // HLSL change begin - delete self loop. || BB->getSinglePredecessor() == BB // HLSL change end. 
) && BB != &BB->getParent()->getEntryBlock()) { DEBUG(dbgs() << " JT: Deleting dead block '" << BB->getName() << "' with terminator: " << *BB->getTerminator() << '\n'); LoopHeaders.erase(BB); LVI->eraseBlock(BB); DeleteDeadBlock(BB); Changed = true; continue; } BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator()); // Can't thread an unconditional jump, but if the block is "almost // empty", we can replace uses of it with uses of the successor and make // this dead. if (BI && BI->isUnconditional() && BB != &BB->getParent()->getEntryBlock() && // If the terminator is the only non-phi instruction, try to nuke it. BB->getFirstNonPHIOrDbg()->isTerminator()) { // Since TryToSimplifyUncondBranchFromEmptyBlock may delete the // block, we have to make sure it isn't in the LoopHeaders set. We // reinsert afterward if needed. bool ErasedFromLoopHeaders = LoopHeaders.erase(BB); BasicBlock *Succ = BI->getSuccessor(0); // FIXME: It is always conservatively correct to drop the info // for a block even if it doesn't get erased. This isn't totally // awesome, but it allows us to use AssertingVH to prevent nasty // dangling pointer issues within LazyValueInfo. LVI->eraseBlock(BB); if (TryToSimplifyUncondBranchFromEmptyBlock(BB)) { Changed = true; // If we deleted BB and BB was the header of a loop, then the // successor is now the header of the loop. BB = Succ; } if (ErasedFromLoopHeaders) LoopHeaders.insert(BB); } } EverChanged |= Changed; } while (Changed); LoopHeaders.clear(); return EverChanged; } /// getJumpThreadDuplicationCost - Return the cost of duplicating this block to /// thread across it. Stop scanning the block when passing the threshold. static unsigned getJumpThreadDuplicationCost(const BasicBlock *BB, unsigned Threshold) { /// Ignore PHI nodes, these will be flattened when duplication happens. BasicBlock::const_iterator I = BB->getFirstNonPHI(); // FIXME: THREADING will delete values that are just used to compute the // branch, so they shouldn't count against the duplication cost. // Sum up the cost of each instruction until we get to the terminator. Don't // include the terminator because the copy won't include it. unsigned Size = 0; for (; !isa<TerminatorInst>(I); ++I) { // Stop scanning the block if we've reached the threshold. if (Size > Threshold) return Size; // Debugger intrinsics don't incur code size. if (isa<DbgInfoIntrinsic>(I)) continue; // If this is a pointer->pointer bitcast, it is free. if (isa<BitCastInst>(I) && I->getType()->isPointerTy()) continue; // All other instructions count for at least one unit. ++Size; // Calls are more expensive. If they are non-intrinsic calls, we model them // as having cost of 4. If they are a non-vector intrinsic, we model them // as having cost of 2 total, and if they are a vector intrinsic, we model // them as having cost 1. if (const CallInst *CI = dyn_cast<CallInst>(I)) { if (CI->cannotDuplicate()) // Blocks with NoDuplicate are modelled as having infinite cost, so they // are never duplicated. return ~0U; else if (!isa<IntrinsicInst>(CI)) Size += 3; else if (!CI->getType()->isVectorTy()) Size += 1; } } // Threading through a switch statement is particularly profitable. If this // block ends in a switch, decrease its cost to make it more likely to happen. if (isa<SwitchInst>(I)) Size = Size > 6 ? Size-6 : 0; // The same holds for indirect branches, but slightly more so. if (isa<IndirectBrInst>(I)) Size = Size > 8 ? 
Size-8 : 0; return Size; } /// FindLoopHeaders - We do not want jump threading to turn proper loop /// structures into irreducible loops. Doing this breaks up the loop nesting /// hierarchy and pessimizes later transformations. To prevent this from /// happening, we first have to find the loop headers. Here we approximate this /// by finding targets of backedges in the CFG. /// /// Note that there definitely are cases when we want to allow threading of /// edges across a loop header. For example, threading a jump from outside the /// loop (the preheader) to an exit block of the loop is definitely profitable. /// It is also almost always profitable to thread backedges from within the loop /// to exit blocks, and is often profitable to thread backedges to other blocks /// within the loop (forming a nested loop). This simple analysis is not rich /// enough to track all of these properties and keep it up-to-date as the CFG /// mutates, so we don't allow any of these transformations. /// void JumpThreading::FindLoopHeaders(Function &F) { SmallVector<std::pair<const BasicBlock*,const BasicBlock*>, 32> Edges; FindFunctionBackedges(F, Edges); for (unsigned i = 0, e = Edges.size(); i != e; ++i) LoopHeaders.insert(const_cast<BasicBlock*>(Edges[i].second)); } /// getKnownConstant - Helper method to determine if we can thread over a /// terminator with the given value as its condition, and if so what value to /// use for that. What kind of value this is depends on whether we want an /// integer or a block address, but an undef is always accepted. /// Returns null if Val is null or not an appropriate constant. static Constant *getKnownConstant(Value *Val, ConstantPreference Preference) { if (!Val) return nullptr; // Undef is "known" enough. if (UndefValue *U = dyn_cast<UndefValue>(Val)) return U; if (Preference == WantBlockAddress) return dyn_cast<BlockAddress>(Val->stripPointerCasts()); return dyn_cast<ConstantInt>(Val); } /// ComputeValueKnownInPredecessors - Given a basic block BB and a value V, see /// if we can infer that the value is a known ConstantInt/BlockAddress or undef /// in any of our predecessors. If so, return the known list of value and pred /// BB in the result vector. /// /// This returns true if there were any known values. /// bool JumpThreading:: ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB, PredValueInfo &Result, ConstantPreference Preference, Instruction *CxtI) { // This method walks up use-def chains recursively. Because of this, we could // get into an infinite loop going around loops in the use-def chain. To // prevent this, keep track of what (value, block) pairs we've already visited // and terminate the search if we loop back to them if (!RecursionSet.insert(std::make_pair(V, BB)).second) return false; // An RAII help to remove this pair from the recursion set once the recursion // stack pops back out again. RecursionSetRemover remover(RecursionSet, std::make_pair(V, BB)); // If V is a constant, then it is known in all predecessors. if (Constant *KC = getKnownConstant(V, Preference)) { for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) Result.push_back(std::make_pair(KC, *PI)); return true; } // If V is a non-instruction value, or an instruction in a different block, // then it can't be derived from a PHI. Instruction *I = dyn_cast<Instruction>(V); if (!I || I->getParent() != BB) { // Okay, if this is a live-in value, see if it has a known value at the end // of any of our predecessors. 
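    // Illustrative sketch (not from this file): V is live-in here, but
    // LazyValueInfo may still pin it down on a particular incoming edge, e.g.
    //
    //   pred:
    //     %cmp = icmp eq i32 %x, 7
    //     br i1 %cmp, label %bb, label %other
    //   bb:                          ; %x is live-in to %bb, yet on the edge
    //     ...                        ; pred -> bb it is known to be 7, so
    //                                ; (7, pred) can be recorded in Result.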
// // FIXME: This should be an edge property, not a block end property. /// TODO: Per PR2563, we could infer value range information about a /// predecessor based on its terminator. // // FIXME: change this to use the more-rich 'getPredicateOnEdge' method if // "I" is a non-local compare-with-a-constant instruction. This would be // able to handle value inequalities better, for example if the compare is // "X < 4" and "X < 3" is known true but "X < 4" itself is not available. // Perhaps getConstantOnEdge should be smart enough to do this? for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) { BasicBlock *P = *PI; // If the value is known by LazyValueInfo to be a constant in a // predecessor, use that information to try to thread this block. Constant *PredCst = LVI->getConstantOnEdge(V, P, BB, CxtI); if (Constant *KC = getKnownConstant(PredCst, Preference)) Result.push_back(std::make_pair(KC, P)); } return !Result.empty(); } /// If I is a PHI node, then we know the incoming values for any constants. if (PHINode *PN = dyn_cast<PHINode>(I)) { for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { Value *InVal = PN->getIncomingValue(i); if (Constant *KC = getKnownConstant(InVal, Preference)) { Result.push_back(std::make_pair(KC, PN->getIncomingBlock(i))); } else { Constant *CI = LVI->getConstantOnEdge(InVal, PN->getIncomingBlock(i), BB, CxtI); if (Constant *KC = getKnownConstant(CI, Preference)) Result.push_back(std::make_pair(KC, PN->getIncomingBlock(i))); } } return !Result.empty(); } PredValueInfoTy LHSVals, RHSVals; // Handle some boolean conditions. if (I->getType()->getPrimitiveSizeInBits() == 1) { assert(Preference == WantInteger && "One-bit non-integer type?"); // X | true -> true // X & false -> false if (I->getOpcode() == Instruction::Or || I->getOpcode() == Instruction::And) { ComputeValueKnownInPredecessors(I->getOperand(0), BB, LHSVals, WantInteger, CxtI); ComputeValueKnownInPredecessors(I->getOperand(1), BB, RHSVals, WantInteger, CxtI); if (LHSVals.empty() && RHSVals.empty()) return false; ConstantInt *InterestingVal; if (I->getOpcode() == Instruction::Or) InterestingVal = ConstantInt::getTrue(I->getContext()); else InterestingVal = ConstantInt::getFalse(I->getContext()); SmallPtrSet<BasicBlock*, 4> LHSKnownBBs; // Scan for the sentinel. If we find an undef, force it to the // interesting value: x|undef -> true and x&undef -> false. for (unsigned i = 0, e = LHSVals.size(); i != e; ++i) if (LHSVals[i].first == InterestingVal || isa<UndefValue>(LHSVals[i].first)) { Result.push_back(LHSVals[i]); Result.back().first = InterestingVal; LHSKnownBBs.insert(LHSVals[i].second); } for (unsigned i = 0, e = RHSVals.size(); i != e; ++i) if (RHSVals[i].first == InterestingVal || isa<UndefValue>(RHSVals[i].first)) { // If we already inferred a value for this block on the LHS, don't // re-add it. if (!LHSKnownBBs.count(RHSVals[i].second)) { Result.push_back(RHSVals[i]); Result.back().first = InterestingVal; } } return !Result.empty(); } // Handle the NOT form of XOR. if (I->getOpcode() == Instruction::Xor && isa<ConstantInt>(I->getOperand(1)) && cast<ConstantInt>(I->getOperand(1))->isOne()) { ComputeValueKnownInPredecessors(I->getOperand(0), BB, Result, WantInteger, CxtI); if (Result.empty()) return false; // Invert the known values. for (unsigned i = 0, e = Result.size(); i != e; ++i) Result[i].first = ConstantExpr::getNot(Result[i].first); return true; } // Try to simplify some other binary operator values. 
} else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) { assert(Preference != WantBlockAddress && "A binary operator creating a block address?"); if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) { PredValueInfoTy LHSVals; ComputeValueKnownInPredecessors(BO->getOperand(0), BB, LHSVals, WantInteger, CxtI); // Try to use constant folding to simplify the binary operator. for (unsigned i = 0, e = LHSVals.size(); i != e; ++i) { Constant *V = LHSVals[i].first; Constant *Folded = ConstantExpr::get(BO->getOpcode(), V, CI); if (Constant *KC = getKnownConstant(Folded, WantInteger)) Result.push_back(std::make_pair(KC, LHSVals[i].second)); } } return !Result.empty(); } // Handle compare with phi operand, where the PHI is defined in this block. if (CmpInst *Cmp = dyn_cast<CmpInst>(I)) { assert(Preference == WantInteger && "Compares only produce integers"); PHINode *PN = dyn_cast<PHINode>(Cmp->getOperand(0)); if (PN && PN->getParent() == BB) { const DataLayout &DL = PN->getModule()->getDataLayout(); // We can do this simplification if any comparisons fold to true or false. // See if any do. for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { BasicBlock *PredBB = PN->getIncomingBlock(i); Value *LHS = PN->getIncomingValue(i); Value *RHS = Cmp->getOperand(1)->DoPHITranslation(BB, PredBB); Value *Res = SimplifyCmpInst(Cmp->getPredicate(), LHS, RHS, DL); if (!Res) { if (!isa<Constant>(RHS)) continue; LazyValueInfo::Tristate ResT = LVI->getPredicateOnEdge(Cmp->getPredicate(), LHS, cast<Constant>(RHS), PredBB, BB, CxtI ? CxtI : Cmp); if (ResT == LazyValueInfo::Unknown) continue; Res = ConstantInt::get(Type::getInt1Ty(LHS->getContext()), ResT); } if (Constant *KC = getKnownConstant(Res, WantInteger)) Result.push_back(std::make_pair(KC, PredBB)); } return !Result.empty(); } // If comparing a live-in value against a constant, see if we know the // live-in value on any predecessors. if (isa<Constant>(Cmp->getOperand(1)) && Cmp->getType()->isIntegerTy()) { if (!isa<Instruction>(Cmp->getOperand(0)) || cast<Instruction>(Cmp->getOperand(0))->getParent() != BB) { Constant *RHSCst = cast<Constant>(Cmp->getOperand(1)); for (pred_iterator PI = pred_begin(BB), E = pred_end(BB);PI != E; ++PI){ BasicBlock *P = *PI; // If the value is known by LazyValueInfo to be a constant in a // predecessor, use that information to try to thread this block. LazyValueInfo::Tristate Res = LVI->getPredicateOnEdge(Cmp->getPredicate(), Cmp->getOperand(0), RHSCst, P, BB, CxtI ? CxtI : Cmp); if (Res == LazyValueInfo::Unknown) continue; Constant *ResC = ConstantInt::get(Cmp->getType(), Res); Result.push_back(std::make_pair(ResC, P)); } return !Result.empty(); } // Try to find a constant value for the LHS of a comparison, // and evaluate it statically if we can. if (Constant *CmpConst = dyn_cast<Constant>(Cmp->getOperand(1))) { PredValueInfoTy LHSVals; ComputeValueKnownInPredecessors(I->getOperand(0), BB, LHSVals, WantInteger, CxtI); for (unsigned i = 0, e = LHSVals.size(); i != e; ++i) { Constant *V = LHSVals[i].first; Constant *Folded = ConstantExpr::getCompare(Cmp->getPredicate(), V, CmpConst); if (Constant *KC = getKnownConstant(Folded, WantInteger)) Result.push_back(std::make_pair(KC, LHSVals[i].second)); } return !Result.empty(); } } } if (SelectInst *SI = dyn_cast<SelectInst>(I)) { // Handle select instructions where at least one operand is a known constant // and we can figure out the condition value for any predecessor block. 
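    // Illustrative sketch (not from this file) of the select case handled
    // here:
    //
    //   %s = select i1 %flag, i32 7, i32 %x
    //
    // If %flag is known true coming from some predecessor P, then %s is 7
    // along P's edge; if %flag were known false from P, the false arm would be
    // consulted instead. Only an arm that is itself a known constant can
    // contribute an entry to Result.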
Constant *TrueVal = getKnownConstant(SI->getTrueValue(), Preference); Constant *FalseVal = getKnownConstant(SI->getFalseValue(), Preference); PredValueInfoTy Conds; if ((TrueVal || FalseVal) && ComputeValueKnownInPredecessors(SI->getCondition(), BB, Conds, WantInteger, CxtI)) { for (unsigned i = 0, e = Conds.size(); i != e; ++i) { Constant *Cond = Conds[i].first; // Figure out what value to use for the condition. bool KnownCond; if (ConstantInt *CI = dyn_cast<ConstantInt>(Cond)) { // A known boolean. KnownCond = CI->isOne(); } else { assert(isa<UndefValue>(Cond) && "Unexpected condition value"); // Either operand will do, so be sure to pick the one that's a known // constant. // FIXME: Do this more cleverly if both values are known constants? KnownCond = (TrueVal != nullptr); } // See if the select has a known constant value for this predecessor. if (Constant *Val = KnownCond ? TrueVal : FalseVal) Result.push_back(std::make_pair(Val, Conds[i].second)); } return !Result.empty(); } } // If all else fails, see if LVI can figure out a constant value for us. Constant *CI = LVI->getConstant(V, BB, CxtI); if (Constant *KC = getKnownConstant(CI, Preference)) { for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) Result.push_back(std::make_pair(KC, *PI)); } return !Result.empty(); } /// GetBestDestForBranchOnUndef - If we determine that the specified block ends /// in an undefined jump, decide which block is best to revector to. /// /// Since we can pick an arbitrary destination, we pick the successor with the /// fewest predecessors. This should reduce the in-degree of the others. /// static unsigned GetBestDestForJumpOnUndef(BasicBlock *BB) { TerminatorInst *BBTerm = BB->getTerminator(); unsigned MinSucc = 0; BasicBlock *TestBB = BBTerm->getSuccessor(MinSucc); // Compute the successor with the minimum number of predecessors. unsigned MinNumPreds = std::distance(pred_begin(TestBB), pred_end(TestBB)); for (unsigned i = 1, e = BBTerm->getNumSuccessors(); i != e; ++i) { TestBB = BBTerm->getSuccessor(i); unsigned NumPreds = std::distance(pred_begin(TestBB), pred_end(TestBB)); if (NumPreds < MinNumPreds) { MinSucc = i; MinNumPreds = NumPreds; } } return MinSucc; } static bool hasAddressTakenAndUsed(BasicBlock *BB) { if (!BB->hasAddressTaken()) return false; // If the block has its address taken, it may be a tree of dead constants // hanging off of it. These shouldn't keep the block alive. BlockAddress *BA = BlockAddress::get(BB); BA->removeDeadConstantUsers(); return !BA->use_empty(); } /// ProcessBlock - If there are any predecessors whose control can be threaded /// through to a successor, transform them now. bool JumpThreading::ProcessBlock(BasicBlock *BB) { // If the block is trivially dead, just return and let the caller nuke it. // This simplifies other transformations. if (pred_empty(BB) && BB != &BB->getParent()->getEntryBlock()) return false; // If this block has a single predecessor, and if that pred has a single // successor, merge the blocks. This encourages recursive jump threading // because now the condition in this block can be threaded through // predecessors of our predecessor block. if (BasicBlock *SinglePred = BB->getSinglePredecessor()) { if (SinglePred->getTerminator()->getNumSuccessors() == 1 && SinglePred != BB && !hasAddressTakenAndUsed(BB)) { // If SinglePred was a loop header, BB becomes one. 
if (LoopHeaders.erase(SinglePred)) LoopHeaders.insert(BB); LVI->eraseBlock(SinglePred); MergeBasicBlockIntoOnlyPred(BB); return true; } } // What kind of constant we're looking for. ConstantPreference Preference = WantInteger; // Look to see if the terminator is a conditional branch, switch or indirect // branch, if not we can't thread it. Value *Condition; Instruction *Terminator = BB->getTerminator(); if (BranchInst *BI = dyn_cast<BranchInst>(Terminator)) { // Can't thread an unconditional jump. if (BI->isUnconditional()) return false; Condition = BI->getCondition(); } else if (SwitchInst *SI = dyn_cast<SwitchInst>(Terminator)) { Condition = SI->getCondition(); } else if (IndirectBrInst *IB = dyn_cast<IndirectBrInst>(Terminator)) { // Can't thread indirect branch with no successors. if (IB->getNumSuccessors() == 0) return false; Condition = IB->getAddress()->stripPointerCasts(); Preference = WantBlockAddress; } else { return false; // Must be an invoke. } // Run constant folding to see if we can reduce the condition to a simple // constant. if (Instruction *I = dyn_cast<Instruction>(Condition)) { Value *SimpleVal = ConstantFoldInstruction(I, BB->getModule()->getDataLayout(), TLI); if (SimpleVal) { I->replaceAllUsesWith(SimpleVal); I->eraseFromParent(); Condition = SimpleVal; } } // If the terminator is branching on an undef, we can pick any of the // successors to branch to. Let GetBestDestForJumpOnUndef decide. if (isa<UndefValue>(Condition)) { unsigned BestSucc = GetBestDestForJumpOnUndef(BB); // Fold the branch/switch. TerminatorInst *BBTerm = BB->getTerminator(); for (unsigned i = 0, e = BBTerm->getNumSuccessors(); i != e; ++i) { if (i == BestSucc) continue; BBTerm->getSuccessor(i)->removePredecessor(BB, true); } DEBUG(dbgs() << " In block '" << BB->getName() << "' folding undef terminator: " << *BBTerm << '\n'); BranchInst::Create(BBTerm->getSuccessor(BestSucc), BBTerm); BBTerm->eraseFromParent(); return true; } // If the terminator of this block is branching on a constant, simplify the // terminator to an unconditional branch. This can occur due to threading in // other blocks. if (getKnownConstant(Condition, Preference)) { DEBUG(dbgs() << " In block '" << BB->getName() << "' folding terminator: " << *BB->getTerminator() << '\n'); ++NumFolds; ConstantFoldTerminator(BB, true); return true; } Instruction *CondInst = dyn_cast<Instruction>(Condition); // All the rest of our checks depend on the condition being an instruction. if (!CondInst) { // FIXME: Unify this with code below. if (ProcessThreadableEdges(Condition, BB, Preference, Terminator)) return true; return false; } if (CmpInst *CondCmp = dyn_cast<CmpInst>(CondInst)) { // If we're branching on a conditional, LVI might be able to determine // it's value at the branch instruction. We only handle comparisons // against a constant at this time. // TODO: This should be extended to handle switches as well. BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator()); Constant *CondConst = dyn_cast<Constant>(CondCmp->getOperand(1)); if (CondBr && CondConst && CondBr->isConditional()) { LazyValueInfo::Tristate Ret = LVI->getPredicateAt(CondCmp->getPredicate(), CondCmp->getOperand(0), CondConst, CondBr); if (Ret != LazyValueInfo::Unknown) { unsigned ToRemove = Ret == LazyValueInfo::True ? 1 : 0; unsigned ToKeep = Ret == LazyValueInfo::True ? 
0 : 1; CondBr->getSuccessor(ToRemove)->removePredecessor(BB, true); BranchInst::Create(CondBr->getSuccessor(ToKeep), CondBr); CondBr->eraseFromParent(); if (CondCmp->use_empty()) CondCmp->eraseFromParent(); else if (CondCmp->getParent() == BB) { // If the fact we just learned is true for all uses of the // condition, replace it with a constant value auto *CI = Ret == LazyValueInfo::True ? ConstantInt::getTrue(CondCmp->getType()) : ConstantInt::getFalse(CondCmp->getType()); CondCmp->replaceAllUsesWith(CI); CondCmp->eraseFromParent(); } return true; } } if (CondBr && CondConst && TryToUnfoldSelect(CondCmp, BB)) return true; } // Check for some cases that are worth simplifying. Right now we want to look // for loads that are used by a switch or by the condition for the branch. If // we see one, check to see if it's partially redundant. If so, insert a PHI // which can then be used to thread the values. // Value *SimplifyValue = CondInst; if (CmpInst *CondCmp = dyn_cast<CmpInst>(SimplifyValue)) if (isa<Constant>(CondCmp->getOperand(1))) SimplifyValue = CondCmp->getOperand(0); // TODO: There are other places where load PRE would be profitable, such as // more complex comparisons. if (LoadInst *LI = dyn_cast<LoadInst>(SimplifyValue)) if (SimplifyPartiallyRedundantLoad(LI)) return true; // Handle a variety of cases where we are branching on something derived from // a PHI node in the current block. If we can prove that any predecessors // compute a predictable value based on a PHI node, thread those predecessors. // if (ProcessThreadableEdges(CondInst, BB, Preference, Terminator)) return true; // If this is an otherwise-unfoldable branch on a phi node in the current // block, see if we can simplify. if (PHINode *PN = dyn_cast<PHINode>(CondInst)) if (PN->getParent() == BB && isa<BranchInst>(BB->getTerminator())) return ProcessBranchOnPHI(PN); // If this is an otherwise-unfoldable branch on a XOR, see if we can simplify. if (CondInst->getOpcode() == Instruction::Xor && CondInst->getParent() == BB && isa<BranchInst>(BB->getTerminator())) return ProcessBranchOnXOR(cast<BinaryOperator>(CondInst)); // TODO: If we have: "br (X > 0)" and we have a predecessor where we know // "(X == 4)", thread through this block. return false; } /// SimplifyPartiallyRedundantLoad - If LI is an obviously partially redundant /// load instruction, eliminate it by replacing it with a PHI node. This is an /// important optimization that encourages jump threading, and needs to be run /// interlaced with other jump threading tasks. bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) { // Don't hack volatile/atomic loads. if (!LI->isSimple()) return false; // If the load is defined in a block with exactly one predecessor, it can't be // partially redundant. BasicBlock *LoadBB = LI->getParent(); if (LoadBB->getSinglePredecessor()) return false; // If the load is defined in a landing pad, it can't be partially redundant, // because the edges between the invoke and the landing pad cannot have other // instructions between them. if (LoadBB->isLandingPad()) return false; Value *LoadedPtr = LI->getOperand(0); // If the loaded operand is defined in the LoadBB, it can't be available. // TODO: Could do simple PHI translation, that would be fun :) if (Instruction *PtrOp = dyn_cast<Instruction>(LoadedPtr)) if (PtrOp->getParent() == LoadBB) return false; // Scan a few instructions up from the load, to see if it is obviously live at // the entry to its block. 
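  // Illustrative sketch (not from this file) of the partial redundancy this
  // function targets, after first checking whether the value is already
  // available earlier in the load's own block:
  //
  //   pred1:                          pred2:
  //     store i32 %v, i32* %p           ...        ; %p not written here
  //     br label %bb                    br label %bb
  //   bb:
  //     %x = load i32, i32* %p
  //
  // The load is available from pred1 (it can reuse %v) but not from pred2, so
  // a reload is inserted at the end of pred2 and the two values are merged
  // with a PHI in %bb, which then replaces the original load.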
BasicBlock::iterator BBIt = LI; if (Value *AvailableVal = FindAvailableLoadedValue(LoadedPtr, LoadBB, BBIt, 6)) { // If the value if the load is locally available within the block, just use // it. This frequently occurs for reg2mem'd allocas. //cerr << "LOAD ELIMINATED:\n" << *BBIt << *LI << "\n"; // If the returned value is the load itself, replace with an undef. This can // only happen in dead loops. if (AvailableVal == LI) AvailableVal = UndefValue::get(LI->getType()); if (AvailableVal->getType() != LI->getType()) AvailableVal = CastInst::CreateBitOrPointerCast(AvailableVal, LI->getType(), "", LI); LI->replaceAllUsesWith(AvailableVal); LI->eraseFromParent(); return true; } // Otherwise, if we scanned the whole block and got to the top of the block, // we know the block is locally transparent to the load. If not, something // might clobber its value. if (BBIt != LoadBB->begin()) return false; // If all of the loads and stores that feed the value have the same AA tags, // then we can propagate them onto any newly inserted loads. AAMDNodes AATags; LI->getAAMetadata(AATags); SmallPtrSet<BasicBlock*, 8> PredsScanned; typedef SmallVector<std::pair<BasicBlock*, Value*>, 8> AvailablePredsTy; AvailablePredsTy AvailablePreds; BasicBlock *OneUnavailablePred = nullptr; // If we got here, the loaded value is transparent through to the start of the // block. Check to see if it is available in any of the predecessor blocks. for (pred_iterator PI = pred_begin(LoadBB), PE = pred_end(LoadBB); PI != PE; ++PI) { BasicBlock *PredBB = *PI; // If we already scanned this predecessor, skip it. if (!PredsScanned.insert(PredBB).second) continue; // Scan the predecessor to see if the value is available in the pred. BBIt = PredBB->end(); AAMDNodes ThisAATags; Value *PredAvailable = FindAvailableLoadedValue(LoadedPtr, PredBB, BBIt, 6, nullptr, &ThisAATags); if (!PredAvailable) { OneUnavailablePred = PredBB; continue; } // If AA tags disagree or are not present, forget about them. if (AATags != ThisAATags) AATags = AAMDNodes(); // If so, this load is partially redundant. Remember this info so that we // can create a PHI node. AvailablePreds.push_back(std::make_pair(PredBB, PredAvailable)); } // If the loaded value isn't available in any predecessor, it isn't partially // redundant. if (AvailablePreds.empty()) return false; // Okay, the loaded value is available in at least one (and maybe all!) // predecessors. If the value is unavailable in more than one unique // predecessor, we want to insert a merge block for those common predecessors. // This ensures that we only have to insert one reload, thus not increasing // code size. BasicBlock *UnavailablePred = nullptr; // If there is exactly one predecessor where the value is unavailable, the // already computed 'OneUnavailablePred' block is it. If it ends in an // unconditional branch, we know that it isn't a critical edge. if (PredsScanned.size() == AvailablePreds.size()+1 && OneUnavailablePred->getTerminator()->getNumSuccessors() == 1) { UnavailablePred = OneUnavailablePred; } else if (PredsScanned.size() != AvailablePreds.size()) { // Otherwise, we had multiple unavailable predecessors or we had a critical // edge from the one. SmallVector<BasicBlock*, 8> PredsToSplit; SmallPtrSet<BasicBlock*, 8> AvailablePredSet; for (unsigned i = 0, e = AvailablePreds.size(); i != e; ++i) AvailablePredSet.insert(AvailablePreds[i].first); // Add all the unavailable predecessors to the PredsToSplit list. 
for (pred_iterator PI = pred_begin(LoadBB), PE = pred_end(LoadBB); PI != PE; ++PI) { BasicBlock *P = *PI; // If the predecessor is an indirect goto, we can't split the edge. if (isa<IndirectBrInst>(P->getTerminator())) return false; if (!AvailablePredSet.count(P)) PredsToSplit.push_back(P); } // Split them out to their own block. UnavailablePred = SplitBlockPredecessors(LoadBB, PredsToSplit, "thread-pre-split"); } // If the value isn't available in all predecessors, then there will be // exactly one where it isn't available. Insert a load on that edge and add // it to the AvailablePreds list. if (UnavailablePred) { assert(UnavailablePred->getTerminator()->getNumSuccessors() == 1 && "Can't handle critical edge here!"); LoadInst *NewVal = new LoadInst(LoadedPtr, LI->getName()+".pr", false, LI->getAlignment(), UnavailablePred->getTerminator()); NewVal->setDebugLoc(LI->getDebugLoc()); if (AATags) NewVal->setAAMetadata(AATags); AvailablePreds.push_back(std::make_pair(UnavailablePred, NewVal)); } // Now we know that each predecessor of this block has a value in // AvailablePreds, sort them for efficient access as we're walking the preds. array_pod_sort(AvailablePreds.begin(), AvailablePreds.end()); // Create a PHI node at the start of the block for the PRE'd load value. pred_iterator PB = pred_begin(LoadBB), PE = pred_end(LoadBB); PHINode *PN = PHINode::Create(LI->getType(), std::distance(PB, PE), "", LoadBB->begin()); PN->takeName(LI); PN->setDebugLoc(LI->getDebugLoc()); // Insert new entries into the PHI for each predecessor. A single block may // have multiple entries here. for (pred_iterator PI = PB; PI != PE; ++PI) { BasicBlock *P = *PI; AvailablePredsTy::iterator I = std::lower_bound(AvailablePreds.begin(), AvailablePreds.end(), std::make_pair(P, (Value*)nullptr)); assert(I != AvailablePreds.end() && I->first == P && "Didn't find entry for predecessor!"); // If we have an available predecessor but it requires casting, insert the // cast in the predecessor and use the cast. Note that we have to update the // AvailablePreds vector as we go so that all of the PHI entries for this // predecessor use the same bitcast. Value *&PredV = I->second; if (PredV->getType() != LI->getType()) PredV = CastInst::CreateBitOrPointerCast(PredV, LI->getType(), "", P->getTerminator()); PN->addIncoming(PredV, I->first); } //cerr << "PRE: " << *LI << *PN << "\n"; LI->replaceAllUsesWith(PN); LI->eraseFromParent(); return true; } /// FindMostPopularDest - The specified list contains multiple possible /// threadable destinations. Pick the one that occurs the most frequently in /// the list. static BasicBlock * FindMostPopularDest(BasicBlock *BB, const SmallVectorImpl<std::pair<BasicBlock*, BasicBlock*> > &PredToDestList) { assert(!PredToDestList.empty()); // Determine popularity. If there are multiple possible destinations, we // explicitly choose to ignore 'undef' destinations. We prefer to thread // blocks with known and real destinations to threading undef. We'll handle // them later if interesting. DenseMap<BasicBlock*, unsigned> DestPopularity; for (unsigned i = 0, e = PredToDestList.size(); i != e; ++i) if (PredToDestList[i].second) DestPopularity[PredToDestList[i].second]++; // Find the most popular dest. 
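  // Illustrative sketch (not from this file): the loop below is, in effect, an
  // argmax over DestPopularity with a deterministic tie-break. DenseMap
  // iteration order is not stable, so if, say, %d1 and %d2 are each the known
  // destination of two predecessors, whichever of them appears first in BB's
  // terminator's successor list is chosen, keeping the decision reproducible
  // from run to run.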
DenseMap<BasicBlock*, unsigned>::iterator DPI = DestPopularity.begin(); BasicBlock *MostPopularDest = DPI->first; unsigned Popularity = DPI->second; SmallVector<BasicBlock*, 4> SamePopularity; for (++DPI; DPI != DestPopularity.end(); ++DPI) { // If the popularity of this entry isn't higher than the popularity we've // seen so far, ignore it. if (DPI->second < Popularity) ; // ignore. else if (DPI->second == Popularity) { // If it is the same as what we've seen so far, keep track of it. SamePopularity.push_back(DPI->first); } else { // If it is more popular, remember it. SamePopularity.clear(); MostPopularDest = DPI->first; Popularity = DPI->second; } } // Okay, now we know the most popular destination. If there is more than one // destination, we need to determine one. This is arbitrary, but we need // to make a deterministic decision. Pick the first one that appears in the // successor list. if (!SamePopularity.empty()) { SamePopularity.push_back(MostPopularDest); TerminatorInst *TI = BB->getTerminator(); for (unsigned i = 0; ; ++i) { assert(i != TI->getNumSuccessors() && "Didn't find any successor!"); if (std::find(SamePopularity.begin(), SamePopularity.end(), TI->getSuccessor(i)) == SamePopularity.end()) continue; MostPopularDest = TI->getSuccessor(i); break; } } // Okay, we have finally picked the most popular destination. return MostPopularDest; } bool JumpThreading::ProcessThreadableEdges(Value *Cond, BasicBlock *BB, ConstantPreference Preference, Instruction *CxtI) { // If threading this would thread across a loop header, don't even try to // thread the edge. if (LoopHeaders.count(BB)) return false; PredValueInfoTy PredValues; if (!ComputeValueKnownInPredecessors(Cond, BB, PredValues, Preference, CxtI)) return false; assert(!PredValues.empty() && "ComputeValueKnownInPredecessors returned true with no values"); DEBUG(dbgs() << "IN BB: " << *BB; for (unsigned i = 0, e = PredValues.size(); i != e; ++i) { dbgs() << " BB '" << BB->getName() << "': FOUND condition = " << *PredValues[i].first << " for pred '" << PredValues[i].second->getName() << "'.\n"; }); // Decide what we want to thread through. Convert our list of known values to // a list of known destinations for each pred. This also discards duplicate // predecessors and keeps track of the undefined inputs (which are represented // as a null dest in the PredToDestList). SmallPtrSet<BasicBlock*, 16> SeenPreds; SmallVector<std::pair<BasicBlock*, BasicBlock*>, 16> PredToDestList; BasicBlock *OnlyDest = nullptr; BasicBlock *MultipleDestSentinel = (BasicBlock*)(intptr_t)~0ULL; for (unsigned i = 0, e = PredValues.size(); i != e; ++i) { BasicBlock *Pred = PredValues[i].second; if (!SeenPreds.insert(Pred).second) continue; // Duplicate predecessor entry. // If the predecessor ends with an indirect goto, we can't change its // destination. if (isa<IndirectBrInst>(Pred->getTerminator())) continue; Constant *Val = PredValues[i].first; BasicBlock *DestBB; if (isa<UndefValue>(Val)) DestBB = nullptr; else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) DestBB = BI->getSuccessor(cast<ConstantInt>(Val)->isZero()); else if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator())) { DestBB = SI->findCaseValue(cast<ConstantInt>(Val)).getCaseSuccessor(); } else { assert(isa<IndirectBrInst>(BB->getTerminator()) && "Unexpected terminator"); DestBB = cast<BlockAddress>(Val)->getBasicBlock(); } // If we have exactly one destination, remember it for efficiency below. 
if (PredToDestList.empty()) OnlyDest = DestBB; else if (OnlyDest != DestBB) OnlyDest = MultipleDestSentinel; PredToDestList.push_back(std::make_pair(Pred, DestBB)); } // If all edges were unthreadable, we fail. if (PredToDestList.empty()) return false; // Determine which is the most common successor. If we have many inputs and // this block is a switch, we want to start by threading the batch that goes // to the most popular destination first. If we only know about one // threadable destination (the common case) we can avoid this. BasicBlock *MostPopularDest = OnlyDest; if (MostPopularDest == MultipleDestSentinel) MostPopularDest = FindMostPopularDest(BB, PredToDestList); // Now that we know what the most popular destination is, factor all // predecessors that will jump to it into a single predecessor. SmallVector<BasicBlock*, 16> PredsToFactor; for (unsigned i = 0, e = PredToDestList.size(); i != e; ++i) if (PredToDestList[i].second == MostPopularDest) { BasicBlock *Pred = PredToDestList[i].first; // This predecessor may be a switch or something else that has multiple // edges to the block. Factor each of these edges by listing them // according to # occurrences in PredsToFactor. TerminatorInst *PredTI = Pred->getTerminator(); for (unsigned i = 0, e = PredTI->getNumSuccessors(); i != e; ++i) if (PredTI->getSuccessor(i) == BB) PredsToFactor.push_back(Pred); } // If the threadable edges are branching on an undefined value, we get to pick // the destination that these predecessors should get to. if (!MostPopularDest) MostPopularDest = BB->getTerminator()-> getSuccessor(GetBestDestForJumpOnUndef(BB)); // Ok, try to thread it! return ThreadEdge(BB, PredsToFactor, MostPopularDest); } /// ProcessBranchOnPHI - We have an otherwise unthreadable conditional branch on /// a PHI node in the current block. See if there are any simplifications we /// can do based on inputs to the phi node. /// bool JumpThreading::ProcessBranchOnPHI(PHINode *PN) { BasicBlock *BB = PN->getParent(); // TODO: We could make use of this to do it once for blocks with common PHI // values. SmallVector<BasicBlock*, 1> PredBBs; PredBBs.resize(1); // If any of the predecessor blocks end in an unconditional branch, we can // *duplicate* the conditional branch into that block in order to further // encourage jump threading and to eliminate cases where we have branch on a // phi of an icmp (branch on icmp is much better). for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { BasicBlock *PredBB = PN->getIncomingBlock(i); if (BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator())) if (PredBr->isUnconditional()) { PredBBs[0] = PredBB; // Try to duplicate BB into PredBB. if (DuplicateCondBranchOnPHIIntoPred(BB, PredBBs)) return true; } } return false; } /// ProcessBranchOnXOR - We have an otherwise unthreadable conditional branch on /// a xor instruction in the current block. See if there are any /// simplifications we can do based on inputs to the xor. /// bool JumpThreading::ProcessBranchOnXOR(BinaryOperator *BO) { BasicBlock *BB = BO->getParent(); // If either the LHS or RHS of the xor is a constant, don't do this // optimization. if (isa<ConstantInt>(BO->getOperand(0)) || isa<ConstantInt>(BO->getOperand(1))) return false; // If the first instruction in BB isn't a phi, we won't be able to infer // anything special about any particular predecessor. 
if (!isa<PHINode>(BB->front())) return false; // If we have a xor as the branch input to this block, and we know that the // LHS or RHS of the xor in any predecessor is true/false, then we can clone // the condition into the predecessor and fix that value to true, saving some // logical ops on that path and encouraging other paths to simplify. // // This copies something like this: // // BB: // %X = phi i1 [1], [%X'] // %Y = icmp eq i32 %A, %B // %Z = xor i1 %X, %Y // br i1 %Z, ... // // Into: // BB': // %Y = icmp ne i32 %A, %B // br i1 %Z, ... PredValueInfoTy XorOpValues; bool isLHS = true; if (!ComputeValueKnownInPredecessors(BO->getOperand(0), BB, XorOpValues, WantInteger, BO)) { assert(XorOpValues.empty()); if (!ComputeValueKnownInPredecessors(BO->getOperand(1), BB, XorOpValues, WantInteger, BO)) return false; isLHS = false; } assert(!XorOpValues.empty() && "ComputeValueKnownInPredecessors returned true with no values"); // Scan the information to see which is most popular: true or false. The // predecessors can be of the set true, false, or undef. unsigned NumTrue = 0, NumFalse = 0; for (unsigned i = 0, e = XorOpValues.size(); i != e; ++i) { if (isa<UndefValue>(XorOpValues[i].first)) // Ignore undefs for the count. continue; if (cast<ConstantInt>(XorOpValues[i].first)->isZero()) ++NumFalse; else ++NumTrue; } // Determine which value to split on, true, false, or undef if neither. ConstantInt *SplitVal = nullptr; if (NumTrue > NumFalse) SplitVal = ConstantInt::getTrue(BB->getContext()); else if (NumTrue != 0 || NumFalse != 0) SplitVal = ConstantInt::getFalse(BB->getContext()); // Collect all of the blocks that this can be folded into so that we can // factor this once and clone it once. SmallVector<BasicBlock*, 8> BlocksToFoldInto; for (unsigned i = 0, e = XorOpValues.size(); i != e; ++i) { if (XorOpValues[i].first != SplitVal && !isa<UndefValue>(XorOpValues[i].first)) continue; BlocksToFoldInto.push_back(XorOpValues[i].second); } // If we inferred a value for all of the predecessors, then duplication won't // help us. However, we can just replace the LHS or RHS with the constant. if (BlocksToFoldInto.size() == cast<PHINode>(BB->front()).getNumIncomingValues()) { if (!SplitVal) { // If all preds provide undef, just nuke the xor, because it is undef too. BO->replaceAllUsesWith(UndefValue::get(BO->getType())); BO->eraseFromParent(); } else if (SplitVal->isZero()) { // If all preds provide 0, replace the xor with the other input. BO->replaceAllUsesWith(BO->getOperand(isLHS)); BO->eraseFromParent(); } else { // If all preds provide 1, set the computed value to 1. BO->setOperand(!isLHS, SplitVal); } return true; } // Try to duplicate BB into PredBB. return DuplicateCondBranchOnPHIIntoPred(BB, BlocksToFoldInto); } /// AddPHINodeEntriesForMappedBlock - We're adding 'NewPred' as a new /// predecessor to the PHIBB block. If it has PHI nodes, add entries for /// NewPred using the entries from OldPred (suitably mapped). static void AddPHINodeEntriesForMappedBlock(BasicBlock *PHIBB, BasicBlock *OldPred, BasicBlock *NewPred, DenseMap<Instruction*, Value*> &ValueMap) { for (BasicBlock::iterator PNI = PHIBB->begin(); PHINode *PN = dyn_cast<PHINode>(PNI); ++PNI) { // Ok, we have a PHI node. Figure out what the incoming value was for the // DestBlock. Value *IV = PN->getIncomingValueForBlock(OldPred); // Remap the value if necessary. 
if (Instruction *Inst = dyn_cast<Instruction>(IV)) { DenseMap<Instruction*, Value*>::iterator I = ValueMap.find(Inst); if (I != ValueMap.end()) IV = I->second; } PN->addIncoming(IV, NewPred); } } /// ThreadEdge - We have decided that it is safe and profitable to factor the /// blocks in PredBBs to one predecessor, then thread an edge from it to SuccBB /// across BB. Transform the IR to reflect this change. bool JumpThreading::ThreadEdge(BasicBlock *BB, const SmallVectorImpl<BasicBlock*> &PredBBs, BasicBlock *SuccBB) { // If threading to the same block as we come from, we would infinite loop. if (SuccBB == BB) { DEBUG(dbgs() << " Not threading across BB '" << BB->getName() << "' - would thread to self!\n"); return false; } // If threading this would thread across a loop header, don't thread the edge. // See the comments above FindLoopHeaders for justifications and caveats. if (LoopHeaders.count(BB)) { DEBUG(dbgs() << " Not threading across loop header BB '" << BB->getName() << "' to dest BB '" << SuccBB->getName() << "' - it might create an irreducible loop!\n"); return false; } unsigned JumpThreadCost = getJumpThreadDuplicationCost(BB, BBDupThreshold); if (JumpThreadCost > BBDupThreshold) { DEBUG(dbgs() << " Not threading BB '" << BB->getName() << "' - Cost is too high: " << JumpThreadCost << "\n"); return false; } // And finally, do it! Start by factoring the predecessors is needed. BasicBlock *PredBB; if (PredBBs.size() == 1) PredBB = PredBBs[0]; else { DEBUG(dbgs() << " Factoring out " << PredBBs.size() << " common predecessors.\n"); PredBB = SplitBlockPredecessors(BB, PredBBs, ".thr_comm"); } // And finally, do it! DEBUG(dbgs() << " Threading edge from '" << PredBB->getName() << "' to '" << SuccBB->getName() << "' with cost: " << JumpThreadCost << ", across block:\n " << *BB << "\n"); LVI->threadEdge(PredBB, BB, SuccBB); // We are going to have to map operands from the original BB block to the new // copy of the block 'NewBB'. If there are PHI nodes in BB, evaluate them to // account for entry from PredBB. DenseMap<Instruction*, Value*> ValueMapping; BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), BB->getName()+".thread", BB->getParent(), BB); NewBB->moveAfter(PredBB); BasicBlock::iterator BI = BB->begin(); for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI) ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB); // Clone the non-phi instructions of BB into NewBB, keeping track of the // mapping and using it to remap operands in the cloned instructions. for (; !isa<TerminatorInst>(BI); ++BI) { Instruction *New = BI->clone(); New->setName(BI->getName()); NewBB->getInstList().push_back(New); ValueMapping[BI] = New; // Remap operands to patch up intra-block references. for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i) if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) { DenseMap<Instruction*, Value*>::iterator I = ValueMapping.find(Inst); if (I != ValueMapping.end()) New->setOperand(i, I->second); } } // We didn't copy the terminator from BB over to NewBB, because there is now // an unconditional jump to SuccBB. Insert the unconditional jump. BranchInst *NewBI =BranchInst::Create(SuccBB, NewBB); NewBI->setDebugLoc(BB->getTerminator()->getDebugLoc()); // Check to see if SuccBB has PHI nodes. If so, we need to add entries to the // PHI nodes for NewBB now. 
AddPHINodeEntriesForMappedBlock(SuccBB, BB, NewBB, ValueMapping); // If there were values defined in BB that are used outside the block, then we // now have to update all uses of the value to use either the original value, // the cloned value, or some PHI derived value. This can require arbitrary // PHI insertion, of which we are prepared to do, clean these up now. SSAUpdater SSAUpdate; SmallVector<Use*, 16> UsesToRename; for (BasicBlock::iterator I = BB->begin(); I != BB->end(); ++I) { // Scan all uses of this instruction to see if it is used outside of its // block, and if so, record them in UsesToRename. for (Use &U : I->uses()) { Instruction *User = cast<Instruction>(U.getUser()); if (PHINode *UserPN = dyn_cast<PHINode>(User)) { if (UserPN->getIncomingBlock(U) == BB) continue; } else if (User->getParent() == BB) continue; UsesToRename.push_back(&U); } // If there are no uses outside the block, we're done with this instruction. if (UsesToRename.empty()) continue; DEBUG(dbgs() << "JT: Renaming non-local uses of: " << *I << "\n"); // We found a use of I outside of BB. Rename all uses of I that are outside // its block to be uses of the appropriate PHI node etc. See ValuesInBlocks // with the two values we know. SSAUpdate.Initialize(I->getType(), I->getName()); SSAUpdate.AddAvailableValue(BB, I); SSAUpdate.AddAvailableValue(NewBB, ValueMapping[I]); while (!UsesToRename.empty()) SSAUpdate.RewriteUse(*UsesToRename.pop_back_val()); DEBUG(dbgs() << "\n"); } // Ok, NewBB is good to go. Update the terminator of PredBB to jump to // NewBB instead of BB. This eliminates predecessors from BB, which requires // us to simplify any PHI nodes in BB. TerminatorInst *PredTerm = PredBB->getTerminator(); for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) if (PredTerm->getSuccessor(i) == BB) { BB->removePredecessor(PredBB, true); PredTerm->setSuccessor(i, NewBB); } // At this point, the IR is fully up to date and consistent. Do a quick scan // over the new instructions and zap any that are constants or dead. This // frequently happens because of phi translation. SimplifyInstructionsInBlock(NewBB, TLI); // Threaded an edge! ++NumThreads; return true; } /// DuplicateCondBranchOnPHIIntoPred - PredBB contains an unconditional branch /// to BB which contains an i1 PHI node and a conditional branch on that PHI. /// If we can duplicate the contents of BB up into PredBB do so now, this /// improves the odds that the branch will be on an analyzable instruction like /// a compare. bool JumpThreading::DuplicateCondBranchOnPHIIntoPred(BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs) { assert(!PredBBs.empty() && "Can't handle an empty set"); // If BB is a loop header, then duplicating this block outside the loop would // cause us to transform this into an irreducible loop, don't do this. // See the comments above FindLoopHeaders for justifications and caveats. if (LoopHeaders.count(BB)) { DEBUG(dbgs() << " Not duplicating loop header '" << BB->getName() << "' into predecessor block '" << PredBBs[0]->getName() << "' - it might create an irreducible loop!\n"); return false; } unsigned DuplicationCost = getJumpThreadDuplicationCost(BB, BBDupThreshold); if (DuplicationCost > BBDupThreshold) { DEBUG(dbgs() << " Not duplicating BB '" << BB->getName() << "' - Cost is too high: " << DuplicationCost << "\n"); return false; } // And finally, do it! Start by factoring the predecessors is needed. 
BasicBlock *PredBB; if (PredBBs.size() == 1) PredBB = PredBBs[0]; else { DEBUG(dbgs() << " Factoring out " << PredBBs.size() << " common predecessors.\n"); PredBB = SplitBlockPredecessors(BB, PredBBs, ".thr_comm"); } // Okay, we decided to do this! Clone all the instructions in BB onto the end // of PredBB. DEBUG(dbgs() << " Duplicating block '" << BB->getName() << "' into end of '" << PredBB->getName() << "' to eliminate branch on phi. Cost: " << DuplicationCost << " block is:" << *BB << "\n"); // Unless PredBB ends with an unconditional branch, split the edge so that we // can just clone the bits from BB into the end of the new PredBB. BranchInst *OldPredBranch = dyn_cast<BranchInst>(PredBB->getTerminator()); if (!OldPredBranch || !OldPredBranch->isUnconditional()) { PredBB = SplitEdge(PredBB, BB); OldPredBranch = cast<BranchInst>(PredBB->getTerminator()); } // We are going to have to map operands from the original BB block into the // PredBB block. Evaluate PHI nodes in BB. DenseMap<Instruction*, Value*> ValueMapping; BasicBlock::iterator BI = BB->begin(); for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI) ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB); // Clone the non-phi instructions of BB into PredBB, keeping track of the // mapping and using it to remap operands in the cloned instructions. for (; BI != BB->end(); ++BI) { Instruction *New = BI->clone(); // Remap operands to patch up intra-block references. for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i) if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) { DenseMap<Instruction*, Value*>::iterator I = ValueMapping.find(Inst); if (I != ValueMapping.end()) New->setOperand(i, I->second); } // If this instruction can be simplified after the operands are updated, // just use the simplified value instead. This frequently happens due to // phi translation. if (Value *IV = SimplifyInstruction(New, BB->getModule()->getDataLayout())) { delete New; ValueMapping[BI] = IV; } else { // Otherwise, insert the new instruction into the block. New->setName(BI->getName()); PredBB->getInstList().insert(OldPredBranch, New); ValueMapping[BI] = New; } } // Check to see if the targets of the branch had PHI nodes. If so, we need to // add entries to the PHI nodes for branch from PredBB now. BranchInst *BBBranch = cast<BranchInst>(BB->getTerminator()); AddPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(0), BB, PredBB, ValueMapping); AddPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(1), BB, PredBB, ValueMapping); // If there were values defined in BB that are used outside the block, then we // now have to update all uses of the value to use either the original value, // the cloned value, or some PHI derived value. This can require arbitrary // PHI insertion, of which we are prepared to do, clean these up now. SSAUpdater SSAUpdate; SmallVector<Use*, 16> UsesToRename; for (BasicBlock::iterator I = BB->begin(); I != BB->end(); ++I) { // Scan all uses of this instruction to see if it is used outside of its // block, and if so, record them in UsesToRename. for (Use &U : I->uses()) { Instruction *User = cast<Instruction>(U.getUser()); if (PHINode *UserPN = dyn_cast<PHINode>(User)) { if (UserPN->getIncomingBlock(U) == BB) continue; } else if (User->getParent() == BB) continue; UsesToRename.push_back(&U); } // If there are no uses outside the block, we're done with this instruction. if (UsesToRename.empty()) continue; DEBUG(dbgs() << "JT: Renaming non-local uses of: " << *I << "\n"); // We found a use of I outside of BB. 
Rename all uses of I that are outside // its block to be uses of the appropriate PHI node etc. See ValuesInBlocks // with the two values we know. SSAUpdate.Initialize(I->getType(), I->getName()); SSAUpdate.AddAvailableValue(BB, I); SSAUpdate.AddAvailableValue(PredBB, ValueMapping[I]); while (!UsesToRename.empty()) SSAUpdate.RewriteUse(*UsesToRename.pop_back_val()); DEBUG(dbgs() << "\n"); } // PredBB no longer jumps to BB, remove entries in the PHI node for the edge // that we nuked. BB->removePredecessor(PredBB, true); // Remove the unconditional branch at the end of the PredBB block. OldPredBranch->eraseFromParent(); ++NumDupes; return true; } /// TryToUnfoldSelect - Look for blocks of the form /// bb1: /// %a = select /// br bb /// /// bb2: /// %p = phi [%a, %bb] ... /// %c = icmp %p /// br i1 %c /// /// And expand the select into a branch structure if one of its arms allows %c /// to be folded. This later enables threading from bb1 over bb2. bool JumpThreading::TryToUnfoldSelect(CmpInst *CondCmp, BasicBlock *BB) { BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator()); PHINode *CondLHS = dyn_cast<PHINode>(CondCmp->getOperand(0)); Constant *CondRHS = cast<Constant>(CondCmp->getOperand(1)); if (!CondBr || !CondBr->isConditional() || !CondLHS || CondLHS->getParent() != BB) return false; for (unsigned I = 0, E = CondLHS->getNumIncomingValues(); I != E; ++I) { BasicBlock *Pred = CondLHS->getIncomingBlock(I); SelectInst *SI = dyn_cast<SelectInst>(CondLHS->getIncomingValue(I)); // Look if one of the incoming values is a select in the corresponding // predecessor. if (!SI || SI->getParent() != Pred || !SI->hasOneUse()) continue; BranchInst *PredTerm = dyn_cast<BranchInst>(Pred->getTerminator()); if (!PredTerm || !PredTerm->isUnconditional()) continue; // Now check if one of the select values would allow us to constant fold the // terminator in BB. We don't do the transform if both sides fold, those // cases will be threaded in any case. LazyValueInfo::Tristate LHSFolds = LVI->getPredicateOnEdge(CondCmp->getPredicate(), SI->getOperand(1), CondRHS, Pred, BB, CondCmp); LazyValueInfo::Tristate RHSFolds = LVI->getPredicateOnEdge(CondCmp->getPredicate(), SI->getOperand(2), CondRHS, Pred, BB, CondCmp); if ((LHSFolds != LazyValueInfo::Unknown || RHSFolds != LazyValueInfo::Unknown) && LHSFolds != RHSFolds) { // Expand the select. // // Pred -- // | v // | NewBB // | | // |----- // v // BB BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "select.unfold", BB->getParent(), BB); // Move the unconditional branch to NewBB. PredTerm->removeFromParent(); NewBB->getInstList().insert(NewBB->end(), PredTerm); // Create a conditional branch and update PHI nodes. BranchInst::Create(NewBB, BB, SI->getCondition(), Pred); CondLHS->setIncomingValue(I, SI->getFalseValue()); CondLHS->addIncoming(SI->getTrueValue(), NewBB); // The select is now dead. SI->eraseFromParent(); // Update any other PHI nodes in BB. for (BasicBlock::iterator BI = BB->begin(); PHINode *Phi = dyn_cast<PHINode>(BI); ++BI) if (Phi != CondLHS) Phi->addIncoming(Phi->getIncomingValueForBlock(Pred), NewBB); return true; } } return false; }
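// A minimal, self-contained sketch of the "most popular destination" selection
// used above before factoring predecessors and calling ThreadEdge. This is not
// LLVM's FindMostPopularDest: block names are plain std::string stand-ins for
// BasicBlock*, and ties are resolved arbitrarily here.
#include <iostream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

// Given (predecessor, destination) pairs, return the destination that the
// largest number of threadable predecessors jump to.
static std::string findMostPopularDest(
    const std::vector<std::pair<std::string, std::string>> &PredToDest) {
  std::unordered_map<std::string, unsigned> Popularity;
  for (const auto &PD : PredToDest)
    ++Popularity[PD.second];
  std::string Best;
  unsigned BestCount = 0;
  for (const auto &Entry : Popularity)
    if (Entry.second > BestCount) { // ties broken arbitrarily in this sketch
      Best = Entry.first;
      BestCount = Entry.second;
    }
  return Best;
}

int main() {
  // Three predecessors can be threaded to "then", one to "else"; "then" wins,
  // so those three predecessors would be factored into one block and threaded.
  std::vector<std::pair<std::string, std::string>> PredToDest = {
      {"pred0", "then"}, {"pred1", "then"}, {"pred2", "else"}, {"pred3", "then"}};
  std::cout << findMostPopularDest(PredToDest) << "\n"; // prints "then"
}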
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp
//===- LowerExpectIntrinsic.cpp - Lower expect intrinsic ------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass lowers the 'expect' intrinsic to LLVM metadata. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Function.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/MDBuilder.h" #include "llvm/IR/Metadata.h" #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Transforms/Scalar.h" using namespace llvm; #define DEBUG_TYPE "lower-expect-intrinsic" STATISTIC(ExpectIntrinsicsHandled, "Number of 'expect' intrinsic instructions handled"); #if 0 // HLSL Change Starts - option pending static cl::opt<uint32_t> LikelyBranchWeight("likely-branch-weight", cl::Hidden, cl::init(64), cl::desc("Weight of the branch likely to be taken (default = 64)")); static cl::opt<uint32_t> UnlikelyBranchWeight("unlikely-branch-weight", cl::Hidden, cl::init(4), cl::desc("Weight of the branch unlikely to be taken (default = 4)")); #else static const uint32_t LikelyBranchWeight = 64; static const uint32_t UnlikelyBranchWeight = 4; #endif static bool handleSwitchExpect(SwitchInst &SI) { CallInst *CI = dyn_cast<CallInst>(SI.getCondition()); if (!CI) return false; Function *Fn = CI->getCalledFunction(); if (!Fn || Fn->getIntrinsicID() != Intrinsic::expect) return false; Value *ArgValue = CI->getArgOperand(0); ConstantInt *ExpectedValue = dyn_cast<ConstantInt>(CI->getArgOperand(1)); if (!ExpectedValue) return false; SwitchInst::CaseIt Case = SI.findCaseValue(ExpectedValue); unsigned n = SI.getNumCases(); // +1 for default case. 
#if 0 // HLSL Change - help the compiler pick the right constructor overload SmallVector<uint32_t, 16> Weights(n + 1, UnlikelyBranchWeight); #else SmallVector<uint32_t, 16> Weights; Weights.assign(n + 1, UnlikelyBranchWeight); #endif if (Case == SI.case_default()) Weights[0] = LikelyBranchWeight; else Weights[Case.getCaseIndex() + 1] = LikelyBranchWeight; SI.setMetadata(LLVMContext::MD_prof, MDBuilder(CI->getContext()).createBranchWeights(Weights)); SI.setCondition(ArgValue); return true; } static bool handleBranchExpect(BranchInst &BI) { if (BI.isUnconditional()) return false; // Handle non-optimized IR code like: // %expval = call i64 @llvm.expect.i64(i64 %conv1, i64 1) // %tobool = icmp ne i64 %expval, 0 // br i1 %tobool, label %if.then, label %if.end // // Or the following simpler case: // %expval = call i1 @llvm.expect.i1(i1 %cmp, i1 1) // br i1 %expval, label %if.then, label %if.end CallInst *CI; ICmpInst *CmpI = dyn_cast<ICmpInst>(BI.getCondition()); if (!CmpI) { CI = dyn_cast<CallInst>(BI.getCondition()); } else { if (CmpI->getPredicate() != CmpInst::ICMP_NE) return false; CI = dyn_cast<CallInst>(CmpI->getOperand(0)); } if (!CI) return false; Function *Fn = CI->getCalledFunction(); if (!Fn || Fn->getIntrinsicID() != Intrinsic::expect) return false; Value *ArgValue = CI->getArgOperand(0); ConstantInt *ExpectedValue = dyn_cast<ConstantInt>(CI->getArgOperand(1)); if (!ExpectedValue) return false; MDBuilder MDB(CI->getContext()); MDNode *Node; // If expect value is equal to 1 it means that we are more likely to take // branch 0, in other case more likely is branch 1. if (ExpectedValue->isOne()) Node = MDB.createBranchWeights(LikelyBranchWeight, UnlikelyBranchWeight); else Node = MDB.createBranchWeights(UnlikelyBranchWeight, LikelyBranchWeight); BI.setMetadata(LLVMContext::MD_prof, Node); if (CmpI) CmpI->setOperand(0, ArgValue); else BI.setCondition(ArgValue); return true; } static bool lowerExpectIntrinsic(Function &F) { bool Changed = false; for (BasicBlock &BB : F) { // Create "block_weights" metadata. if (BranchInst *BI = dyn_cast<BranchInst>(BB.getTerminator())) { if (handleBranchExpect(*BI)) ExpectIntrinsicsHandled++; } else if (SwitchInst *SI = dyn_cast<SwitchInst>(BB.getTerminator())) { if (handleSwitchExpect(*SI)) ExpectIntrinsicsHandled++; } // remove llvm.expect intrinsics. for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) { CallInst *CI = dyn_cast<CallInst>(BI++); if (!CI) continue; Function *Fn = CI->getCalledFunction(); if (Fn && Fn->getIntrinsicID() == Intrinsic::expect) { Value *Exp = CI->getArgOperand(0); CI->replaceAllUsesWith(Exp); CI->eraseFromParent(); Changed = true; } } } return Changed; } PreservedAnalyses LowerExpectIntrinsicPass::run(Function &F) { if (lowerExpectIntrinsic(F)) return PreservedAnalyses::none(); return PreservedAnalyses::all(); } namespace { /// \brief Legacy pass for lowering expect intrinsics out of the IR. /// /// When this pass is run over a function it uses expect intrinsics which feed /// branches and switches to provide branch weight metadata for those /// terminators. It then removes the expect intrinsics from the IR so the rest /// of the optimizer can ignore them. 
class LowerExpectIntrinsic : public FunctionPass { public: static char ID; LowerExpectIntrinsic() : FunctionPass(ID) { initializeLowerExpectIntrinsicPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function &F) override { return lowerExpectIntrinsic(F); } }; } char LowerExpectIntrinsic::ID = 0; INITIALIZE_PASS(LowerExpectIntrinsic, "lower-expect", "Lower 'expect' Intrinsics", false, false) FunctionPass *llvm::createLowerExpectIntrinsicPass() { return new LowerExpectIntrinsic(); }
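// A standalone sketch of the weight selection performed by handleBranchExpect
// above, decoupled from LLVM's MDBuilder. With the hard-coded weights used in
// this file (64 likely, 4 unlikely), an expected value of 1 makes the true
// successor hot; any other expected value makes the false successor hot. The
// returned pair corresponds to what createBranchWeights receives.
#include <cstdint>
#include <iostream>
#include <utility>

static const uint32_t LikelyBranchWeight = 64;
static const uint32_t UnlikelyBranchWeight = 4;

// Returns {weight of the true successor, weight of the false successor}.
static std::pair<uint32_t, uint32_t> expectToBranchWeights(bool ExpectedTrue) {
  if (ExpectedTrue)
    return {LikelyBranchWeight, UnlikelyBranchWeight};
  return {UnlikelyBranchWeight, LikelyBranchWeight};
}

int main() {
  auto Hot = expectToBranchWeights(/*ExpectedTrue=*/true);
  auto Cold = expectToBranchWeights(/*ExpectedTrue=*/false);
  std::cout << Hot.first << ":" << Hot.second << "\n";   // 64:4
  std::cout << Cold.first << ":" << Cold.second << "\n"; // 4:64
}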
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/Sink.cpp
//===-- Sink.cpp - Code Sinking -------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass moves instructions into successor blocks, when possible, so that // they aren't executed on paths where their results aren't needed. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/CFG.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; #define DEBUG_TYPE "sink" STATISTIC(NumSunk, "Number of instructions sunk"); STATISTIC(NumSinkIter, "Number of sinking iterations"); namespace { class Sinking : public FunctionPass { DominatorTree *DT; LoopInfo *LI; AliasAnalysis *AA; public: static char ID; // Pass identification Sinking() : FunctionPass(ID) { initializeSinkingPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function &F) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesCFG(); FunctionPass::getAnalysisUsage(AU); AU.addRequired<AliasAnalysis>(); AU.addRequired<DominatorTreeWrapperPass>(); AU.addRequired<LoopInfoWrapperPass>(); AU.addPreserved<DominatorTreeWrapperPass>(); AU.addPreserved<LoopInfoWrapperPass>(); } private: bool ProcessBlock(BasicBlock &BB); bool SinkInstruction(Instruction *I, SmallPtrSetImpl<Instruction*> &Stores); bool AllUsesDominatedByBlock(Instruction *Inst, BasicBlock *BB) const; bool IsAcceptableTarget(Instruction *Inst, BasicBlock *SuccToSinkTo) const; }; } // end anonymous namespace char Sinking::ID = 0; INITIALIZE_PASS_BEGIN(Sinking, "sink", "Code sinking", false, false) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_END(Sinking, "sink", "Code sinking", false, false) FunctionPass *llvm::createSinkingPass() { return new Sinking(); } /// AllUsesDominatedByBlock - Return true if all uses of the specified value /// occur in blocks dominated by the specified block. bool Sinking::AllUsesDominatedByBlock(Instruction *Inst, BasicBlock *BB) const { // Ignoring debug uses is necessary so debug info doesn't affect the code. // This may leave a referencing dbg_value in the original block, before // the definition of the vreg. Dwarf generator handles this although the // user might not get the right info at runtime. for (Use &U : Inst->uses()) { // Determine the block of the use. Instruction *UseInst = cast<Instruction>(U.getUser()); BasicBlock *UseBlock = UseInst->getParent(); if (PHINode *PN = dyn_cast<PHINode>(UseInst)) { // PHI nodes use the operand in the predecessor block, not the block with // the PHI. unsigned Num = PHINode::getIncomingValueNumForOperand(U.getOperandNo()); UseBlock = PN->getIncomingBlock(Num); } // Check that it dominates. 
if (!DT->dominates(BB, UseBlock)) return false; } return true; } bool Sinking::runOnFunction(Function &F) { DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); AA = &getAnalysis<AliasAnalysis>(); bool MadeChange, EverMadeChange = false; do { MadeChange = false; DEBUG(dbgs() << "Sinking iteration " << NumSinkIter << "\n"); // Process all basic blocks. for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) MadeChange |= ProcessBlock(*I); EverMadeChange |= MadeChange; NumSinkIter++; } while (MadeChange); return EverMadeChange; } bool Sinking::ProcessBlock(BasicBlock &BB) { // Can't sink anything out of a block that has less than two successors. if (BB.getTerminator()->getNumSuccessors() <= 1 || BB.empty()) return false; // Don't bother sinking code out of unreachable blocks. In addition to being // unprofitable, it can also lead to infinite looping, because in an // unreachable loop there may be nowhere to stop. if (!DT->isReachableFromEntry(&BB)) return false; bool MadeChange = false; // Walk the basic block bottom-up. Remember if we saw a store. BasicBlock::iterator I = BB.end(); --I; bool ProcessedBegin = false; SmallPtrSet<Instruction *, 8> Stores; do { Instruction *Inst = I; // The instruction to sink. // Predecrement I (if it's not begin) so that it isn't invalidated by // sinking. ProcessedBegin = I == BB.begin(); if (!ProcessedBegin) --I; if (isa<DbgInfoIntrinsic>(Inst)) continue; if (SinkInstruction(Inst, Stores)) ++NumSunk, MadeChange = true; // If we just processed the first instruction in the block, we're done. } while (!ProcessedBegin); return MadeChange; } static bool isSafeToMove(Instruction *Inst, AliasAnalysis *AA, SmallPtrSetImpl<Instruction *> &Stores) { if (Inst->mayWriteToMemory()) { Stores.insert(Inst); return false; } if (LoadInst *L = dyn_cast<LoadInst>(Inst)) { MemoryLocation Loc = MemoryLocation::get(L); for (Instruction *S : Stores) if (AA->getModRefInfo(S, Loc) & AliasAnalysis::Mod) return false; } if (isa<TerminatorInst>(Inst) || isa<PHINode>(Inst)) return false; // Convergent operations can only be moved to control equivalent blocks. if (auto CS = CallSite(Inst)) { if (CS.hasFnAttr(Attribute::Convergent)) return false; } return true; } /// IsAcceptableTarget - Return true if it is possible to sink the instruction /// in the specified basic block. bool Sinking::IsAcceptableTarget(Instruction *Inst, BasicBlock *SuccToSinkTo) const { assert(Inst && "Instruction to be sunk is null"); assert(SuccToSinkTo && "Candidate sink target is null"); // It is not possible to sink an instruction into its own block. This can // happen with loops. if (Inst->getParent() == SuccToSinkTo) return false; // If the block has multiple predecessors, this would introduce computation // on different code paths. We could split the critical edge, but for now we // just punt. // FIXME: Split critical edges if not backedges. if (SuccToSinkTo->getUniquePredecessor() != Inst->getParent()) { // We cannot sink a load across a critical edge - there may be stores in // other code paths. if (!isSafeToSpeculativelyExecute(Inst)) return false; // We don't want to sink across a critical edge if we don't dominate the // successor. We could be introducing calculations to new code paths. if (!DT->dominates(Inst->getParent(), SuccToSinkTo)) return false; // Don't sink instructions into a loop. 
Loop *succ = LI->getLoopFor(SuccToSinkTo); Loop *cur = LI->getLoopFor(Inst->getParent()); if (succ != nullptr && succ != cur) return false; } // Finally, check that all the uses of the instruction are actually // dominated by the candidate return AllUsesDominatedByBlock(Inst, SuccToSinkTo); } /// SinkInstruction - Determine whether it is safe to sink the specified machine /// instruction out of its current block into a successor. bool Sinking::SinkInstruction(Instruction *Inst, SmallPtrSetImpl<Instruction *> &Stores) { // Don't sink static alloca instructions. CodeGen assumes allocas outside the // entry block are dynamically sized stack objects. if (AllocaInst *AI = dyn_cast<AllocaInst>(Inst)) if (AI->isStaticAlloca()) return false; // Check if it's safe to move the instruction. if (!isSafeToMove(Inst, AA, Stores)) return false; // FIXME: This should include support for sinking instructions within the // block they are currently in to shorten the live ranges. We often get // instructions sunk into the top of a large block, but it would be better to // also sink them down before their first use in the block. This xform has to // be careful not to *increase* register pressure though, e.g. sinking // "x = y + z" down if it kills y and z would increase the live ranges of y // and z and only shrink the live range of x. // SuccToSinkTo - This is the successor to sink this instruction to, once we // decide. BasicBlock *SuccToSinkTo = nullptr; // Instructions can only be sunk if all their uses are in blocks // dominated by one of the successors. // Look at all the postdominators and see if we can sink it in one. DomTreeNode *DTN = DT->getNode(Inst->getParent()); for (DomTreeNode::iterator I = DTN->begin(), E = DTN->end(); I != E && SuccToSinkTo == nullptr; ++I) { BasicBlock *Candidate = (*I)->getBlock(); if ((*I)->getIDom()->getBlock() == Inst->getParent() && IsAcceptableTarget(Inst, Candidate)) SuccToSinkTo = Candidate; } // If no suitable postdominator was found, look at all the successors and // decide which one we should sink to, if any. for (succ_iterator I = succ_begin(Inst->getParent()), E = succ_end(Inst->getParent()); I != E && !SuccToSinkTo; ++I) { if (IsAcceptableTarget(Inst, *I)) SuccToSinkTo = *I; } // If we couldn't find a block to sink to, ignore this instruction. if (!SuccToSinkTo) return false; DEBUG(dbgs() << "Sink" << *Inst << " ("; Inst->getParent()->printAsOperand(dbgs(), false); dbgs() << " -> "; SuccToSinkTo->printAsOperand(dbgs(), false); dbgs() << ")\n"); // Move the instruction. Inst->moveBefore(SuccToSinkTo->getFirstInsertionPt()); return true; }
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/GVN.cpp
//===- GVN.cpp - Eliminate redundant values and loads ---------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass performs global value numbering to eliminate fully redundant // instructions. It also performs simple dead load elimination. // // Note that this pass does the value numbering itself; it does not use the // ValueNumbering analysis passes. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DepthFirstIterator.h" #include "llvm/ADT/Hashing.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/PostOrderIterator.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/CFG.h" #include "llvm/Analysis/ConstantFolding.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/Loads.h" #include "llvm/Analysis/MemoryBuiltins.h" #include "llvm/Analysis/MemoryDependenceAnalysis.h" #include "llvm/Analysis/PHITransAddr.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Metadata.h" #include "llvm/IR/PatternMatch.h" #include "llvm/Support/Allocator.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/SSAUpdater.h" #include <vector> #include "dxc/DXIL/DxilConstants.h" // HLSL Change #include "dxc/DXIL/DxilOperations.h" // HLSL Change using namespace llvm; using namespace PatternMatch; #define DEBUG_TYPE "gvn" STATISTIC(NumGVNInstr, "Number of instructions deleted"); STATISTIC(NumGVNLoad, "Number of loads deleted"); STATISTIC(NumGVNPRE, "Number of instructions PRE'd"); STATISTIC(NumGVNBlocks, "Number of blocks merged"); STATISTIC(NumGVNSimpl, "Number of instructions simplified"); STATISTIC(NumGVNEqProp, "Number of equalities propagated"); STATISTIC(NumPRELoad, "Number of loads PRE'd"); #if 0 // HLSL Change Starts - option pending static cl::opt<bool> EnablePRE("enable-pre", cl::init(true), cl::Hidden); static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true)); // Maximum allowed recursion depth. static cl::opt<uint32_t> MaxRecurseDepth("max-recurse-depth", cl::Hidden, cl::init(1000), cl::ZeroOrMore, cl::desc("Max recurse depth (default = 1000)")); #else static const bool EnablePRE = true; static const bool EnableLoadPRE = true; static const uint32_t MaxRecurseDepth = 1000; #endif // HLSL Change Ends //===----------------------------------------------------------------------===// // ValueTable Class //===----------------------------------------------------------------------===// /// This class holds the mapping between values and value numbers. It is used /// as an efficient mechanism to determine the expression-wise equivalence of /// two values. 
namespace { struct Expression { uint32_t opcode; Type *type; SmallVector<uint32_t, 4> varargs; Expression(uint32_t o = ~2U) : opcode(o) { } bool operator==(const Expression &other) const { if (opcode != other.opcode) return false; if (opcode == ~0U || opcode == ~1U) return true; if (type != other.type) return false; if (varargs != other.varargs) return false; return true; } friend hash_code hash_value(const Expression &Value) { return hash_combine(Value.opcode, Value.type, hash_combine_range(Value.varargs.begin(), Value.varargs.end())); } }; class ValueTable { DenseMap<Value*, uint32_t> valueNumbering; DenseMap<Expression, uint32_t> expressionNumbering; AliasAnalysis *AA; MemoryDependenceAnalysis *MD; DominatorTree *DT; uint32_t nextValueNumber; Expression create_expression(Instruction* I); Expression create_cmp_expression(unsigned Opcode, CmpInst::Predicate Predicate, Value *LHS, Value *RHS); Expression create_extractvalue_expression(ExtractValueInst* EI); uint32_t lookup_or_add_call(CallInst* C); public: ValueTable() : nextValueNumber(1) { } uint32_t lookup_or_add(Value *V); uint32_t lookup(Value *V) const; uint32_t lookup_or_add_cmp(unsigned Opcode, CmpInst::Predicate Pred, Value *LHS, Value *RHS); void add(Value *V, uint32_t num); void clear(); void erase(Value *v); void setAliasAnalysis(AliasAnalysis* A) { AA = A; } AliasAnalysis *getAliasAnalysis() const { return AA; } void setMemDep(MemoryDependenceAnalysis* M) { MD = M; } void setDomTree(DominatorTree* D) { DT = D; } uint32_t getNextUnusedValueNumber() { return nextValueNumber; } void verifyRemoved(const Value *) const; }; } namespace llvm { template <> struct DenseMapInfo<Expression> { static inline Expression getEmptyKey() { return ~0U; } static inline Expression getTombstoneKey() { return ~1U; } static unsigned getHashValue(const Expression e) { using llvm::hash_value; return static_cast<unsigned>(hash_value(e)); } static bool isEqual(const Expression &LHS, const Expression &RHS) { return LHS == RHS; } }; } //===----------------------------------------------------------------------===// // ValueTable Internal Functions //===----------------------------------------------------------------------===// Expression ValueTable::create_expression(Instruction *I) { Expression e; e.type = I->getType(); e.opcode = I->getOpcode(); for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) e.varargs.push_back(lookup_or_add(*OI)); if (I->isCommutative()) { // Ensure that commutative instructions that only differ by a permutation // of their operands get the same value number by sorting the operand value // numbers. Since all commutative instructions have two operands it is more // efficient to sort by hand rather than using, say, std::sort. assert(I->getNumOperands() == 2 && "Unsupported commutative instruction!"); if (e.varargs[0] > e.varargs[1]) std::swap(e.varargs[0], e.varargs[1]); } if (CmpInst *C = dyn_cast<CmpInst>(I)) { // Sort the operand value numbers so x<y and y>x get the same value number. 
CmpInst::Predicate Predicate = C->getPredicate(); if (e.varargs[0] > e.varargs[1]) { std::swap(e.varargs[0], e.varargs[1]); Predicate = CmpInst::getSwappedPredicate(Predicate); } e.opcode = (C->getOpcode() << 8) | Predicate; } else if (InsertValueInst *E = dyn_cast<InsertValueInst>(I)) { for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end(); II != IE; ++II) e.varargs.push_back(*II); } return e; } Expression ValueTable::create_cmp_expression(unsigned Opcode, CmpInst::Predicate Predicate, Value *LHS, Value *RHS) { assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) && "Not a comparison!"); Expression e; e.type = CmpInst::makeCmpResultType(LHS->getType()); e.varargs.push_back(lookup_or_add(LHS)); e.varargs.push_back(lookup_or_add(RHS)); // Sort the operand value numbers so x<y and y>x get the same value number. if (e.varargs[0] > e.varargs[1]) { std::swap(e.varargs[0], e.varargs[1]); Predicate = CmpInst::getSwappedPredicate(Predicate); } e.opcode = (Opcode << 8) | Predicate; return e; } Expression ValueTable::create_extractvalue_expression(ExtractValueInst *EI) { assert(EI && "Not an ExtractValueInst?"); Expression e; e.type = EI->getType(); e.opcode = 0; IntrinsicInst *I = dyn_cast<IntrinsicInst>(EI->getAggregateOperand()); if (I != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0 ) { // EI might be an extract from one of our recognised intrinsics. If it // is we'll synthesize a semantically equivalent expression instead on // an extract value expression. switch (I->getIntrinsicID()) { case Intrinsic::sadd_with_overflow: case Intrinsic::uadd_with_overflow: e.opcode = Instruction::Add; break; case Intrinsic::ssub_with_overflow: case Intrinsic::usub_with_overflow: e.opcode = Instruction::Sub; break; case Intrinsic::smul_with_overflow: case Intrinsic::umul_with_overflow: e.opcode = Instruction::Mul; break; default: break; } if (e.opcode != 0) { // Intrinsic recognized. Grab its args to finish building the expression. assert(I->getNumArgOperands() == 2 && "Expect two args for recognised intrinsics."); e.varargs.push_back(lookup_or_add(I->getArgOperand(0))); e.varargs.push_back(lookup_or_add(I->getArgOperand(1))); return e; } } // Not a recognised intrinsic. Fall back to producing an extract value // expression. e.opcode = EI->getOpcode(); for (Instruction::op_iterator OI = EI->op_begin(), OE = EI->op_end(); OI != OE; ++OI) e.varargs.push_back(lookup_or_add(*OI)); for (ExtractValueInst::idx_iterator II = EI->idx_begin(), IE = EI->idx_end(); II != IE; ++II) e.varargs.push_back(*II); return e; } //===----------------------------------------------------------------------===// // ValueTable External Functions //===----------------------------------------------------------------------===// /// add - Insert a value into the table with a specified value number. 
void ValueTable::add(Value *V, uint32_t num) { valueNumbering.insert(std::make_pair(V, num)); } uint32_t ValueTable::lookup_or_add_call(CallInst *C) { if (AA->doesNotAccessMemory(C)) { Expression exp = create_expression(C); uint32_t &e = expressionNumbering[exp]; if (!e) e = nextValueNumber++; valueNumbering[C] = e; return e; } else if (AA->onlyReadsMemory(C)) { Expression exp = create_expression(C); uint32_t &e = expressionNumbering[exp]; if (!e) { e = nextValueNumber++; valueNumbering[C] = e; return e; } if (!MD) { e = nextValueNumber++; valueNumbering[C] = e; return e; } MemDepResult local_dep = MD->getDependency(C); if (!local_dep.isDef() && !local_dep.isNonLocal()) { valueNumbering[C] = nextValueNumber; return nextValueNumber++; } if (local_dep.isDef()) { CallInst* local_cdep = cast<CallInst>(local_dep.getInst()); if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) { valueNumbering[C] = nextValueNumber; return nextValueNumber++; } for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) { uint32_t c_vn = lookup_or_add(C->getArgOperand(i)); uint32_t cd_vn = lookup_or_add(local_cdep->getArgOperand(i)); if (c_vn != cd_vn) { valueNumbering[C] = nextValueNumber; return nextValueNumber++; } } uint32_t v = lookup_or_add(local_cdep); valueNumbering[C] = v; return v; } // Non-local case. const MemoryDependenceAnalysis::NonLocalDepInfo &deps = MD->getNonLocalCallDependency(CallSite(C)); // FIXME: Move the checking logic to MemDep! CallInst* cdep = nullptr; // Check to see if we have a single dominating call instruction that is // identical to C. for (unsigned i = 0, e = deps.size(); i != e; ++i) { const NonLocalDepEntry *I = &deps[i]; if (I->getResult().isNonLocal()) continue; // We don't handle non-definitions. If we already have a call, reject // instruction dependencies. if (!I->getResult().isDef() || cdep != nullptr) { cdep = nullptr; break; } CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst()); // FIXME: All duplicated with non-local case. if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){ cdep = NonLocalDepCall; continue; } cdep = nullptr; break; } if (!cdep) { valueNumbering[C] = nextValueNumber; return nextValueNumber++; } if (cdep->getNumArgOperands() != C->getNumArgOperands()) { valueNumbering[C] = nextValueNumber; return nextValueNumber++; } for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) { uint32_t c_vn = lookup_or_add(C->getArgOperand(i)); uint32_t cd_vn = lookup_or_add(cdep->getArgOperand(i)); if (c_vn != cd_vn) { valueNumbering[C] = nextValueNumber; return nextValueNumber++; } } uint32_t v = lookup_or_add(cdep); valueNumbering[C] = v; return v; } else { valueNumbering[C] = nextValueNumber; return nextValueNumber++; } } /// lookup_or_add - Returns the value number for the specified value, assigning /// it a new number if it did not have one before. 
uint32_t ValueTable::lookup_or_add(Value *V) { DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V); if (VI != valueNumbering.end()) return VI->second; if (!isa<Instruction>(V)) { valueNumbering[V] = nextValueNumber; return nextValueNumber++; } Instruction* I = cast<Instruction>(V); Expression exp; switch (I->getOpcode()) { case Instruction::Call: return lookup_or_add_call(cast<CallInst>(I)); case Instruction::Add: case Instruction::FAdd: case Instruction::Sub: case Instruction::FSub: case Instruction::Mul: case Instruction::FMul: case Instruction::UDiv: case Instruction::SDiv: case Instruction::FDiv: case Instruction::URem: case Instruction::SRem: case Instruction::FRem: case Instruction::Shl: case Instruction::LShr: case Instruction::AShr: case Instruction::And: case Instruction::Or: case Instruction::Xor: case Instruction::ICmp: case Instruction::FCmp: case Instruction::Trunc: case Instruction::ZExt: case Instruction::SExt: case Instruction::FPToUI: case Instruction::FPToSI: case Instruction::UIToFP: case Instruction::SIToFP: case Instruction::FPTrunc: case Instruction::FPExt: case Instruction::PtrToInt: case Instruction::IntToPtr: case Instruction::BitCast: case Instruction::Select: case Instruction::ExtractElement: case Instruction::InsertElement: case Instruction::ShuffleVector: case Instruction::InsertValue: case Instruction::GetElementPtr: exp = create_expression(I); break; case Instruction::ExtractValue: exp = create_extractvalue_expression(cast<ExtractValueInst>(I)); break; default: valueNumbering[V] = nextValueNumber; return nextValueNumber++; } uint32_t& e = expressionNumbering[exp]; if (!e) e = nextValueNumber++; valueNumbering[V] = e; return e; } /// Returns the value number of the specified value. Fails if /// the value has not yet been numbered. uint32_t ValueTable::lookup(Value *V) const { DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V); assert(VI != valueNumbering.end() && "Value not numbered?"); return VI->second; } /// Returns the value number of the given comparison, /// assigning it a new number if it did not have one before. Useful when /// we deduced the result of a comparison, but don't immediately have an /// instruction realizing that comparison to hand. uint32_t ValueTable::lookup_or_add_cmp(unsigned Opcode, CmpInst::Predicate Predicate, Value *LHS, Value *RHS) { Expression exp = create_cmp_expression(Opcode, Predicate, LHS, RHS); uint32_t& e = expressionNumbering[exp]; if (!e) e = nextValueNumber++; return e; } /// Remove all entries from the ValueTable. void ValueTable::clear() { valueNumbering.clear(); expressionNumbering.clear(); nextValueNumber = 1; } /// Remove a value from the value numbering. void ValueTable::erase(Value *V) { valueNumbering.erase(V); } /// verifyRemoved - Verify that the value is removed from all internal data /// structures. void ValueTable::verifyRemoved(const Value *V) const { for (DenseMap<Value*, uint32_t>::const_iterator I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) { assert(I->first != V && "Inst still occurs in value numbering map!"); } } //===----------------------------------------------------------------------===// // GVN Pass //===----------------------------------------------------------------------===// namespace { class GVN; struct AvailableValueInBlock { /// BB - The basic block in question. BasicBlock *BB; enum ValType { SimpleVal, // A simple offsetted value that is accessed. LoadVal, // A value produced by a load. 
MemIntrin, // A memory intrinsic which is loaded from. UndefVal // A UndefValue representing a value from dead block (which // is not yet physically removed from the CFG). }; /// V - The value that is live out of the block. PointerIntPair<Value *, 2, ValType> Val; /// Offset - The byte offset in Val that is interesting for the load query. unsigned Offset; static AvailableValueInBlock get(BasicBlock *BB, Value *V, unsigned Offset = 0) { AvailableValueInBlock Res; Res.BB = BB; Res.Val.setPointer(V); Res.Val.setInt(SimpleVal); Res.Offset = Offset; return Res; } static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI, unsigned Offset = 0) { AvailableValueInBlock Res; Res.BB = BB; Res.Val.setPointer(MI); Res.Val.setInt(MemIntrin); Res.Offset = Offset; return Res; } static AvailableValueInBlock getLoad(BasicBlock *BB, LoadInst *LI, unsigned Offset = 0) { AvailableValueInBlock Res; Res.BB = BB; Res.Val.setPointer(LI); Res.Val.setInt(LoadVal); Res.Offset = Offset; return Res; } static AvailableValueInBlock getUndef(BasicBlock *BB) { AvailableValueInBlock Res; Res.BB = BB; Res.Val.setPointer(nullptr); Res.Val.setInt(UndefVal); Res.Offset = 0; return Res; } bool isSimpleValue() const { return Val.getInt() == SimpleVal; } bool isCoercedLoadValue() const { return Val.getInt() == LoadVal; } bool isMemIntrinValue() const { return Val.getInt() == MemIntrin; } bool isUndefValue() const { return Val.getInt() == UndefVal; } Value *getSimpleValue() const { assert(isSimpleValue() && "Wrong accessor"); return Val.getPointer(); } LoadInst *getCoercedLoadValue() const { assert(isCoercedLoadValue() && "Wrong accessor"); return cast<LoadInst>(Val.getPointer()); } MemIntrinsic *getMemIntrinValue() const { assert(isMemIntrinValue() && "Wrong accessor"); return cast<MemIntrinsic>(Val.getPointer()); } /// Emit code into this block to adjust the value defined here to the /// specified type. This handles various coercion cases. Value *MaterializeAdjustedValue(LoadInst *LI, GVN &gvn) const; }; class GVN : public FunctionPass { bool NoLoads; MemoryDependenceAnalysis *MD; DominatorTree *DT; const TargetLibraryInfo *TLI; AssumptionCache *AC; SetVector<BasicBlock *> DeadBlocks; ValueTable VN; /// A mapping from value numbers to lists of Value*'s that /// have that value number. Use findLeader to query it. struct LeaderTableEntry { Value *Val; const BasicBlock *BB; LeaderTableEntry *Next; }; DenseMap<uint32_t, LeaderTableEntry> LeaderTable; BumpPtrAllocator TableAllocator; SmallVector<Instruction*, 8> InstrsToErase; typedef SmallVector<NonLocalDepResult, 64> LoadDepVect; typedef SmallVector<AvailableValueInBlock, 64> AvailValInBlkVect; typedef SmallVector<BasicBlock*, 64> UnavailBlkVect; public: static char ID; // Pass identification, replacement for typeid explicit GVN(bool noloads = false) : FunctionPass(ID), NoLoads(noloads), MD(nullptr) { initializeGVNPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function &F) override; /// This removes the specified instruction from /// our various maps and marks it for deletion. void markInstructionForDeletion(Instruction *I) { VN.erase(I); InstrsToErase.push_back(I); } DominatorTree &getDominatorTree() const { return *DT; } AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); } MemoryDependenceAnalysis &getMemDep() const { return *MD; } private: /// Push a new Value to the LeaderTable onto the list for its value number. 
void addToLeaderTable(uint32_t N, Value *V, const BasicBlock *BB) { LeaderTableEntry &Curr = LeaderTable[N]; if (!Curr.Val) { Curr.Val = V; Curr.BB = BB; return; } LeaderTableEntry *Node = TableAllocator.Allocate<LeaderTableEntry>(); Node->Val = V; Node->BB = BB; Node->Next = Curr.Next; Curr.Next = Node; } /// Scan the list of values corresponding to a given /// value number, and remove the given instruction if encountered. void removeFromLeaderTable(uint32_t N, Instruction *I, BasicBlock *BB) { LeaderTableEntry* Prev = nullptr; LeaderTableEntry* Curr = &LeaderTable[N]; while (Curr && (Curr->Val != I || Curr->BB != BB)) { Prev = Curr; Curr = Curr->Next; } if (!Curr) return; if (Prev) { Prev->Next = Curr->Next; } else { if (!Curr->Next) { Curr->Val = nullptr; Curr->BB = nullptr; } else { LeaderTableEntry* Next = Curr->Next; Curr->Val = Next->Val; Curr->BB = Next->BB; Curr->Next = Next->Next; } } } // List of critical edges to be split between iterations. SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit; // This transformation requires dominator postdominator info void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<AssumptionCacheTracker>(); AU.addRequired<DominatorTreeWrapperPass>(); AU.addRequired<TargetLibraryInfoWrapperPass>(); if (!NoLoads) AU.addRequired<MemoryDependenceAnalysis>(); AU.addRequired<AliasAnalysis>(); AU.addPreserved<DominatorTreeWrapperPass>(); AU.addPreserved<AliasAnalysis>(); } // Helper fuctions of redundant load elimination bool processLoad(LoadInst *L); bool processNonLocalLoad(LoadInst *L); void AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps, AvailValInBlkVect &ValuesPerBlock, UnavailBlkVect &UnavailableBlocks); bool PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock, UnavailBlkVect &UnavailableBlocks); // Other helper routines bool processInstruction(Instruction *I); bool processBlock(BasicBlock *BB); void dump(DenseMap<uint32_t, Value*> &d); bool iterateOnFunction(Function &F); bool performPRE(Function &F); bool performScalarPRE(Instruction *I); bool performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred, unsigned int ValNo); Value *findLeader(const BasicBlock *BB, uint32_t num); void cleanupGlobalSets(); void verifyRemoved(const Instruction *I) const; bool splitCriticalEdges(); BasicBlock *splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ); bool propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root); bool processFoldableCondBr(BranchInst *BI); void addDeadBlock(BasicBlock *BB); void assignValNumForDeadCode(); }; char GVN::ID = 0; } // The public interface to this file... FunctionPass *llvm::createGVNPass(bool NoLoads) { return new GVN(NoLoads); } INITIALIZE_PASS_BEGIN(GVN, "gvn", "Global Value Numbering", false, false) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_END(GVN, "gvn", "Global Value Numbering", false, false) #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void GVN::dump(DenseMap<uint32_t, Value*>& d) { errs() << "{\n"; for (DenseMap<uint32_t, Value*>::iterator I = d.begin(), E = d.end(); I != E; ++I) { errs() << I->first << "\n"; I->second->dump(); } errs() << "}\n"; } #endif /// Return true if we can prove that the value /// we're analyzing is fully available in the specified block. 
As we go, keep /// track of which blocks we know are fully alive in FullyAvailableBlocks. This /// map is actually a tri-state map with the following values: /// 0) we know the block *is not* fully available. /// 1) we know the block *is* fully available. /// 2) we do not know whether the block is fully available or not, but we are /// currently speculating that it will be. /// 3) we are speculating for this block and have used that to speculate for /// other blocks. static bool IsValueFullyAvailableInBlock(BasicBlock *BB, DenseMap<BasicBlock*, char> &FullyAvailableBlocks, uint32_t RecurseDepth) { if (RecurseDepth > MaxRecurseDepth) return false; // Optimistically assume that the block is fully available and check to see // if we already know about this block in one lookup. std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV = FullyAvailableBlocks.insert(std::make_pair(BB, 2)); // If the entry already existed for this block, return the precomputed value. if (!IV.second) { // If this is a speculative "available" value, mark it as being used for // speculation of other blocks. if (IV.first->second == 2) IV.first->second = 3; return IV.first->second != 0; } // Otherwise, see if it is fully available in all predecessors. pred_iterator PI = pred_begin(BB), PE = pred_end(BB); // If this block has no predecessors, it isn't live-in here. if (PI == PE) goto SpeculationFailure; for (; PI != PE; ++PI) // If the value isn't fully available in one of our predecessors, then it // isn't fully available in this block either. Undo our previous // optimistic assumption and bail out. if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks,RecurseDepth+1)) goto SpeculationFailure; return true; // If we get here, we found out that this is not, after // all, a fully-available block. We have a problem if we speculated on this and // used the speculation to mark other blocks as available. SpeculationFailure: char &BBVal = FullyAvailableBlocks[BB]; // If we didn't speculate on this, just return with it set to false. if (BBVal == 2) { BBVal = 0; return false; } // If we did speculate on this value, we could have blocks set to 1 that are // incorrect. Walk the (transitive) successors of this block and mark them as // 0 if set to one. SmallVector<BasicBlock*, 32> BBWorklist; BBWorklist.push_back(BB); do { BasicBlock *Entry = BBWorklist.pop_back_val(); // Note that this sets blocks to 0 (unavailable) if they happen to not // already be in FullyAvailableBlocks. This is safe. char &EntryVal = FullyAvailableBlocks[Entry]; if (EntryVal == 0) continue; // Already unavailable. // Mark as unavailable. EntryVal = 0; BBWorklist.append(succ_begin(Entry), succ_end(Entry)); } while (!BBWorklist.empty()); return false; } /// Return true if CoerceAvailableValueToLoadType will succeed. static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy, const DataLayout &DL) { // If the loaded or stored value is an first class array or struct, don't try // to transform them. We need to be able to bitcast to integer. if (LoadTy->isStructTy() || LoadTy->isArrayTy() || StoredVal->getType()->isStructTy() || StoredVal->getType()->isArrayTy()) return false; // The store has to be at least as big as the load. if (DL.getTypeSizeInBits(StoredVal->getType()) < DL.getTypeSizeInBits(LoadTy)) return false; return true; } /// If we saw a store of a value to memory, and /// then a load from a must-aliased pointer of a different type, try to coerce /// the stored value. LoadedTy is the type of the load we want to replace. 
/// IRB is IRBuilder used to insert new instructions. /// /// If we can't do it, return null. static Value *CoerceAvailableValueToLoadType(Value *StoredVal, Type *LoadedTy, IRBuilder<> &IRB, const DataLayout &DL) { if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, DL)) return nullptr; // If this is already the right type, just return it. Type *StoredValTy = StoredVal->getType(); uint64_t StoreSize = DL.getTypeSizeInBits(StoredValTy); uint64_t LoadSize = DL.getTypeSizeInBits(LoadedTy); // If the store and reload are the same size, we can always reuse it. if (StoreSize == LoadSize) { // Pointer to Pointer -> use bitcast. if (StoredValTy->getScalarType()->isPointerTy() && LoadedTy->getScalarType()->isPointerTy()) return IRB.CreateBitCast(StoredVal, LoadedTy); // Convert source pointers to integers, which can be bitcast. if (StoredValTy->getScalarType()->isPointerTy()) { StoredValTy = DL.getIntPtrType(StoredValTy); StoredVal = IRB.CreatePtrToInt(StoredVal, StoredValTy); } Type *TypeToCastTo = LoadedTy; if (TypeToCastTo->getScalarType()->isPointerTy()) TypeToCastTo = DL.getIntPtrType(TypeToCastTo); if (StoredValTy != TypeToCastTo) StoredVal = IRB.CreateBitCast(StoredVal, TypeToCastTo); // Cast to pointer if the load needs a pointer type. if (LoadedTy->getScalarType()->isPointerTy()) StoredVal = IRB.CreateIntToPtr(StoredVal, LoadedTy); return StoredVal; } // If the loaded value is smaller than the available value, then we can // extract out a piece from it. If the available value is too small, then we // can't do anything. assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail"); // Convert source pointers to integers, which can be manipulated. if (StoredValTy->getScalarType()->isPointerTy()) { StoredValTy = DL.getIntPtrType(StoredValTy); StoredVal = IRB.CreatePtrToInt(StoredVal, StoredValTy); } // Convert vectors and fp to integer, which can be manipulated. if (!StoredValTy->isIntegerTy()) { StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize); StoredVal = IRB.CreateBitCast(StoredVal, StoredValTy); } // If this is a big-endian system, we need to shift the value down to the low // bits so that a truncate will work. if (DL.isBigEndian()) { StoredVal = IRB.CreateLShr(StoredVal, StoreSize - LoadSize, "tmp"); } // Truncate the integer to the right size now. Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize); StoredVal = IRB.CreateTrunc(StoredVal, NewIntTy, "trunc"); if (LoadedTy == NewIntTy) return StoredVal; // If the result is a pointer, inttoptr. if (LoadedTy->getScalarType()->isPointerTy()) return IRB.CreateIntToPtr(StoredVal, LoadedTy, "inttoptr"); // Otherwise, bitcast. return IRB.CreateBitCast(StoredVal, LoadedTy, "bitcast"); } #if 0 // HLSL Change: Don't support bitcasting to different sizes. /// This function is called when we have a /// memdep query of a load that ends up being a clobbering memory write (store, /// memset, memcpy, memmove). This means that the write *may* provide bits used /// by the load but we can't be sure because the pointers don't mustalias. /// /// Check this case to see if there is anything more we can do before we give /// up. This returns -1 if we have to give up, or a byte number in the stored /// value of the piece that feeds the load. static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr, Value *WritePtr, uint64_t WriteSizeInBits, const DataLayout &DL) { // If the loaded or stored value is a first class array or struct, don't try // to transform them. 
We need to be able to bitcast to integer. if (LoadTy->isStructTy() || LoadTy->isArrayTy()) return -1; int64_t StoreOffset = 0, LoadOffset = 0; Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr, StoreOffset, DL); Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, DL); if (StoreBase != LoadBase) return -1; // If the load and store are to the exact same address, they should have been // a must alias. AA must have gotten confused. // FIXME: Study to see if/when this happens. One case is forwarding a memset // to a load from the base of the memset. #if 0 if (LoadOffset == StoreOffset) { dbgs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n" << "Base = " << *StoreBase << "\n" << "Store Ptr = " << *WritePtr << "\n" << "Store Offs = " << StoreOffset << "\n" << "Load Ptr = " << *LoadPtr << "\n"; abort(); } #endif // If the load and store don't overlap at all, the store doesn't provide // anything to the load. In this case, they really don't alias at all, AA // must have gotten confused. uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy); if ((WriteSizeInBits & 7) | (LoadSize & 7)) return -1; uint64_t StoreSize = WriteSizeInBits >> 3; // Convert to bytes. LoadSize >>= 3; bool isAAFailure = false; if (StoreOffset < LoadOffset) isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset; else isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset; if (isAAFailure) { #if 0 dbgs() << "STORE LOAD DEP WITH COMMON BASE:\n" << "Base = " << *StoreBase << "\n" << "Store Ptr = " << *WritePtr << "\n" << "Store Offs = " << StoreOffset << "\n" << "Load Ptr = " << *LoadPtr << "\n"; abort(); #endif return -1; } // If the Load isn't completely contained within the stored bits, we don't // have all the bits to feed it. We could do something crazy in the future // (issue a smaller load then merge the bits in) but this seems unlikely to be // valuable. if (StoreOffset > LoadOffset || StoreOffset+StoreSize < LoadOffset+LoadSize) return -1; // Okay, we can do this transformation. Return the number of bytes into the // store that the load is. return LoadOffset-StoreOffset; return -1; } #endif // HLSL Change: Don't support bitcasting to different sizes. /// This function is called when we have a /// memdep query of a load that ends up being a clobbering store. static int AnalyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr, StoreInst *DepSI) { #if 0 // HLSL Change: Don't support bitcasting to different sizes. // Cannot handle reading from store of first-class aggregate yet. if (DepSI->getValueOperand()->getType()->isStructTy() || DepSI->getValueOperand()->getType()->isArrayTy()) return -1; const DataLayout &DL = DepSI->getModule()->getDataLayout(); Value *StorePtr = DepSI->getPointerOperand(); uint64_t StoreSize =DL.getTypeSizeInBits(DepSI->getValueOperand()->getType()); return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, StorePtr, StoreSize, DL); #endif // HLSL Change: Don't support bitcasting to different sizes. return -1; } /// This function is called when we have a /// memdep query of a load that ends up being clobbered by another load. See if /// the other load can feed into the second load. static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr, LoadInst *DepLI, const DataLayout &DL){ #if 0 // HLSL Change: Don't support bitcasting to different sizes. // Cannot handle reading from store of first-class aggregate yet. 
if (DepLI->getType()->isStructTy() || DepLI->getType()->isArrayTy()) return -1; Value *DepPtr = DepLI->getPointerOperand(); uint64_t DepSize = DL.getTypeSizeInBits(DepLI->getType()); int R = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, DepSize, DL); if (R != -1) return R; // If we have a load/load clobber an DepLI can be widened to cover this load, // then we should widen it! int64_t LoadOffs = 0; const Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, DL); unsigned LoadSize = DL.getTypeStoreSize(LoadTy); unsigned Size = MemoryDependenceAnalysis::getLoadLoadClobberFullWidthSize( LoadBase, LoadOffs, LoadSize, DepLI); if (Size == 0) return -1; return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, Size*8, DL); #endif return -1; } static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr, MemIntrinsic *MI, const DataLayout &DL) { #if 0 // HLSL Change: Don't support bitcasting to different sizes. // If the mem operation is a non-constant size, we can't handle it. ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength()); if (!SizeCst) return -1; uint64_t MemSizeInBits = SizeCst->getZExtValue()*8; // If this is memset, we just need to see if the offset is valid in the size // of the memset.. if (MI->getIntrinsicID() == Intrinsic::memset) return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(), MemSizeInBits, DL); // If we have a memcpy/memmove, the only case we can handle is if this is a // copy from constant memory. In that case, we can read directly from the // constant memory. MemTransferInst *MTI = cast<MemTransferInst>(MI); Constant *Src = dyn_cast<Constant>(MTI->getSource()); if (!Src) return -1; GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, DL)); if (!GV || !GV->isConstant()) return -1; // See if the access is within the bounds of the transfer. int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(), MemSizeInBits, DL); if (Offset == -1) return Offset; unsigned AS = Src->getType()->getPointerAddressSpace(); // Otherwise, see if we can constant fold a load from the constant with the // offset applied as appropriate. Src = ConstantExpr::getBitCast(Src, Type::getInt8PtrTy(Src->getContext(), AS)); Constant *OffsetCst = ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset); Src = ConstantExpr::getGetElementPtr(Type::getInt8Ty(Src->getContext()), Src, OffsetCst); Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS)); if (ConstantFoldLoadFromConstPtr(Src, DL)) return Offset; #endif return -1; } /// This function is called when we have a /// memdep query of a load that ends up being a clobbering store. This means /// that the store provides bits used by the load but we the pointers don't /// mustalias. Check this case to see if there is anything more we can do /// before we give up. static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset, Type *LoadTy, Instruction *InsertPt, const DataLayout &DL){ LLVMContext &Ctx = SrcVal->getType()->getContext(); uint64_t StoreSize = (DL.getTypeSizeInBits(SrcVal->getType()) + 7) / 8; uint64_t LoadSize = (DL.getTypeSizeInBits(LoadTy) + 7) / 8; IRBuilder<> Builder(InsertPt); // Compute which bits of the stored value are being used by the load. Convert // to an integer type to start with. 
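  // For illustration (hypothetical values, not from the surrounding code):
  // for a stored i64 feeding an i16 load at byte offset 2, StoreSize is
  // 8 bytes and LoadSize is 2 bytes, so the code below shifts right by
  // Offset*8 = 16 bits on a little-endian target, or by
  // (StoreSize-LoadSize-Offset)*8 = 32 bits on a big-endian target, and then
  // truncates the result to an i16.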
if (SrcVal->getType()->getScalarType()->isPointerTy()) SrcVal = Builder.CreatePtrToInt(SrcVal, DL.getIntPtrType(SrcVal->getType())); if (!SrcVal->getType()->isIntegerTy()) SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8)); // Shift the bits to the least significant depending on endianness. unsigned ShiftAmt; if (DL.isLittleEndian()) ShiftAmt = Offset*8; else ShiftAmt = (StoreSize-LoadSize-Offset)*8; if (ShiftAmt) SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt); if (LoadSize != StoreSize) SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8)); return CoerceAvailableValueToLoadType(SrcVal, LoadTy, Builder, DL); } /// This function is called when we have a /// memdep query of a load that ends up being a clobbering load. This means /// that the load *may* provide bits used by the load but we can't be sure /// because the pointers don't mustalias. Check this case to see if there is /// anything more we can do before we give up. static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset, Type *LoadTy, Instruction *InsertPt, GVN &gvn) { const DataLayout &DL = SrcVal->getModule()->getDataLayout(); // If Offset+LoadTy exceeds the size of SrcVal, then we must be wanting to // widen SrcVal out to a larger load. unsigned SrcValSize = DL.getTypeStoreSize(SrcVal->getType()); unsigned LoadSize = DL.getTypeStoreSize(LoadTy); if (Offset+LoadSize > SrcValSize) { assert(SrcVal->isSimple() && "Cannot widen volatile/atomic load!"); assert(SrcVal->getType()->isIntegerTy() && "Can't widen non-integer load"); // If we have a load/load clobber an DepLI can be widened to cover this // load, then we should widen it to the next power of 2 size big enough! unsigned NewLoadSize = Offset+LoadSize; if (!isPowerOf2_32(NewLoadSize)) NewLoadSize = NextPowerOf2(NewLoadSize); Value *PtrVal = SrcVal->getPointerOperand(); // Insert the new load after the old load. This ensures that subsequent // memdep queries will find the new load. We can't easily remove the old // load completely because it is already in the value numbering table. IRBuilder<> Builder(SrcVal->getParent(), ++BasicBlock::iterator(SrcVal)); Type *DestPTy = IntegerType::get(LoadTy->getContext(), NewLoadSize*8); DestPTy = PointerType::get(DestPTy, PtrVal->getType()->getPointerAddressSpace()); Builder.SetCurrentDebugLocation(SrcVal->getDebugLoc()); PtrVal = Builder.CreateBitCast(PtrVal, DestPTy); LoadInst *NewLoad = Builder.CreateLoad(PtrVal); NewLoad->takeName(SrcVal); NewLoad->setAlignment(SrcVal->getAlignment()); DEBUG(dbgs() << "GVN WIDENED LOAD: " << *SrcVal << "\n"); DEBUG(dbgs() << "TO: " << *NewLoad << "\n"); // Replace uses of the original load with the wider load. On a big endian // system, we need to shift down to get the relevant bits. Value *RV = NewLoad; if (DL.isBigEndian()) RV = Builder.CreateLShr(RV, NewLoadSize*8-SrcVal->getType()->getPrimitiveSizeInBits()); RV = Builder.CreateTrunc(RV, SrcVal->getType()); SrcVal->replaceAllUsesWith(RV); // We would like to use gvn.markInstructionForDeletion here, but we can't // because the load is already memoized into the leader map table that GVN // tracks. It is potentially possible to remove the load from the table, // but then there all of the operations based on it would need to be // rehashed. Just leave the dead load around. 
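  // Clarifying note: the call below is understood to drop only SrcVal's cached
  // results from MemoryDependenceAnalysis, so that later queries are answered
  // against the widened load; the old load instruction itself is intentionally
  // left in the IR, as described above.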
gvn.getMemDep().removeInstruction(SrcVal); SrcVal = NewLoad; } return GetStoreValueForLoad(SrcVal, Offset, LoadTy, InsertPt, DL); } /// This function is called when we have a /// memdep query of a load that ends up being a clobbering mem intrinsic. static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset, Type *LoadTy, Instruction *InsertPt, const DataLayout &DL){ LLVMContext &Ctx = LoadTy->getContext(); uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy)/8; IRBuilder<> Builder(InsertPt); // We know that this method is only called when the mem transfer fully // provides the bits for the load. if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) { // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and // independently of what the offset is. Value *Val = MSI->getValue(); if (LoadSize != 1) Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8)); Value *OneElt = Val; // Splat the value out to the right number of bits. for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) { // If we can double the number of bytes set, do it. if (NumBytesSet*2 <= LoadSize) { Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8); Val = Builder.CreateOr(Val, ShVal); NumBytesSet <<= 1; continue; } // Otherwise insert one byte at a time. Value *ShVal = Builder.CreateShl(Val, 1*8); Val = Builder.CreateOr(OneElt, ShVal); ++NumBytesSet; } return CoerceAvailableValueToLoadType(Val, LoadTy, Builder, DL); } // Otherwise, this is a memcpy/memmove from a constant global. MemTransferInst *MTI = cast<MemTransferInst>(SrcInst); Constant *Src = cast<Constant>(MTI->getSource()); unsigned AS = Src->getType()->getPointerAddressSpace(); // Otherwise, see if we can constant fold a load from the constant with the // offset applied as appropriate. Src = ConstantExpr::getBitCast(Src, Type::getInt8PtrTy(Src->getContext(), AS)); Constant *OffsetCst = ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset); Src = ConstantExpr::getGetElementPtr(Type::getInt8Ty(Src->getContext()), Src, OffsetCst); Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS)); return ConstantFoldLoadFromConstPtr(Src, DL); } /// Given a set of loads specified by ValuesPerBlock, /// construct SSA form, allowing us to eliminate LI. This returns the value /// that should be used at LI's definition site. static Value *ConstructSSAForLoadSet(LoadInst *LI, SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock, GVN &gvn) { // Check for the fully redundant, dominating load case. In this case, we can // just use the dominating value directly. if (ValuesPerBlock.size() == 1 && gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB, LI->getParent())) { assert(!ValuesPerBlock[0].isUndefValue() && "Dead BB dominate this block"); return ValuesPerBlock[0].MaterializeAdjustedValue(LI, gvn); } // Otherwise, we have to construct SSA form. SmallVector<PHINode*, 8> NewPHIs; SSAUpdater SSAUpdate(&NewPHIs); SSAUpdate.Initialize(LI->getType(), LI->getName()); for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) { const AvailableValueInBlock &AV = ValuesPerBlock[i]; BasicBlock *BB = AV.BB; if (SSAUpdate.HasValueForBlock(BB)) continue; SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LI, gvn)); } // Perform PHI construction. Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent()); // If new PHI nodes were created, notify alias analysis. 
if (V->getType()->getScalarType()->isPointerTy()) { AliasAnalysis *AA = gvn.getAliasAnalysis(); // Scan the new PHIs and inform alias analysis that we've added potentially // escaping uses to any values that are operands to these PHIs. for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i) { PHINode *P = NewPHIs[i]; for (unsigned ii = 0, ee = P->getNumIncomingValues(); ii != ee; ++ii) { unsigned jj = PHINode::getOperandNumForIncomingValue(ii); AA->addEscapingUse(P->getOperandUse(jj)); } } } return V; } Value *AvailableValueInBlock::MaterializeAdjustedValue(LoadInst *LI, GVN &gvn) const { Value *Res; Type *LoadTy = LI->getType(); const DataLayout &DL = LI->getModule()->getDataLayout(); if (isSimpleValue()) { Res = getSimpleValue(); if (Res->getType() != LoadTy) { Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(), DL); DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << " " << *getSimpleValue() << '\n' << *Res << '\n' << "\n\n\n"); } } else if (isCoercedLoadValue()) { LoadInst *Load = getCoercedLoadValue(); if (Load->getType() == LoadTy && Offset == 0) { Res = Load; } else { Res = GetLoadValueForLoad(Load, Offset, LoadTy, BB->getTerminator(), gvn); DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset << " " << *getCoercedLoadValue() << '\n' << *Res << '\n' << "\n\n\n"); } } else if (isMemIntrinValue()) { Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy, BB->getTerminator(), DL); DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset << " " << *getMemIntrinValue() << '\n' << *Res << '\n' << "\n\n\n"); } else { assert(isUndefValue() && "Should be UndefVal"); DEBUG(dbgs() << "GVN COERCED NONLOCAL Undef:\n";); return UndefValue::get(LoadTy); } return Res; } static bool isLifetimeStart(const Instruction *Inst) { if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst)) return II->getIntrinsicID() == Intrinsic::lifetime_start; return false; } void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps, AvailValInBlkVect &ValuesPerBlock, UnavailBlkVect &UnavailableBlocks) { // Filter out useless results (non-locals, etc). Keep track of the blocks // where we have a value available in repl, also keep track of whether we see // dependencies that produce an unknown value for the load (such as a call // that could potentially clobber the load). unsigned NumDeps = Deps.size(); const DataLayout &DL = LI->getModule()->getDataLayout(); for (unsigned i = 0, e = NumDeps; i != e; ++i) { BasicBlock *DepBB = Deps[i].getBB(); MemDepResult DepInfo = Deps[i].getResult(); if (DeadBlocks.count(DepBB)) { // Dead dependent mem-op disguise as a load evaluating the same value // as the load in question. ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB)); continue; } if (!DepInfo.isDef() && !DepInfo.isClobber()) { UnavailableBlocks.push_back(DepBB); continue; } if (DepInfo.isClobber()) { // The address being loaded in this non-local block may not be the same as // the pointer operand of the load if PHI translation occurs. Make sure // to consider the right address. Value *Address = Deps[i].getAddress(); // If the dependence is to a store that writes to a superset of the bits // read by the load, we can extract the bits we need for the load from the // stored value. 
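  // Note: in this HLSL build the clobbering-store/load/mem-intrinsic analyses
  // called below are stubbed out (see the "#if 0 // HLSL Change" blocks above)
  // and always return -1, so these bit-extraction paths never fire here; the
  // calls are presumably retained to keep the structure close to upstream GVN.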
if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) { if (Address) { int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address, DepSI); if (Offset != -1) { ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, DepSI->getValueOperand(), Offset)); continue; } } } // Check to see if we have something like this: // load i32* P // load i8* (P+1) // if we have this, replace the later with an extraction from the former. if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInfo.getInst())) { // If this is a clobber and L is the first instruction in its block, then // we have the first instruction in the entry block. if (DepLI != LI && Address) { int Offset = AnalyzeLoadFromClobberingLoad(LI->getType(), Address, DepLI, DL); if (Offset != -1) { ValuesPerBlock.push_back(AvailableValueInBlock::getLoad(DepBB,DepLI, Offset)); continue; } } } // If the clobbering value is a memset/memcpy/memmove, see if we can // forward a value on from it. if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) { if (Address) { int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address, DepMI, DL); if (Offset != -1) { ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI, Offset)); continue; } } } UnavailableBlocks.push_back(DepBB); continue; } // DepInfo.isDef() here Instruction *DepInst = DepInfo.getInst(); // Loading the allocation -> undef. if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) || // Loading immediately after lifetime begin -> undef. isLifetimeStart(DepInst)) { ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, UndefValue::get(LI->getType()))); continue; } // Loading from calloc (which zero initializes memory) -> zero if (isCallocLikeFn(DepInst, TLI)) { ValuesPerBlock.push_back(AvailableValueInBlock::get( DepBB, Constant::getNullValue(LI->getType()))); continue; } if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) { // Reject loads and stores that are to the same address but are of // different types if we have to. if (S->getValueOperand()->getType() != LI->getType()) { // If the stored value is larger or equal to the loaded value, we can // reuse it. if (!CanCoerceMustAliasedValueToLoad(S->getValueOperand(), LI->getType(), DL)) { UnavailableBlocks.push_back(DepBB); continue; } } ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, S->getValueOperand())); continue; } if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) { // If the types mismatch and we can't handle it, reject reuse of the load. if (LD->getType() != LI->getType()) { // If the stored value is larger or equal to the loaded value, we can // reuse it. if (!CanCoerceMustAliasedValueToLoad(LD, LI->getType(), DL)) { UnavailableBlocks.push_back(DepBB); continue; } } ValuesPerBlock.push_back(AvailableValueInBlock::getLoad(DepBB, LD)); continue; } UnavailableBlocks.push_back(DepBB); } } bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock, UnavailBlkVect &UnavailableBlocks) { // Okay, we have *some* definitions of the value. This means that the value // is available in some of our (transitive) predecessors. Lets think about // doing PRE of this load. This will involve inserting a new load into the // predecessor when it's not available. We could do this in general, but // prefer to not increase code size. As such, we only do this when we know // that we only have to insert *one* load (which means we're basically moving // the load, not inserting a new one). 
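  //
  // Hypothetical shape of the case handled below (illustration only): LoadBB
  // has two predecessors, BB1 and BB2; the loaded value is already available
  // on the path through BB1 but not through BB2.  PRE inserts one copy of the
  // load at the end of BB2 and then builds a PHI in LoadBB that merges the
  // value from BB1 with the newly inserted load from BB2, replacing the
  // original load.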
SmallPtrSet<BasicBlock *, 4> Blockers; for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i) Blockers.insert(UnavailableBlocks[i]); // Let's find the first basic block with more than one predecessor. Walk // backwards through predecessors if needed. BasicBlock *LoadBB = LI->getParent(); BasicBlock *TmpBB = LoadBB; while (TmpBB->getSinglePredecessor()) { TmpBB = TmpBB->getSinglePredecessor(); if (TmpBB == LoadBB) // Infinite (unreachable) loop. return false; if (Blockers.count(TmpBB)) return false; // If any of these blocks has more than one successor (i.e. if the edge we // just traversed was critical), then there are other paths through this // block along which the load may not be anticipated. Hoisting the load // above this block would be adding the load to execution paths along // which it was not previously executed. if (TmpBB->getTerminator()->getNumSuccessors() != 1) return false; } assert(TmpBB); LoadBB = TmpBB; // Check to see how many predecessors have the loaded value fully // available. MapVector<BasicBlock *, Value *> PredLoads; DenseMap<BasicBlock*, char> FullyAvailableBlocks; for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) FullyAvailableBlocks[ValuesPerBlock[i].BB] = true; for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i) FullyAvailableBlocks[UnavailableBlocks[i]] = false; SmallVector<BasicBlock *, 4> CriticalEdgePred; for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB); PI != E; ++PI) { BasicBlock *Pred = *PI; if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks, 0)) { continue; } if (Pred->getTerminator()->getNumSuccessors() != 1) { if (isa<IndirectBrInst>(Pred->getTerminator())) { DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '" << Pred->getName() << "': " << *LI << '\n'); return false; } if (LoadBB->isLandingPad()) { DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF LANDING PAD CRITICAL EDGE '" << Pred->getName() << "': " << *LI << '\n'); return false; } CriticalEdgePred.push_back(Pred); } else { // Only add the predecessors that will not be split for now. PredLoads[Pred] = nullptr; } } // Decide whether PRE is profitable for this load. unsigned NumUnavailablePreds = PredLoads.size() + CriticalEdgePred.size(); assert(NumUnavailablePreds != 0 && "Fully available value should already be eliminated!"); // If this load is unavailable in multiple predecessors, reject it. // FIXME: If we could restructure the CFG, we could make a common pred with // all the preds that don't have an available LI and insert a new load into // that one block. if (NumUnavailablePreds != 1) return false; // Split critical edges, and update the unavailable predecessors accordingly. for (BasicBlock *OrigPred : CriticalEdgePred) { BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB); assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!"); PredLoads[NewPred] = nullptr; DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->" << LoadBB->getName() << '\n'); } // Check if the load can safely be moved to all the unavailable predecessors. bool CanDoPRE = true; const DataLayout &DL = LI->getModule()->getDataLayout(); SmallVector<Instruction*, 8> NewInsts; for (auto &PredLoad : PredLoads) { BasicBlock *UnavailablePred = PredLoad.first; // Do PHI translation to get its value in the predecessor if necessary. The // returned pointer (if non-null) is guaranteed to dominate UnavailablePred. 
// If all preds have a single successor, then we know it is safe to insert // the load on the pred (?!?), so we can insert code to materialize the // pointer if it is not available. PHITransAddr Address(LI->getPointerOperand(), DL, AC); Value *LoadPtr = nullptr; LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred, *DT, NewInsts); // If we couldn't find or insert a computation of this phi translated value, // we fail PRE. if (!LoadPtr) { DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: " << *LI->getPointerOperand() << "\n"); CanDoPRE = false; break; } PredLoad.second = LoadPtr; } if (!CanDoPRE) { while (!NewInsts.empty()) { Instruction *I = NewInsts.pop_back_val(); if (MD) MD->removeInstruction(I); I->eraseFromParent(); } // HINT: Don't revert the edge-splitting as following transformation may // also need to split these critical edges. return !CriticalEdgePred.empty(); } // Okay, we can eliminate this load by inserting a reload in the predecessor // and using PHI construction to get the value in the other predecessors, do // it. DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n'); DEBUG(if (!NewInsts.empty()) dbgs() << "INSERTED " << NewInsts.size() << " INSTS: " << *NewInsts.back() << '\n'); // Assign value numbers to the new instructions. for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) { // FIXME: We really _ought_ to insert these value numbers into their // parent's availability map. However, in doing so, we risk getting into // ordering issues. If a block hasn't been processed yet, we would be // marking a value as AVAIL-IN, which isn't what we intend. VN.lookup_or_add(NewInsts[i]); } for (const auto &PredLoad : PredLoads) { BasicBlock *UnavailablePred = PredLoad.first; Value *LoadPtr = PredLoad.second; Instruction *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false, LI->getAlignment(), UnavailablePred->getTerminator()); // Transfer the old load's AA tags to the new load. AAMDNodes Tags; LI->getAAMetadata(Tags); if (Tags) NewLoad->setAAMetadata(Tags); // Transfer DebugLoc. NewLoad->setDebugLoc(LI->getDebugLoc()); // Add the newly created load. ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred, NewLoad)); MD->invalidateCachedPointerInfo(LoadPtr); DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n'); } // Perform PHI construction. Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this); LI->replaceAllUsesWith(V); if (isa<PHINode>(V)) V->takeName(LI); if (Instruction *I = dyn_cast<Instruction>(V)) I->setDebugLoc(LI->getDebugLoc()); if (V->getType()->getScalarType()->isPointerTy()) MD->invalidateCachedPointerInfo(V); markInstructionForDeletion(LI); ++NumPRELoad; return true; } /// Attempt to eliminate a load whose dependencies are /// non-local by performing PHI construction. bool GVN::processNonLocalLoad(LoadInst *LI) { // Step 1: Find the non-local dependencies of the load. LoadDepVect Deps; MD->getNonLocalPointerDependency(LI, Deps); // If we had to process more than one hundred blocks to find the // dependencies, this load isn't worth worrying about. Optimizing // it will be too expensive. unsigned NumDeps = Deps.size(); if (NumDeps > 100) return false; // If we had a phi translation failure, we'll have a single entry which is a // clobber in the current block. Reject this early. 
  if (NumDeps == 1 &&
      !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
    DEBUG(
      dbgs() << "GVN: non-local load ";
      LI->printAsOperand(dbgs());
      dbgs() << " has unknown dependencies\n";
    );
    return false;
  }

  // If this load follows a GEP, see if we can PRE the indices before analyzing.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0))) {
    for (GetElementPtrInst::op_iterator OI = GEP->idx_begin(),
                                        OE = GEP->idx_end();
         OI != OE; ++OI)
      if (Instruction *I = dyn_cast<Instruction>(OI->get()))
        performScalarPRE(I);
  }

  // Step 2: Analyze the availability of the load
  AvailValInBlkVect ValuesPerBlock;
  UnavailBlkVect UnavailableBlocks;
  AnalyzeLoadAvailability(LI, Deps, ValuesPerBlock, UnavailableBlocks);

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty())
    return false;

  // Step 3: Eliminate full redundancy.
  //
  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value.  Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (Instruction *I = dyn_cast<Instruction>(V))
      I->setDebugLoc(LI->getDebugLoc());
    if (V->getType()->getScalarType()->isPointerTy())
      MD->invalidateCachedPointerInfo(V);
    markInstructionForDeletion(LI);
    ++NumGVNLoad;
    return true;
  }

  // Step 4: Eliminate partial redundancy.
  if (!EnablePRE || !EnableLoadPRE)
    return false;

  return PerformLoadPRE(LI, ValuesPerBlock, UnavailableBlocks);
}

static void patchReplacementInstruction(Instruction *I, Value *Repl) {
  // Patch the replacement so that it is not more restrictive than the value
  // being replaced.
  BinaryOperator *Op = dyn_cast<BinaryOperator>(I);
  BinaryOperator *ReplOp = dyn_cast<BinaryOperator>(Repl);
  if (Op && ReplOp)
    ReplOp->andIRFlags(Op);

  if (Instruction *ReplInst = dyn_cast<Instruction>(Repl)) {
    // FIXME: If both the original and replacement value are part of the
    // same control-flow region (meaning that the execution of one
    // guarantees the execution of the other), then we can combine the
    // noalias scopes here and do better than the general conservative
    // answer used in combineMetadata().

    // In general, GVN unifies expressions over different control-flow
    // regions, and so we need a conservative combination of the noalias
    // scopes.
    static const unsigned KnownIDs[] = {
      LLVMContext::MD_tbaa,
      LLVMContext::MD_alias_scope,
      LLVMContext::MD_noalias,
      LLVMContext::MD_range,
      LLVMContext::MD_fpmath,
      LLVMContext::MD_invariant_load,
    };
    combineMetadata(ReplInst, I, KnownIDs);
  }
}

static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) {
  patchReplacementInstruction(I, Repl);
  I->replaceAllUsesWith(Repl);
}

/// Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L) {
  if (!MD)
    return false;

  if (!L->isSimple())
    return false;

  if (L->use_empty()) {
    markInstructionForDeletion(L);
    return true;
  }

  // ... to a pointer that has been loaded from before...
  MemDepResult Dep = MD->getDependency(L);
  const DataLayout &DL = L->getModule()->getDataLayout();

  // If we have a clobber and target data is around, see if this is a clobber
  // that we can fix up through code synthesis.
if (Dep.isClobber()) { // Check to see if we have something like this: // store i32 123, i32* %P // %A = bitcast i32* %P to i8* // %B = gep i8* %A, i32 1 // %C = load i8* %B // // We could do that by recognizing if the clobber instructions are obviously // a common base + constant offset, and if the previous store (or memset) // completely covers this load. This sort of thing can happen in bitfield // access code. Value *AvailVal = nullptr; if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst())) { int Offset = AnalyzeLoadFromClobberingStore( L->getType(), L->getPointerOperand(), DepSI); if (Offset != -1) AvailVal = GetStoreValueForLoad(DepSI->getValueOperand(), Offset, L->getType(), L, DL); } // Check to see if we have something like this: // load i32* P // load i8* (P+1) // if we have this, replace the later with an extraction from the former. if (LoadInst *DepLI = dyn_cast<LoadInst>(Dep.getInst())) { // If this is a clobber and L is the first instruction in its block, then // we have the first instruction in the entry block. if (DepLI == L) return false; int Offset = AnalyzeLoadFromClobberingLoad( L->getType(), L->getPointerOperand(), DepLI, DL); if (Offset != -1) AvailVal = GetLoadValueForLoad(DepLI, Offset, L->getType(), L, *this); } // If the clobbering value is a memset/memcpy/memmove, see if we can forward // a value on from it. if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) { int Offset = AnalyzeLoadFromClobberingMemInst( L->getType(), L->getPointerOperand(), DepMI, DL); if (Offset != -1) AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L, DL); } if (AvailVal) { DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n' << *AvailVal << '\n' << *L << "\n\n\n"); // Replace the load! L->replaceAllUsesWith(AvailVal); if (AvailVal->getType()->getScalarType()->isPointerTy()) MD->invalidateCachedPointerInfo(AvailVal); markInstructionForDeletion(L); ++NumGVNLoad; return true; } } // If the value isn't available, don't do anything! if (Dep.isClobber()) { DEBUG( // fast print dep, using operator<< on instruction is too slow. dbgs() << "GVN: load "; L->printAsOperand(dbgs()); Instruction *I = Dep.getInst(); dbgs() << " is clobbered by " << *I << '\n'; ); return false; } // If it is defined in another block, try harder. if (Dep.isNonLocal()) return processNonLocalLoad(L); if (!Dep.isDef()) { DEBUG( // fast print dep, using operator<< on instruction is too slow. dbgs() << "GVN: load "; L->printAsOperand(dbgs()); dbgs() << " has unknown dependence\n"; ); return false; } Instruction *DepInst = Dep.getInst(); if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) { Value *StoredVal = DepSI->getValueOperand(); // The store and load are to a must-aliased pointer, but they may not // actually have the same type. See if we know how to reuse the stored // value (depending on its type). if (StoredVal->getType() != L->getType()) { IRBuilder<> Builder(L); StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(), Builder, DL); if (!StoredVal) return false; DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal << '\n' << *L << "\n\n\n"); } // Remove it! L->replaceAllUsesWith(StoredVal); if (StoredVal->getType()->getScalarType()->isPointerTy()) MD->invalidateCachedPointerInfo(StoredVal); markInstructionForDeletion(L); ++NumGVNLoad; return true; } if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) { Value *AvailableVal = DepLI; // The loads are of a must-aliased pointer, but they may not actually have // the same type. 
See if we know how to reuse the previously loaded value // (depending on its type). if (DepLI->getType() != L->getType()) { IRBuilder<> Builder(L); AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(), Builder, DL); if (!AvailableVal) return false; DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal << "\n" << *L << "\n\n\n"); } // Remove it! patchAndReplaceAllUsesWith(L, AvailableVal); if (DepLI->getType()->getScalarType()->isPointerTy()) MD->invalidateCachedPointerInfo(DepLI); markInstructionForDeletion(L); ++NumGVNLoad; return true; } // If this load really doesn't depend on anything, then we must be loading an // undef value. This can happen when loading for a fresh allocation with no // intervening stores, for example. if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI)) { L->replaceAllUsesWith(UndefValue::get(L->getType())); markInstructionForDeletion(L); ++NumGVNLoad; return true; } // If this load occurs either right after a lifetime begin, // then the loaded value is undefined. if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(DepInst)) { if (II->getIntrinsicID() == Intrinsic::lifetime_start) { L->replaceAllUsesWith(UndefValue::get(L->getType())); markInstructionForDeletion(L); ++NumGVNLoad; return true; } } // If this load follows a calloc (which zero initializes memory), // then the loaded value is zero if (isCallocLikeFn(DepInst, TLI)) { L->replaceAllUsesWith(Constant::getNullValue(L->getType())); markInstructionForDeletion(L); ++NumGVNLoad; return true; } return false; } // In order to find a leader for a given value number at a // specific basic block, we first obtain the list of all Values for that number, // and then scan the list to find one whose block dominates the block in // question. This is fast because dominator tree queries consist of only // a few comparisons of DFS numbers. Value *GVN::findLeader(const BasicBlock *BB, uint32_t num) { LeaderTableEntry Vals = LeaderTable[num]; if (!Vals.Val) return nullptr; Value *Val = nullptr; if (DT->dominates(Vals.BB, BB)) { Val = Vals.Val; if (isa<Constant>(Val)) return Val; } LeaderTableEntry* Next = Vals.Next; while (Next) { if (DT->dominates(Next->BB, BB)) { if (isa<Constant>(Next->Val)) return Next->Val; if (!Val) Val = Next->Val; } Next = Next->Next; } return Val; } /// There is an edge from 'Src' to 'Dst'. Return /// true if every path from the entry block to 'Dst' passes via this edge. In /// particular 'Dst' must not be reachable via another edge from 'Src'. static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E, DominatorTree *DT) { // While in theory it is interesting to consider the case in which Dst has // more than one predecessor, because Dst might be part of a loop which is // only reachable from Src, in practice it is pointless since at the time // GVN runs all such loops have preheaders, which means that Dst will have // been changed to have only one predecessor, namely Src. const BasicBlock *Pred = E.getEnd()->getSinglePredecessor(); const BasicBlock *Src = E.getStart(); assert((!Pred || Pred == Src) && "No edge between these basic blocks!"); (void)Src; return Pred != nullptr; } /// The given values are known to be equal in every block /// dominated by 'Root'. Exploit this, for example by replacing 'LHS' with /// 'RHS' everywhere in the scope. Returns whether a change was made. 
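/// For illustration (hypothetical example, not part of the original comment):
/// given "br i1 (icmp eq i32 %x, 7), label %then, label %else", the edge into
/// %then carries the fact "%x == 7", so uses of %x dominated by that edge can
/// be rewritten to the constant 7, and uses of the icmp itself to 'true'.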
bool GVN::propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root) { SmallVector<std::pair<Value*, Value*>, 4> Worklist; Worklist.push_back(std::make_pair(LHS, RHS)); bool Changed = false; // For speed, compute a conservative fast approximation to // DT->dominates(Root, Root.getEnd()); bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT); while (!Worklist.empty()) { std::pair<Value*, Value*> Item = Worklist.pop_back_val(); LHS = Item.first; RHS = Item.second; if (LHS == RHS) continue; assert(LHS->getType() == RHS->getType() && "Equality but unequal types!"); // Don't try to propagate equalities between constants. if (isa<Constant>(LHS) && isa<Constant>(RHS)) continue; // Prefer a constant on the right-hand side, or an Argument if no constants. if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS))) std::swap(LHS, RHS); assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) && "Unexpected value!"); // If there is no obvious reason to prefer the left-hand side over the // right-hand side, ensure the longest lived term is on the right-hand side, // so the shortest lived term will be replaced by the longest lived. // This tends to expose more simplifications. uint32_t LVN = VN.lookup_or_add(LHS); if ((isa<Argument>(LHS) && isa<Argument>(RHS)) || (isa<Instruction>(LHS) && isa<Instruction>(RHS))) { // Move the 'oldest' value to the right-hand side, using the value number // as a proxy for age. uint32_t RVN = VN.lookup_or_add(RHS); if (LVN < RVN) { std::swap(LHS, RHS); LVN = RVN; } } // If value numbering later sees that an instruction in the scope is equal // to 'LHS' then ensure it will be turned into 'RHS'. In order to preserve // the invariant that instructions only occur in the leader table for their // own value number (this is used by removeFromLeaderTable), do not do this // if RHS is an instruction (if an instruction in the scope is morphed into // LHS then it will be turned into RHS by the next GVN iteration anyway, so // using the leader table is about compiling faster, not optimizing better). // The leader table only tracks basic blocks, not edges. Only add to if we // have the simple case where the edge dominates the end. if (RootDominatesEnd && !isa<Instruction>(RHS)) addToLeaderTable(LVN, RHS, Root.getEnd()); // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope. As // LHS always has at least one use that is not dominated by Root, this will // never do anything if LHS has only one use. if (!LHS->hasOneUse()) { // HLSL Change Begin - Don't replace readfirstlane to help propagate // uniform info. if (CallInst *CI = dyn_cast<CallInst>(LHS)) { if (hlsl::OP::IsDxilOpFuncCallInst( CI, hlsl::DXIL::OpCode::WaveReadLaneFirst)) { continue; } } // HLSL Change End unsigned NumReplacements = replaceDominatedUsesWith(LHS, RHS, *DT, Root); Changed |= NumReplacements > 0; NumGVNEqProp += NumReplacements; } // Now try to deduce additional equalities from this one. For example, if // the known equality was "(A != B)" == "false" then it follows that A and B // are equal in the scope. Only boolean equalities with an explicit true or // false RHS are currently supported. if (!RHS->getType()->isIntegerTy(1)) // Not a boolean equality - bail out. continue; ConstantInt *CI = dyn_cast<ConstantInt>(RHS); if (!CI) // RHS neither 'true' nor 'false' - bail out. continue; // Whether RHS equals 'true'. Otherwise it equals 'false'. 
bool isKnownTrue = CI->isAllOnesValue(); bool isKnownFalse = !isKnownTrue; // If "A && B" is known true then both A and B are known true. If "A || B" // is known false then both A and B are known false. Value *A, *B; if ((isKnownTrue && match(LHS, m_And(m_Value(A), m_Value(B)))) || (isKnownFalse && match(LHS, m_Or(m_Value(A), m_Value(B))))) { Worklist.push_back(std::make_pair(A, RHS)); Worklist.push_back(std::make_pair(B, RHS)); continue; } // If we are propagating an equality like "(A == B)" == "true" then also // propagate the equality A == B. When propagating a comparison such as // "(A >= B)" == "true", replace all instances of "A < B" with "false". if (CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) { Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1); // If "A == B" is known true, or "A != B" is known false, then replace // A with B everywhere in the scope. if ((isKnownTrue && Cmp->getPredicate() == CmpInst::ICMP_EQ) || (isKnownFalse && Cmp->getPredicate() == CmpInst::ICMP_NE)) Worklist.push_back(std::make_pair(Op0, Op1)); // Handle the floating point versions of equality comparisons too. if ((isKnownTrue && Cmp->getPredicate() == CmpInst::FCMP_OEQ) || (isKnownFalse && Cmp->getPredicate() == CmpInst::FCMP_UNE)) { // Floating point -0.0 and 0.0 compare equal, so we can only // propagate values if we know that we have a constant and that // its value is non-zero. // FIXME: We should do this optimization if 'no signed zeros' is // applicable via an instruction-level fast-math-flag or some other // indicator that relaxed FP semantics are being used. if (isa<ConstantFP>(Op1) && !cast<ConstantFP>(Op1)->isZero()) Worklist.push_back(std::make_pair(Op0, Op1)); } // If "A >= B" is known true, replace "A < B" with false everywhere. CmpInst::Predicate NotPred = Cmp->getInversePredicate(); Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse); // Since we don't have the instruction "A < B" immediately to hand, work // out the value number that it would have and use that to find an // appropriate instruction (if any). uint32_t NextNum = VN.getNextUnusedValueNumber(); uint32_t Num = VN.lookup_or_add_cmp(Cmp->getOpcode(), NotPred, Op0, Op1); // If the number we were assigned was brand new then there is no point in // looking for an instruction realizing it: there cannot be one! if (Num < NextNum) { Value *NotCmp = findLeader(Root.getEnd(), Num); if (NotCmp && isa<Instruction>(NotCmp)) { unsigned NumReplacements = replaceDominatedUsesWith(NotCmp, NotVal, *DT, Root); Changed |= NumReplacements > 0; NumGVNEqProp += NumReplacements; } } // Ensure that any instruction in scope that gets the "A < B" value number // is replaced with false. // The leader table only tracks basic blocks, not edges. Only add to if we // have the simple case where the edge dominates the end. if (RootDominatesEnd) addToLeaderTable(Num, NotVal, Root.getEnd()); continue; } } return Changed; } /// When calculating availability, handle an instruction /// by inserting it into the appropriate sets bool GVN::processInstruction(Instruction *I) { // Ignore dbg info intrinsics. if (isa<DbgInfoIntrinsic>(I)) return false; // If the instruction can be easily simplified then do so now in preference // to value numbering it. Value numbering often exposes redundancies, for // example if it determines that %y is equal to %x then the instruction // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify. 
const DataLayout &DL = I->getModule()->getDataLayout(); if (Value *V = SimplifyInstruction(I, DL, TLI, DT, AC)) { I->replaceAllUsesWith(V); if (MD && V->getType()->getScalarType()->isPointerTy()) MD->invalidateCachedPointerInfo(V); markInstructionForDeletion(I); ++NumGVNSimpl; return true; } if (LoadInst *LI = dyn_cast<LoadInst>(I)) { if (processLoad(LI)) return true; unsigned Num = VN.lookup_or_add(LI); addToLeaderTable(Num, LI, LI->getParent()); return false; } // For conditional branches, we can perform simple conditional propagation on // the condition value itself. if (BranchInst *BI = dyn_cast<BranchInst>(I)) { if (!BI->isConditional()) return false; if (isa<Constant>(BI->getCondition())) return processFoldableCondBr(BI); Value *BranchCond = BI->getCondition(); BasicBlock *TrueSucc = BI->getSuccessor(0); BasicBlock *FalseSucc = BI->getSuccessor(1); // Avoid multiple edges early. if (TrueSucc == FalseSucc) return false; BasicBlock *Parent = BI->getParent(); bool Changed = false; Value *TrueVal = ConstantInt::getTrue(TrueSucc->getContext()); BasicBlockEdge TrueE(Parent, TrueSucc); Changed |= propagateEquality(BranchCond, TrueVal, TrueE); Value *FalseVal = ConstantInt::getFalse(FalseSucc->getContext()); BasicBlockEdge FalseE(Parent, FalseSucc); Changed |= propagateEquality(BranchCond, FalseVal, FalseE); return Changed; } // For switches, propagate the case values into the case destinations. if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) { Value *SwitchCond = SI->getCondition(); BasicBlock *Parent = SI->getParent(); bool Changed = false; // Remember how many outgoing edges there are to every successor. SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges; for (unsigned i = 0, n = SI->getNumSuccessors(); i != n; ++i) ++SwitchEdges[SI->getSuccessor(i)]; for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); i != e; ++i) { BasicBlock *Dst = i.getCaseSuccessor(); // If there is only a single edge, propagate the case value into it. if (SwitchEdges.lookup(Dst) == 1) { BasicBlockEdge E(Parent, Dst); Changed |= propagateEquality(SwitchCond, i.getCaseValue(), E); } } return Changed; } // Instructions with void type don't return a value, so there's // no point in trying to find redundancies in them. if (I->getType()->isVoidTy()) return false; uint32_t NextNum = VN.getNextUnusedValueNumber(); unsigned Num = VN.lookup_or_add(I); // Allocations are always uniquely numbered, so we can save time and memory // by fast failing them. if (isa<AllocaInst>(I) || isa<TerminatorInst>(I) || isa<PHINode>(I)) { addToLeaderTable(Num, I, I->getParent()); return false; } // If the number we were assigned was a brand new VN, then we don't // need to do a lookup to see if the number already exists // somewhere in the domtree: it can't! if (Num >= NextNum) { addToLeaderTable(Num, I, I->getParent()); return false; } // Perform fast-path value-number based elimination of values inherited from // dominators. Value *repl = findLeader(I->getParent(), Num); if (!repl) { // Failure, just remember this instance for future use. addToLeaderTable(Num, I, I->getParent()); return false; } // Remove it! patchAndReplaceAllUsesWith(I, repl); if (MD && repl->getType()->getScalarType()->isPointerTy()) MD->invalidateCachedPointerInfo(repl); markInstructionForDeletion(I); return true; } /// runOnFunction - This is the main transformation entry point for a function. 
bool GVN::runOnFunction(Function& F) { if (skipOptnoneFunction(F)) return false; if (!NoLoads) MD = &getAnalysis<MemoryDependenceAnalysis>(); DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>()); VN.setMemDep(MD); VN.setDomTree(DT); bool Changed = false; bool ShouldContinue = true; // Merge unconditional branches, allowing PRE to catch more // optimization opportunities. for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) { BasicBlock *BB = FI++; bool removedBlock = MergeBlockIntoPredecessor( BB, DT, /* LoopInfo */ nullptr, VN.getAliasAnalysis(), MD); if (removedBlock) ++NumGVNBlocks; Changed |= removedBlock; } unsigned Iteration = 0; while (ShouldContinue) { DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n"); ShouldContinue = iterateOnFunction(F); Changed |= ShouldContinue; ++Iteration; } if (EnablePRE) { // Fabricate val-num for dead-code in order to suppress assertion in // performPRE(). assignValNumForDeadCode(); bool PREChanged = true; while (PREChanged) { PREChanged = performPRE(F); Changed |= PREChanged; } } // FIXME: Should perform GVN again after PRE does something. PRE can move // computations into blocks where they become fully redundant. Note that // we can't do this until PRE's critical edge splitting updates memdep. // Actually, when this happens, we should just fully integrate PRE into GVN. cleanupGlobalSets(); // Do not cleanup DeadBlocks in cleanupGlobalSets() as it's called for each // iteration. DeadBlocks.clear(); return Changed; } bool GVN::processBlock(BasicBlock *BB) { // FIXME: Kill off InstrsToErase by doing erasing eagerly in a helper function // (and incrementing BI before processing an instruction). assert(InstrsToErase.empty() && "We expect InstrsToErase to be empty across iterations"); if (DeadBlocks.count(BB)) return false; bool ChangedFunction = false; for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) { ChangedFunction |= processInstruction(BI); if (InstrsToErase.empty()) { ++BI; continue; } // If we need some instructions deleted, do it now. NumGVNInstr += InstrsToErase.size(); // Avoid iterator invalidation. bool AtStart = BI == BB->begin(); if (!AtStart) --BI; for (SmallVectorImpl<Instruction *>::iterator I = InstrsToErase.begin(), E = InstrsToErase.end(); I != E; ++I) { DEBUG(dbgs() << "GVN removed: " << **I << '\n'); if (MD) MD->removeInstruction(*I); DEBUG(verifyRemoved(*I)); (*I)->eraseFromParent(); } InstrsToErase.clear(); if (AtStart) BI = BB->begin(); else ++BI; } return ChangedFunction; } // Instantiate an expression in a predecessor that lacked it. bool GVN::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred, unsigned int ValNo) { // Because we are going top-down through the block, all value numbers // will be available in the predecessor by the time we need them. Any // that weren't originally present will have been instantiated earlier // in this loop. bool success = true; for (unsigned i = 0, e = Instr->getNumOperands(); i != e; ++i) { Value *Op = Instr->getOperand(i); if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op)) continue; if (Value *V = findLeader(Pred, VN.lookup(Op))) { Instr->setOperand(i, V); } else { success = false; break; } } // Fail out if we encounter an operand that is not available in // the PRE predecessor. 
This is typically because of loads which // are not value numbered precisely. if (!success) return false; Instr->insertBefore(Pred->getTerminator()); Instr->setName(Instr->getName() + ".pre"); Instr->setDebugLoc(Instr->getDebugLoc()); VN.add(Instr, ValNo); // Update the availability map to include the new instruction. addToLeaderTable(ValNo, Instr, Pred); return true; } bool GVN::performScalarPRE(Instruction *CurInst) { SmallVector<std::pair<Value*, BasicBlock*>, 8> predMap; if (isa<AllocaInst>(CurInst) || isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) || CurInst->getType()->isVoidTy() || CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() || isa<DbgInfoIntrinsic>(CurInst)) return false; // Don't do PRE on compares. The PHI would prevent CodeGenPrepare from // sinking the compare again, and it would force the code generator to // move the i1 from processor flags or predicate registers into a general // purpose register. if (isa<CmpInst>(CurInst)) return false; // HLSL Change Begin - Don't do PRE on pointer which may generate phi of // pointers. if (dyn_cast<PointerType>(CurInst->getType())) { return false; } // HLSL Change End // We don't currently value number ANY inline asm calls. if (CallInst *CallI = dyn_cast<CallInst>(CurInst)) if (CallI->isInlineAsm()) return false; uint32_t ValNo = VN.lookup(CurInst); // Look for the predecessors for PRE opportunities. We're // only trying to solve the basic diamond case, where // a value is computed in the successor and one predecessor, // but not the other. We also explicitly disallow cases // where the successor is its own predecessor, because they're // more complicated to get right. unsigned NumWith = 0; unsigned NumWithout = 0; BasicBlock *PREPred = nullptr; BasicBlock *CurrentBlock = CurInst->getParent(); predMap.clear(); for (pred_iterator PI = pred_begin(CurrentBlock), PE = pred_end(CurrentBlock); PI != PE; ++PI) { BasicBlock *P = *PI; // We're not interested in PRE where the block is its // own predecessor, or in blocks with predecessors // that are not reachable. if (P == CurrentBlock) { NumWithout = 2; break; } else if (!DT->isReachableFromEntry(P)) { NumWithout = 2; break; } Value *predV = findLeader(P, ValNo); if (!predV) { predMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P)); PREPred = P; ++NumWithout; } else if (predV == CurInst) { /* CurInst dominates this predecessor. */ NumWithout = 2; break; } else { predMap.push_back(std::make_pair(predV, P)); ++NumWith; } } // Don't do PRE when it might increase code size, i.e. when // we would need to insert instructions in more than one pred. if (NumWithout > 1 || NumWith == 0) return false; // We may have a case where all predecessors have the instruction, // and we just need to insert a phi node. Otherwise, perform // insertion. Instruction *PREInstr = nullptr; if (NumWithout != 0) { // Don't do PRE across indirect branch. if (isa<IndirectBrInst>(PREPred->getTerminator())) return false; // We can't do PRE safely on a critical edge, so instead we schedule // the edge to be split and perform the PRE the next time we iterate // on the function. unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock); if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) { toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum)); return false; } // We need to insert somewhere, so let's give it a shot PREInstr = CurInst->clone(); if (!performScalarPREInsertion(PREInstr, PREPred, ValNo)) { // If we failed insertion, make sure we remove the instruction. 
DEBUG(verifyRemoved(PREInstr)); delete PREInstr; return false; } } // Either we should have filled in the PRE instruction, or we should // not have needed insertions. assert (PREInstr != nullptr || NumWithout == 0); ++NumGVNPRE; // Create a PHI to make the value available in this block. PHINode *Phi = PHINode::Create(CurInst->getType(), predMap.size(), CurInst->getName() + ".pre-phi", CurrentBlock->begin()); for (unsigned i = 0, e = predMap.size(); i != e; ++i) { if (Value *V = predMap[i].first) Phi->addIncoming(V, predMap[i].second); else Phi->addIncoming(PREInstr, PREPred); } VN.add(Phi, ValNo); addToLeaderTable(ValNo, Phi, CurrentBlock); Phi->setDebugLoc(CurInst->getDebugLoc()); CurInst->replaceAllUsesWith(Phi); if (Phi->getType()->getScalarType()->isPointerTy()) { // Because we have added a PHI-use of the pointer value, it has now // "escaped" from alias analysis' perspective. We need to inform // AA of this. for (unsigned ii = 0, ee = Phi->getNumIncomingValues(); ii != ee; ++ii) { unsigned jj = PHINode::getOperandNumForIncomingValue(ii); VN.getAliasAnalysis()->addEscapingUse(Phi->getOperandUse(jj)); } if (MD) MD->invalidateCachedPointerInfo(Phi); } VN.erase(CurInst); removeFromLeaderTable(ValNo, CurInst, CurrentBlock); DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n'); if (MD) MD->removeInstruction(CurInst); DEBUG(verifyRemoved(CurInst)); CurInst->eraseFromParent(); ++NumGVNInstr; return true; } /// Perform a purely local form of PRE that looks for diamond /// control flow patterns and attempts to perform simple PRE at the join point. bool GVN::performPRE(Function &F) { bool Changed = false; for (BasicBlock *CurrentBlock : depth_first(&F.getEntryBlock())) { // Nothing to PRE in the entry block. if (CurrentBlock == &F.getEntryBlock()) continue; // Don't perform PRE on a landing pad. if (CurrentBlock->isLandingPad()) continue; for (BasicBlock::iterator BI = CurrentBlock->begin(), BE = CurrentBlock->end(); BI != BE;) { Instruction *CurInst = BI++; Changed = performScalarPRE(CurInst); } } if (splitCriticalEdges()) Changed = true; return Changed; } /// Split the critical edge connecting the given two blocks, and return /// the block inserted to the critical edge. BasicBlock *GVN::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) { BasicBlock *BB = SplitCriticalEdge( Pred, Succ, CriticalEdgeSplittingOptions(getAliasAnalysis(), DT)); if (MD) MD->invalidateCachedPredecessors(); return BB; } /// Split critical edges found during the previous /// iteration that may enable further optimization. bool GVN::splitCriticalEdges() { if (toSplit.empty()) return false; do { std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val(); SplitCriticalEdge(Edge.first, Edge.second, CriticalEdgeSplittingOptions(getAliasAnalysis(), DT)); } while (!toSplit.empty()); if (MD) MD->invalidateCachedPredecessors(); return true; } /// Executes one iteration of GVN bool GVN::iterateOnFunction(Function &F) { cleanupGlobalSets(); // Top-down walk of the dominator tree bool Changed = false; // Save the blocks this function have before transformation begins. GVN may // split critical edge, and hence may invalidate the RPO/DT iterator. // std::vector<BasicBlock *> BBVect; BBVect.reserve(256); // Needed for value numbering with phi construction to work. 
ReversePostOrderTraversal<Function *> RPOT(&F); for (ReversePostOrderTraversal<Function *>::rpo_iterator RI = RPOT.begin(), RE = RPOT.end(); RI != RE; ++RI) BBVect.push_back(*RI); for (std::vector<BasicBlock *>::iterator I = BBVect.begin(), E = BBVect.end(); I != E; I++) Changed |= processBlock(*I); return Changed; } void GVN::cleanupGlobalSets() { VN.clear(); LeaderTable.clear(); TableAllocator.Reset(); } /// Verify that the specified instruction does not occur in our /// internal data structures. void GVN::verifyRemoved(const Instruction *Inst) const { VN.verifyRemoved(Inst); // Walk through the value number scope to make sure the instruction isn't // ferreted away in it. for (DenseMap<uint32_t, LeaderTableEntry>::const_iterator I = LeaderTable.begin(), E = LeaderTable.end(); I != E; ++I) { const LeaderTableEntry *Node = &I->second; assert(Node->Val != Inst && "Inst still in value numbering scope!"); while (Node->Next) { Node = Node->Next; assert(Node->Val != Inst && "Inst still in value numbering scope!"); } } } /// BB is declared dead, which implied other blocks become dead as well. This /// function is to add all these blocks to "DeadBlocks". For the dead blocks' /// live successors, update their phi nodes by replacing the operands /// corresponding to dead blocks with UndefVal. void GVN::addDeadBlock(BasicBlock *BB) { SmallVector<BasicBlock *, 4> NewDead; SmallSetVector<BasicBlock *, 4> DF; NewDead.push_back(BB); while (!NewDead.empty()) { BasicBlock *D = NewDead.pop_back_val(); if (DeadBlocks.count(D)) continue; // All blocks dominated by D are dead. SmallVector<BasicBlock *, 8> Dom; DT->getDescendants(D, Dom); DeadBlocks.insert(Dom.begin(), Dom.end()); // Figure out the dominance-frontier(D). for (SmallVectorImpl<BasicBlock *>::iterator I = Dom.begin(), E = Dom.end(); I != E; I++) { BasicBlock *B = *I; for (succ_iterator SI = succ_begin(B), SE = succ_end(B); SI != SE; SI++) { BasicBlock *S = *SI; if (DeadBlocks.count(S)) continue; bool AllPredDead = true; for (pred_iterator PI = pred_begin(S), PE = pred_end(S); PI != PE; PI++) if (!DeadBlocks.count(*PI)) { AllPredDead = false; break; } if (!AllPredDead) { // S could be proved dead later on. That is why we don't update phi // operands at this moment. DF.insert(S); } else { // While S is not dominated by D, it is dead by now. This could take // place if S already have a dead predecessor before D is declared // dead. NewDead.push_back(S); } } } } // For the dead blocks' live successors, update their phi nodes by replacing // the operands corresponding to dead blocks with UndefVal. for(SmallSetVector<BasicBlock *, 4>::iterator I = DF.begin(), E = DF.end(); I != E; I++) { BasicBlock *B = *I; if (DeadBlocks.count(B)) continue; SmallVector<BasicBlock *, 4> Preds(pred_begin(B), pred_end(B)); for (SmallVectorImpl<BasicBlock *>::iterator PI = Preds.begin(), PE = Preds.end(); PI != PE; PI++) { BasicBlock *P = *PI; if (!DeadBlocks.count(P)) continue; if (isCriticalEdge(P->getTerminator(), GetSuccessorNumber(P, B))) { if (BasicBlock *S = splitCriticalEdges(P, B)) DeadBlocks.insert(P = S); } for (BasicBlock::iterator II = B->begin(); isa<PHINode>(II); ++II) { PHINode &Phi = cast<PHINode>(*II); Phi.setIncomingValue(Phi.getBasicBlockIndex(P), UndefValue::get(Phi.getType())); } } } } // If the given branch is recognized as a foldable branch (i.e. conditional // branch with constant condition), it will perform following analyses and // transformation. // 1) If the dead out-coming edge is a critical-edge, split it. 
Let // R be the target of the dead out-coming edge. // 1) Identify the set of dead blocks implied by the branch's dead outcoming // edge. The result of this step will be {X| X is dominated by R} // 2) Identify those blocks which haves at least one dead prodecessor. The // result of this step will be dominance-frontier(R). // 3) Update the PHIs in DF(R) by replacing the operands corresponding to // dead blocks with "UndefVal" in an hope these PHIs will optimized away. // // Return true iff *NEW* dead code are found. bool GVN::processFoldableCondBr(BranchInst *BI) { if (!BI || BI->isUnconditional()) return false; // If a branch has two identical successors, we cannot declare either dead. if (BI->getSuccessor(0) == BI->getSuccessor(1)) return false; ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition()); if (!Cond) return false; BasicBlock *DeadRoot = Cond->getZExtValue() ? BI->getSuccessor(1) : BI->getSuccessor(0); if (DeadBlocks.count(DeadRoot)) return false; if (!DeadRoot->getSinglePredecessor()) DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot); addDeadBlock(DeadRoot); return true; } // performPRE() will trigger assert if it comes across an instruction without // associated val-num. As it normally has far more live instructions than dead // instructions, it makes more sense just to "fabricate" a val-number for the // dead code than checking if instruction involved is dead or not. void GVN::assignValNumForDeadCode() { for (SetVector<BasicBlock *>::iterator I = DeadBlocks.begin(), E = DeadBlocks.end(); I != E; I++) { BasicBlock *BB = *I; for (BasicBlock::iterator II = BB->begin(), EE = BB->end(); II != EE; II++) { Instruction *Inst = &*II; unsigned ValNum = VN.lookup_or_add(Inst); addToLeaderTable(ValNum, Inst, BB); } } }
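The scalar PRE path above only handles the diamond its comments describe: the expression is fully available in one predecessor of the join block and missing in the other, so the pass clones the instruction into the predecessor that lacks it (or queues a critical-edge split for the next iteration) and replaces the original computation with a phi of the two available copies. A minimal source-level sketch of that shape, with hypothetical names that do not come from this file:

int pre_example(int a, int b, bool flag) {
  int t;
  if (flag)
    t = a + b;          // predecessor where a + b is already available
  else
    t = 0;              // predecessor "without" the value; PRE inserts a clone of a + b here
  return t + (a + b);   // after PRE this a + b becomes a phi of the two copies
}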
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/FlattenCFGPass.cpp
//===- FlattenCFGPass.cpp - CFG Flatten Pass ----------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements flattening of CFG. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/IR/CFG.h" #include "llvm/Pass.h" #include "llvm/Transforms/Utils/Local.h" using namespace llvm; #define DEBUG_TYPE "flattencfg" namespace { struct FlattenCFGPass : public FunctionPass { static char ID; // Pass identification, replacement for typeid public: FlattenCFGPass() : FunctionPass(ID) { initializeFlattenCFGPassPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function &F) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<AliasAnalysis>(); } private: AliasAnalysis *AA; }; } char FlattenCFGPass::ID = 0; INITIALIZE_PASS_BEGIN(FlattenCFGPass, "flattencfg", "Flatten the CFG", false, false) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_END(FlattenCFGPass, "flattencfg", "Flatten the CFG", false, false) // Public interface to the FlattenCFG pass FunctionPass *llvm::createFlattenCFGPass() { return new FlattenCFGPass(); } /// iterativelyFlattenCFG - Call FlattenCFG on all the blocks in the function, /// iterating until no more changes are made. static bool iterativelyFlattenCFG(Function &F, AliasAnalysis *AA) { bool Changed = false; bool LocalChange = true; while (LocalChange) { LocalChange = false; // Loop over all of the basic blocks and remove them if they are unneeded... // for (Function::iterator BBIt = F.begin(); BBIt != F.end();) { if (FlattenCFG(BBIt++, AA)) { LocalChange = true; } } Changed |= LocalChange; } return Changed; } bool FlattenCFGPass::runOnFunction(Function &F) { AA = &getAnalysis<AliasAnalysis>(); bool EverChanged = false; // iterativelyFlattenCFG can make some blocks dead. while (iterativelyFlattenCFG(F, AA)) { removeUnreachableBlocks(F); EverChanged = true; } return EverChanged; }
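This pass is only a driver: the per-block work is done by the FlattenCFG utility from Transforms/Utils, iterativelyFlattenCFG reapplies it until nothing changes, and runOnFunction then prunes any blocks the flattening left unreachable. Roughly speaking, the utility merges chained conditional branches that lead to the same place into one block guarded by a combined condition; the C++ below is only a conceptual sketch of that effect (the real transform rewrites the IR CFG, and work() is a hypothetical callee):

void work();

// Before: two blocks, two conditional branches.
void nested(bool a, bool b) {
  if (a) {
    if (b)
      work();
  }
}

// After flattening, conceptually: one branch on the merged condition. A
// bitwise '&' is used because both conditions end up being evaluated
// unconditionally in the merged block.
void flat(bool a, bool b) {
  if (a & b)
    work();
}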
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
//===-- InductiveRangeCheckElimination.cpp - ------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // The InductiveRangeCheckElimination pass splits a loop's iteration space into // three disjoint ranges. It does that in a way such that the loop running in // the middle loop provably does not need range checks. As an example, it will // convert // // len = < known positive > // for (i = 0; i < n; i++) { // if (0 <= i && i < len) { // do_something(); // } else { // throw_out_of_bounds(); // } // } // // to // // len = < known positive > // limit = smin(n, len) // // no first segment // for (i = 0; i < limit; i++) { // if (0 <= i && i < len) { // this check is fully redundant // do_something(); // } else { // throw_out_of_bounds(); // } // } // for (i = limit; i < n; i++) { // if (0 <= i && i < len) { // do_something(); // } else { // throw_out_of_bounds(); // } // } //===----------------------------------------------------------------------===// #include "llvm/ADT/Optional.h" #include "llvm/Analysis/BranchProbabilityInfo.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/LoopPass.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/ScalarEvolutionExpander.h" #include "llvm/Analysis/ScalarEvolutionExpressions.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/IR/PatternMatch.h" #include "llvm/IR/ValueHandle.h" #include "llvm/IR/Verifier.h" #include "llvm/Pass.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Cloning.h" #include "llvm/Transforms/Utils/LoopUtils.h" #include "llvm/Transforms/Utils/SimplifyIndVar.h" #include "llvm/Transforms/Utils/UnrollLoop.h" #include <array> using namespace llvm; static cl::opt<unsigned> LoopSizeCutoff("irce-loop-size-cutoff", cl::Hidden, cl::init(64)); static cl::opt<bool> PrintChangedLoops("irce-print-changed-loops", cl::Hidden, cl::init(false)); static cl::opt<bool> PrintRangeChecks("irce-print-range-checks", cl::Hidden, cl::init(false)); static cl::opt<int> MaxExitProbReciprocal("irce-max-exit-prob-reciprocal", cl::Hidden, cl::init(10)); #define DEBUG_TYPE "irce" namespace { /// An inductive range check is conditional branch in a loop with /// /// 1. a very cold successor (i.e. the branch jumps to that successor very /// rarely) /// /// and /// /// 2. a condition that is provably true for some contiguous range of values /// taken by the containing loop's induction variable. /// class InductiveRangeCheck { // Classifies a range check enum RangeCheckKind : unsigned { // Range check of the form "0 <= I". RANGE_CHECK_LOWER = 1, // Range check of the form "I < L" where L is known positive. RANGE_CHECK_UPPER = 2, // The logical and of the RANGE_CHECK_LOWER and RANGE_CHECK_UPPER // conditions. RANGE_CHECK_BOTH = RANGE_CHECK_LOWER | RANGE_CHECK_UPPER, // Unrecognized range check condition. 
RANGE_CHECK_UNKNOWN = (unsigned)-1 }; static const char *rangeCheckKindToStr(RangeCheckKind); const SCEV *Offset; const SCEV *Scale; Value *Length; BranchInst *Branch; RangeCheckKind Kind; static RangeCheckKind parseRangeCheckICmp(Loop *L, ICmpInst *ICI, ScalarEvolution &SE, Value *&Index, Value *&Length); static InductiveRangeCheck::RangeCheckKind parseRangeCheck(Loop *L, ScalarEvolution &SE, Value *Condition, const SCEV *&Index, Value *&UpperLimit); InductiveRangeCheck() : Offset(nullptr), Scale(nullptr), Length(nullptr), Branch(nullptr) { } public: const SCEV *getOffset() const { return Offset; } const SCEV *getScale() const { return Scale; } Value *getLength() const { return Length; } void print(raw_ostream &OS) const { OS << "InductiveRangeCheck:\n"; OS << " Kind: " << rangeCheckKindToStr(Kind) << "\n"; OS << " Offset: "; Offset->print(OS); OS << " Scale: "; Scale->print(OS); OS << " Length: "; if (Length) Length->print(OS); else OS << "(null)"; OS << "\n Branch: "; getBranch()->print(OS); OS << "\n"; } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void dump() { print(dbgs()); } #endif BranchInst *getBranch() const { return Branch; } /// Represents an signed integer range [Range.getBegin(), Range.getEnd()). If /// R.getEnd() sle R.getBegin(), then R denotes the empty range. class Range { const SCEV *Begin; const SCEV *End; public: Range(const SCEV *Begin, const SCEV *End) : Begin(Begin), End(End) { assert(Begin->getType() == End->getType() && "ill-typed range!"); } Type *getType() const { return Begin->getType(); } const SCEV *getBegin() const { return Begin; } const SCEV *getEnd() const { return End; } }; typedef SpecificBumpPtrAllocator<InductiveRangeCheck> AllocatorTy; /// This is the value the condition of the branch needs to evaluate to for the /// branch to take the hot successor (see (1) above). bool getPassingDirection() { return true; } /// Computes a range for the induction variable (IndVar) in which the range /// check is redundant and can be constant-folded away. The induction /// variable is not required to be the canonical {0,+,1} induction variable. Optional<Range> computeSafeIterationSpace(ScalarEvolution &SE, const SCEVAddRecExpr *IndVar, IRBuilder<> &B) const; /// Create an inductive range check out of BI if possible, else return /// nullptr. 
static InductiveRangeCheck *create(AllocatorTy &Alloc, BranchInst *BI, Loop *L, ScalarEvolution &SE, BranchProbabilityInfo &BPI); }; class InductiveRangeCheckElimination : public LoopPass { InductiveRangeCheck::AllocatorTy Allocator; public: static char ID; InductiveRangeCheckElimination() : LoopPass(ID) { initializeInductiveRangeCheckEliminationPass( *PassRegistry::getPassRegistry()); } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<LoopInfoWrapperPass>(); AU.addRequiredID(LoopSimplifyID); AU.addRequiredID(LCSSAID); AU.addRequired<ScalarEvolution>(); AU.addRequired<BranchProbabilityInfo>(); } bool runOnLoop(Loop *L, LPPassManager &LPM) override; }; char InductiveRangeCheckElimination::ID = 0; } INITIALIZE_PASS(InductiveRangeCheckElimination, "irce", "Inductive range check elimination", false, false) const char *InductiveRangeCheck::rangeCheckKindToStr( InductiveRangeCheck::RangeCheckKind RCK) { switch (RCK) { case InductiveRangeCheck::RANGE_CHECK_UNKNOWN: return "RANGE_CHECK_UNKNOWN"; case InductiveRangeCheck::RANGE_CHECK_UPPER: return "RANGE_CHECK_UPPER"; case InductiveRangeCheck::RANGE_CHECK_LOWER: return "RANGE_CHECK_LOWER"; case InductiveRangeCheck::RANGE_CHECK_BOTH: return "RANGE_CHECK_BOTH"; } llvm_unreachable("unknown range check type!"); } /// Parse a single ICmp instruction, `ICI`, into a range check. If `ICI` /// cannot /// be interpreted as a range check, return `RANGE_CHECK_UNKNOWN` and set /// `Index` and `Length` to `nullptr`. Otherwise set `Index` to the value /// being /// range checked, and set `Length` to the upper limit `Index` is being range /// checked with if (and only if) the range check type is stronger or equal to /// RANGE_CHECK_UPPER. /// InductiveRangeCheck::RangeCheckKind InductiveRangeCheck::parseRangeCheckICmp(Loop *L, ICmpInst *ICI, ScalarEvolution &SE, Value *&Index, Value *&Length) { auto IsNonNegativeAndNotLoopVarying = [&SE, L](Value *V) { const SCEV *S = SE.getSCEV(V); if (isa<SCEVCouldNotCompute>(S)) return false; return SE.getLoopDisposition(S, L) == ScalarEvolution::LoopInvariant && SE.isKnownNonNegative(S); }; using namespace llvm::PatternMatch; ICmpInst::Predicate Pred = ICI->getPredicate(); Value *LHS = ICI->getOperand(0); Value *RHS = ICI->getOperand(1); switch (Pred) { default: return RANGE_CHECK_UNKNOWN; case ICmpInst::ICMP_SLE: std::swap(LHS, RHS); LLVM_FALLTHROUGH; // HLSL Change case ICmpInst::ICMP_SGE: if (match(RHS, m_ConstantInt<0>())) { Index = LHS; return RANGE_CHECK_LOWER; } return RANGE_CHECK_UNKNOWN; case ICmpInst::ICMP_SLT: std::swap(LHS, RHS); LLVM_FALLTHROUGH; // HLSL Change case ICmpInst::ICMP_SGT: if (match(RHS, m_ConstantInt<-1>())) { Index = LHS; return RANGE_CHECK_LOWER; } if (IsNonNegativeAndNotLoopVarying(LHS)) { Index = RHS; Length = LHS; return RANGE_CHECK_UPPER; } return RANGE_CHECK_UNKNOWN; case ICmpInst::ICMP_ULT: std::swap(LHS, RHS); LLVM_FALLTHROUGH; // HLSL Change case ICmpInst::ICMP_UGT: if (IsNonNegativeAndNotLoopVarying(LHS)) { Index = RHS; Length = LHS; return RANGE_CHECK_BOTH; } return RANGE_CHECK_UNKNOWN; } llvm_unreachable("default clause returns!"); } /// Parses an arbitrary condition into a range check. `Length` is set only if /// the range check is recognized to be `RANGE_CHECK_UPPER` or stronger. 
InductiveRangeCheck::RangeCheckKind InductiveRangeCheck::parseRangeCheck(Loop *L, ScalarEvolution &SE, Value *Condition, const SCEV *&Index, Value *&Length) { using namespace llvm::PatternMatch; Value *A = nullptr; Value *B = nullptr; if (match(Condition, m_And(m_Value(A), m_Value(B)))) { Value *IndexA = nullptr, *IndexB = nullptr; Value *LengthA = nullptr, *LengthB = nullptr; ICmpInst *ICmpA = dyn_cast<ICmpInst>(A), *ICmpB = dyn_cast<ICmpInst>(B); if (!ICmpA || !ICmpB) return InductiveRangeCheck::RANGE_CHECK_UNKNOWN; auto RCKindA = parseRangeCheckICmp(L, ICmpA, SE, IndexA, LengthA); auto RCKindB = parseRangeCheckICmp(L, ICmpB, SE, IndexB, LengthB); if (RCKindA == InductiveRangeCheck::RANGE_CHECK_UNKNOWN || RCKindB == InductiveRangeCheck::RANGE_CHECK_UNKNOWN) return InductiveRangeCheck::RANGE_CHECK_UNKNOWN; if (IndexA != IndexB) return InductiveRangeCheck::RANGE_CHECK_UNKNOWN; if (LengthA != nullptr && LengthB != nullptr && LengthA != LengthB) return InductiveRangeCheck::RANGE_CHECK_UNKNOWN; Index = SE.getSCEV(IndexA); if (isa<SCEVCouldNotCompute>(Index)) return InductiveRangeCheck::RANGE_CHECK_UNKNOWN; Length = LengthA == nullptr ? LengthB : LengthA; return (InductiveRangeCheck::RangeCheckKind)(RCKindA | RCKindB); } if (ICmpInst *ICI = dyn_cast<ICmpInst>(Condition)) { Value *IndexVal = nullptr; auto RCKind = parseRangeCheckICmp(L, ICI, SE, IndexVal, Length); if (RCKind == InductiveRangeCheck::RANGE_CHECK_UNKNOWN) return InductiveRangeCheck::RANGE_CHECK_UNKNOWN; Index = SE.getSCEV(IndexVal); if (isa<SCEVCouldNotCompute>(Index)) return InductiveRangeCheck::RANGE_CHECK_UNKNOWN; return RCKind; } return InductiveRangeCheck::RANGE_CHECK_UNKNOWN; } InductiveRangeCheck * InductiveRangeCheck::create(InductiveRangeCheck::AllocatorTy &A, BranchInst *BI, Loop *L, ScalarEvolution &SE, BranchProbabilityInfo &BPI) { if (BI->isUnconditional() || BI->getParent() == L->getLoopLatch()) return nullptr; BranchProbability LikelyTaken(15, 16); if (BPI.getEdgeProbability(BI->getParent(), (unsigned) 0) < LikelyTaken) return nullptr; Value *Length = nullptr; const SCEV *IndexSCEV = nullptr; auto RCKind = InductiveRangeCheck::parseRangeCheck(L, SE, BI->getCondition(), IndexSCEV, Length); if (RCKind == InductiveRangeCheck::RANGE_CHECK_UNKNOWN) return nullptr; assert(IndexSCEV && "contract with SplitRangeCheckCondition!"); assert((!(RCKind & InductiveRangeCheck::RANGE_CHECK_UPPER) || Length) && "contract with SplitRangeCheckCondition!"); const SCEVAddRecExpr *IndexAddRec = dyn_cast<SCEVAddRecExpr>(IndexSCEV); bool IsAffineIndex = IndexAddRec && (IndexAddRec->getLoop() == L) && IndexAddRec->isAffine(); if (!IsAffineIndex) return nullptr; InductiveRangeCheck *IRC = new (A.Allocate()) InductiveRangeCheck; IRC->Length = Length; IRC->Offset = IndexAddRec->getStart(); IRC->Scale = IndexAddRec->getStepRecurrence(SE); IRC->Branch = BI; IRC->Kind = RCKind; return IRC; } namespace { // Keeps track of the structure of a loop. This is similar to llvm::Loop, // except that it is more lightweight and can track the state of a loop through // changing and potentially invalid IR. This structure also formalizes the // kinds of loops we can deal with -- ones that have a single latch that is also // an exiting block *and* have a canonical induction variable. struct LoopStructure { const char *Tag; BasicBlock *Header; BasicBlock *Latch; // `Latch's terminator instruction is `LatchBr', and it's `LatchBrExitIdx'th // successor is `LatchExit', the exit block of the loop. 
BranchInst *LatchBr; BasicBlock *LatchExit; unsigned LatchBrExitIdx; Value *IndVarNext; Value *IndVarStart; Value *LoopExitAt; bool IndVarIncreasing; LoopStructure() : Tag(""), Header(nullptr), Latch(nullptr), LatchBr(nullptr), LatchExit(nullptr), LatchBrExitIdx(-1), IndVarNext(nullptr), IndVarStart(nullptr), LoopExitAt(nullptr), IndVarIncreasing(false) {} template <typename M> LoopStructure map(M Map) const { LoopStructure Result; Result.Tag = Tag; Result.Header = cast<BasicBlock>(Map(Header)); Result.Latch = cast<BasicBlock>(Map(Latch)); Result.LatchBr = cast<BranchInst>(Map(LatchBr)); Result.LatchExit = cast<BasicBlock>(Map(LatchExit)); Result.LatchBrExitIdx = LatchBrExitIdx; Result.IndVarNext = Map(IndVarNext); Result.IndVarStart = Map(IndVarStart); Result.LoopExitAt = Map(LoopExitAt); Result.IndVarIncreasing = IndVarIncreasing; return Result; } static Optional<LoopStructure> parseLoopStructure(ScalarEvolution &, BranchProbabilityInfo &BPI, Loop &, const char *&); }; /// This class is used to constrain loops to run within a given iteration space. /// The algorithm this class implements is given a Loop and a range [Begin, /// End). The algorithm then tries to break out a "main loop" out of the loop /// it is given in a way that the "main loop" runs with the induction variable /// in a subset of [Begin, End). The algorithm emits appropriate pre and post /// loops to run any remaining iterations. The pre loop runs any iterations in /// which the induction variable is < Begin, and the post loop runs any /// iterations in which the induction variable is >= End. /// class LoopConstrainer { // The representation of a clone of the original loop we started out with. struct ClonedLoop { // The cloned blocks std::vector<BasicBlock *> Blocks; // `Map` maps values in the clonee into values in the cloned version ValueToValueMapTy Map; // An instance of `LoopStructure` for the cloned loop LoopStructure Structure; }; // Result of rewriting the range of a loop. See changeIterationSpaceEnd for // more details on what these fields mean. struct RewrittenRangeInfo { BasicBlock *PseudoExit; BasicBlock *ExitSelector; std::vector<PHINode *> PHIValuesAtPseudoExit; PHINode *IndVarEnd; RewrittenRangeInfo() : PseudoExit(nullptr), ExitSelector(nullptr), IndVarEnd(nullptr) {} }; // Calculated subranges we restrict the iteration space of the main loop to. // See the implementation of `calculateSubRanges' for more details on how // these fields are computed. `LowLimit` is None if there is no restriction // on low end of the restricted iteration space of the main loop. `HighLimit` // is None if there is no restriction on high end of the restricted iteration // space of the main loop. struct SubRanges { Optional<const SCEV *> LowLimit; Optional<const SCEV *> HighLimit; }; // A utility function that does a `replaceUsesOfWith' on the incoming block // set of a `PHINode' -- replaces instances of `Block' in the `PHINode's // incoming block list with `ReplaceBy'. static void replacePHIBlock(PHINode *PN, BasicBlock *Block, BasicBlock *ReplaceBy); // Compute a safe set of limits for the main loop to run in -- effectively the // intersection of `Range' and the iteration space of the original loop. // Return None if unable to compute the set of subranges. // Optional<SubRanges> calculateSubRanges() const; // Clone `OriginalLoop' and return the result in CLResult. 
The IR after // running `cloneLoop' is well formed except for the PHI nodes in CLResult -- // the PHI nodes say that there is an incoming edge from `OriginalPreheader` // but there is no such edge. // void cloneLoop(ClonedLoop &CLResult, const char *Tag) const; // Rewrite the iteration space of the loop denoted by (LS, Preheader). The // iteration space of the rewritten loop ends at ExitLoopAt. The start of the // iteration space is not changed. `ExitLoopAt' is assumed to be slt // `OriginalHeaderCount'. // // If there are iterations left to execute, control is made to jump to // `ContinuationBlock', otherwise they take the normal loop exit. The // returned `RewrittenRangeInfo' object is populated as follows: // // .PseudoExit is a basic block that unconditionally branches to // `ContinuationBlock'. // // .ExitSelector is a basic block that decides, on exit from the loop, // whether to branch to the "true" exit or to `PseudoExit'. // // .PHIValuesAtPseudoExit are PHINodes in `PseudoExit' that compute the value // for each PHINode in the loop header on taking the pseudo exit. // // After changeIterationSpaceEnd, `Preheader' is no longer a legitimate // preheader because it is made to branch to the loop header only // conditionally. // RewrittenRangeInfo changeIterationSpaceEnd(const LoopStructure &LS, BasicBlock *Preheader, Value *ExitLoopAt, BasicBlock *ContinuationBlock) const; // The loop denoted by `LS' has `OldPreheader' as its preheader. This // function creates a new preheader for `LS' and returns it. // BasicBlock *createPreheader(const LoopStructure &LS, BasicBlock *OldPreheader, const char *Tag) const; // `ContinuationBlockAndPreheader' was the continuation block for some call to // `changeIterationSpaceEnd' and is the preheader to the loop denoted by `LS'. // This function rewrites the PHI nodes in `LS.Header' to start with the // correct value. void rewriteIncomingValuesForPHIs( LoopStructure &LS, BasicBlock *ContinuationBlockAndPreheader, const LoopConstrainer::RewrittenRangeInfo &RRI) const; // Even though we do not preserve any passes at this time, we at least need to // keep the parent loop structure consistent. The `LPPassManager' seems to // verify this after running a loop pass. This function adds the list of // blocks denoted by BBs to this loops parent loop if required. void addToParentLoopIfNeeded(ArrayRef<BasicBlock *> BBs); // Some global state. Function &F; LLVMContext &Ctx; ScalarEvolution &SE; // Information about the original loop we started out with. Loop &OriginalLoop; LoopInfo &OriginalLoopInfo; const SCEV *LatchTakenCount; BasicBlock *OriginalPreheader; // The preheader of the main loop. This may or may not be different from // `OriginalPreheader'. BasicBlock *MainLoopPreheader; // The range we need to run the main loop in. InductiveRangeCheck::Range Range; // The structure of the main loop (see comment at the beginning of this class // for a definition) LoopStructure MainLoopStructure; public: LoopConstrainer(Loop &L, LoopInfo &LI, const LoopStructure &LS, ScalarEvolution &SE, InductiveRangeCheck::Range R) : F(*L.getHeader()->getParent()), Ctx(L.getHeader()->getContext()), SE(SE), OriginalLoop(L), OriginalLoopInfo(LI), LatchTakenCount(nullptr), OriginalPreheader(nullptr), MainLoopPreheader(nullptr), Range(R), MainLoopStructure(LS) {} // Entry point for the algorithm. Returns true on success. 
bool run(); }; } void LoopConstrainer::replacePHIBlock(PHINode *PN, BasicBlock *Block, BasicBlock *ReplaceBy) { for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) if (PN->getIncomingBlock(i) == Block) PN->setIncomingBlock(i, ReplaceBy); } static bool CanBeSMax(ScalarEvolution &SE, const SCEV *S) { APInt SMax = APInt::getSignedMaxValue(cast<IntegerType>(S->getType())->getBitWidth()); return SE.getSignedRange(S).contains(SMax) && SE.getUnsignedRange(S).contains(SMax); } static bool CanBeSMin(ScalarEvolution &SE, const SCEV *S) { APInt SMin = APInt::getSignedMinValue(cast<IntegerType>(S->getType())->getBitWidth()); return SE.getSignedRange(S).contains(SMin) && SE.getUnsignedRange(S).contains(SMin); } Optional<LoopStructure> LoopStructure::parseLoopStructure(ScalarEvolution &SE, BranchProbabilityInfo &BPI, Loop &L, const char *&FailureReason) { assert(L.isLoopSimplifyForm() && "should follow from addRequired<>"); BasicBlock *Latch = L.getLoopLatch(); if (!L.isLoopExiting(Latch)) { FailureReason = "no loop latch"; return None; } BasicBlock *Header = L.getHeader(); BasicBlock *Preheader = L.getLoopPreheader(); if (!Preheader) { FailureReason = "no preheader"; return None; } BranchInst *LatchBr = dyn_cast<BranchInst>(&*Latch->rbegin()); if (!LatchBr || LatchBr->isUnconditional()) { FailureReason = "latch terminator not conditional branch"; return None; } unsigned LatchBrExitIdx = LatchBr->getSuccessor(0) == Header ? 1 : 0; BranchProbability ExitProbability = BPI.getEdgeProbability(LatchBr->getParent(), LatchBrExitIdx); if (ExitProbability > BranchProbability(1, MaxExitProbReciprocal)) { FailureReason = "short running loop, not profitable"; return None; } ICmpInst *ICI = dyn_cast<ICmpInst>(LatchBr->getCondition()); if (!ICI || !isa<IntegerType>(ICI->getOperand(0)->getType())) { FailureReason = "latch terminator branch not conditional on integral icmp"; return None; } const SCEV *LatchCount = SE.getExitCount(&L, Latch); if (isa<SCEVCouldNotCompute>(LatchCount)) { FailureReason = "could not compute latch count"; return None; } ICmpInst::Predicate Pred = ICI->getPredicate(); Value *LeftValue = ICI->getOperand(0); const SCEV *LeftSCEV = SE.getSCEV(LeftValue); IntegerType *IndVarTy = cast<IntegerType>(LeftValue->getType()); Value *RightValue = ICI->getOperand(1); const SCEV *RightSCEV = SE.getSCEV(RightValue); // We canonicalize `ICI` such that `LeftSCEV` is an add recurrence. if (!isa<SCEVAddRecExpr>(LeftSCEV)) { if (isa<SCEVAddRecExpr>(RightSCEV)) { std::swap(LeftSCEV, RightSCEV); std::swap(LeftValue, RightValue); Pred = ICmpInst::getSwappedPredicate(Pred); } else { FailureReason = "no add recurrences in the icmp"; return None; } } auto HasNoSignedWrap = [&](const SCEVAddRecExpr *AR) { if (AR->getNoWrapFlags(SCEV::FlagNSW)) return true; IntegerType *Ty = cast<IntegerType>(AR->getType()); IntegerType *WideTy = IntegerType::get(Ty->getContext(), Ty->getBitWidth() * 2); const SCEVAddRecExpr *ExtendAfterOp = dyn_cast<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy)); if (ExtendAfterOp) { const SCEV *ExtendedStart = SE.getSignExtendExpr(AR->getStart(), WideTy); const SCEV *ExtendedStep = SE.getSignExtendExpr(AR->getStepRecurrence(SE), WideTy); bool NoSignedWrap = ExtendAfterOp->getStart() == ExtendedStart && ExtendAfterOp->getStepRecurrence(SE) == ExtendedStep; if (NoSignedWrap) return true; } // We may have proved this when computing the sign extension above. 
return AR->getNoWrapFlags(SCEV::FlagNSW) != SCEV::FlagAnyWrap; }; auto IsInductionVar = [&](const SCEVAddRecExpr *AR, bool &IsIncreasing) { if (!AR->isAffine()) return false; // Currently we only work with induction variables that have been proved to // not wrap. This restriction can potentially be lifted in the future. if (!HasNoSignedWrap(AR)) return false; if (const SCEVConstant *StepExpr = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE))) { ConstantInt *StepCI = StepExpr->getValue(); if (StepCI->isOne() || StepCI->isMinusOne()) { IsIncreasing = StepCI->isOne(); return true; } } return false; }; // `ICI` is interpreted as taking the backedge if the *next* value of the // induction variable satisfies some constraint. const SCEVAddRecExpr *IndVarNext = cast<SCEVAddRecExpr>(LeftSCEV); bool IsIncreasing = false; if (!IsInductionVar(IndVarNext, IsIncreasing)) { FailureReason = "LHS in icmp not induction variable"; return None; } ConstantInt *One = ConstantInt::get(IndVarTy, 1); // TODO: generalize the predicates here to also match their unsigned variants. if (IsIncreasing) { bool FoundExpectedPred = (Pred == ICmpInst::ICMP_SLT && LatchBrExitIdx == 1) || (Pred == ICmpInst::ICMP_SGT && LatchBrExitIdx == 0); if (!FoundExpectedPred) { FailureReason = "expected icmp slt semantically, found something else"; return None; } if (LatchBrExitIdx == 0) { if (CanBeSMax(SE, RightSCEV)) { // TODO: this restriction is easily removable -- we just have to // remember that the icmp was an slt and not an sle. FailureReason = "limit may overflow when coercing sle to slt"; return None; } IRBuilder<> B(&*Preheader->rbegin()); RightValue = B.CreateAdd(RightValue, One); } } else { bool FoundExpectedPred = (Pred == ICmpInst::ICMP_SGT && LatchBrExitIdx == 1) || (Pred == ICmpInst::ICMP_SLT && LatchBrExitIdx == 0); if (!FoundExpectedPred) { FailureReason = "expected icmp sgt semantically, found something else"; return None; } if (LatchBrExitIdx == 0) { if (CanBeSMin(SE, RightSCEV)) { // TODO: this restriction is easily removable -- we just have to // remember that the icmp was an sgt and not an sge. 
FailureReason = "limit may overflow when coercing sge to sgt"; return None; } IRBuilder<> B(&*Preheader->rbegin()); RightValue = B.CreateSub(RightValue, One); } } const SCEV *StartNext = IndVarNext->getStart(); const SCEV *Addend = SE.getNegativeSCEV(IndVarNext->getStepRecurrence(SE)); const SCEV *IndVarStart = SE.getAddExpr(StartNext, Addend); BasicBlock *LatchExit = LatchBr->getSuccessor(LatchBrExitIdx); assert(SE.getLoopDisposition(LatchCount, &L) == ScalarEvolution::LoopInvariant && "loop variant exit count doesn't make sense!"); assert(!L.contains(LatchExit) && "expected an exit block!"); const DataLayout &DL = Preheader->getModule()->getDataLayout(); Value *IndVarStartV = SCEVExpander(SE, DL, "irce") .expandCodeFor(IndVarStart, IndVarTy, &*Preheader->rbegin()); IndVarStartV->setName("indvar.start"); LoopStructure Result; Result.Tag = "main"; Result.Header = Header; Result.Latch = Latch; Result.LatchBr = LatchBr; Result.LatchExit = LatchExit; Result.LatchBrExitIdx = LatchBrExitIdx; Result.IndVarStart = IndVarStartV; Result.IndVarNext = LeftValue; Result.IndVarIncreasing = IsIncreasing; Result.LoopExitAt = RightValue; FailureReason = nullptr; return Result; } Optional<LoopConstrainer::SubRanges> LoopConstrainer::calculateSubRanges() const { IntegerType *Ty = cast<IntegerType>(LatchTakenCount->getType()); if (Range.getType() != Ty) return None; LoopConstrainer::SubRanges Result; // I think we can be more aggressive here and make this nuw / nsw if the // addition that feeds into the icmp for the latch's terminating branch is nuw // / nsw. In any case, a wrapping 2's complement addition is safe. ConstantInt *One = ConstantInt::get(Ty, 1); const SCEV *Start = SE.getSCEV(MainLoopStructure.IndVarStart); const SCEV *End = SE.getSCEV(MainLoopStructure.LoopExitAt); bool Increasing = MainLoopStructure.IndVarIncreasing; // We compute `Smallest` and `Greatest` such that [Smallest, Greatest) is the // range of values the induction variable takes. const SCEV *Smallest = nullptr, *Greatest = nullptr; if (Increasing) { Smallest = Start; Greatest = End; } else { // These two computations may sign-overflow. Here is why that is okay: // // We know that the induction variable does not sign-overflow on any // iteration except the last one, and it starts at `Start` and ends at // `End`, decrementing by one every time. // // * if `Smallest` sign-overflows we know `End` is `INT_SMAX`. Since the // induction variable is decreasing we know that that the smallest value // the loop body is actually executed with is `INT_SMIN` == `Smallest`. // // * if `Greatest` sign-overflows, we know it can only be `INT_SMIN`. In // that case, `Clamp` will always return `Smallest` and // [`Result.LowLimit`, `Result.HighLimit`) = [`Smallest`, `Smallest`) // will be an empty range. Returning an empty range is always safe. 
// Smallest = SE.getAddExpr(End, SE.getSCEV(One)); Greatest = SE.getAddExpr(Start, SE.getSCEV(One)); } auto Clamp = [this, Smallest, Greatest](const SCEV *S) { return SE.getSMaxExpr(Smallest, SE.getSMinExpr(Greatest, S)); }; // In some cases we can prove that we don't need a pre or post loop bool ProvablyNoPreloop = SE.isKnownPredicate(ICmpInst::ICMP_SLE, Range.getBegin(), Smallest); if (!ProvablyNoPreloop) Result.LowLimit = Clamp(Range.getBegin()); bool ProvablyNoPostLoop = SE.isKnownPredicate(ICmpInst::ICMP_SLE, Greatest, Range.getEnd()); if (!ProvablyNoPostLoop) Result.HighLimit = Clamp(Range.getEnd()); return Result; } void LoopConstrainer::cloneLoop(LoopConstrainer::ClonedLoop &Result, const char *Tag) const { for (BasicBlock *BB : OriginalLoop.getBlocks()) { BasicBlock *Clone = CloneBasicBlock(BB, Result.Map, Twine(".") + Tag, &F); Result.Blocks.push_back(Clone); Result.Map[BB] = Clone; } auto GetClonedValue = [&Result](Value *V) { assert(V && "null values not in domain!"); auto It = Result.Map.find(V); if (It == Result.Map.end()) return V; return static_cast<Value *>(It->second); }; Result.Structure = MainLoopStructure.map(GetClonedValue); Result.Structure.Tag = Tag; for (unsigned i = 0, e = Result.Blocks.size(); i != e; ++i) { BasicBlock *ClonedBB = Result.Blocks[i]; BasicBlock *OriginalBB = OriginalLoop.getBlocks()[i]; assert(Result.Map[OriginalBB] == ClonedBB && "invariant!"); for (Instruction &I : *ClonedBB) RemapInstruction(&I, Result.Map, RF_NoModuleLevelChanges | RF_IgnoreMissingEntries); // Exit blocks will now have one more predecessor and their PHI nodes need // to be edited to reflect that. No phi nodes need to be introduced because // the loop is in LCSSA. for (auto SBBI = succ_begin(OriginalBB), SBBE = succ_end(OriginalBB); SBBI != SBBE; ++SBBI) { if (OriginalLoop.contains(*SBBI)) continue; // not an exit block for (Instruction &I : **SBBI) { if (!isa<PHINode>(&I)) break; PHINode *PN = cast<PHINode>(&I); Value *OldIncoming = PN->getIncomingValueForBlock(OriginalBB); PN->addIncoming(GetClonedValue(OldIncoming), ClonedBB); } } } } LoopConstrainer::RewrittenRangeInfo LoopConstrainer::changeIterationSpaceEnd( const LoopStructure &LS, BasicBlock *Preheader, Value *ExitSubloopAt, BasicBlock *ContinuationBlock) const { // We start with a loop with a single latch: // // +--------------------+ // | | // | preheader | // | | // +--------+-----------+ // | ----------------\ // | / | // +--------v----v------+ | // | | | // | header | | // | | | // +--------------------+ | // | // ..... | // | // +--------------------+ | // | | | // | latch >----------/ // | | // +-------v------------+ // | // | // | +--------------------+ // | | | // +---> original exit | // | | // +--------------------+ // // We change the control flow to look like // // // +--------------------+ // | | // | preheader >-------------------------+ // | | | // +--------v-----------+ | // | /-------------+ | // | / | | // +--------v--v--------+ | | // | | | | // | header | | +--------+ | // | | | | | | // +--------------------+ | | +-----v-----v-----------+ // | | | | // | | | .pseudo.exit | // | | | | // | | +-----------v-----------+ // | | | // ..... 
| | | // | | +--------v-------------+ // +--------------------+ | | | | // | | | | | ContinuationBlock | // | latch >------+ | | | // | | | +----------------------+ // +---------v----------+ | // | | // | | // | +---------------^-----+ // | | | // +-----> .exit.selector | // | | // +----------v----------+ // | // +--------------------+ | // | | | // | original exit <----+ // | | // +--------------------+ // RewrittenRangeInfo RRI; auto BBInsertLocation = std::next(Function::iterator(LS.Latch)); RRI.ExitSelector = BasicBlock::Create(Ctx, Twine(LS.Tag) + ".exit.selector", &F, BBInsertLocation); RRI.PseudoExit = BasicBlock::Create(Ctx, Twine(LS.Tag) + ".pseudo.exit", &F, BBInsertLocation); BranchInst *PreheaderJump = cast<BranchInst>(&*Preheader->rbegin()); bool Increasing = LS.IndVarIncreasing; IRBuilder<> B(PreheaderJump); // EnterLoopCond - is it okay to start executing this `LS'? Value *EnterLoopCond = Increasing ? B.CreateICmpSLT(LS.IndVarStart, ExitSubloopAt) : B.CreateICmpSGT(LS.IndVarStart, ExitSubloopAt); B.CreateCondBr(EnterLoopCond, LS.Header, RRI.PseudoExit); PreheaderJump->eraseFromParent(); LS.LatchBr->setSuccessor(LS.LatchBrExitIdx, RRI.ExitSelector); B.SetInsertPoint(LS.LatchBr); Value *TakeBackedgeLoopCond = Increasing ? B.CreateICmpSLT(LS.IndVarNext, ExitSubloopAt) : B.CreateICmpSGT(LS.IndVarNext, ExitSubloopAt); Value *CondForBranch = LS.LatchBrExitIdx == 1 ? TakeBackedgeLoopCond : B.CreateNot(TakeBackedgeLoopCond); LS.LatchBr->setCondition(CondForBranch); B.SetInsertPoint(RRI.ExitSelector); // IterationsLeft - are there any more iterations left, given the original // upper bound on the induction variable? If not, we branch to the "real" // exit. Value *IterationsLeft = Increasing ? B.CreateICmpSLT(LS.IndVarNext, LS.LoopExitAt) : B.CreateICmpSGT(LS.IndVarNext, LS.LoopExitAt); B.CreateCondBr(IterationsLeft, RRI.PseudoExit, LS.LatchExit); BranchInst *BranchToContinuation = BranchInst::Create(ContinuationBlock, RRI.PseudoExit); // We emit PHI nodes into `RRI.PseudoExit' that compute the "latest" value of // each of the PHI nodes in the loop header. This feeds into the initial // value of the same PHI nodes if/when we continue execution. for (Instruction &I : *LS.Header) { if (!isa<PHINode>(&I)) break; PHINode *PN = cast<PHINode>(&I); PHINode *NewPHI = PHINode::Create(PN->getType(), 2, PN->getName() + ".copy", BranchToContinuation); NewPHI->addIncoming(PN->getIncomingValueForBlock(Preheader), Preheader); NewPHI->addIncoming(PN->getIncomingValueForBlock(LS.Latch), RRI.ExitSelector); RRI.PHIValuesAtPseudoExit.push_back(NewPHI); } RRI.IndVarEnd = PHINode::Create(LS.IndVarNext->getType(), 2, "indvar.end", BranchToContinuation); RRI.IndVarEnd->addIncoming(LS.IndVarStart, Preheader); RRI.IndVarEnd->addIncoming(LS.IndVarNext, RRI.ExitSelector); // The latch exit now has a branch from `RRI.ExitSelector' instead of // `LS.Latch'. The PHI nodes need to be updated to reflect that. 
for (Instruction &I : *LS.LatchExit) { if (PHINode *PN = dyn_cast<PHINode>(&I)) replacePHIBlock(PN, LS.Latch, RRI.ExitSelector); else break; } return RRI; } void LoopConstrainer::rewriteIncomingValuesForPHIs( LoopStructure &LS, BasicBlock *ContinuationBlock, const LoopConstrainer::RewrittenRangeInfo &RRI) const { unsigned PHIIndex = 0; for (Instruction &I : *LS.Header) { if (!isa<PHINode>(&I)) break; PHINode *PN = cast<PHINode>(&I); for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i) if (PN->getIncomingBlock(i) == ContinuationBlock) PN->setIncomingValue(i, RRI.PHIValuesAtPseudoExit[PHIIndex++]); } LS.IndVarStart = RRI.IndVarEnd; } BasicBlock *LoopConstrainer::createPreheader(const LoopStructure &LS, BasicBlock *OldPreheader, const char *Tag) const { BasicBlock *Preheader = BasicBlock::Create(Ctx, Tag, &F, LS.Header); BranchInst::Create(LS.Header, Preheader); for (Instruction &I : *LS.Header) { if (!isa<PHINode>(&I)) break; PHINode *PN = cast<PHINode>(&I); for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i) replacePHIBlock(PN, OldPreheader, Preheader); } return Preheader; } void LoopConstrainer::addToParentLoopIfNeeded(ArrayRef<BasicBlock *> BBs) { Loop *ParentLoop = OriginalLoop.getParentLoop(); if (!ParentLoop) return; for (BasicBlock *BB : BBs) ParentLoop->addBasicBlockToLoop(BB, OriginalLoopInfo); } bool LoopConstrainer::run() { BasicBlock *Preheader = nullptr; LatchTakenCount = SE.getExitCount(&OriginalLoop, MainLoopStructure.Latch); Preheader = OriginalLoop.getLoopPreheader(); assert(!isa<SCEVCouldNotCompute>(LatchTakenCount) && Preheader != nullptr && "preconditions!"); OriginalPreheader = Preheader; MainLoopPreheader = Preheader; Optional<SubRanges> MaybeSR = calculateSubRanges(); if (!MaybeSR.hasValue()) { DEBUG(dbgs() << "irce: could not compute subranges\n"); return false; } SubRanges SR = MaybeSR.getValue(); bool Increasing = MainLoopStructure.IndVarIncreasing; IntegerType *IVTy = cast<IntegerType>(MainLoopStructure.IndVarNext->getType()); SCEVExpander Expander(SE, F.getParent()->getDataLayout(), "irce"); Instruction *InsertPt = OriginalPreheader->getTerminator(); // It would have been better to make `PreLoop' and `PostLoop' // `Optional<ClonedLoop>'s, but `ValueToValueMapTy' does not have a copy // constructor. ClonedLoop PreLoop, PostLoop; bool NeedsPreLoop = Increasing ? SR.LowLimit.hasValue() : SR.HighLimit.hasValue(); bool NeedsPostLoop = Increasing ? SR.HighLimit.hasValue() : SR.LowLimit.hasValue(); Value *ExitPreLoopAt = nullptr; Value *ExitMainLoopAt = nullptr; const SCEVConstant *MinusOneS = cast<SCEVConstant>(SE.getConstant(IVTy, -1, true /* isSigned */)); if (NeedsPreLoop) { const SCEV *ExitPreLoopAtSCEV = nullptr; if (Increasing) ExitPreLoopAtSCEV = *SR.LowLimit; else { if (CanBeSMin(SE, *SR.HighLimit)) { DEBUG(dbgs() << "irce: could not prove no-overflow when computing " << "preloop exit limit. HighLimit = " << *(*SR.HighLimit) << "\n"); return false; } ExitPreLoopAtSCEV = SE.getAddExpr(*SR.HighLimit, MinusOneS); } ExitPreLoopAt = Expander.expandCodeFor(ExitPreLoopAtSCEV, IVTy, InsertPt); ExitPreLoopAt->setName("exit.preloop.at"); } if (NeedsPostLoop) { const SCEV *ExitMainLoopAtSCEV = nullptr; if (Increasing) ExitMainLoopAtSCEV = *SR.HighLimit; else { if (CanBeSMin(SE, *SR.LowLimit)) { DEBUG(dbgs() << "irce: could not prove no-overflow when computing " << "mainloop exit limit. 
LowLimit = " << *(*SR.LowLimit) << "\n"); return false; } ExitMainLoopAtSCEV = SE.getAddExpr(*SR.LowLimit, MinusOneS); } ExitMainLoopAt = Expander.expandCodeFor(ExitMainLoopAtSCEV, IVTy, InsertPt); ExitMainLoopAt->setName("exit.mainloop.at"); } // We clone these ahead of time so that we don't have to deal with changing // and temporarily invalid IR as we transform the loops. if (NeedsPreLoop) cloneLoop(PreLoop, "preloop"); if (NeedsPostLoop) cloneLoop(PostLoop, "postloop"); RewrittenRangeInfo PreLoopRRI; if (NeedsPreLoop) { Preheader->getTerminator()->replaceUsesOfWith(MainLoopStructure.Header, PreLoop.Structure.Header); MainLoopPreheader = createPreheader(MainLoopStructure, Preheader, "mainloop"); PreLoopRRI = changeIterationSpaceEnd(PreLoop.Structure, Preheader, ExitPreLoopAt, MainLoopPreheader); rewriteIncomingValuesForPHIs(MainLoopStructure, MainLoopPreheader, PreLoopRRI); } BasicBlock *PostLoopPreheader = nullptr; RewrittenRangeInfo PostLoopRRI; if (NeedsPostLoop) { PostLoopPreheader = createPreheader(PostLoop.Structure, Preheader, "postloop"); PostLoopRRI = changeIterationSpaceEnd(MainLoopStructure, MainLoopPreheader, ExitMainLoopAt, PostLoopPreheader); rewriteIncomingValuesForPHIs(PostLoop.Structure, PostLoopPreheader, PostLoopRRI); } BasicBlock *NewMainLoopPreheader = MainLoopPreheader != Preheader ? MainLoopPreheader : nullptr; BasicBlock *NewBlocks[] = {PostLoopPreheader, PreLoopRRI.PseudoExit, PreLoopRRI.ExitSelector, PostLoopRRI.PseudoExit, PostLoopRRI.ExitSelector, NewMainLoopPreheader}; // Some of the above may be nullptr, filter them out before passing to // addToParentLoopIfNeeded. auto NewBlocksEnd = std::remove(std::begin(NewBlocks), std::end(NewBlocks), nullptr); addToParentLoopIfNeeded(makeArrayRef(std::begin(NewBlocks), NewBlocksEnd)); addToParentLoopIfNeeded(PreLoop.Blocks); addToParentLoopIfNeeded(PostLoop.Blocks); return true; } /// Computes and returns a range of values for the induction variable (IndVar) /// in which the range check can be safely elided. If it cannot compute such a /// range, returns None. Optional<InductiveRangeCheck::Range> InductiveRangeCheck::computeSafeIterationSpace(ScalarEvolution &SE, const SCEVAddRecExpr *IndVar, IRBuilder<> &) const { // IndVar is of the form "A + B * I" (where "I" is the canonical induction // variable, that may or may not exist as a real llvm::Value in the loop) and // this inductive range check is a range check on the "C + D * I" ("C" is // getOffset() and "D" is getScale()). We rewrite the value being range // checked to "M + N * IndVar" where "N" = "D * B^(-1)" and "M" = "C - NA". // Currently we support this only for "B" = "D" = { 1 or -1 }, but the code // can be generalized as needed. // // The actual inequalities we solve are of the form // // 0 <= M + 1 * IndVar < L given L >= 0 (i.e. N == 1) // // The inequality is satisfied by -M <= IndVar < (L - M) [^1]. All additions // and subtractions are twos-complement wrapping and comparisons are signed. // // Proof: // // If there exists IndVar such that -M <= IndVar < (L - M) then it follows // that -M <= (-M + L) [== Eq. 1]. Since L >= 0, if (-M + L) sign-overflows // then (-M + L) < (-M). Hence by [Eq. 1], (-M + L) could not have // overflown. // // This means IndVar = t + (-M) for t in [0, L). Hence (IndVar + M) = t. // Hence 0 <= (IndVar + M) < L // [^1]: Note that the solution does _not_ apply if L < 0; consider values M = // 127, IndVar = 126 and L = -2 in an i8 world. 
if (!IndVar->isAffine()) return None; const SCEV *A = IndVar->getStart(); const SCEVConstant *B = dyn_cast<SCEVConstant>(IndVar->getStepRecurrence(SE)); if (!B) return None; const SCEV *C = getOffset(); const SCEVConstant *D = dyn_cast<SCEVConstant>(getScale()); if (D != B) return None; ConstantInt *ConstD = D->getValue(); if (!(ConstD->isMinusOne() || ConstD->isOne())) return None; const SCEV *M = SE.getMinusSCEV(C, A); const SCEV *Begin = SE.getNegativeSCEV(M); const SCEV *UpperLimit = nullptr; // We strengthen "0 <= I" to "0 <= I < INT_SMAX" and "I < L" to "0 <= I < L". // We can potentially do much better here. if (Value *V = getLength()) { UpperLimit = SE.getSCEV(V); } else { assert(Kind == InductiveRangeCheck::RANGE_CHECK_LOWER && "invariant!"); unsigned BitWidth = cast<IntegerType>(IndVar->getType())->getBitWidth(); UpperLimit = SE.getConstant(APInt::getSignedMaxValue(BitWidth)); } const SCEV *End = SE.getMinusSCEV(UpperLimit, M); return InductiveRangeCheck::Range(Begin, End); } static Optional<InductiveRangeCheck::Range> IntersectRange(ScalarEvolution &SE, const Optional<InductiveRangeCheck::Range> &R1, const InductiveRangeCheck::Range &R2, IRBuilder<> &B) { if (!R1.hasValue()) return R2; auto &R1Value = R1.getValue(); // TODO: we could widen the smaller range and have this work; but for now we // bail out to keep things simple. if (R1Value.getType() != R2.getType()) return None; const SCEV *NewBegin = SE.getSMaxExpr(R1Value.getBegin(), R2.getBegin()); const SCEV *NewEnd = SE.getSMinExpr(R1Value.getEnd(), R2.getEnd()); return InductiveRangeCheck::Range(NewBegin, NewEnd); } bool InductiveRangeCheckElimination::runOnLoop(Loop *L, LPPassManager &LPM) { if (L->getBlocks().size() >= LoopSizeCutoff) { DEBUG(dbgs() << "irce: giving up constraining loop, too large\n";); return false; } BasicBlock *Preheader = L->getLoopPreheader(); if (!Preheader) { DEBUG(dbgs() << "irce: loop has no preheader, leaving\n"); return false; } LLVMContext &Context = Preheader->getContext(); InductiveRangeCheck::AllocatorTy IRCAlloc; SmallVector<InductiveRangeCheck *, 16> RangeChecks; ScalarEvolution &SE = getAnalysis<ScalarEvolution>(); BranchProbabilityInfo &BPI = getAnalysis<BranchProbabilityInfo>(); for (auto BBI : L->getBlocks()) if (BranchInst *TBI = dyn_cast<BranchInst>(BBI->getTerminator())) if (InductiveRangeCheck *IRC = InductiveRangeCheck::create(IRCAlloc, TBI, L, SE, BPI)) RangeChecks.push_back(IRC); if (RangeChecks.empty()) return false; auto PrintRecognizedRangeChecks = [&](raw_ostream &OS) { OS << "irce: looking at loop "; L->print(OS); OS << "irce: loop has " << RangeChecks.size() << " inductive range checks: \n"; for (InductiveRangeCheck *IRC : RangeChecks) IRC->print(OS); }; DEBUG(PrintRecognizedRangeChecks(dbgs())); if (PrintRangeChecks) PrintRecognizedRangeChecks(errs()); const char *FailureReason = nullptr; Optional<LoopStructure> MaybeLoopStructure = LoopStructure::parseLoopStructure(SE, BPI, *L, FailureReason); if (!MaybeLoopStructure.hasValue()) { DEBUG(dbgs() << "irce: could not parse loop structure: " << FailureReason << "\n";); return false; } LoopStructure LS = MaybeLoopStructure.getValue(); bool Increasing = LS.IndVarIncreasing; const SCEV *MinusOne = SE.getConstant(LS.IndVarNext->getType(), Increasing ? 
-1 : 1, true); const SCEVAddRecExpr *IndVar = cast<SCEVAddRecExpr>(SE.getAddExpr(SE.getSCEV(LS.IndVarNext), MinusOne)); Optional<InductiveRangeCheck::Range> SafeIterRange; Instruction *ExprInsertPt = Preheader->getTerminator(); SmallVector<InductiveRangeCheck *, 4> RangeChecksToEliminate; IRBuilder<> B(ExprInsertPt); for (InductiveRangeCheck *IRC : RangeChecks) { auto Result = IRC->computeSafeIterationSpace(SE, IndVar, B); if (Result.hasValue()) { auto MaybeSafeIterRange = IntersectRange(SE, SafeIterRange, Result.getValue(), B); if (MaybeSafeIterRange.hasValue()) { RangeChecksToEliminate.push_back(IRC); SafeIterRange = MaybeSafeIterRange.getValue(); } } } if (!SafeIterRange.hasValue()) return false; LoopConstrainer LC(*L, getAnalysis<LoopInfoWrapperPass>().getLoopInfo(), LS, SE, SafeIterRange.getValue()); bool Changed = LC.run(); if (Changed) { auto PrintConstrainedLoopInfo = [L]() { dbgs() << "irce: in function "; dbgs() << L->getHeader()->getParent()->getName() << ": "; dbgs() << "constrained "; L->print(dbgs()); }; DEBUG(PrintConstrainedLoopInfo()); if (PrintChangedLoops) PrintConstrainedLoopInfo(); // Optimize away the now-redundant range checks. for (InductiveRangeCheck *IRC : RangeChecksToEliminate) { ConstantInt *FoldedRangeCheck = IRC->getPassingDirection() ? ConstantInt::getTrue(Context) : ConstantInt::getFalse(Context); IRC->getBranch()->setCondition(FoldedRangeCheck); } } return Changed; } Pass *llvm::createInductiveRangeCheckEliminationPass() { return new InductiveRangeCheckElimination; }
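IntersectRange above is what lets several range checks share a single constrained main loop: the begins of the safe ranges are folded with smax and the ends with smin (it bails out only when the two ranges have different integer types), and LoopConstrainer then arranges for the main loop to run only inside the combined interval, with the pre/post loops covering the leftover iterations. A plain-integer sketch of that bookkeeping, using illustrative names rather than the pass's SCEV machinery:

#include <algorithm>

struct IterRange { int Begin, End; };   // half-open [Begin, End), like Range above

IterRange intersect(IterRange A, IterRange B) {
  return { std::max(A.Begin, B.Begin),  // mirrors SE.getSMaxExpr on the begins
           std::min(A.End, B.End) };    // mirrors SE.getSMinExpr on the ends
}

// e.g. checks "0 <= i < lenA" and "0 <= i < lenB" contribute [0, lenA) and
// [0, lenB); the main loop is then constrained to [0, min(lenA, lenB)) and
// both checks fold to 'true' inside it.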
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/DxilFixConstArrayInitializer.cpp
//===- DxilFixConstArrayInitializer.cpp - Special Construct Initializer //------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "dxc/DXIL/DxilModule.h" #include "dxc/HLSL/HLModule.h" #include "llvm/IR/CFG.h" #include "llvm/IR/Constants.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/IR/Operator.h" #include "llvm/Pass.h" #include "llvm/Transforms/Scalar.h" #include <limits> #include <unordered_map> using namespace llvm; namespace { class DxilFixConstArrayInitializer : public ModulePass { public: static char ID; DxilFixConstArrayInitializer() : ModulePass(ID) { initializeDxilFixConstArrayInitializerPass( *PassRegistry::getPassRegistry()); } bool runOnModule(Module &M) override; StringRef getPassName() const override { return "Dxil Fix Const Array Initializer"; } }; char DxilFixConstArrayInitializer::ID; } // namespace static bool TryFixGlobalVariable( GlobalVariable &GV, BasicBlock *EntryBlock, const std::unordered_map<Instruction *, unsigned> &InstOrder) { // Only proceed if the variable has an undef initializer if (!GV.hasInitializer() || !isa<UndefValue>(GV.getInitializer())) return false; // Only handle cases when it's an array of scalars. Type *Ty = GV.getType()->getPointerElementType(); if (!Ty->isArrayTy()) return false; // Don't handle arrays that are too big if (Ty->getArrayNumElements() > 1024) return false; Type *ElementTy = Ty->getArrayElementType(); // Only handle arrays of scalar types if (ElementTy->isAggregateType() || ElementTy->isVectorTy()) return false; // The instruction index at which point we no longer consider it // safe to fold Stores. It's the earliest store with non-constant index, // earliest store with non-constant value, or a load unsigned FirstUnsafeIndex = std::numeric_limits<unsigned>::max(); SmallVector<StoreInst *, 8> PossibleFoldableStores; // First do a pass to find the boundary for where we could fold stores. Get a // list of stores that may be folded. for (User *U : GV.users()) { if (GEPOperator *GEP = dyn_cast<GEPOperator>(U)) { bool AllConstIndices = GEP->hasAllConstantIndices(); unsigned NumIndices = GEP->getNumIndices(); if (NumIndices != 2) return false; for (User *GEPUser : GEP->users()) { if (StoreInst *Store = dyn_cast<StoreInst>(GEPUser)) { if (Store->getParent() != EntryBlock) continue; unsigned StoreIndex = InstOrder.at(Store); if (!AllConstIndices || !isa<Constant>(Store->getValueOperand())) { FirstUnsafeIndex = std::min(StoreIndex, FirstUnsafeIndex); continue; } PossibleFoldableStores.push_back(Store); } else if (LoadInst *Load = dyn_cast<LoadInst>(GEPUser)) { if (Load->getParent() != EntryBlock) continue; FirstUnsafeIndex = std::min(FirstUnsafeIndex, InstOrder.at(Load)); } // If we have something weird like chained GEPS, or bitcasts, give up. 
else { return false; } } } } SmallVector<Constant *, 16> InitValue; SmallVector<unsigned, 16> LatestStores; SmallVector<StoreInst *, 8> StoresToRemove; InitValue.resize(Ty->getArrayNumElements()); LatestStores.resize(Ty->getArrayNumElements()); for (StoreInst *Store : PossibleFoldableStores) { unsigned StoreIndex = InstOrder.at(Store); // Skip stores that are out of bounds if (StoreIndex >= FirstUnsafeIndex) continue; GEPOperator *GEP = cast<GEPOperator>(Store->getPointerOperand()); uint64_t Index = cast<ConstantInt>(GEP->getOperand(2))->getLimitedValue(); if (Index >= LatestStores.size()) { // Skip out of bounds index. continue; } if (LatestStores[Index] <= StoreIndex) { InitValue[Index] = cast<Constant>(Store->getValueOperand()); LatestStores[Index] = StoreIndex; } StoresToRemove.push_back(Store); } // Give up if we have missing indices for (Constant *C : InitValue) if (!C) return false; GV.setInitializer(ConstantArray::get(cast<ArrayType>(Ty), InitValue)); for (StoreInst *Store : StoresToRemove) Store->eraseFromParent(); return true; } bool DxilFixConstArrayInitializer::runOnModule(Module &M) { BasicBlock *EntryBlock = nullptr; if (M.HasDxilModule()) { hlsl::DxilModule &DM = M.GetDxilModule(); if (DM.GetEntryFunction()) { EntryBlock = &DM.GetEntryFunction()->getEntryBlock(); } } else if (M.HasHLModule()) { hlsl::HLModule &HM = M.GetHLModule(); if (HM.GetEntryFunction()) EntryBlock = &HM.GetEntryFunction()->getEntryBlock(); } if (!EntryBlock) return false; // If some block might branch to the entry for some reason (like if it's a // loop header), give up now. Have to make sure this block is not preceeded by // anything. if (pred_begin(EntryBlock) != pred_end(EntryBlock)) return false; // Find the instruction order for everything in the entry block. std::unordered_map<Instruction *, unsigned> InstOrder; for (Instruction &I : *EntryBlock) { InstOrder[&I] = InstOrder.size(); } bool Changed = false; for (GlobalVariable &GV : M.globals()) { Changed = TryFixGlobalVariable(GV, EntryBlock, InstOrder); } return Changed; } Pass *llvm::createDxilFixConstArrayInitializerPass() { return new DxilFixConstArrayInitializer(); } INITIALIZE_PASS(DxilFixConstArrayInitializer, "dxil-fix-array-init", "Dxil Fix Array Initializer", false, false)
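TryFixGlobalVariable targets one narrow shape: a global array of scalars that reaches the IR with an undef initializer, at most 1024 elements, whose elements are all written with constant values at constant indices in the entry block before any load or non-constant access; the last such store per element wins, the collected constants become the initializer, and the folded stores are erased. A made-up source-level illustration of code whose lowering matches that shape (LUT and consume are hypothetical, and in this codebase the original source would be HLSL rather than C++):

void consume(int);

static int LUT[4];        // picture this as the global that reaches DXIL with
                          // an undef [4 x i32] initializer

void entry_point() {
  LUT[0] = 10;            // constant index, constant value
  LUT[1] = 20;
  LUT[2] = 30;
  LUT[3] = 40;
  consume(LUT[2]);        // first load: stores after this point would not be
                          // folded. After the pass, LUT carries the constant
                          // initializer {10, 20, 30, 40} and the four stores
                          // above are gone.
}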
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/LoopUnrollPass.cpp
//===-- LoopUnroll.cpp - Loop unroller pass -------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass implements a simple loop unroller. It works best when loops have // been canonicalized by the -indvars pass, allowing it to determine the trip // counts of loops easily. //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/SetVector.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/CodeMetrics.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/LoopPass.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/ScalarEvolutionExpressions.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DiagnosticInfo.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/InstVisitor.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Metadata.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/UnrollLoop.h" #include "DxilRemoveUnstructuredLoopExits.h" // HLSL Change #include <climits> using namespace llvm; #define DEBUG_TYPE "loop-unroll" #if 0 // HLSL Change Starts - option pending static cl::opt<unsigned> UnrollThreshold("unroll-threshold", cl::init(150), cl::Hidden, cl::desc("The baseline cost threshold for loop unrolling")); static cl::opt<unsigned> UnrollPercentDynamicCostSavedThreshold( "unroll-percent-dynamic-cost-saved-threshold", cl::init(20), cl::Hidden, cl::desc("The percentage of estimated dynamic cost which must be saved by " "unrolling to allow unrolling up to the max threshold.")); static cl::opt<unsigned> UnrollDynamicCostSavingsDiscount( "unroll-dynamic-cost-savings-discount", cl::init(2000), cl::Hidden, cl::desc("This is the amount discounted from the total unroll cost when " "the unrolled form has a high dynamic cost savings (triggered by " "the '-unroll-perecent-dynamic-cost-saved-threshold' flag).")); static cl::opt<unsigned> UnrollMaxIterationsCountToAnalyze( "unroll-max-iteration-count-to-analyze", cl::init(0), cl::Hidden, cl::desc("Don't allow loop unrolling to simulate more than this number of" "iterations when checking full unroll profitability")); static cl::opt<unsigned> UnrollCount("unroll-count", cl::init(0), cl::Hidden, cl::desc("Use this unroll count for all loops including those with " "unroll_count pragma values, for testing purposes")); static cl::opt<bool> UnrollAllowPartial("unroll-allow-partial", cl::init(false), cl::Hidden, cl::desc("Allows loops to be partially unrolled until " "-unroll-threshold loop size is reached.")); static cl::opt<bool> UnrollRuntime("unroll-runtime", cl::ZeroOrMore, cl::init(false), cl::Hidden, cl::desc("Unroll loops with run-time trip counts")); static cl::opt<unsigned> PragmaUnrollThreshold("pragma-unroll-threshold", cl::init(16 * 1024), cl::Hidden, cl::desc("Unrolled size limit for loops with an unroll(full) or " "unroll_count pragma.")); #else template <typename T> struct NullOpt { NullOpt(T val) : _val(val) {} T _val; unsigned getNumOccurrences() const { return 0; } operator T() const { return _val; } }; static const NullOpt<unsigned> UnrollThreshold = 150; static const NullOpt<unsigned> UnrollPercentDynamicCostSavedThreshold = 20; 
static const NullOpt<unsigned> UnrollDynamicCostSavingsDiscount = 2000; static const NullOpt<unsigned> UnrollMaxIterationsCountToAnalyze = 0; static const NullOpt<unsigned> UnrollCount = 0; static const NullOpt<bool> UnrollAllowPartial = false; static const NullOpt<bool> UnrollRuntime = false; static const NullOpt<unsigned> PragmaUnrollThreshold = 16 * 1024; #endif // HLSL Change Ends namespace { class LoopUnroll : public LoopPass { public: static char ID; // Pass ID, replacement for typeid LoopUnroll(int T = -1, int C = -1, int P = -1, int R = -1, /*HLSL change*/bool StructurizeLoopExits=false) : LoopPass(ID) { CurrentThreshold = (T == -1) ? unsigned(UnrollThreshold) : unsigned(T); CurrentPercentDynamicCostSavedThreshold = UnrollPercentDynamicCostSavedThreshold; CurrentDynamicCostSavingsDiscount = UnrollDynamicCostSavingsDiscount; CurrentCount = (C == -1) ? unsigned(UnrollCount) : unsigned(C); CurrentAllowPartial = (P == -1) ? (bool)UnrollAllowPartial : (bool)P; CurrentRuntime = (R == -1) ? (bool)UnrollRuntime : (bool)R; UserThreshold = (T != -1) || (UnrollThreshold.getNumOccurrences() > 0); UserPercentDynamicCostSavedThreshold = (UnrollPercentDynamicCostSavedThreshold.getNumOccurrences() > 0); UserDynamicCostSavingsDiscount = (UnrollDynamicCostSavingsDiscount.getNumOccurrences() > 0); UserAllowPartial = (P != -1) || (UnrollAllowPartial.getNumOccurrences() > 0); UserRuntime = (R != -1) || (UnrollRuntime.getNumOccurrences() > 0); UserCount = (C != -1) || (UnrollCount.getNumOccurrences() > 0); initializeLoopUnrollPass(*PassRegistry::getPassRegistry()); this->StructurizeLoopExits = StructurizeLoopExits; // HLSL Change } /// A magic value for use with the Threshold parameter to indicate /// that the loop unroll should be performed regardless of how much /// code expansion would result. static const unsigned NoThreshold = UINT_MAX; // Threshold to use when optsize is specified (and there is no // explicit -unroll-threshold). static const unsigned OptSizeUnrollThreshold = 50; // Default unroll count for loops with run-time trip count if // -unroll-count is not set static const unsigned UnrollRuntimeCount = 8; unsigned CurrentCount; unsigned CurrentThreshold; unsigned CurrentPercentDynamicCostSavedThreshold; unsigned CurrentDynamicCostSavingsDiscount; bool CurrentAllowPartial; bool CurrentRuntime; bool StructurizeLoopExits; // HLSL Change // Flags for whether the 'current' settings are user-specified. bool UserCount; bool UserThreshold; bool UserPercentDynamicCostSavedThreshold; bool UserDynamicCostSavingsDiscount; bool UserAllowPartial; bool UserRuntime; // HLSL Change - begin // Function overrides that resolve options when used for DxOpt void applyOptions(PassOptions O) override { GetPassOptionBool(O, "StructurizeLoopExits", &StructurizeLoopExits, false); } void dumpConfig(raw_ostream &OS) override { LoopPass::dumpConfig(OS); OS << ",StructurizeLoopExits=" << StructurizeLoopExits; } // HLSL Change - end bool runOnLoop(Loop *L, LPPassManager &LPM) override; /// This transformation requires natural loop information & requires that /// loop preheaders be inserted into the CFG... 
/// void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<AssumptionCacheTracker>(); AU.addRequired<LoopInfoWrapperPass>(); AU.addPreserved<LoopInfoWrapperPass>(); AU.addRequiredID(LoopSimplifyID); AU.addPreservedID(LoopSimplifyID); AU.addRequiredID(LCSSAID); AU.addPreservedID(LCSSAID); AU.addRequired<ScalarEvolution>(); AU.addRequired<DominatorTreeWrapperPass>(); // HLSL Change AU.addPreserved<ScalarEvolution>(); AU.addRequired<TargetTransformInfoWrapperPass>(); // FIXME: Loop unroll requires LCSSA. And LCSSA requires dom info. // If loop unroll does not preserve dom info then LCSSA pass on next // loop will receive invalid dom info. // For now, recreate dom info, if loop is unrolled. AU.addPreserved<DominatorTreeWrapperPass>(); } // Fill in the UnrollingPreferences parameter with values from the // TargetTransformationInfo. void getUnrollingPreferences(Loop *L, const TargetTransformInfo &TTI, TargetTransformInfo::UnrollingPreferences &UP) { UP.Threshold = CurrentThreshold; UP.PercentDynamicCostSavedThreshold = CurrentPercentDynamicCostSavedThreshold; UP.DynamicCostSavingsDiscount = CurrentDynamicCostSavingsDiscount; UP.OptSizeThreshold = OptSizeUnrollThreshold; UP.PartialThreshold = CurrentThreshold; UP.PartialOptSizeThreshold = OptSizeUnrollThreshold; UP.Count = CurrentCount; UP.MaxCount = UINT_MAX; UP.Partial = CurrentAllowPartial; UP.Runtime = CurrentRuntime; UP.AllowExpensiveTripCount = false; TTI.getUnrollingPreferences(L, UP); } // Select and return an unroll count based on parameters from // user, unroll preferences, unroll pragmas, or a heuristic. // SetExplicitly is set to true if the unroll count is is set by // the user or a pragma rather than selected heuristically. unsigned selectUnrollCount(const Loop *L, unsigned TripCount, bool PragmaFullUnroll, unsigned PragmaCount, const TargetTransformInfo::UnrollingPreferences &UP, bool &SetExplicitly); // Select threshold values used to limit unrolling based on a // total unrolled size. Parameters Threshold and PartialThreshold // are set to the maximum unrolled size for fully and partially // unrolled loops respectively. void selectThresholds(const Loop *L, bool HasPragma, const TargetTransformInfo::UnrollingPreferences &UP, unsigned &Threshold, unsigned &PartialThreshold, unsigned &PercentDynamicCostSavedThreshold, unsigned &DynamicCostSavingsDiscount) { // Determine the current unrolling threshold. While this is // normally set from UnrollThreshold, it is overridden to a // smaller value if the current function is marked as // optimize-for-size, and the unroll threshold was not user // specified. Threshold = UserThreshold ? CurrentThreshold : UP.Threshold; PartialThreshold = UserThreshold ? CurrentThreshold : UP.PartialThreshold; PercentDynamicCostSavedThreshold = UserPercentDynamicCostSavedThreshold ? CurrentPercentDynamicCostSavedThreshold : UP.PercentDynamicCostSavedThreshold; DynamicCostSavingsDiscount = UserDynamicCostSavingsDiscount ? CurrentDynamicCostSavingsDiscount : UP.DynamicCostSavingsDiscount; if (!UserThreshold && L->getHeader()->getParent()->hasFnAttribute( Attribute::OptimizeForSize)) { Threshold = UP.OptSizeThreshold; PartialThreshold = UP.PartialOptSizeThreshold; } if (HasPragma) { // If the loop has an unrolling pragma, we want to be more // aggressive with unrolling limits. Set thresholds to at // least the PragmaTheshold value which is larger than the // default limits. 
if (Threshold != NoThreshold) Threshold = std::max<unsigned>(Threshold, PragmaUnrollThreshold); if (PartialThreshold != NoThreshold) PartialThreshold = std::max<unsigned>(PartialThreshold, PragmaUnrollThreshold); } } bool canUnrollCompletely(Loop *L, unsigned Threshold, unsigned PercentDynamicCostSavedThreshold, unsigned DynamicCostSavingsDiscount, uint64_t UnrolledCost, uint64_t RolledDynamicCost); }; } char LoopUnroll::ID = 0; INITIALIZE_PASS_BEGIN(LoopUnroll, "loop-unroll", "Unroll loops", false, false) INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopSimplify) INITIALIZE_PASS_DEPENDENCY(LCSSA) INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) INITIALIZE_PASS_END(LoopUnroll, "loop-unroll", "Unroll loops", false, false) Pass *llvm::createLoopUnrollPass(int Threshold, int Count, int AllowPartial, int Runtime, /* HLSL Change */ bool StructurizeLoopExits) { return new LoopUnroll(Threshold, Count, AllowPartial, Runtime, /* HLSL Change */ StructurizeLoopExits); } Pass *llvm::createSimpleLoopUnrollPass() { return llvm::createLoopUnrollPass(-1, -1, 0, 0); } namespace { // This class is used to get an estimate of the optimization effects that we // could get from complete loop unrolling. It comes from the fact that some // loads might be replaced with concrete constant values and that could trigger // a chain of instruction simplifications. // // E.g. we might have: // int a[] = {0, 1, 0}; // v = 0; // for (i = 0; i < 3; i ++) // v += b[i]*a[i]; // If we completely unroll the loop, we would get: // v = b[0]*a[0] + b[1]*a[1] + b[2]*a[2] // Which then will be simplified to: // v = b[0]* 0 + b[1]* 1 + b[2]* 0 // And finally: // v = b[1] class UnrolledInstAnalyzer : private InstVisitor<UnrolledInstAnalyzer, bool> { typedef InstVisitor<UnrolledInstAnalyzer, bool> Base; friend class InstVisitor<UnrolledInstAnalyzer, bool>; struct SimplifiedAddress { Value *Base = nullptr; ConstantInt *Offset = nullptr; }; public: UnrolledInstAnalyzer(unsigned Iteration, DenseMap<Value *, Constant *> &SimplifiedValues, const Loop *L, ScalarEvolution &SE) : Iteration(Iteration), SimplifiedValues(SimplifiedValues), L(L), SE(SE) { IterationNumber = SE.getConstant(APInt(64, Iteration)); } // Allow access to the initial visit method. using Base::visit; private: /// \brief A cache of pointer bases and constant-folded offsets corresponding /// to GEP (or derived from GEP) instructions. /// /// In order to find the base pointer one needs to perform non-trivial /// traversal of the corresponding SCEV expression, so it's good to have the /// results saved. DenseMap<Value *, SimplifiedAddress> SimplifiedAddresses; /// \brief Number of currently simulated iteration. /// /// If an expression is ConstAddress+Constant, then the Constant is /// Start + Iteration*Step, where Start and Step could be obtained from /// SCEVGEPCache. unsigned Iteration; /// \brief SCEV expression corresponding to number of currently simulated /// iteration. const SCEV *IterationNumber; /// \brief A Value->Constant map for keeping values that we managed to /// constant-fold on the given iteration. /// /// While we walk the loop instructions, we build up and maintain a mapping /// of simplified values specific to this iteration. The idea is to propagate /// any special information we have about loads that can be replaced with /// constants after complete unrolling, and account for likely simplifications /// post-unrolling. 
DenseMap<Value *, Constant *> &SimplifiedValues; const Loop *L; ScalarEvolution &SE; /// \brief Try to simplify instruction \param I using its SCEV expression. /// /// The idea is that some AddRec expressions become constants, which then /// could trigger folding of other instructions. However, that only happens /// for expressions whose start value is also constant, which isn't always the /// case. In another common and important case the start value is just some /// address (i.e. SCEVUnknown) - in this case we compute the offset and save /// it along with the base address instead. bool simplifyInstWithSCEV(Instruction *I) { if (!SE.isSCEVable(I->getType())) return false; const SCEV *S = SE.getSCEV(I); if (auto *SC = dyn_cast<SCEVConstant>(S)) { SimplifiedValues[I] = SC->getValue(); return true; } auto *AR = dyn_cast<SCEVAddRecExpr>(S); if (!AR) return false; const SCEV *ValueAtIteration = AR->evaluateAtIteration(IterationNumber, SE); // Check if the AddRec expression becomes a constant. if (auto *SC = dyn_cast<SCEVConstant>(ValueAtIteration)) { SimplifiedValues[I] = SC->getValue(); return true; } // Check if the offset from the base address becomes a constant. auto *Base = dyn_cast<SCEVUnknown>(SE.getPointerBase(S)); if (!Base) return false; auto *Offset = dyn_cast<SCEVConstant>(SE.getMinusSCEV(ValueAtIteration, Base)); if (!Offset) return false; SimplifiedAddress Address; Address.Base = Base->getValue(); Address.Offset = Offset->getValue(); SimplifiedAddresses[I] = Address; return true; } /// Base case for the instruction visitor. bool visitInstruction(Instruction &I) { return simplifyInstWithSCEV(&I); } /// TODO: Add visitors for other instruction types, e.g. ZExt, SExt. /// Try to simplify binary operator I. /// /// TODO: Probaly it's worth to hoist the code for estimating the /// simplifications effects to a separate class, since we have a very similar /// code in InlineCost already. bool visitBinaryOperator(BinaryOperator &I) { Value *LHS = I.getOperand(0), *RHS = I.getOperand(1); if (!isa<Constant>(LHS)) if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS)) LHS = SimpleLHS; if (!isa<Constant>(RHS)) if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS)) RHS = SimpleRHS; Value *SimpleV = nullptr; const DataLayout &DL = I.getModule()->getDataLayout(); if (auto FI = dyn_cast<FPMathOperator>(&I)) SimpleV = SimplifyFPBinOp(I.getOpcode(), LHS, RHS, FI->getFastMathFlags(), DL); else SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, DL); if (Constant *C = dyn_cast_or_null<Constant>(SimpleV)) SimplifiedValues[&I] = C; if (SimpleV) return true; return Base::visitBinaryOperator(I); } /// Try to fold load I. bool visitLoad(LoadInst &I) { Value *AddrOp = I.getPointerOperand(); auto AddressIt = SimplifiedAddresses.find(AddrOp); if (AddressIt == SimplifiedAddresses.end()) return false; ConstantInt *SimplifiedAddrOp = AddressIt->second.Offset; auto *GV = dyn_cast<GlobalVariable>(AddressIt->second.Base); // We're only interested in loads that can be completely folded to a // constant. 
if (!GV || !GV->hasInitializer()) return false; ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(GV->getInitializer()); if (!CDS) return false; int ElemSize = CDS->getElementType()->getPrimitiveSizeInBits() / 8U; assert(SimplifiedAddrOp->getValue().getActiveBits() < 64 && "Unexpectedly large index value."); int64_t Index = SimplifiedAddrOp->getSExtValue() / ElemSize; if (Index >= CDS->getNumElements()) { // FIXME: For now we conservatively ignore out of bound accesses, but // we're allowed to perform the optimization in this case. return false; } Constant *CV = CDS->getElementAsConstant(Index); assert(CV && "Constant expected."); SimplifiedValues[&I] = CV; return true; } }; } // namespace namespace { struct EstimatedUnrollCost { /// \brief The estimated cost after unrolling. unsigned UnrolledCost; /// \brief The estimated dynamic cost of executing the instructions in the /// rolled form. unsigned RolledDynamicCost; }; } /// \brief Figure out if the loop is worth full unrolling. /// /// Complete loop unrolling can make some loads constant, and we need to know /// if that would expose any further optimization opportunities. This routine /// estimates this optimization. It computes cost of unrolled loop /// (UnrolledCost) and dynamic cost of the original loop (RolledDynamicCost). By /// dynamic cost we mean that we won't count costs of blocks that are known not /// to be executed (i.e. if we have a branch in the loop and we know that at the /// given iteration its condition would be resolved to true, we won't add up the /// cost of the 'false'-block). /// \returns Optional value, holding the RolledDynamicCost and UnrolledCost. If /// the analysis failed (no benefits expected from the unrolling, or the loop is /// too big to analyze), the returned value is None. Optional<EstimatedUnrollCost> analyzeLoopUnrollCost(const Loop *L, unsigned TripCount, ScalarEvolution &SE, const TargetTransformInfo &TTI, unsigned MaxUnrolledLoopSize) { // We want to be able to scale offsets by the trip count and add more offsets // to them without checking for overflows, and we already don't want to // analyze *massive* trip counts, so we force the max to be reasonably small. assert(UnrollMaxIterationsCountToAnalyze < (INT_MAX / 2) && "The unroll iterations max is too large!"); // Don't simulate loops with a big or unknown tripcount if (!UnrollMaxIterationsCountToAnalyze || !TripCount || TripCount > UnrollMaxIterationsCountToAnalyze) return None; SmallSetVector<BasicBlock *, 16> BBWorklist; DenseMap<Value *, Constant *> SimplifiedValues; // The estimated cost of the unrolled form of the loop. We try to estimate // this by simplifying as much as we can while computing the estimate. unsigned UnrolledCost = 0; // We also track the estimated dynamic (that is, actually executed) cost in // the rolled form. This helps identify cases when the savings from unrolling // aren't just exposing dead control flows, but actual reduced dynamic // instructions due to the simplifications which we expect to occur after // unrolling. unsigned RolledDynamicCost = 0; // Simulate execution of each iteration of the loop counting instructions, // which would be simplified. // Since the same load will take different values on different iterations, // we literally have to go through all loop's iterations. 
for (unsigned Iteration = 0; Iteration < TripCount; ++Iteration) { SimplifiedValues.clear(); UnrolledInstAnalyzer Analyzer(Iteration, SimplifiedValues, L, SE); BBWorklist.clear(); BBWorklist.insert(L->getHeader()); // Note that we *must not* cache the size, this loop grows the worklist. for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) { BasicBlock *BB = BBWorklist[Idx]; // Visit all instructions in the given basic block and try to simplify // it. We don't change the actual IR, just count optimization // opportunities. for (Instruction &I : *BB) { unsigned InstCost = TTI.getUserCost(&I); // Visit the instruction to analyze its loop cost after unrolling, // and if the visitor returns false, include this instruction in the // unrolled cost. if (!Analyzer.visit(I)) UnrolledCost += InstCost; // Also track this instructions expected cost when executing the rolled // loop form. RolledDynamicCost += InstCost; // If unrolled body turns out to be too big, bail out. if (UnrolledCost > MaxUnrolledLoopSize) return None; } // Add BB's successors to the worklist. for (BasicBlock *Succ : successors(BB)) if (L->contains(Succ)) BBWorklist.insert(Succ); } // If we found no optimization opportunities on the first iteration, we // won't find them on later ones too. if (UnrolledCost == RolledDynamicCost) return None; } return {{UnrolledCost, RolledDynamicCost}}; } /// ApproximateLoopSize - Approximate the size of the loop. static unsigned ApproximateLoopSize(const Loop *L, unsigned &NumCalls, bool &NotDuplicatable, const TargetTransformInfo &TTI, AssumptionCache *AC) { SmallPtrSet<const Value *, 32> EphValues; CodeMetrics::collectEphemeralValues(L, AC, EphValues); CodeMetrics Metrics; for (Loop::block_iterator I = L->block_begin(), E = L->block_end(); I != E; ++I) Metrics.analyzeBasicBlock(*I, TTI, EphValues); NumCalls = Metrics.NumInlineCandidates; NotDuplicatable = Metrics.notDuplicatable; unsigned LoopSize = Metrics.NumInsts; // Don't allow an estimate of size zero. This would allows unrolling of loops // with huge iteration counts, which is a compile time problem even if it's // not a problem for code quality. Also, the code using this size may assume // that each loop has at least three instructions (likely a conditional // branch, a comparison feeding that branch, and some kind of loop increment // feeding that comparison instruction). LoopSize = std::max(LoopSize, 3u); return LoopSize; } // Returns the loop hint metadata node with the given name (for example, // "llvm.loop.unroll.count"). If no such metadata node exists, then nullptr is // returned. static MDNode *GetUnrollMetadataForLoop(const Loop *L, StringRef Name) { if (MDNode *LoopID = L->getLoopID()) return GetUnrollMetadata(LoopID, Name); return nullptr; } // Returns true if the loop has an unroll(full) pragma. static bool HasUnrollFullPragma(const Loop *L) { return GetUnrollMetadataForLoop(L, "llvm.loop.unroll.full"); } // Returns true if the loop has an unroll(disable) pragma. static bool HasUnrollDisablePragma(const Loop *L) { return GetUnrollMetadataForLoop(L, "llvm.loop.unroll.disable"); } // Returns true if the loop has an runtime unroll(disable) pragma. static bool HasRuntimeUnrollDisablePragma(const Loop *L) { return GetUnrollMetadataForLoop(L, "llvm.loop.unroll.runtime.disable"); } // If loop has an unroll_count pragma return the (necessarily // positive) value from the pragma. Otherwise return 0. 
static unsigned UnrollCountPragmaValue(const Loop *L) { MDNode *MD = GetUnrollMetadataForLoop(L, "llvm.loop.unroll.count"); if (MD) { assert(MD->getNumOperands() == 2 && "Unroll count hint metadata should have two operands."); unsigned Count = mdconst::extract<ConstantInt>(MD->getOperand(1))->getZExtValue(); assert(Count >= 1 && "Unroll count must be positive."); return Count; } return 0; } // Remove existing unroll metadata and add unroll disable metadata to // indicate the loop has already been unrolled. This prevents a loop // from being unrolled more than is directed by a pragma if the loop // unrolling pass is run more than once (which it generally is). static void SetLoopAlreadyUnrolled(Loop *L) { MDNode *LoopID = L->getLoopID(); if (!LoopID) return; // First remove any existing loop unrolling metadata. SmallVector<Metadata *, 4> MDs; // Reserve first location for self reference to the LoopID metadata node. MDs.push_back(nullptr); for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { bool IsUnrollMetadata = false; MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); if (MD) { const MDString *S = dyn_cast<MDString>(MD->getOperand(0)); IsUnrollMetadata = S && S->getString().startswith("llvm.loop.unroll."); } if (!IsUnrollMetadata) MDs.push_back(LoopID->getOperand(i)); } // Add unroll(disable) metadata to disable future unrolling. LLVMContext &Context = L->getHeader()->getContext(); SmallVector<Metadata *, 1> DisableOperands; DisableOperands.push_back(MDString::get(Context, "llvm.loop.unroll.disable")); MDNode *DisableNode = MDNode::get(Context, DisableOperands); MDs.push_back(DisableNode); MDNode *NewLoopID = MDNode::get(Context, MDs); // Set operand 0 to refer to the loop id itself. NewLoopID->replaceOperandWith(0, NewLoopID); L->setLoopID(NewLoopID); } bool LoopUnroll::canUnrollCompletely(Loop *L, unsigned Threshold, unsigned PercentDynamicCostSavedThreshold, unsigned DynamicCostSavingsDiscount, uint64_t UnrolledCost, uint64_t RolledDynamicCost) { if (Threshold == NoThreshold) { DEBUG(dbgs() << " Can fully unroll, because no threshold is set.\n"); return true; } if (UnrolledCost <= Threshold) { DEBUG(dbgs() << " Can fully unroll, because unrolled cost: " << UnrolledCost << "<" << Threshold << "\n"); return true; } assert(UnrolledCost && "UnrolledCost can't be 0 at this point."); assert(RolledDynamicCost >= UnrolledCost && "Cannot have a higher unrolled cost than a rolled cost!"); // Compute the percentage of the dynamic cost in the rolled form that is // saved when unrolled. If unrolling dramatically reduces the estimated // dynamic cost of the loop, we use a higher threshold to allow more // unrolling. 
unsigned PercentDynamicCostSaved = (uint64_t)(RolledDynamicCost - UnrolledCost) * 100ull / RolledDynamicCost; if (PercentDynamicCostSaved >= PercentDynamicCostSavedThreshold && (int64_t)UnrolledCost - (int64_t)DynamicCostSavingsDiscount <= (int64_t)Threshold) { DEBUG(dbgs() << " Can fully unroll, because unrolling will reduce the " "expected dynamic cost by " << PercentDynamicCostSaved << "% (threshold: " << PercentDynamicCostSavedThreshold << "%)\n" << " and the unrolled cost (" << UnrolledCost << ") is less than the max threshold (" << DynamicCostSavingsDiscount << ").\n"); return true; } DEBUG(dbgs() << " Too large to fully unroll:\n"); DEBUG(dbgs() << " Threshold: " << Threshold << "\n"); DEBUG(dbgs() << " Max threshold: " << DynamicCostSavingsDiscount << "\n"); DEBUG(dbgs() << " Percent cost saved threshold: " << PercentDynamicCostSavedThreshold << "%\n"); DEBUG(dbgs() << " Unrolled cost: " << UnrolledCost << "\n"); DEBUG(dbgs() << " Rolled dynamic cost: " << RolledDynamicCost << "\n"); DEBUG(dbgs() << " Percent cost saved: " << PercentDynamicCostSaved << "\n"); return false; } unsigned LoopUnroll::selectUnrollCount( const Loop *L, unsigned TripCount, bool PragmaFullUnroll, unsigned PragmaCount, const TargetTransformInfo::UnrollingPreferences &UP, bool &SetExplicitly) { SetExplicitly = true; // User-specified count (either as a command-line option or // constructor parameter) has highest precedence. unsigned Count = UserCount ? CurrentCount : 0; // If there is no user-specified count, unroll pragmas have the next // highest precendence. if (Count == 0) { if (PragmaCount) { Count = PragmaCount; } else if (PragmaFullUnroll) { Count = TripCount; } } if (Count == 0) Count = UP.Count; if (Count == 0) { SetExplicitly = false; if (TripCount == 0) // Runtime trip count. Count = UnrollRuntimeCount; else // Conservative heuristic: if we know the trip count, see if we can // completely unroll (subject to the threshold, checked below); otherwise // try to find greatest modulo of the trip count which is still under // threshold value. Count = TripCount; } if (TripCount && Count > TripCount) return TripCount; return Count; } bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) { if (skipOptnoneFunction(L)) return false; Function &F = *L->getHeader()->getParent(); LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); ScalarEvolution *SE = &getAnalysis<ScalarEvolution>(); const TargetTransformInfo &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); BasicBlock *Header = L->getHeader(); DEBUG(dbgs() << "Loop Unroll: F[" << Header->getParent()->getName() << "] Loop %" << Header->getName() << "\n"); if (HasUnrollDisablePragma(L)) { return false; } bool PragmaFullUnroll = HasUnrollFullPragma(L); unsigned PragmaCount = UnrollCountPragmaValue(L); bool HasPragma = PragmaFullUnroll || PragmaCount > 0; TargetTransformInfo::UnrollingPreferences UP; getUnrollingPreferences(L, TTI, UP); // Find trip count and trip multiple if count is not available unsigned TripCount = 0; unsigned TripMultiple = 1; // If there are multiple exiting blocks but one of them is the latch, use the // latch for the trip count estimation. Otherwise insist on a single exiting // block for the trip count estimation. 
BasicBlock *ExitingBlock = L->getLoopLatch(); if (!ExitingBlock || !L->isLoopExiting(ExitingBlock)) ExitingBlock = L->getExitingBlock(); if (ExitingBlock) { TripCount = SE->getSmallConstantTripCount(L, ExitingBlock); TripMultiple = SE->getSmallConstantTripMultiple(L, ExitingBlock); } // Select an initial unroll count. This may be reduced later based // on size thresholds. bool CountSetExplicitly; unsigned Count = selectUnrollCount(L, TripCount, PragmaFullUnroll, PragmaCount, UP, CountSetExplicitly); unsigned NumInlineCandidates; bool notDuplicatable; unsigned LoopSize = ApproximateLoopSize(L, NumInlineCandidates, notDuplicatable, TTI, &AC); DEBUG(dbgs() << " Loop Size = " << LoopSize << "\n"); // When computing the unrolled size, note that the conditional branch on the // backedge and the comparison feeding it are not replicated like the rest of // the loop body (which is why 2 is subtracted). uint64_t UnrolledSize = (uint64_t)(LoopSize-2) * Count + 2; if (notDuplicatable) { DEBUG(dbgs() << " Not unrolling loop which contains non-duplicatable" << " instructions.\n"); return false; } if (NumInlineCandidates != 0) { DEBUG(dbgs() << " Not unrolling loop with inlinable calls.\n"); return false; } unsigned Threshold, PartialThreshold; unsigned PercentDynamicCostSavedThreshold; unsigned DynamicCostSavingsDiscount; selectThresholds(L, HasPragma, UP, Threshold, PartialThreshold, PercentDynamicCostSavedThreshold, DynamicCostSavingsDiscount); // Given Count, TripCount and thresholds determine the type of // unrolling which is to be performed. enum { Full = 0, Partial = 1, Runtime = 2 }; int Unrolling; if (TripCount && Count == TripCount) { Unrolling = Partial; // If the loop is really small, we don't need to run an expensive analysis. if (canUnrollCompletely(L, Threshold, 100, DynamicCostSavingsDiscount, UnrolledSize, UnrolledSize)) { Unrolling = Full; } else { // The loop isn't that small, but we still can fully unroll it if that // helps to remove a significant number of instructions. // To check that, run additional analysis on the loop. if (Optional<EstimatedUnrollCost> Cost = analyzeLoopUnrollCost( L, TripCount, *SE, TTI, Threshold + DynamicCostSavingsDiscount)) if (canUnrollCompletely(L, Threshold, PercentDynamicCostSavedThreshold, DynamicCostSavingsDiscount, Cost->UnrolledCost, Cost->RolledDynamicCost)) { Unrolling = Full; } } } else if (TripCount && Count < TripCount) { Unrolling = Partial; } else { Unrolling = Runtime; } // Reduce count based on the type of unrolling and the threshold values. unsigned OriginalCount = Count; bool AllowRuntime = (PragmaCount > 0) || (UserRuntime ? CurrentRuntime : UP.Runtime); // Don't unroll a runtime trip count loop with unroll full pragma. if (HasRuntimeUnrollDisablePragma(L) || PragmaFullUnroll) { AllowRuntime = false; } if (Unrolling == Partial) { bool AllowPartial = UserAllowPartial ? CurrentAllowPartial : UP.Partial; if (!AllowPartial && !CountSetExplicitly) { DEBUG(dbgs() << " will not try to unroll partially because " << "-unroll-allow-partial not given\n"); return false; } if (PartialThreshold != NoThreshold && UnrolledSize > PartialThreshold) { // Reduce unroll count to be modulo of TripCount for partial unrolling. 
Count = (std::max(PartialThreshold, 3u)-2) / (LoopSize-2); while (Count != 0 && TripCount % Count != 0) Count--; } } else if (Unrolling == Runtime) { if (!AllowRuntime && !CountSetExplicitly) { DEBUG(dbgs() << " will not try to unroll loop with runtime trip count " << "-unroll-runtime not given\n"); return false; } // Reduce unroll count to be the largest power-of-two factor of // the original count which satisfies the threshold limit. while (Count != 0 && UnrolledSize > PartialThreshold) { Count >>= 1; UnrolledSize = (LoopSize-2) * Count + 2; } if (Count > UP.MaxCount) Count = UP.MaxCount; DEBUG(dbgs() << " partially unrolling with count: " << Count << "\n"); } if (HasPragma) { if (PragmaCount != 0) // If loop has an unroll count pragma mark loop as unrolled to prevent // unrolling beyond that requested by the pragma. SetLoopAlreadyUnrolled(L); // Emit optimization remarks if we are unable to unroll the loop // as directed by a pragma. DebugLoc LoopLoc = L->getStartLoc(); Function *F = Header->getParent(); LLVMContext &Ctx = F->getContext(); if (PragmaFullUnroll && PragmaCount == 0) { if (TripCount && Count != TripCount) { emitOptimizationRemarkMissed( Ctx, DEBUG_TYPE, *F, LoopLoc, "Unable to fully unroll loop as directed by unroll(full) pragma " "because unrolled size is too large."); } else if (!TripCount) { emitOptimizationRemarkMissed( Ctx, DEBUG_TYPE, *F, LoopLoc, "Unable to fully unroll loop as directed by unroll(full) pragma " "because loop has a runtime trip count."); } } else if (PragmaCount > 0 && Count != OriginalCount) { emitOptimizationRemarkMissed( Ctx, DEBUG_TYPE, *F, LoopLoc, "Unable to unroll loop the number of times directed by " "unroll_count pragma because unrolled size is too large."); } } if (Unrolling != Full && Count < 2) { // Partial unrolling by 1 is a nop. For full unrolling, a factor // of 1 makes sense because loop control can be eliminated. return false; } if (StructurizeLoopExits) // HLSL Change hlsl::RemoveUnstructuredLoopExits(L, LI, &getAnalysis<DominatorTreeWrapperPass>().getDomTree()); // HLSL Change // Unroll the loop. if (!UnrollLoop(L, Count, TripCount, AllowRuntime, UP.AllowExpensiveTripCount, TripMultiple, LI, this, &LPM, &AC)) return false; return true; }
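// --- Editor's illustrative sketch; not part of the original pass. ---
// Standalone restatement of the size arithmetic used above when shrinking the
// unroll count. Under the pass's cost model the backedge branch and the
// compare feeding it are not replicated, hence the "-2"/"+2" terms. Helper
// names are hypothetical; preconditions mirror the pass (LoopSize >= 3, and
// for the profitability check RolledDynamicCost >= UnrolledCost > 0).
static uint64_t sketchUnrolledSize(uint64_t LoopSize, uint64_t Count) {
  return (LoopSize - 2) * Count + 2;
}

// Partial unrolling: largest count that fits the threshold and still divides
// the trip count evenly.
static unsigned sketchPartialCount(unsigned PartialThreshold, unsigned LoopSize,
                                   unsigned TripCount) {
  unsigned Count = (std::max(PartialThreshold, 3u) - 2) / (LoopSize - 2);
  while (Count != 0 && TripCount % Count != 0)
    --Count;
  return Count;
}

// Runtime unrolling: halve the count until the unrolled body fits the
// threshold.
static unsigned sketchRuntimeCount(unsigned Count, unsigned PartialThreshold,
                                   unsigned LoopSize) {
  while (Count != 0 && sketchUnrolledSize(LoopSize, Count) > PartialThreshold)
    Count >>= 1;
  return Count;
}

// Full-unroll profitability test used by the dynamic-cost branch of
// canUnrollCompletely: accept when unrolling saves enough of the estimated
// dynamic cost and the discounted static cost still fits the threshold.
static bool sketchWorthFullUnroll(uint64_t UnrolledCost,
                                  uint64_t RolledDynamicCost,
                                  unsigned Threshold,
                                  unsigned PercentSavedThreshold,
                                  unsigned SavingsDiscount) {
  unsigned PercentSaved = (unsigned)((RolledDynamicCost - UnrolledCost) *
                                     100ull / RolledDynamicCost);
  return PercentSaved >= PercentSavedThreshold &&
         (int64_t)UnrolledCost - (int64_t)SavingsDiscount <= (int64_t)Threshold;
}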
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/SROA.cpp
//===- SROA.cpp - Scalar Replacement Of Aggregates ------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// \file /// This transformation implements the well known scalar replacement of /// aggregates transformation. It tries to identify promotable elements of an /// aggregate alloca, and promote them to registers. It will also try to /// convert uses of an element (or set of elements) of an alloca into a vector /// or bitfield-style integer scalar if appropriate. /// /// It works to do this with minimal slicing of the alloca so that regions /// which are merely transferred in and out of external memory remain unchanged /// and are not decomposed to scalar code. /// /// Because this also performs alloca promotion, it can be thought of as also /// serving the purpose of SSA formation. The algorithm iterates on the /// function until all opportunities for promotion have been realized. /// //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/Loads.h" #include "llvm/Analysis/PtrUseVisitor.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DIBuilder.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/InstVisitor.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Operator.h" #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/TimeValue.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/PromoteMemToReg.h" #include "llvm/Transforms/Utils/SSAUpdater.h" #include "dxc/DXIL/DxilUtil.h" // HLSL Change - don't sroa resource type. #include "dxc/DXIL/DxilMetadataHelper.h" // HLSL Change - support strided debug variables #include "dxc/HLSL/HLMatrixType.h" // HLSL Change - don't sroa matrix types. 
#if __cplusplus >= 201103L && !defined(NDEBUG) // We only use this for a debug check in C++11 #include <random> #endif using namespace llvm; #define DEBUG_TYPE "sroa" STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement"); STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed"); STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca"); STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses rewritten"); STATISTIC(MaxUsesPerAllocaPartition, "Maximum number of uses of a partition"); STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced"); STATISTIC(NumPromoted, "Number of allocas promoted to SSA values"); STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion"); STATISTIC(NumDeleted, "Number of instructions deleted"); STATISTIC(NumVectorized, "Number of vectorized aggregates"); #if 0 // HLSL Change Starts - option pending /// Hidden option to force the pass to not use DomTree and mem2reg, instead /// forming SSA values through the SSAUpdater infrastructure. static cl::opt<bool> ForceSSAUpdater("force-ssa-updater", cl::init(false), cl::Hidden); /// Hidden option to enable randomly shuffling the slices to help uncover /// instability in their order. static cl::opt<bool> SROARandomShuffleSlices("sroa-random-shuffle-slices", cl::init(false), cl::Hidden); /// Hidden option to experiment with completely strict handling of inbounds /// GEPs. static cl::opt<bool> SROAStrictInbounds("sroa-strict-inbounds", cl::init(false), cl::Hidden); #else static const bool ForceSSAUpdater = false; static const bool SROAStrictInbounds = false; #endif // HLSL Change Ends namespace { /// \brief A custom IRBuilder inserter which prefixes all names if they are /// preserved. template <bool preserveNames = true> class IRBuilderPrefixedInserter : public IRBuilderDefaultInserter<preserveNames> { std::string Prefix; public: void SetNamePrefix(const Twine &P) { Prefix = P.str(); } protected: void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB, BasicBlock::iterator InsertPt) const { IRBuilderDefaultInserter<preserveNames>::InsertHelper( I, Name.isTriviallyEmpty() ? Name : Prefix + Name, BB, InsertPt); } }; // Specialization for not preserving the name is trivial. template <> class IRBuilderPrefixedInserter<false> : public IRBuilderDefaultInserter<false> { public: void SetNamePrefix(const Twine &P) {} }; /// \brief Provide a typedef for IRBuilder that drops names in release builds. #ifndef NDEBUG typedef llvm::IRBuilder<true, ConstantFolder, IRBuilderPrefixedInserter<true>> IRBuilderTy; #else typedef llvm::IRBuilder<false, ConstantFolder, IRBuilderPrefixedInserter<false>> IRBuilderTy; #endif } namespace { /// \brief A used slice of an alloca. /// /// This structure represents a slice of an alloca used by some instruction. It /// stores both the begin and end offsets of this use, a pointer to the use /// itself, and a flag indicating whether we can classify the use as splittable /// or not when forming partitions of the alloca. class Slice { /// \brief The beginning offset of the range. uint64_t BeginOffset; /// \brief The ending offset, not included in the range. uint64_t EndOffset; /// \brief Storage for both the use of this slice and whether it can be /// split. 
PointerIntPair<Use *, 1, bool> UseAndIsSplittable; public: Slice() : BeginOffset(), EndOffset() {} Slice(uint64_t BeginOffset, uint64_t EndOffset, Use *U, bool IsSplittable) : BeginOffset(BeginOffset), EndOffset(EndOffset), UseAndIsSplittable(U, IsSplittable) {} uint64_t beginOffset() const { return BeginOffset; } uint64_t endOffset() const { return EndOffset; } bool isSplittable() const { return UseAndIsSplittable.getInt(); } void makeUnsplittable() { UseAndIsSplittable.setInt(false); } Use *getUse() const { return UseAndIsSplittable.getPointer(); } bool isDead() const { return getUse() == nullptr; } void kill() { UseAndIsSplittable.setPointer(nullptr); } /// \brief Support for ordering ranges. /// /// This provides an ordering over ranges such that start offsets are /// always increasing, and within equal start offsets, the end offsets are /// decreasing. Thus the spanning range comes first in a cluster with the /// same start position. bool operator<(const Slice &RHS) const { if (beginOffset() < RHS.beginOffset()) return true; if (beginOffset() > RHS.beginOffset()) return false; if (isSplittable() != RHS.isSplittable()) return !isSplittable(); if (endOffset() > RHS.endOffset()) return true; return false; } /// \brief Support comparison with a single offset to allow binary searches. friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS, uint64_t RHSOffset) { return LHS.beginOffset() < RHSOffset; } friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset, const Slice &RHS) { return LHSOffset < RHS.beginOffset(); } bool operator==(const Slice &RHS) const { return isSplittable() == RHS.isSplittable() && beginOffset() == RHS.beginOffset() && endOffset() == RHS.endOffset(); } bool operator!=(const Slice &RHS) const { return !operator==(RHS); } }; } // end anonymous namespace namespace llvm { template <typename T> struct isPodLike; template <> struct isPodLike<Slice> { static const bool value = true; }; } namespace { /// \brief Representation of the alloca slices. /// /// This class represents the slices of an alloca which are formed by its /// various uses. If a pointer escapes, we can't fully build a representation /// for the slices used and we reflect that in this structure. The uses are /// stored, sorted by increasing beginning offset and with unsplittable slices /// starting at a particular offset before splittable slices. class AllocaSlices { public: /// \brief Construct the slices of a particular alloca. AllocaSlices(const DataLayout &DL, AllocaInst &AI, const bool SkipHLSLMat); // HLSL Change - not sroa matrix type. /// \brief Test whether a pointer to the allocation escapes our analysis. /// /// If this is true, the slices are never fully built and should be /// ignored. bool isEscaped() const { return PointerEscapingInstr; } /// \brief Support for iterating over the slices. /// @{ typedef SmallVectorImpl<Slice>::iterator iterator; typedef iterator_range<iterator> range; iterator begin() { return Slices.begin(); } iterator end() { return Slices.end(); } typedef SmallVectorImpl<Slice>::const_iterator const_iterator; typedef iterator_range<const_iterator> const_range; const_iterator begin() const { return Slices.begin(); } const_iterator end() const { return Slices.end(); } /// @} /// \brief Erase a range of slices. void erase(iterator Start, iterator Stop) { Slices.erase(Start, Stop); } /// \brief Insert new slices for this alloca. 
/// /// This moves the slices into the alloca's slices collection, and re-sorts /// everything so that the usual ordering properties of the alloca's slices /// hold. void insert(ArrayRef<Slice> NewSlices) { int OldSize = Slices.size(); Slices.append(NewSlices.begin(), NewSlices.end()); auto SliceI = Slices.begin() + OldSize; std::sort(SliceI, Slices.end()); std::inplace_merge(Slices.begin(), SliceI, Slices.end()); } // Forward declare an iterator to befriend it. class partition_iterator; /// \brief A partition of the slices. /// /// An ephemeral representation for a range of slices which can be viewed as /// a partition of the alloca. This range represents a span of the alloca's /// memory which cannot be split, and provides access to all of the slices /// overlapping some part of the partition. /// /// Objects of this type are produced by traversing the alloca's slices, but /// are only ephemeral and not persistent. class Partition { private: friend class AllocaSlices; friend class AllocaSlices::partition_iterator; /// \brief The begining and ending offsets of the alloca for this partition. uint64_t BeginOffset, EndOffset; /// \brief The start end end iterators of this partition. iterator SI, SJ; /// \brief A collection of split slice tails overlapping the partition. SmallVector<Slice *, 4> SplitTails; /// \brief Raw constructor builds an empty partition starting and ending at /// the given iterator. Partition(iterator SI) : SI(SI), SJ(SI) {} public: /// \brief The start offset of this partition. /// /// All of the contained slices start at or after this offset. uint64_t beginOffset() const { return BeginOffset; } /// \brief The end offset of this partition. /// /// All of the contained slices end at or before this offset. uint64_t endOffset() const { return EndOffset; } /// \brief The size of the partition. /// /// Note that this can never be zero. uint64_t size() const { assert(BeginOffset < EndOffset && "Partitions must span some bytes!"); return EndOffset - BeginOffset; } /// \brief Test whether this partition contains no slices, and merely spans /// a region occupied by split slices. bool empty() const { return SI == SJ; } /// \name Iterate slices that start within the partition. /// These may be splittable or unsplittable. They have a begin offset >= the /// partition begin offset. /// @{ // FIXME: We should probably define a "concat_iterator" helper and use that // to stitch together pointee_iterators over the split tails and the // contiguous iterators of the partition. That would give a much nicer // interface here. We could then additionally expose filtered iterators for // split, unsplit, and unsplittable splices based on the usage patterns. iterator begin() const { return SI; } iterator end() const { return SJ; } /// @} /// \brief Get the sequence of split slice tails. /// /// These tails are of slices which start before this partition but are /// split and overlap into the partition. We accumulate these while forming /// partitions. ArrayRef<Slice *> splitSliceTails() const { return SplitTails; } }; /// \brief An iterator over partitions of the alloca's slices. /// /// This iterator implements the core algorithm for partitioning the alloca's /// slices. It is a forward iterator as we don't support backtracking for /// efficiency reasons, and re-use a single storage area to maintain the /// current set of split slices. /// /// It is templated on the slice iterator type to use so that it can operate /// with either const or non-const slice iterators. 
class partition_iterator : public iterator_facade_base<partition_iterator, std::forward_iterator_tag, Partition> { friend class AllocaSlices; /// \brief Most of the state for walking the partitions is held in a class /// with a nice interface for examining them. Partition P; /// \brief We need to keep the end of the slices to know when to stop. AllocaSlices::iterator SE; /// \brief We also need to keep track of the maximum split end offset seen. /// FIXME: Do we really? uint64_t MaxSplitSliceEndOffset; /// \brief Sets the partition to be empty at given iterator, and sets the /// end iterator. partition_iterator(AllocaSlices::iterator SI, AllocaSlices::iterator SE) : P(SI), SE(SE), MaxSplitSliceEndOffset(0) { // If not already at the end, advance our state to form the initial // partition. if (SI != SE) advance(); } /// \brief Advance the iterator to the next partition. /// /// Requires that the iterator not be at the end of the slices. void advance() { assert((P.SI != SE || !P.SplitTails.empty()) && "Cannot advance past the end of the slices!"); // Clear out any split uses which have ended. if (!P.SplitTails.empty()) { if (P.EndOffset >= MaxSplitSliceEndOffset) { // If we've finished all splits, this is easy. P.SplitTails.clear(); MaxSplitSliceEndOffset = 0; } else { // Remove the uses which have ended in the prior partition. This // cannot change the max split slice end because we just checked that // the prior partition ended prior to that max. P.SplitTails.erase( std::remove_if( P.SplitTails.begin(), P.SplitTails.end(), [&](Slice *S) { return S->endOffset() <= P.EndOffset; }), P.SplitTails.end()); assert(std::any_of(P.SplitTails.begin(), P.SplitTails.end(), [&](Slice *S) { return S->endOffset() == MaxSplitSliceEndOffset; }) && "Could not find the current max split slice offset!"); assert(std::all_of(P.SplitTails.begin(), P.SplitTails.end(), [&](Slice *S) { return S->endOffset() <= MaxSplitSliceEndOffset; }) && "Max split slice end offset is not actually the max!"); } } // If P.SI is already at the end, then we've cleared the split tail and // now have an end iterator. if (P.SI == SE) { assert(P.SplitTails.empty() && "Failed to clear the split slices!"); return; } // If we had a non-empty partition previously, set up the state for // subsequent partitions. if (P.SI != P.SJ) { // Accumulate all the splittable slices which started in the old // partition into the split list. for (Slice &S : P) if (S.isSplittable() && S.endOffset() > P.EndOffset) { P.SplitTails.push_back(&S); MaxSplitSliceEndOffset = std::max(S.endOffset(), MaxSplitSliceEndOffset); } // Start from the end of the previous partition. P.SI = P.SJ; // If P.SI is now at the end, we at most have a tail of split slices. if (P.SI == SE) { P.BeginOffset = P.EndOffset; P.EndOffset = MaxSplitSliceEndOffset; return; } // If the we have split slices and the next slice is after a gap and is // not splittable immediately form an empty partition for the split // slices up until the next slice begins. if (!P.SplitTails.empty() && P.SI->beginOffset() != P.EndOffset && !P.SI->isSplittable()) { P.BeginOffset = P.EndOffset; P.EndOffset = P.SI->beginOffset(); return; } } // OK, we need to consume new slices. Set the end offset based on the // current slice, and step SJ past it. The beginning offset of the // parttion is the beginning offset of the next slice unless we have // pre-existing split slices that are continuing, in which case we begin // at the prior end offset. P.BeginOffset = P.SplitTails.empty() ? 
P.SI->beginOffset() : P.EndOffset; P.EndOffset = P.SI->endOffset(); ++P.SJ; // There are two strategies to form a partition based on whether the // partition starts with an unsplittable slice or a splittable slice. if (!P.SI->isSplittable()) { // When we're forming an unsplittable region, it must always start at // the first slice and will extend through its end. assert(P.BeginOffset == P.SI->beginOffset()); // Form a partition including all of the overlapping slices with this // unsplittable slice. while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) { if (!P.SJ->isSplittable()) P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset()); ++P.SJ; } // We have a partition across a set of overlapping unsplittable // partitions. return; } // If we're starting with a splittable slice, then we need to form // a synthetic partition spanning it and any other overlapping splittable // splices. assert(P.SI->isSplittable() && "Forming a splittable partition!"); // Collect all of the overlapping splittable slices. while (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset && P.SJ->isSplittable()) { P.EndOffset = std::max(P.EndOffset, P.SJ->endOffset()); ++P.SJ; } // Back upiP.EndOffset if we ended the span early when encountering an // unsplittable slice. This synthesizes the early end offset of // a partition spanning only splittable slices. if (P.SJ != SE && P.SJ->beginOffset() < P.EndOffset) { assert(!P.SJ->isSplittable()); P.EndOffset = P.SJ->beginOffset(); } } public: bool operator==(const partition_iterator &RHS) const { assert(SE == RHS.SE && "End iterators don't match between compared partition iterators!"); // The observed positions of partitions is marked by the P.SI iterator and // the emptyness of the split slices. The latter is only relevant when // P.SI == SE, as the end iterator will additionally have an empty split // slices list, but the prior may have the same P.SI and a tail of split // slices. if (P.SI == RHS.P.SI && P.SplitTails.empty() == RHS.P.SplitTails.empty()) { assert(P.SJ == RHS.P.SJ && "Same set of slices formed two different sized partitions!"); assert(P.SplitTails.size() == RHS.P.SplitTails.size() && "Same slice position with differently sized non-empty split " "slice tails!"); return true; } return false; } partition_iterator &operator++() { advance(); return *this; } Partition &operator*() { return P; } }; /// \brief A forward range over the partitions of the alloca's slices. /// /// This accesses an iterator range over the partitions of the alloca's /// slices. It computes these partitions on the fly based on the overlapping /// offsets of the slices and the ability to split them. It will visit "empty" /// partitions to cover regions of the alloca only accessed via split /// slices. iterator_range<partition_iterator> partitions() { return make_range(partition_iterator(begin(), end()), partition_iterator(end(), end())); } /// \brief Access the dead users for this alloca. ArrayRef<Instruction *> getDeadUsers() const { return DeadUsers; } /// \brief Access the dead operands referring to this alloca. /// /// These are operands which have cannot actually be used to refer to the /// alloca as they are outside its range and the user doesn't correct for /// that. These mostly consist of PHI node inputs and the like which we just /// need to replace with undef. 
ArrayRef<Use *> getDeadOperands() const { return DeadOperands; } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void print(raw_ostream &OS, const_iterator I, StringRef Indent = " ") const; void printSlice(raw_ostream &OS, const_iterator I, StringRef Indent = " ") const; void printUse(raw_ostream &OS, const_iterator I, StringRef Indent = " ") const; void print(raw_ostream &OS) const; void dump(const_iterator I) const; void dump() const; #endif private: template <typename DerivedT, typename RetT = void> class BuilderBase; class SliceBuilder; friend class AllocaSlices::SliceBuilder; #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) /// \brief Handle to alloca instruction to simplify method interfaces. AllocaInst &AI; #endif /// \brief The instruction responsible for this alloca not having a known set /// of slices. /// /// When an instruction (potentially) escapes the pointer to the alloca, we /// store a pointer to that here and abort trying to form slices of the /// alloca. This will be null if the alloca slices are analyzed successfully. Instruction *PointerEscapingInstr; /// \brief The slices of the alloca. /// /// We store a vector of the slices formed by uses of the alloca here. This /// vector is sorted by increasing begin offset, and then the unsplittable /// slices before the splittable ones. See the Slice inner class for more /// details. SmallVector<Slice, 8> Slices; /// \brief Instructions which will become dead if we rewrite the alloca. /// /// Note that these are not separated by slice. This is because we expect an /// alloca to be completely rewritten or not rewritten at all. If rewritten, /// all these instructions can simply be removed and replaced with undef as /// they come from outside of the allocated space. SmallVector<Instruction *, 8> DeadUsers; /// \brief Operands which will become dead if we rewrite the alloca. /// /// These are operands that in their particular use can be replaced with /// undef when we rewrite the alloca. These show up in out-of-bounds inputs /// to PHI nodes and the like. They aren't entirely dead (there might be /// a GEP back into the bounds using it elsewhere) and nor is the PHI, but we /// want to swap this particular input for undef to simplify the use lists of /// the alloca. SmallVector<Use *, 8> DeadOperands; }; } static Value *foldSelectInst(SelectInst &SI) { // If the condition being selected on is a constant or the same value is // being selected between, fold the select. Yes this does (rarely) happen // early on. if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition())) return SI.getOperand(1 + CI->isZero()); if (SI.getOperand(1) == SI.getOperand(2)) return SI.getOperand(1); return nullptr; } /// \brief A helper that folds a PHI node or a select. static Value *foldPHINodeOrSelectInst(Instruction &I) { if (PHINode *PN = dyn_cast<PHINode>(&I)) { // If PN merges together the same value, return that value. return PN->hasConstantValue(); } return foldSelectInst(cast<SelectInst>(I)); } // HLSL Change - Detect HLSL Object or Matrix [array] type // These types should be SROA'd elsewhere as necessary. bool SkipHLSLType(Type *Ty, bool SkipHLSLMat) { if (Ty->isPointerTy()) Ty = Ty->getPointerElementType(); while (Ty->isArrayTy()) Ty = Ty->getArrayElementType(); return (SkipHLSLMat && hlsl::HLMatrixType::isa(Ty)) || hlsl::dxilutil::IsHLSLObjectType(Ty); } /// \brief Builder for the alloca slices. 
/// /// This class builds a set of alloca slices by recursively visiting the uses /// of an alloca and making a slice for each load and store at each offset. class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> { friend class PtrUseVisitor<SliceBuilder>; friend class InstVisitor<SliceBuilder>; typedef PtrUseVisitor<SliceBuilder> Base; const bool SkipHLSLMat; // HLSL Change - not sroa matrix type. const uint64_t AllocSize; AllocaSlices &AS; SmallDenseMap<Instruction *, unsigned> MemTransferSliceMap; SmallDenseMap<Instruction *, uint64_t> PHIOrSelectSizes; /// \brief Set to de-duplicate dead instructions found in the use walk. SmallPtrSet<Instruction *, 4> VisitedDeadInsts; public: SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &AS, const bool SkipHLSLMat) : PtrUseVisitor<SliceBuilder>(DL), SkipHLSLMat(SkipHLSLMat), // HLSL Change - not sroa matrix type. AllocSize(DL.getTypeAllocSize(AI.getAllocatedType())), AS(AS) {} private: void markAsDead(Instruction &I) { if (VisitedDeadInsts.insert(&I).second) AS.DeadUsers.push_back(&I); } void insertUse(Instruction &I, const APInt &Offset, uint64_t Size, bool IsSplittable = false) { // Completely skip uses which have a zero size or start either before or // past the end of the allocation. if (Size == 0 || Offset.uge(AllocSize)) { DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset << " which has zero size or starts outside of the " << AllocSize << " byte alloca:\n" << " alloca: " << AS.AI << "\n" << " use: " << I << "\n"); return markAsDead(I); } uint64_t BeginOffset = Offset.getZExtValue(); uint64_t EndOffset = BeginOffset + Size; // Clamp the end offset to the end of the allocation. Note that this is // formulated to handle even the case where "BeginOffset + Size" overflows. // This may appear superficially to be something we could ignore entirely, // but that is not so! There may be widened loads or PHI-node uses where // some instructions are dead but not others. We can't completely ignore // them, and so have to record at least the information here. assert(AllocSize >= BeginOffset); // Established above. if (Size > AllocSize - BeginOffset) { DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset << " to remain within the " << AllocSize << " byte alloca:\n" << " alloca: " << AS.AI << "\n" << " use: " << I << "\n"); EndOffset = AllocSize; } AS.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable)); } void visitBitCastInst(BitCastInst &BC) { if (BC.use_empty()) return markAsDead(BC); // HLSL Change Begin - not sroa matrix type. if (SkipHLSLType(BC.getType(), SkipHLSLMat) || SkipHLSLType(BC.getSrcTy(), SkipHLSLMat)) { AS.PointerEscapingInstr = &BC; return; } // HLSL Change End. return Base::visitBitCastInst(BC); } void visitGetElementPtrInst(GetElementPtrInst &GEPI) { if (GEPI.use_empty()) return markAsDead(GEPI); if (SROAStrictInbounds && GEPI.isInBounds()) { // FIXME: This is a manually un-factored variant of the basic code inside // of GEPs with checking of the inbounds invariant specified in the // langref in a very strict sense. If we ever want to enable // SROAStrictInbounds, this code should be factored cleanly into // PtrUseVisitor, but it is easier to experiment with SROAStrictInbounds // by writing out the code here where we have tho underlying allocation // size readily available. 
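      // For example, with a [4 x i32] alloca (16 bytes), an inbounds GEP
      // whose constant index reaches byte offset 20 would compute an
      // out-of-bounds intermediate pointer, so the GEP is marked dead instead
      // of being recorded as a slice. An index of 4 (offset 16, one past the
      // end) is still tolerated because the comparison below is a strict
      // greater-than.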
APInt GEPOffset = Offset; const DataLayout &DL = GEPI.getModule()->getDataLayout(); for (gep_type_iterator GTI = gep_type_begin(GEPI), GTE = gep_type_end(GEPI); GTI != GTE; ++GTI) { ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand()); if (!OpC) break; // Handle a struct index, which adds its field offset to the pointer. if (StructType *STy = dyn_cast<StructType>(*GTI)) { unsigned ElementIdx = OpC->getZExtValue(); const StructLayout *SL = DL.getStructLayout(STy); GEPOffset += APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx)); } else { // For array or vector indices, scale the index by the size of the // type. APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth()); GEPOffset += Index * APInt(Offset.getBitWidth(), DL.getTypeAllocSize(GTI.getIndexedType())); } // If this index has computed an intermediate pointer which is not // inbounds, then the result of the GEP is a poison value and we can // delete it and all uses. if (GEPOffset.ugt(AllocSize)) return markAsDead(GEPI); } } return Base::visitGetElementPtrInst(GEPI); } void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset, uint64_t Size, bool IsVolatile) { // We allow splitting of non-volatile loads and stores where the type is an // integer type. These may be used to implement 'memcpy' or other "transfer // of bits" patterns. bool IsSplittable = Ty->isIntegerTy() && !IsVolatile; insertUse(I, Offset, Size, IsSplittable); } void visitLoadInst(LoadInst &LI) { // HLSL Change Begin - not sroa matrix type. if (SkipHLSLType(LI.getType(), SkipHLSLMat)) return PI.setEscapedAndAborted(&LI); // HLSL Change End. assert((!LI.isSimple() || LI.getType()->isSingleValueType()) && "All simple FCA loads should have been pre-split"); if (!IsOffsetKnown) return PI.setAborted(&LI); const DataLayout &DL = LI.getModule()->getDataLayout(); uint64_t Size = DL.getTypeStoreSize(LI.getType()); return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile()); } void visitStoreInst(StoreInst &SI) { Value *ValOp = SI.getValueOperand(); if (ValOp == *U) return PI.setEscapedAndAborted(&SI); // HLSL Change Begin - not sroa matrix type. if (SkipHLSLType(ValOp->getType(), SkipHLSLMat)) return PI.setEscapedAndAborted(&SI); // HLSL Change End. if (!IsOffsetKnown) return PI.setAborted(&SI); const DataLayout &DL = SI.getModule()->getDataLayout(); uint64_t Size = DL.getTypeStoreSize(ValOp->getType()); // If this memory access can be shown to *statically* extend outside the // bounds of of the allocation, it's behavior is undefined, so simply // ignore it. Note that this is more strict than the generic clamping // behavior of insertUse. We also try to handle cases which might run the // risk of overflow. // FIXME: We should instead consider the pointer to have escaped if this // function is being instrumented for addressing bugs or race conditions. 
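    // For example, for a 16-byte alloca, an 8-byte store at offset 12 is
    // rejected here because 12 > 16 - 8. Writing the check this way (rather
    // than as Offset + Size > AllocSize) also avoids overflow when Offset is
    // close to the maximum pointer-width value.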
if (Size > AllocSize || Offset.ugt(AllocSize - Size)) { DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @" << Offset << " which extends past the end of the " << AllocSize << " byte alloca:\n" << " alloca: " << AS.AI << "\n" << " use: " << SI << "\n"); return markAsDead(SI); } assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) && "All simple FCA stores should have been pre-split"); handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile()); } void visitMemSetInst(MemSetInst &II) { assert(II.getRawDest() == *U && "Pointer use is not the destination?"); ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength()); if ((Length && Length->getValue() == 0) || (IsOffsetKnown && Offset.uge(AllocSize))) // Zero-length mem transfer intrinsics can be ignored entirely. return markAsDead(II); if (!IsOffsetKnown) return PI.setAborted(&II); insertUse(II, Offset, Length ? Length->getLimitedValue() : AllocSize - Offset.getLimitedValue(), (bool)Length); } void visitMemTransferInst(MemTransferInst &II) { ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength()); if (Length && Length->getValue() == 0) // Zero-length mem transfer intrinsics can be ignored entirely. return markAsDead(II); // Because we can visit these intrinsics twice, also check to see if the // first time marked this instruction as dead. If so, skip it. if (VisitedDeadInsts.count(&II)) return; if (!IsOffsetKnown) return PI.setAborted(&II); // This side of the transfer is completely out-of-bounds, and so we can // nuke the entire transfer. However, we also need to nuke the other side // if already added to our partitions. // FIXME: Yet another place we really should bypass this when // instrumenting for ASan. if (Offset.uge(AllocSize)) { SmallDenseMap<Instruction *, unsigned>::iterator MTPI = MemTransferSliceMap.find(&II); if (MTPI != MemTransferSliceMap.end()) AS.Slices[MTPI->second].kill(); return markAsDead(II); } uint64_t RawOffset = Offset.getLimitedValue(); uint64_t Size = Length ? Length->getLimitedValue() : AllocSize - RawOffset; // Check for the special case where the same exact value is used for both // source and dest. if (*U == II.getRawDest() && *U == II.getRawSource()) { // For non-volatile transfers this is a no-op. if (!II.isVolatile()) return markAsDead(II); return insertUse(II, Offset, Size, /*IsSplittable=*/false); } // If we have seen both source and destination for a mem transfer, then // they both point to the same alloca. bool Inserted; SmallDenseMap<Instruction *, unsigned>::iterator MTPI; std::tie(MTPI, Inserted) = MemTransferSliceMap.insert(std::make_pair(&II, AS.Slices.size())); unsigned PrevIdx = MTPI->second; if (!Inserted) { Slice &PrevP = AS.Slices[PrevIdx]; // Check if the begin offsets match and this is a non-volatile transfer. // In that case, we can completely elide the transfer. if (!II.isVolatile() && PrevP.beginOffset() == RawOffset) { PrevP.kill(); return markAsDead(II); } // Otherwise we have an offset transfer within the same alloca. We can't // split those. PrevP.makeUnsplittable(); } // Insert the use now that we've fixed up the splittable nature. insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length); // Check that we ended up with a valid index in the map. assert(AS.Slices[PrevIdx].getUse()->getUser() == &II && "Map index doesn't point back to a slice with this user."); } // Disable SRoA for any intrinsics except for lifetime invariants. // FIXME: What about debug intrinsics? This matches old behavior, but // doesn't make sense. 
void visitIntrinsicInst(IntrinsicInst &II) { if (!IsOffsetKnown) return PI.setAborted(&II); if (II.getIntrinsicID() == Intrinsic::lifetime_start || II.getIntrinsicID() == Intrinsic::lifetime_end) { ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0)); uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(), Length->getLimitedValue()); insertUse(II, Offset, Size, true); return; } Base::visitIntrinsicInst(II); } Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) { // We consider any PHI or select that results in a direct load or store of // the same offset to be a viable use for slicing purposes. These uses // are considered unsplittable and the size is the maximum loaded or stored // size. SmallPtrSet<Instruction *, 4> Visited; SmallVector<std::pair<Instruction *, Instruction *>, 4> Uses; Visited.insert(Root); Uses.push_back(std::make_pair(cast<Instruction>(*U), Root)); const DataLayout &DL = Root->getModule()->getDataLayout(); // If there are no loads or stores, the access is dead. We mark that as // a size zero access. Size = 0; do { Instruction *I, *UsedI; std::tie(UsedI, I) = Uses.pop_back_val(); if (LoadInst *LI = dyn_cast<LoadInst>(I)) { Size = std::max(Size, DL.getTypeStoreSize(LI->getType())); continue; } if (StoreInst *SI = dyn_cast<StoreInst>(I)) { Value *Op = SI->getOperand(0); if (Op == UsedI) return SI; Size = std::max(Size, DL.getTypeStoreSize(Op->getType())); continue; } if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) { if (!GEP->hasAllZeroIndices()) return GEP; } else if (!isa<BitCastInst>(I) && !isa<PHINode>(I) && !isa<SelectInst>(I)) { return I; } for (User *U : I->users()) if (Visited.insert(cast<Instruction>(U)).second) Uses.push_back(std::make_pair(I, cast<Instruction>(U))); } while (!Uses.empty()); return nullptr; } void visitPHINodeOrSelectInst(Instruction &I) { assert(isa<PHINode>(I) || isa<SelectInst>(I)); if (I.use_empty()) return markAsDead(I); // TODO: We could use SimplifyInstruction here to fold PHINodes and // SelectInsts. However, doing so requires to change the current // dead-operand-tracking mechanism. For instance, suppose neither loading // from %U nor %other traps. Then "load (select undef, %U, %other)" does not // trap either. However, if we simply replace %U with undef using the // current dead-operand-tracking mechanism, "load (select undef, undef, // %other)" may trap because the select may return the first operand // "undef". if (Value *Result = foldPHINodeOrSelectInst(I)) { if (Result == *U) // If the result of the constant fold will be the pointer, recurse // through the PHI/select as if we had RAUW'ed it. enqueueUsers(I); else // Otherwise the operand to the PHI/select is dead, and we can replace // it with undef. AS.DeadOperands.push_back(U); return; } if (!IsOffsetKnown) return PI.setAborted(&I); // See if we already have computed info on this node. uint64_t &Size = PHIOrSelectSizes[&I]; if (!Size) { // This is a new PHI/Select, check for an unsafe use of it. if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&I, Size)) return PI.setAborted(UnsafeI); } // For PHI and select operands outside the alloca, we can't nuke the entire // phi or select -- the other side might still be relevant, so we special // case them here and use a separate structure to track the operands // themselves which should be replaced with undef. // FIXME: This should instead be escaped in the event we're instrumenting // for address sanitization. 
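    // For example, with a 16-byte alloca and a select between an in-bounds
    // pointer into it and a GEP 32 bytes past it, only the out-of-bounds
    // arm's use is queued for replacement with undef below; the in-bounds arm
    // is still recorded as a slice normally.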
if (Offset.uge(AllocSize)) { AS.DeadOperands.push_back(U); return; } insertUse(I, Offset, Size); } void visitPHINode(PHINode &PN) { visitPHINodeOrSelectInst(PN); } void visitSelectInst(SelectInst &SI) { visitPHINodeOrSelectInst(SI); } /// \brief Disable SROA entirely if there are unhandled users of the alloca. void visitInstruction(Instruction &I) { PI.setAborted(&I); } }; AllocaSlices::AllocaSlices( const DataLayout &DL, AllocaInst &AI, const bool SkipHLSLMat) // HLSL Change - not sroa matrix type. : #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) AI(AI), #endif PointerEscapingInstr(nullptr) { SliceBuilder PB(DL, AI, *this, SkipHLSLMat); SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI); if (PtrI.isEscaped() || PtrI.isAborted()) { // FIXME: We should sink the escape vs. abort info into the caller nicely, // possibly by just storing the PtrInfo in the AllocaSlices. PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst() : PtrI.getAbortingInst(); assert(PointerEscapingInstr && "Did not track a bad instruction"); return; } Slices.erase(std::remove_if(Slices.begin(), Slices.end(), [](const Slice &S) { return S.isDead(); }), Slices.end()); #if 0 // HLSL Change Starts - option pending #if __cplusplus >= 201103L && !defined(NDEBUG) if (SROARandomShuffleSlices) { std::mt19937 MT(static_cast<unsigned>(sys::TimeValue::now().msec())); std::shuffle(Slices.begin(), Slices.end(), MT); } #endif #endif // HLSL Change Ends - option pending // Sort the uses. This arranges for the offsets to be in ascending order, // and the sizes to be in descending order. std::sort(Slices.begin(), Slices.end()); } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void AllocaSlices::print(raw_ostream &OS, const_iterator I, StringRef Indent) const { printSlice(OS, I, Indent); OS << "\n"; printUse(OS, I, Indent); } void AllocaSlices::printSlice(raw_ostream &OS, const_iterator I, StringRef Indent) const { OS << Indent << "[" << I->beginOffset() << "," << I->endOffset() << ")" << " slice #" << (I - begin()) << (I->isSplittable() ? " (splittable)" : ""); } void AllocaSlices::printUse(raw_ostream &OS, const_iterator I, StringRef Indent) const { OS << Indent << " used by: " << *I->getUse()->getUser() << "\n"; } void AllocaSlices::print(raw_ostream &OS) const { if (PointerEscapingInstr) { OS << "Can't analyze slices for alloca: " << AI << "\n" << " A pointer to this alloca escaped by:\n" << " " << *PointerEscapingInstr << "\n"; return; } OS << "Slices of alloca: " << AI << "\n"; for (const_iterator I = begin(), E = end(); I != E; ++I) print(OS, I); } LLVM_DUMP_METHOD void AllocaSlices::dump(const_iterator I) const { print(dbgs(), I); } LLVM_DUMP_METHOD void AllocaSlices::dump() const { print(dbgs()); } #endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) namespace { /// \brief Implementation of LoadAndStorePromoter for promoting allocas. /// /// This subclass of LoadAndStorePromoter adds overrides to handle promoting /// the loads and stores of an alloca instruction, as well as updating its /// debug information. This is used when a domtree is unavailable and thus /// mem2reg in its full form can't be used to handle promotion of allocas to /// scalar values. 
class AllocaPromoter : public LoadAndStorePromoter { AllocaInst &AI; DIBuilder &DIB; SmallVector<DbgDeclareInst *, 4> DDIs; SmallVector<DbgValueInst *, 4> DVIs; public: AllocaPromoter(ArrayRef<const Instruction *> Insts, SSAUpdater &S, AllocaInst &AI, DIBuilder &DIB) : LoadAndStorePromoter(Insts, S), AI(AI), DIB(DIB) {} void run(const SmallVectorImpl<Instruction *> &Insts) { // Retain the debug information attached to the alloca for use when // rewriting loads and stores. if (auto *L = LocalAsMetadata::getIfExists(&AI)) { if (auto *DINode = MetadataAsValue::getIfExists(AI.getContext(), L)) { for (User *U : DINode->users()) if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U)) DDIs.push_back(DDI); else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U)) DVIs.push_back(DVI); } } LoadAndStorePromoter::run(Insts); // While we have the debug information, clear it off of the alloca. The // caller takes care of deleting the alloca. while (!DDIs.empty()) DDIs.pop_back_val()->eraseFromParent(); while (!DVIs.empty()) DVIs.pop_back_val()->eraseFromParent(); } bool isInstInList(Instruction *I, const SmallVectorImpl<Instruction *> &Insts) const override { Value *Ptr; if (LoadInst *LI = dyn_cast<LoadInst>(I)) Ptr = LI->getOperand(0); else Ptr = cast<StoreInst>(I)->getPointerOperand(); // Only used to detect cycles, which will be rare and quickly found as // we're walking up a chain of defs rather than down through uses. SmallPtrSet<Value *, 4> Visited; do { if (Ptr == &AI) return true; if (BitCastInst *BCI = dyn_cast<BitCastInst>(Ptr)) Ptr = BCI->getOperand(0); else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) Ptr = GEPI->getPointerOperand(); else return false; } while (Visited.insert(Ptr).second); return false; } void updateDebugInfo(Instruction *Inst) const override { for (DbgDeclareInst *DDI : DDIs) if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) ConvertDebugDeclareToDebugValue(DDI, SI, DIB); else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) ConvertDebugDeclareToDebugValue(DDI, LI, DIB); for (DbgValueInst *DVI : DVIs) { Value *Arg = nullptr; if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) { // If an argument is zero extended then use argument directly. The ZExt // may be zapped by an optimization pass in future. if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0))) Arg = dyn_cast<Argument>(ZExt->getOperand(0)); else if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0))) Arg = dyn_cast<Argument>(SExt->getOperand(0)); if (!Arg) Arg = SI->getValueOperand(); } else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) { Arg = LI->getPointerOperand(); } else { continue; } DIB.insertDbgValueIntrinsic(Arg, 0, DVI->getVariable(), DVI->getExpression(), DVI->getDebugLoc(), Inst); } } }; } // end anon namespace namespace { /// \brief An optimization pass providing Scalar Replacement of Aggregates. /// /// This pass takes allocations which can be completely analyzed (that is, they /// don't escape) and tries to turn them into scalar SSA values. There are /// a few steps to this process. /// /// 1) It takes allocations of aggregates and analyzes the ways in which they /// are used to try to split them into smaller allocations, ideally of /// a single scalar data type. It will split up memcpy and memset accesses /// as necessary and try to isolate individual scalar accesses. /// 2) It will transform accesses into forms which are suitable for SSA value /// promotion. 
This can be replacing a memset with a scalar store of an /// integer value, or it can involve speculating operations on a PHI or /// select to be a PHI or select of the results. /// 3) Finally, this will try to detect a pattern of accesses which map cleanly /// onto insert and extract operations on a vector value, and convert them to /// this form. By doing so, it will enable promotion of vector aggregates to /// SSA vector values. class SROA : public FunctionPass { const bool RequiresDomTree; const bool SkipHLSLMat; // HLSL Change - not sroa matrix type. LLVMContext *C; DominatorTree *DT; AssumptionCache *AC; /// \brief Worklist of alloca instructions to simplify. /// /// Each alloca in the function is added to this. Each new alloca formed gets /// added to it as well to recursively simplify unless that alloca can be /// directly promoted. Finally, each time we rewrite a use of an alloca other /// the one being actively rewritten, we add it back onto the list if not /// already present to ensure it is re-visited. SetVector<AllocaInst *, SmallVector<AllocaInst *, 16>> Worklist; /// \brief A collection of instructions to delete. /// We try to batch deletions to simplify code and make things a bit more /// efficient. SetVector<Instruction *, SmallVector<Instruction *, 8>> DeadInsts; /// \brief Post-promotion worklist. /// /// Sometimes we discover an alloca which has a high probability of becoming /// viable for SROA after a round of promotion takes place. In those cases, /// the alloca is enqueued here for re-processing. /// /// Note that we have to be very careful to clear allocas out of this list in /// the event they are deleted. SetVector<AllocaInst *, SmallVector<AllocaInst *, 16>> PostPromotionWorklist; /// \brief A collection of alloca instructions we can directly promote. std::vector<AllocaInst *> PromotableAllocas; /// \brief A worklist of PHIs to speculate prior to promoting allocas. /// /// All of these PHIs have been checked for the safety of speculation and by /// being speculated will allow promoting allocas currently in the promotable /// queue. SetVector<PHINode *, SmallVector<PHINode *, 2>> SpeculatablePHIs; /// \brief A worklist of select instructions to speculate prior to promoting /// allocas. /// /// All of these select instructions have been checked for the safety of /// speculation and by being speculated will allow promoting allocas /// currently in the promotable queue. SetVector<SelectInst *, SmallVector<SelectInst *, 2>> SpeculatableSelects; public: SROA(bool RequiresDomTree = true, bool SkipHLSLMat = true) : FunctionPass(ID), RequiresDomTree(RequiresDomTree), SkipHLSLMat(SkipHLSLMat), // HLSL Change - not sroa matrix type. 
C(nullptr), DT(nullptr) { initializeSROAPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function &F) override; void getAnalysisUsage(AnalysisUsage &AU) const override; StringRef getPassName() const override { return "SROA"; } static char ID; private: friend class PHIOrSelectSpeculator; friend class AllocaSliceRewriter; bool runOnFunctionImp(Function &F); bool presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS); AllocaInst *rewritePartition(AllocaInst &AI, AllocaSlices &AS, AllocaSlices::Partition &P); bool splitAlloca(AllocaInst &AI, AllocaSlices &AS); bool runOnAlloca(AllocaInst &AI); void clobberUse(Use &U); void deleteDeadInstructions(SmallPtrSetImpl<AllocaInst *> &DeletedAllocas); bool promoteAllocas(Function &F); }; } char SROA::ID = 0; FunctionPass *llvm::createSROAPass(bool RequiresDomTree, bool SkipHLSLMat) { return new SROA(RequiresDomTree, SkipHLSLMat); } INITIALIZE_PASS_BEGIN(SROA, "sroa", "Scalar Replacement Of Aggregates", false, false) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_END(SROA, "sroa", "Scalar Replacement Of Aggregates", false, false) /// Walk the range of a partitioning looking for a common type to cover this /// sequence of slices. static Type *findCommonType(AllocaSlices::const_iterator B, AllocaSlices::const_iterator E, uint64_t EndOffset) { Type *Ty = nullptr; bool TyIsCommon = true; IntegerType *ITy = nullptr; // Note that we need to look at *every* alloca slice's Use to ensure we // always get consistent results regardless of the order of slices. for (AllocaSlices::const_iterator I = B; I != E; ++I) { Use *U = I->getUse(); if (isa<IntrinsicInst>(*U->getUser())) continue; if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset) continue; Type *UserTy = nullptr; if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) { UserTy = LI->getType(); } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) { UserTy = SI->getValueOperand()->getType(); } if (IntegerType *UserITy = dyn_cast_or_null<IntegerType>(UserTy)) { // If the type is larger than the partition, skip it. We only encounter // this for split integer operations where we want to use the type of the // entity causing the split. Also skip if the type is not a byte width // multiple. if (UserITy->getBitWidth() % 8 != 0 || UserITy->getBitWidth() / 8 > (EndOffset - B->beginOffset())) continue; // Track the largest bitwidth integer type used in this way in case there // is no common type. if (!ITy || ITy->getBitWidth() < UserITy->getBitWidth()) ITy = UserITy; } // To avoid depending on the order of slices, Ty and TyIsCommon must not // depend on types skipped above. if (!UserTy || (Ty && Ty != UserTy)) TyIsCommon = false; // Give up on anything but an iN type. else Ty = UserTy; } return TyIsCommon ? Ty : ITy; } /// PHI instructions that use an alloca and are subsequently loaded can be /// rewritten to load both input pointers in the pred blocks and then PHI the /// results, allowing the load of the alloca to be promoted. /// From this: /// %P2 = phi [i32* %Alloca, i32* %Other] /// %V = load i32* %P2 /// to: /// %V1 = load i32* %Alloca -> will be mem2reg'd /// ... /// %V2 = load i32* %Other /// ... /// %V = phi [i32 %V1, i32 %V2] /// /// We can do this to a select if its only uses are loads and if the operands /// to the select can be loaded unconditionally. 
/// /// FIXME: This should be hoisted into a generic utility, likely in /// Transforms/Util/Local.h static bool isSafePHIToSpeculate(PHINode &PN) { // For now, we can only do this promotion if the load is in the same block // as the PHI, and if there are no stores between the phi and load. // TODO: Allow recursive phi users. // TODO: Allow stores. BasicBlock *BB = PN.getParent(); unsigned MaxAlign = 0; bool HaveLoad = false; for (User *U : PN.users()) { LoadInst *LI = dyn_cast<LoadInst>(U); if (!LI || !LI->isSimple()) return false; // For now we only allow loads in the same block as the PHI. This is // a common case that happens when instcombine merges two loads through // a PHI. if (LI->getParent() != BB) return false; // Ensure that there are no instructions between the PHI and the load that // could store. for (BasicBlock::iterator BBI = &PN; &*BBI != LI; ++BBI) if (BBI->mayWriteToMemory()) return false; MaxAlign = std::max(MaxAlign, LI->getAlignment()); HaveLoad = true; } if (!HaveLoad) return false; const DataLayout &DL = PN.getModule()->getDataLayout(); // We can only transform this if it is safe to push the loads into the // predecessor blocks. The only thing to watch out for is that we can't put // a possibly trapping load in the predecessor if it is a critical edge. for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) { TerminatorInst *TI = PN.getIncomingBlock(Idx)->getTerminator(); Value *InVal = PN.getIncomingValue(Idx); // If the value is produced by the terminator of the predecessor (an // invoke) or it has side-effects, there is no valid place to put a load // in the predecessor. if (TI == InVal || TI->mayHaveSideEffects()) return false; // If the predecessor has a single successor, then the edge isn't // critical. if (TI->getNumSuccessors() == 1) continue; // If this pointer is always safe to load, or if we can prove that there // is already a load in the block, then we can move the load to the pred // block. if (isDereferenceablePointer(InVal, DL) || isSafeToLoadUnconditionally(InVal, TI, MaxAlign)) continue; return false; } return true; } static void speculatePHINodeLoads(PHINode &PN) { DEBUG(dbgs() << " original: " << PN << "\n"); Type *LoadTy = cast<PointerType>(PN.getType())->getElementType(); IRBuilderTy PHIBuilder(&PN); PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(), PN.getName() + ".sroa.speculated"); // Get the AA tags and alignment to use from one of the loads. It doesn't // matter which one we get and if any differ. LoadInst *SomeLoad = cast<LoadInst>(PN.user_back()); AAMDNodes AATags; SomeLoad->getAAMetadata(AATags); unsigned Align = SomeLoad->getAlignment(); // Rewrite all loads of the PN to use the new PHI. while (!PN.use_empty()) { LoadInst *LI = cast<LoadInst>(PN.user_back()); LI->replaceAllUsesWith(NewPN); LI->eraseFromParent(); } // Inject loads into all of the pred blocks. for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) { BasicBlock *Pred = PN.getIncomingBlock(Idx); TerminatorInst *TI = Pred->getTerminator(); Value *InVal = PN.getIncomingValue(Idx); IRBuilderTy PredBuilder(TI); LoadInst *Load = PredBuilder.CreateLoad( InVal, (PN.getName() + ".sroa.speculate.load." 
+ Pred->getName())); ++NumLoadsSpeculated; Load->setAlignment(Align); if (AATags) Load->setAAMetadata(AATags); NewPN->addIncoming(Load, Pred); } DEBUG(dbgs() << " speculated to: " << *NewPN << "\n"); PN.eraseFromParent(); } /// Select instructions that use an alloca and are subsequently loaded can be /// rewritten to load both input pointers and then select between the result, /// allowing the load of the alloca to be promoted. /// From this: /// %P2 = select i1 %cond, i32* %Alloca, i32* %Other /// %V = load i32* %P2 /// to: /// %V1 = load i32* %Alloca -> will be mem2reg'd /// %V2 = load i32* %Other /// %V = select i1 %cond, i32 %V1, i32 %V2 /// /// We can do this to a select if its only uses are loads and if the operand /// to the select can be loaded unconditionally. static bool isSafeSelectToSpeculate(SelectInst &SI) { Value *TValue = SI.getTrueValue(); Value *FValue = SI.getFalseValue(); const DataLayout &DL = SI.getModule()->getDataLayout(); bool TDerefable = isDereferenceablePointer(TValue, DL); bool FDerefable = isDereferenceablePointer(FValue, DL); for (User *U : SI.users()) { LoadInst *LI = dyn_cast<LoadInst>(U); if (!LI || !LI->isSimple()) return false; // Both operands to the select need to be dereferencable, either // absolutely (e.g. allocas) or at this point because we can see other // accesses to it. if (!TDerefable && !isSafeToLoadUnconditionally(TValue, LI, LI->getAlignment())) return false; if (!FDerefable && !isSafeToLoadUnconditionally(FValue, LI, LI->getAlignment())) return false; } return true; } static void speculateSelectInstLoads(SelectInst &SI) { DEBUG(dbgs() << " original: " << SI << "\n"); IRBuilderTy IRB(&SI); Value *TV = SI.getTrueValue(); Value *FV = SI.getFalseValue(); // Replace the loads of the select with a select of two loads. while (!SI.use_empty()) { LoadInst *LI = cast<LoadInst>(SI.user_back()); assert(LI->isSimple() && "We only speculate simple loads"); IRB.SetInsertPoint(LI); LoadInst *TL = IRB.CreateLoad(TV, LI->getName() + ".sroa.speculate.load.true"); LoadInst *FL = IRB.CreateLoad(FV, LI->getName() + ".sroa.speculate.load.false"); NumLoadsSpeculated += 2; // Transfer alignment and AA info if present. TL->setAlignment(LI->getAlignment()); FL->setAlignment(LI->getAlignment()); AAMDNodes Tags; LI->getAAMetadata(Tags); if (Tags) { TL->setAAMetadata(Tags); FL->setAAMetadata(Tags); } Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL, LI->getName() + ".sroa.speculated"); DEBUG(dbgs() << " speculated to: " << *V << "\n"); LI->replaceAllUsesWith(V); LI->eraseFromParent(); } SI.eraseFromParent(); } /// \brief Build a GEP out of a base pointer and indices. /// /// This will return the BasePtr if that is valid, or build a new GEP /// instruction using the IRBuilder if GEP-ing is needed. static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr, SmallVectorImpl<Value *> &Indices, Twine NamePrefix) { if (Indices.empty()) return BasePtr; // A single zero index is a no-op, so check for this and avoid building a GEP // in that case. if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero()) return BasePtr; return IRB.CreateInBoundsGEP(nullptr, BasePtr, Indices, NamePrefix + "sroa_idx"); } /// \brief Get a natural GEP off of the BasePtr walking through Ty toward /// TargetTy without changing the offset of the pointer. /// /// This routine assumes we've already established a properly offset GEP with /// Indices, and arrived at the Ty type. 
The goal is to continue to GEP with /// zero-indices down through type layers until we find one the same as /// TargetTy. If we can't find one with the same type, we at least try to use /// one with the same size. If none of that works, we just produce the GEP as /// indicated by Indices to have the correct offset. static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL, Value *BasePtr, Type *Ty, Type *TargetTy, SmallVectorImpl<Value *> &Indices, Twine NamePrefix) { if (Ty == TargetTy) return buildGEP(IRB, BasePtr, Indices, NamePrefix); // Pointer size to use for the indices. unsigned PtrSize = DL.getPointerTypeSizeInBits(BasePtr->getType()); // See if we can descend into a struct and locate a field with the correct // type. unsigned NumLayers = 0; Type *ElementTy = Ty; do { if (ElementTy->isPointerTy()) break; if (ArrayType *ArrayTy = dyn_cast<ArrayType>(ElementTy)) { ElementTy = ArrayTy->getElementType(); Indices.push_back(IRB.getIntN(PtrSize, 0)); } else if (VectorType *VectorTy = dyn_cast<VectorType>(ElementTy)) { ElementTy = VectorTy->getElementType(); Indices.push_back(IRB.getInt32(0)); } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) { if (STy->element_begin() == STy->element_end()) break; // Nothing left to descend into. ElementTy = *STy->element_begin(); Indices.push_back(IRB.getInt32(0)); } else { break; } ++NumLayers; } while (ElementTy != TargetTy); if (ElementTy != TargetTy) Indices.erase(Indices.end() - NumLayers, Indices.end()); return buildGEP(IRB, BasePtr, Indices, NamePrefix); } /// \brief Recursively compute indices for a natural GEP. /// /// This is the recursive step for getNaturalGEPWithOffset that walks down the /// element types adding appropriate indices for the GEP. static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr, Type *Ty, APInt &Offset, Type *TargetTy, SmallVectorImpl<Value *> &Indices, Twine NamePrefix) { if (Offset == 0) return getNaturalGEPWithType(IRB, DL, Ptr, Ty, TargetTy, Indices, NamePrefix); // We can't recurse through pointer types. if (Ty->isPointerTy()) return nullptr; // We try to analyze GEPs over vectors here, but note that these GEPs are // extremely poorly defined currently. The long-term goal is to remove GEPing // over a vector from the IR completely. if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) { unsigned ElementSizeInBits = DL.getTypeSizeInBits(VecTy->getScalarType()); if (ElementSizeInBits % 8 != 0) { // GEPs over non-multiple of 8 size vector elements are invalid. 
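      // (For example, a <8 x i1> vector has 1-bit elements, so byte-based
      // offsets cannot be mapped onto element indices.)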
return nullptr; } APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8); APInt NumSkippedElements = Offset.sdiv(ElementSize); if (NumSkippedElements.ugt(VecTy->getNumElements())) return nullptr; Offset -= NumSkippedElements * ElementSize; Indices.push_back(IRB.getInt(NumSkippedElements)); return getNaturalGEPRecursively(IRB, DL, Ptr, VecTy->getElementType(), Offset, TargetTy, Indices, NamePrefix); } if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) { Type *ElementTy = ArrTy->getElementType(); APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy)); APInt NumSkippedElements = Offset.sdiv(ElementSize); if (NumSkippedElements.ugt(ArrTy->getNumElements())) return nullptr; Offset -= NumSkippedElements * ElementSize; Indices.push_back(IRB.getInt(NumSkippedElements)); return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy, Indices, NamePrefix); } StructType *STy = dyn_cast<StructType>(Ty); if (!STy) return nullptr; const StructLayout *SL = DL.getStructLayout(STy); uint64_t StructOffset = Offset.getZExtValue(); if (StructOffset >= SL->getSizeInBytes()) return nullptr; unsigned Index = SL->getElementContainingOffset(StructOffset); Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index)); Type *ElementTy = STy->getElementType(Index); if (Offset.uge(DL.getTypeAllocSize(ElementTy))) return nullptr; // The offset points into alignment padding. Indices.push_back(IRB.getInt32(Index)); return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy, Indices, NamePrefix); } /// \brief Get a natural GEP from a base pointer to a particular offset and /// resulting in a particular type. /// /// The goal is to produce a "natural" looking GEP that works with the existing /// composite types to arrive at the appropriate offset and element type for /// a pointer. TargetTy is the element type the returned GEP should point-to if /// possible. We recurse by decreasing Offset, adding the appropriate index to /// Indices, and setting Ty to the result subtype. /// /// If no natural GEP can be constructed, this function returns null. static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr, APInt Offset, Type *TargetTy, SmallVectorImpl<Value *> &Indices, Twine NamePrefix) { PointerType *Ty = cast<PointerType>(Ptr->getType()); // Don't consider any GEPs through an i8* as natural unless the TargetTy is // an i8. if (Ty == IRB.getInt8PtrTy(Ty->getAddressSpace()) && TargetTy->isIntegerTy(8)) return nullptr; Type *ElementTy = Ty->getElementType(); if (!ElementTy->isSized()) return nullptr; // We can't GEP through an unsized element. APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy)); if (ElementSize == 0) return nullptr; // Zero-length arrays can't help us build a natural GEP. APInt NumSkippedElements = Offset.sdiv(ElementSize); Offset -= NumSkippedElements * ElementSize; Indices.push_back(IRB.getInt(NumSkippedElements)); return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy, Indices, NamePrefix); } /// \brief Compute an adjusted pointer from Ptr by Offset bytes where the /// resulting pointer has PointerTy. /// /// This tries very hard to compute a "natural" GEP which arrives at the offset /// and produces the pointer type desired. Where it cannot, it will try to use /// the natural GEP to arrive at the offset and bitcast to the type. Where that /// fails, it will try to use an existing i8* and GEP to the byte offset and /// bitcast to the type. 
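///
/// For example, asking for an i32* at byte offset 8 into an alloca of type
/// { i64, [4 x i32] } can be satisfied with the natural GEP indices 0, 1, 0
/// rather than an i8* GEP of 8 bytes followed by a bitcast.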
/// /// The strategy for finding the more natural GEPs is to peel off layers of the /// pointer, walking back through bit casts and GEPs, searching for a base /// pointer from which we can compute a natural GEP with the desired /// properties. The algorithm tries to fold as many constant indices into /// a single GEP as possible, thus making each GEP more independent of the /// surrounding code. static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr, APInt Offset, Type *PointerTy, Twine NamePrefix) { // Even though we don't look through PHI nodes, we could be called on an // instruction in an unreachable block, which may be on a cycle. SmallPtrSet<Value *, 4> Visited; Visited.insert(Ptr); SmallVector<Value *, 4> Indices; // We may end up computing an offset pointer that has the wrong type. If we // never are able to compute one directly that has the correct type, we'll // fall back to it, so keep it and the base it was computed from around here. Value *OffsetPtr = nullptr; Value *OffsetBasePtr; // Remember any i8 pointer we come across to re-use if we need to do a raw // byte offset. Value *Int8Ptr = nullptr; APInt Int8PtrOffset(Offset.getBitWidth(), 0); Type *TargetTy = PointerTy->getPointerElementType(); do { // First fold any existing GEPs into the offset. while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) { APInt GEPOffset(Offset.getBitWidth(), 0); if (!GEP->accumulateConstantOffset(DL, GEPOffset)) break; Offset += GEPOffset; Ptr = GEP->getPointerOperand(); if (!Visited.insert(Ptr).second) break; } // See if we can perform a natural GEP here. Indices.clear(); if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy, Indices, NamePrefix)) { // If we have a new natural pointer at the offset, clear out any old // offset pointer we computed. Unless it is the base pointer or // a non-instruction, we built a GEP we don't need. Zap it. if (OffsetPtr && OffsetPtr != OffsetBasePtr) if (Instruction *I = dyn_cast<Instruction>(OffsetPtr)) { assert(I->use_empty() && "Built a GEP with uses some how!"); I->eraseFromParent(); } OffsetPtr = P; OffsetBasePtr = Ptr; // If we also found a pointer of the right type, we're done. if (P->getType() == PointerTy) return P; } // Stash this pointer if we've found an i8*. if (Ptr->getType()->isIntegerTy(8)) { Int8Ptr = Ptr; Int8PtrOffset = Offset; } // Peel off a layer of the pointer and update the offset appropriately. if (Operator::getOpcode(Ptr) == Instruction::BitCast) { Ptr = cast<Operator>(Ptr)->getOperand(0); } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) { if (GA->mayBeOverridden()) break; Ptr = GA->getAliasee(); } else { break; } assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!"); } while (Visited.insert(Ptr).second); if (!OffsetPtr) { if (!Int8Ptr) { Int8Ptr = IRB.CreateBitCast( Ptr, IRB.getInt8PtrTy(PointerTy->getPointerAddressSpace()), NamePrefix + "sroa_raw_cast"); Int8PtrOffset = Offset; } OffsetPtr = Int8PtrOffset == 0 ? Int8Ptr : IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Int8Ptr, IRB.getInt(Int8PtrOffset), NamePrefix + "sroa_raw_idx"); } Ptr = OffsetPtr; // On the off chance we were targeting i8*, guard the bitcast here. if (Ptr->getType() != PointerTy) Ptr = IRB.CreateBitCast(Ptr, PointerTy, NamePrefix + "sroa_cast"); return Ptr; } /// \brief Compute the adjusted alignment for a load or store from an offset. 
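///
/// For example, an i32 load with 4-byte alignment that is rewritten to start
/// at an offset of 6 bytes can only be guaranteed MinAlign(4, 6) == 2 bytes
/// of alignment.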
static unsigned getAdjustedAlignment(Instruction *I, uint64_t Offset, const DataLayout &DL) { unsigned Alignment; Type *Ty; if (auto *LI = dyn_cast<LoadInst>(I)) { Alignment = LI->getAlignment(); Ty = LI->getType(); } else if (auto *SI = dyn_cast<StoreInst>(I)) { Alignment = SI->getAlignment(); Ty = SI->getValueOperand()->getType(); } else { llvm_unreachable("Only loads and stores are allowed!"); } if (!Alignment) Alignment = DL.getABITypeAlignment(Ty); return MinAlign(Alignment, Offset); } /// \brief Test whether we can convert a value from the old to the new type. /// /// This predicate should be used to guard calls to convertValue in order to /// ensure that we only try to convert viable values. The strategy is that we /// will peel off single element struct and array wrappings to get to an /// underlying value, and convert that value. static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) { if (OldTy == NewTy) return true; // For integer types, we can't handle any bit-width differences. This would // break both vector conversions with extension and introduce endianness // issues when in conjunction with loads and stores. if (isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) { assert(cast<IntegerType>(OldTy)->getBitWidth() != cast<IntegerType>(NewTy)->getBitWidth() && "We can't have the same bitwidth for different int types"); return false; } if (DL.getTypeSizeInBits(NewTy) != DL.getTypeSizeInBits(OldTy)) return false; if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType()) return false; // We can convert pointers to integers and vice-versa. Same for vectors // of pointers and integers. OldTy = OldTy->getScalarType(); NewTy = NewTy->getScalarType(); if (NewTy->isPointerTy() || OldTy->isPointerTy()) { if (NewTy->isPointerTy() && OldTy->isPointerTy()) return true; if (NewTy->isIntegerTy() || OldTy->isIntegerTy()) return true; return false; } return true; } /// \brief Generic routine to convert an SSA value to a value of a different /// type. /// /// This will try various different casting techniques, such as bitcasts, /// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test /// two types for viability with this routine. static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V, Type *NewTy) { Type *OldTy = V->getType(); assert(canConvertValue(DL, OldTy, NewTy) && "Value not convertable to type"); if (OldTy == NewTy) return V; assert(!(isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) && "Integer types must be the exact same to convert."); // See if we need inttoptr for this type pair. A cast involving both scalars // and vectors requires and additional bitcast. if (OldTy->getScalarType()->isIntegerTy() && NewTy->getScalarType()->isPointerTy()) { // Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8* if (OldTy->isVectorTy() && !NewTy->isVectorTy()) return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)), NewTy); // Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*> if (!OldTy->isVectorTy() && NewTy->isVectorTy()) return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)), NewTy); return IRB.CreateIntToPtr(V, NewTy); } // See if we need ptrtoint for this type pair. A cast involving both scalars // and vectors requires and additional bitcast. 
if (OldTy->getScalarType()->isPointerTy() && NewTy->getScalarType()->isIntegerTy()) { // Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128 if (OldTy->isVectorTy() && !NewTy->isVectorTy()) return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)), NewTy); // Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32> if (!OldTy->isVectorTy() && NewTy->isVectorTy()) return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)), NewTy); return IRB.CreatePtrToInt(V, NewTy); } return IRB.CreateBitCast(V, NewTy); } /// \brief Test whether the given slice use can be promoted to a vector. /// /// This function is called to test each entry in a partioning which is slated /// for a single slice. static bool isVectorPromotionViableForSlice(AllocaSlices::Partition &P, const Slice &S, VectorType *Ty, uint64_t ElementSize, const DataLayout &DL) { // First validate the slice offsets. uint64_t BeginOffset = std::max(S.beginOffset(), P.beginOffset()) - P.beginOffset(); uint64_t BeginIndex = BeginOffset / ElementSize; if (BeginIndex * ElementSize != BeginOffset || BeginIndex >= Ty->getNumElements()) return false; uint64_t EndOffset = std::min(S.endOffset(), P.endOffset()) - P.beginOffset(); uint64_t EndIndex = EndOffset / ElementSize; if (EndIndex * ElementSize != EndOffset || EndIndex > Ty->getNumElements()) return false; assert(EndIndex > BeginIndex && "Empty vector!"); uint64_t NumElements = EndIndex - BeginIndex; Type *SliceTy = (NumElements == 1) ? Ty->getElementType() : VectorType::get(Ty->getElementType(), NumElements); Type *SplitIntTy = Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8); Use *U = S.getUse(); if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) { if (MI->isVolatile()) return false; if (!S.isSplittable()) return false; // Skip any unsplittable intrinsics. } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) { if (II->getIntrinsicID() != Intrinsic::lifetime_start && II->getIntrinsicID() != Intrinsic::lifetime_end) return false; } else if (U->get()->getType()->getPointerElementType()->isStructTy()) { // Disable vector promotion when there are loads or stores of an FCA. return false; } else if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) { if (LI->isVolatile()) return false; Type *LTy = LI->getType(); if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) { assert(LTy->isIntegerTy()); LTy = SplitIntTy; } if (!canConvertValue(DL, SliceTy, LTy)) return false; } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) { if (SI->isVolatile()) return false; Type *STy = SI->getValueOperand()->getType(); if (P.beginOffset() > S.beginOffset() || P.endOffset() < S.endOffset()) { assert(STy->isIntegerTy()); STy = SplitIntTy; } if (!canConvertValue(DL, STy, SliceTy)) return false; } else { return false; } return true; } /// \brief Test whether the given alloca partitioning and range of slices can be /// promoted to a vector. /// /// This is a quick test to check whether we can rewrite a particular alloca /// partition (and its newly formed alloca) into a vector alloca with only /// whole-vector loads and stores such that it could be promoted to a vector /// SSA value. We only can ensure this for a limited set of operations, and we /// don't want to do the rewrites unless we are confident that the result will /// be promotable, so we have an early test here. static VectorType *isVectorPromotionViable(AllocaSlices::Partition &P, const DataLayout &DL) { // Collect the candidate types for vector-based promotion. 
Also track whether // we have different element types. SmallVector<VectorType *, 4> CandidateTys; Type *CommonEltTy = nullptr; bool HaveCommonEltTy = true; auto CheckCandidateType = [&](Type *Ty) { if (auto *VTy = dyn_cast<VectorType>(Ty)) { CandidateTys.push_back(VTy); if (!CommonEltTy) CommonEltTy = VTy->getElementType(); else if (CommonEltTy != VTy->getElementType()) HaveCommonEltTy = false; } }; // Consider any loads or stores that are the exact size of the slice. for (const Slice &S : P) if (S.beginOffset() == P.beginOffset() && S.endOffset() == P.endOffset()) { if (auto *LI = dyn_cast<LoadInst>(S.getUse()->getUser())) CheckCandidateType(LI->getType()); else if (auto *SI = dyn_cast<StoreInst>(S.getUse()->getUser())) CheckCandidateType(SI->getValueOperand()->getType()); } // If we didn't find a vector type, nothing to do here. if (CandidateTys.empty()) return nullptr; // Remove non-integer vector types if we had multiple common element types. // FIXME: It'd be nice to replace them with integer vector types, but we can't // do that until all the backends are known to produce good code for all // integer vector types. if (!HaveCommonEltTy) { CandidateTys.erase(std::remove_if(CandidateTys.begin(), CandidateTys.end(), [](VectorType *VTy) { return !VTy->getElementType()->isIntegerTy(); }), CandidateTys.end()); // If there were no integer vector types, give up. if (CandidateTys.empty()) return nullptr; // Rank the remaining candidate vector types. This is easy because we know // they're all integer vectors. We sort by ascending number of elements. auto RankVectorTypes = [&DL](VectorType *RHSTy, VectorType *LHSTy) { assert(DL.getTypeSizeInBits(RHSTy) == DL.getTypeSizeInBits(LHSTy) && "Cannot have vector types of different sizes!"); assert(RHSTy->getElementType()->isIntegerTy() && "All non-integer types eliminated!"); assert(LHSTy->getElementType()->isIntegerTy() && "All non-integer types eliminated!"); (void)DL;// HLSL Change - unused var return RHSTy->getNumElements() < LHSTy->getNumElements(); }; std::sort(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes); CandidateTys.erase( std::unique(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes), CandidateTys.end()); } else { // The only way to have the same element type in every vector type is to // have the same vector type. Check that and remove all but one. #ifndef NDEBUG for (VectorType *VTy : CandidateTys) { assert(VTy->getElementType() == CommonEltTy && "Unaccounted for element type!"); assert(VTy == CandidateTys[0] && "Different vector types with the same element type!"); } #endif CandidateTys.resize(1); } // Try each vector type, and return the one which works. auto CheckVectorTypeForPromotion = [&](VectorType *VTy) { uint64_t ElementSize = DL.getTypeSizeInBits(VTy->getElementType()); // While the definition of LLVM vectors is bitpacked, we don't support sizes // that aren't byte sized. if (ElementSize % 8) return false; assert((DL.getTypeSizeInBits(VTy) % 8) == 0 && "vector size not a multiple of element size?"); ElementSize /= 8; for (const Slice &S : P) if (!isVectorPromotionViableForSlice(P, S, VTy, ElementSize, DL)) return false; for (const Slice *S : P.splitSliceTails()) if (!isVectorPromotionViableForSlice(P, *S, VTy, ElementSize, DL)) return false; return true; }; for (VectorType *VTy : CandidateTys) if (CheckVectorTypeForPromotion(VTy)) return VTy; return nullptr; } /// \brief Test whether a slice of an alloca is valid for integer widening. 
/// /// This implements the necessary checking for the \c isIntegerWideningViable /// test below on a single slice of the alloca. static bool isIntegerWideningViableForSlice(const Slice &S, uint64_t AllocBeginOffset, Type *AllocaTy, const DataLayout &DL, bool &WholeAllocaOp) { uint64_t Size = DL.getTypeStoreSize(AllocaTy); uint64_t RelBegin = S.beginOffset() - AllocBeginOffset; uint64_t RelEnd = S.endOffset() - AllocBeginOffset; // We can't reasonably handle cases where the load or store extends past // the end of the aloca's type and into its padding. if (RelEnd > Size) return false; Use *U = S.getUse(); if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) { if (LI->isVolatile()) return false; // We can't handle loads that extend past the allocated memory. if (DL.getTypeStoreSize(LI->getType()) > Size) return false; // Note that we don't count vector loads or stores as whole-alloca // operations which enable integer widening because we would prefer to use // vector widening instead. if (!isa<VectorType>(LI->getType()) && RelBegin == 0 && RelEnd == Size) WholeAllocaOp = true; if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) { if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy)) return false; } else if (RelBegin != 0 || RelEnd != Size || !canConvertValue(DL, AllocaTy, LI->getType())) { // Non-integer loads need to be convertible from the alloca type so that // they are promotable. return false; } } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) { Type *ValueTy = SI->getValueOperand()->getType(); if (SI->isVolatile()) return false; // We can't handle stores that extend past the allocated memory. if (DL.getTypeStoreSize(ValueTy) > Size) return false; // Note that we don't count vector loads or stores as whole-alloca // operations which enable integer widening because we would prefer to use // vector widening instead. if (!isa<VectorType>(ValueTy) && RelBegin == 0 && RelEnd == Size) WholeAllocaOp = true; if (IntegerType *ITy = dyn_cast<IntegerType>(ValueTy)) { if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy)) return false; } else if (RelBegin != 0 || RelEnd != Size || !canConvertValue(DL, ValueTy, AllocaTy)) { // Non-integer stores need to be convertible to the alloca type so that // they are promotable. return false; } } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) { if (MI->isVolatile() || !isa<Constant>(MI->getLength())) return false; if (!S.isSplittable()) return false; // Skip any unsplittable intrinsics. } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) { if (II->getIntrinsicID() != Intrinsic::lifetime_start && II->getIntrinsicID() != Intrinsic::lifetime_end) return false; } else { return false; } return true; } /// \brief Test whether the given alloca partition's integer operations can be /// widened to promotable ones. /// /// This is a quick test to check whether we can rewrite the integer loads and /// stores to a particular alloca into wider loads and stores and be able to /// promote the resulting alloca. static bool isIntegerWideningViable(AllocaSlices::Partition &P, Type *AllocaTy, const DataLayout &DL) { uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy); // Don't create integer types larger than the maximum bitwidth. if (SizeInBits > IntegerType::MAX_INT_BITS) return false; // Don't try to handle allocas with bit-padding. 
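  // (For example, an i20 alloca has 20 value bits but a 24-bit store size, so
  // widening to a "whole alloca" integer would leave the 4 padding bits
  // uncovered.)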
if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy)) return false; // We need to ensure that an integer type with the appropriate bitwidth can // be converted to the alloca type, whatever that is. We don't want to force // the alloca itself to have an integer type if there is a more suitable one. Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits); if (!canConvertValue(DL, AllocaTy, IntTy) || !canConvertValue(DL, IntTy, AllocaTy)) return false; // While examining uses, we ensure that the alloca has a covering load or // store. We don't want to widen the integer operations only to fail to // promote due to some other unsplittable entry (which we may make splittable // later). However, if there are only splittable uses, go ahead and assume // that we cover the alloca. // FIXME: We shouldn't consider split slices that happen to start in the // partition here... bool WholeAllocaOp = P.begin() != P.end() ? false : DL.isLegalInteger(SizeInBits); for (const Slice &S : P) if (!isIntegerWideningViableForSlice(S, P.beginOffset(), AllocaTy, DL, WholeAllocaOp)) return false; for (const Slice *S : P.splitSliceTails()) if (!isIntegerWideningViableForSlice(*S, P.beginOffset(), AllocaTy, DL, WholeAllocaOp)) return false; return WholeAllocaOp; } static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V, IntegerType *Ty, uint64_t Offset, const Twine &Name) { DEBUG(dbgs() << " start: " << *V << "\n"); IntegerType *IntTy = cast<IntegerType>(V->getType()); assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) && "Element extends past full value"); uint64_t ShAmt = 8 * Offset; if (DL.isBigEndian()) ShAmt = 8 * (DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset); if (ShAmt) { V = IRB.CreateLShr(V, ShAmt, Name + ".shift"); DEBUG(dbgs() << " shifted: " << *V << "\n"); } assert(Ty->getBitWidth() <= IntTy->getBitWidth() && "Cannot extract to a larger integer!"); if (Ty != IntTy) { V = IRB.CreateTrunc(V, Ty, Name + ".trunc"); DEBUG(dbgs() << " trunced: " << *V << "\n"); } return V; } static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old, Value *V, uint64_t Offset, const Twine &Name) { IntegerType *IntTy = cast<IntegerType>(Old->getType()); IntegerType *Ty = cast<IntegerType>(V->getType()); assert(Ty->getBitWidth() <= IntTy->getBitWidth() && "Cannot insert a larger integer!"); DEBUG(dbgs() << " start: " << *V << "\n"); if (Ty != IntTy) { V = IRB.CreateZExt(V, IntTy, Name + ".ext"); DEBUG(dbgs() << " extended: " << *V << "\n"); } assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) && "Element store outside of alloca store"); uint64_t ShAmt = 8 * Offset; if (DL.isBigEndian()) ShAmt = 8 * (DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset); if (ShAmt) { V = IRB.CreateShl(V, ShAmt, Name + ".shift"); DEBUG(dbgs() << " shifted: " << *V << "\n"); } if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) { APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt); Old = IRB.CreateAnd(Old, Mask, Name + ".mask"); DEBUG(dbgs() << " masked: " << *Old << "\n"); V = IRB.CreateOr(Old, V, Name + ".insert"); DEBUG(dbgs() << " inserted: " << *V << "\n"); } return V; } static Value *extractVector(IRBuilderTy &IRB, Value *V, unsigned BeginIndex, unsigned EndIndex, const Twine &Name) { VectorType *VecTy = cast<VectorType>(V->getType()); unsigned NumElements = EndIndex - BeginIndex; assert(NumElements <= VecTy->getNumElements() && "Too many elements!"); if (NumElements == VecTy->getNumElements()) return V; if 
(NumElements == 1) { V = IRB.CreateExtractElement(V, IRB.getInt32(BeginIndex), Name + ".extract"); DEBUG(dbgs() << " extract: " << *V << "\n"); return V; } SmallVector<Constant *, 8> Mask; Mask.reserve(NumElements); for (unsigned i = BeginIndex; i != EndIndex; ++i) Mask.push_back(IRB.getInt32(i)); V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()), ConstantVector::get(Mask), Name + ".extract"); DEBUG(dbgs() << " shuffle: " << *V << "\n"); return V; } static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V, unsigned BeginIndex, const Twine &Name) { VectorType *VecTy = cast<VectorType>(Old->getType()); assert(VecTy && "Can only insert a vector into a vector"); VectorType *Ty = dyn_cast<VectorType>(V->getType()); if (!Ty) { // Single element to insert. V = IRB.CreateInsertElement(Old, V, IRB.getInt32(BeginIndex), Name + ".insert"); DEBUG(dbgs() << " insert: " << *V << "\n"); return V; } assert(Ty->getNumElements() <= VecTy->getNumElements() && "Too many elements!"); if (Ty->getNumElements() == VecTy->getNumElements()) { assert(V->getType() == VecTy && "Vector type mismatch"); return V; } unsigned EndIndex = BeginIndex + Ty->getNumElements(); // When inserting a smaller vector into the larger to store, we first // use a shuffle vector to widen it with undef elements, and then // a second shuffle vector to select between the loaded vector and the // incoming vector. SmallVector<Constant *, 8> Mask; Mask.reserve(VecTy->getNumElements()); for (unsigned i = 0; i != VecTy->getNumElements(); ++i) if (i >= BeginIndex && i < EndIndex) Mask.push_back(IRB.getInt32(i - BeginIndex)); else Mask.push_back(UndefValue::get(IRB.getInt32Ty())); V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()), ConstantVector::get(Mask), Name + ".expand"); DEBUG(dbgs() << " shuffle: " << *V << "\n"); Mask.clear(); for (unsigned i = 0; i != VecTy->getNumElements(); ++i) Mask.push_back(IRB.getInt1(i >= BeginIndex && i < EndIndex)); V = IRB.CreateSelect(ConstantVector::get(Mask), V, Old, Name + "blend"); DEBUG(dbgs() << " blend: " << *V << "\n"); return V; } namespace { /// \brief Visitor to rewrite instructions using a particular slice of an alloca /// to use a new alloca. /// /// Also implements the rewriting to vector-based accesses when the partition /// passes the isVectorPromotionViable predicate. Most of the rewriting logic /// lives here. class AllocaSliceRewriter : public InstVisitor<AllocaSliceRewriter, bool> { // Befriend the base class so it can delegate to private visit methods. friend class llvm::InstVisitor<AllocaSliceRewriter, bool>; typedef llvm::InstVisitor<AllocaSliceRewriter, bool> Base; const DataLayout &DL; AllocaSlices &AS; SROA &Pass; AllocaInst &OldAI, &NewAI; const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset; Type *NewAllocaTy; // This is a convenience and flag variable that will be null unless the new // alloca's integer operations should be widened to this integer type due to // passing isIntegerWideningViable above. If it is non-null, the desired // integer type will be stored here for easy access during rewriting. IntegerType *IntTy; // If we are rewriting an alloca partition which can be written as pure // vector operations, we stash extra information here. When VecTy is // non-null, we have some strict guarantees about the rewritten alloca: // - The new alloca is exactly the size of the vector type here. // - The accesses all either map to the entire vector or to a single // element.
// - The set of accessing instructions is only one of those handled above // in isVectorPromotionViable. Generally these are the same access kinds // which are promotable via mem2reg. VectorType *VecTy; Type *ElementTy; uint64_t ElementSize; // The original offset of the slice currently being rewritten relative to // the original alloca. uint64_t BeginOffset, EndOffset; // The new offsets of the slice currently being rewritten relative to the // original alloca. uint64_t NewBeginOffset, NewEndOffset; uint64_t SliceSize; bool IsSplittable; bool IsSplit; Use *OldUse; Instruction *OldPtr; // Track post-rewrite users which are PHI nodes and Selects. SmallPtrSetImpl<PHINode *> &PHIUsers; SmallPtrSetImpl<SelectInst *> &SelectUsers; // Utility IR builder, whose name prefix is setup for each visited use, and // the insertion point is set to point to the user. IRBuilderTy IRB; public: AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &AS, SROA &Pass, AllocaInst &OldAI, AllocaInst &NewAI, uint64_t NewAllocaBeginOffset, uint64_t NewAllocaEndOffset, bool IsIntegerPromotable, VectorType *PromotableVecTy, SmallPtrSetImpl<PHINode *> &PHIUsers, SmallPtrSetImpl<SelectInst *> &SelectUsers) : DL(DL), AS(AS), Pass(Pass), OldAI(OldAI), NewAI(NewAI), NewAllocaBeginOffset(NewAllocaBeginOffset), NewAllocaEndOffset(NewAllocaEndOffset), NewAllocaTy(NewAI.getAllocatedType()), IntTy(IsIntegerPromotable ? Type::getIntNTy( NewAI.getContext(), DL.getTypeSizeInBits(NewAI.getAllocatedType())) : nullptr), VecTy(PromotableVecTy), ElementTy(VecTy ? VecTy->getElementType() : nullptr), ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy) / 8 : 0), BeginOffset(), EndOffset(), IsSplittable(), IsSplit(), OldUse(), OldPtr(), PHIUsers(PHIUsers), SelectUsers(SelectUsers), IRB(NewAI.getContext(), ConstantFolder()) { if (VecTy) { assert((DL.getTypeSizeInBits(ElementTy) % 8) == 0 && "Only multiple-of-8 sized vector elements are viable"); ++NumVectorized; } assert((!IntTy && !VecTy) || (IntTy && !VecTy) || (!IntTy && VecTy)); } bool visit(AllocaSlices::const_iterator I) { bool CanSROA = true; BeginOffset = I->beginOffset(); EndOffset = I->endOffset(); IsSplittable = I->isSplittable(); IsSplit = BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset; DEBUG(dbgs() << " rewriting " << (IsSplit ? "split " : "")); DEBUG(AS.printSlice(dbgs(), I, "")); DEBUG(dbgs() << "\n"); // Compute the intersecting offset range. assert(BeginOffset < NewAllocaEndOffset); assert(EndOffset > NewAllocaBeginOffset); NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset); NewEndOffset = std::min(EndOffset, NewAllocaEndOffset); SliceSize = NewEndOffset - NewBeginOffset; OldUse = I->getUse(); OldPtr = cast<Instruction>(OldUse->get()); Instruction *OldUserI = cast<Instruction>(OldUse->getUser()); IRB.SetInsertPoint(OldUserI); IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc()); IRB.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) + "."); CanSROA &= visit(cast<Instruction>(OldUse->getUser())); if (VecTy || IntTy) assert(CanSROA); return CanSROA; } private: // Make sure the other visit overloads are visible. using Base::visit; // Every instruction which can end up as a user must have a rewrite rule. bool visitInstruction(Instruction &I) { DEBUG(dbgs() << " !!!! Cannot rewrite: " << I << "\n"); llvm_unreachable("No rewrite rule for this instruction!"); } Value *getNewAllocaSlicePtr(IRBuilderTy &IRB, Type *PointerTy) { // Note that the offset computation can use BeginOffset or NewBeginOffset // interchangeably for unsplit slices. 
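// (For an unsplit slice the whole slice lies inside the new alloca's range,
// so NewBeginOffset == std::max(BeginOffset, NewAllocaBeginOffset) collapses
// to BeginOffset; the assert below records that invariant.)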
assert(IsSplit || BeginOffset == NewBeginOffset); uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; #ifndef NDEBUG StringRef OldName = OldPtr->getName(); // Skip through the last '.sroa.' component of the name. size_t LastSROAPrefix = OldName.rfind(".sroa."); if (LastSROAPrefix != StringRef::npos) { OldName = OldName.substr(LastSROAPrefix + strlen(".sroa.")); // Look for an SROA slice index. size_t IndexEnd = OldName.find_first_not_of("0123456789"); if (IndexEnd != StringRef::npos && OldName[IndexEnd] == '.') { // Strip the index and look for the offset. OldName = OldName.substr(IndexEnd + 1); size_t OffsetEnd = OldName.find_first_not_of("0123456789"); if (OffsetEnd != StringRef::npos && OldName[OffsetEnd] == '.') // Strip the offset. OldName = OldName.substr(OffsetEnd + 1); } } // Strip any SROA suffixes as well. OldName = OldName.substr(0, OldName.find(".sroa_")); #endif return getAdjustedPtr(IRB, DL, &NewAI, APInt(DL.getPointerSizeInBits(), Offset), PointerTy, #ifndef NDEBUG Twine(OldName) + "." #else Twine() #endif ); } /// \brief Compute suitable alignment to access this slice of the *new* /// alloca. /// /// You can optionally pass a type to this routine and if that type's ABI /// alignment is itself suitable, this will return zero. unsigned getSliceAlign(Type *Ty = nullptr) { unsigned NewAIAlign = NewAI.getAlignment(); if (!NewAIAlign) NewAIAlign = DL.getABITypeAlignment(NewAI.getAllocatedType()); unsigned Align = MinAlign(NewAIAlign, NewBeginOffset - NewAllocaBeginOffset); return (Ty && Align == DL.getABITypeAlignment(Ty)) ? 0 : Align; } unsigned getIndex(uint64_t Offset) { assert(VecTy && "Can only call getIndex when rewriting a vector"); uint64_t RelOffset = Offset - NewAllocaBeginOffset; assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds"); uint32_t Index = RelOffset / ElementSize; assert(Index * ElementSize == RelOffset); return Index; } void deleteIfTriviallyDead(Value *V) { Instruction *I = cast<Instruction>(V); if (isInstructionTriviallyDead(I)) Pass.DeadInsts.insert(I); } Value *rewriteVectorizedLoadInst() { unsigned BeginIndex = getIndex(NewBeginOffset); unsigned EndIndex = getIndex(NewEndOffset); assert(EndIndex > BeginIndex && "Empty vector!"); Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load"); return extractVector(IRB, V, BeginIndex, EndIndex, "vec"); } Value *rewriteIntegerLoad(LoadInst &LI) { assert(IntTy && "We cannot insert an integer to the alloca"); assert(!LI.isVolatile()); Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load"); V = convertValue(DL, IRB, V, IntTy); assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; if (Offset > 0 || NewEndOffset < NewAllocaEndOffset) V = extractInteger(DL, IRB, V, cast<IntegerType>(LI.getType()), Offset, "extract"); return V; } bool visitLoadInst(LoadInst &LI) { DEBUG(dbgs() << " original: " << LI << "\n"); Value *OldOp = LI.getOperand(0); assert(OldOp == OldPtr); Type *TargetTy = IsSplit ? 
Type::getIntNTy(LI.getContext(), SliceSize * 8) : LI.getType(); const bool IsLoadPastEnd = DL.getTypeStoreSize(TargetTy) > SliceSize; bool IsPtrAdjusted = false; Value *V; if (VecTy) { V = rewriteVectorizedLoadInst(); } else if (IntTy && LI.getType()->isIntegerTy()) { V = rewriteIntegerLoad(LI); } else if (NewBeginOffset == NewAllocaBeginOffset && NewEndOffset == NewAllocaEndOffset && (canConvertValue(DL, NewAllocaTy, TargetTy) || (IsLoadPastEnd && NewAllocaTy->isIntegerTy() && TargetTy->isIntegerTy()))) { LoadInst *NewLI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), LI.isVolatile(), LI.getName()); if (LI.isVolatile()) NewLI->setAtomic(LI.getOrdering(), LI.getSynchScope()); V = NewLI; // If this is an integer load past the end of the slice (which means the // bytes outside the slice are undef or this load is dead) just forcibly // fix the integer size with correct handling of endianness. if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy)) if (auto *TITy = dyn_cast<IntegerType>(TargetTy)) if (AITy->getBitWidth() < TITy->getBitWidth()) { V = IRB.CreateZExt(V, TITy, "load.ext"); if (DL.isBigEndian()) V = IRB.CreateShl(V, TITy->getBitWidth() - AITy->getBitWidth(), "endian_shift"); } } else { Type *LTy = TargetTy->getPointerTo(); LoadInst *NewLI = IRB.CreateAlignedLoad(getNewAllocaSlicePtr(IRB, LTy), getSliceAlign(TargetTy), LI.isVolatile(), LI.getName()); if (LI.isVolatile()) NewLI->setAtomic(LI.getOrdering(), LI.getSynchScope()); V = NewLI; IsPtrAdjusted = true; } V = convertValue(DL, IRB, V, TargetTy); if (IsSplit) { assert(!LI.isVolatile()); assert(LI.getType()->isIntegerTy() && "Only integer type loads and stores are split"); assert(SliceSize < DL.getTypeStoreSize(LI.getType()) && "Split load isn't smaller than original load"); assert(LI.getType()->getIntegerBitWidth() == DL.getTypeStoreSizeInBits(LI.getType()) && "Non-byte-multiple bit width"); // Move the insertion point just past the load so that we can refer to it. IRB.SetInsertPoint(std::next(BasicBlock::iterator(&LI))); // Create a placeholder value with the same type as LI to use as the // basis for the new value. This allows us to replace the uses of LI with // the computed value, and then replace the placeholder with LI, leaving // LI only used for this computation. Value *Placeholder = new LoadInst(UndefValue::get(LI.getType()->getPointerTo())); V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset - BeginOffset, "insert"); LI.replaceAllUsesWith(V); Placeholder->replaceAllUsesWith(&LI); delete Placeholder; } else { LI.replaceAllUsesWith(V); } Pass.DeadInsts.insert(&LI); deleteIfTriviallyDead(OldOp); DEBUG(dbgs() << " to: " << *V << "\n"); return !LI.isVolatile() && !IsPtrAdjusted; } bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp) { if (V->getType() != VecTy) { unsigned BeginIndex = getIndex(NewBeginOffset); unsigned EndIndex = getIndex(NewEndOffset); assert(EndIndex > BeginIndex && "Empty vector!"); unsigned NumElements = EndIndex - BeginIndex; assert(NumElements <= VecTy->getNumElements() && "Too many elements!"); Type *SliceTy = (NumElements == 1) ? ElementTy : VectorType::get(ElementTy, NumElements); if (V->getType() != SliceTy) V = convertValue(DL, IRB, V, SliceTy); // Mix in the existing elements. 
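// For example (hypothetical types): storing a <2 x float> into elements
// [1,3) of a <4 x float> alloca loads the old full vector and blends the two
// new lanes into it with insertVector, so a single full-width store suffices.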
Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load"); V = insertVector(IRB, Old, V, BeginIndex, "vec"); } StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment()); Pass.DeadInsts.insert(&SI); (void)Store; DEBUG(dbgs() << " to: " << *Store << "\n"); return true; } bool rewriteIntegerStore(Value *V, StoreInst &SI) { assert(IntTy && "We cannot extract an integer from the alloca"); assert(!SI.isVolatile()); if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) { Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload"); Old = convertValue(DL, IRB, Old, IntTy); assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); uint64_t Offset = BeginOffset - NewAllocaBeginOffset; V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset, "insert"); } V = convertValue(DL, IRB, V, NewAllocaTy); StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment()); Pass.DeadInsts.insert(&SI); (void)Store; DEBUG(dbgs() << " to: " << *Store << "\n"); return true; } bool visitStoreInst(StoreInst &SI) { DEBUG(dbgs() << " original: " << SI << "\n"); Value *OldOp = SI.getOperand(1); assert(OldOp == OldPtr); Value *V = SI.getValueOperand(); // Strip all inbounds GEPs and pointer casts to try to dig out any root // alloca that should be re-examined after promoting this alloca. if (V->getType()->isPointerTy()) if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets())) Pass.PostPromotionWorklist.insert(AI); if (SliceSize < DL.getTypeStoreSize(V->getType())) { assert(!SI.isVolatile()); assert(V->getType()->isIntegerTy() && "Only integer type loads and stores are split"); assert(V->getType()->getIntegerBitWidth() == DL.getTypeStoreSizeInBits(V->getType()) && "Non-byte-multiple bit width"); IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), SliceSize * 8); V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset - BeginOffset, "extract"); } if (VecTy) return rewriteVectorizedStoreInst(V, SI, OldOp); if (IntTy && V->getType()->isIntegerTy()) return rewriteIntegerStore(V, SI); const bool IsStorePastEnd = DL.getTypeStoreSize(V->getType()) > SliceSize; StoreInst *NewSI; if (NewBeginOffset == NewAllocaBeginOffset && NewEndOffset == NewAllocaEndOffset && (canConvertValue(DL, V->getType(), NewAllocaTy) || (IsStorePastEnd && NewAllocaTy->isIntegerTy() && V->getType()->isIntegerTy()))) { // If this is an integer store past the end of slice (and thus the bytes // past that point are irrelevant or this is unreachable), truncate the // value prior to storing. if (auto *VITy = dyn_cast<IntegerType>(V->getType())) if (auto *AITy = dyn_cast<IntegerType>(NewAllocaTy)) if (VITy->getBitWidth() > AITy->getBitWidth()) { if (DL.isBigEndian()) V = IRB.CreateLShr(V, VITy->getBitWidth() - AITy->getBitWidth(), "endian_shift"); V = IRB.CreateTrunc(V, AITy, "load.trunc"); } V = convertValue(DL, IRB, V, NewAllocaTy); NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(), SI.isVolatile()); } else { Value *NewPtr = getNewAllocaSlicePtr(IRB, V->getType()->getPointerTo()); NewSI = IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(V->getType()), SI.isVolatile()); } if (SI.isVolatile()) NewSI->setAtomic(SI.getOrdering(), SI.getSynchScope()); Pass.DeadInsts.insert(&SI); deleteIfTriviallyDead(OldOp); DEBUG(dbgs() << " to: " << *NewSI << "\n"); return NewSI->getPointerOperand() == &NewAI && !SI.isVolatile(); } /// \brief Compute an integer value from splatting an i8 across the given /// number of bytes. 
/// /// Note that this routine assumes an i8 is a byte. If that isn't true, don't /// call this routine. /// FIXME: Heed the advice above. /// /// \param V The i8 value to splat. /// \param Size The number of bytes in the output (assuming i8 is one byte) Value *getIntegerSplat(Value *V, unsigned Size) { assert(Size > 0 && "Expected a positive number of bytes."); IntegerType *VTy = cast<IntegerType>(V->getType()); assert(VTy->getBitWidth() == 8 && "Expected an i8 value for the byte"); if (Size == 1) return V; Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size * 8); V = IRB.CreateMul( IRB.CreateZExt(V, SplatIntTy, "zext"), ConstantExpr::getUDiv( Constant::getAllOnesValue(SplatIntTy), ConstantExpr::getZExt(Constant::getAllOnesValue(V->getType()), SplatIntTy)), "isplat"); return V; } /// \brief Compute a vector splat for a given element value. Value *getVectorSplat(Value *V, unsigned NumElements) { V = IRB.CreateVectorSplat(NumElements, V, "vsplat"); DEBUG(dbgs() << " splat: " << *V << "\n"); return V; } bool visitMemSetInst(MemSetInst &II) { DEBUG(dbgs() << " original: " << II << "\n"); assert(II.getRawDest() == OldPtr); // If the memset has a variable size, it cannot be split, just adjust the // pointer to the new alloca. if (!isa<Constant>(II.getLength())) { assert(!IsSplit); assert(NewBeginOffset == BeginOffset); II.setDest(getNewAllocaSlicePtr(IRB, OldPtr->getType())); Type *CstTy = II.getAlignmentCst()->getType(); II.setAlignment(ConstantInt::get(CstTy, getSliceAlign())); deleteIfTriviallyDead(OldPtr); return false; } // Record this instruction for deletion. Pass.DeadInsts.insert(&II); Type *AllocaTy = NewAI.getAllocatedType(); Type *ScalarTy = AllocaTy->getScalarType(); // If this doesn't map cleanly onto the alloca type, and that type isn't // a single value type, just emit a memset. if (!VecTy && !IntTy && (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset || SliceSize != DL.getTypeStoreSize(AllocaTy) || !AllocaTy->isSingleValueType() || !DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy)) || DL.getTypeSizeInBits(ScalarTy) % 8 != 0)) { Type *SizeTy = II.getLength()->getType(); Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset); CallInst *New = IRB.CreateMemSet( getNewAllocaSlicePtr(IRB, OldPtr->getType()), II.getValue(), Size, getSliceAlign(), II.isVolatile()); (void)New; DEBUG(dbgs() << " to: " << *New << "\n"); return false; } // If we can represent this as a simple value, we have to build the actual // value to store, which requires expanding the byte present in memset to // a sensible representation for the alloca type. This is essentially // splatting the byte to a sufficiently wide integer, splatting it across // any desired vector width, and bitcasting to the final type. Value *V; if (VecTy) { // If this is a memset of a vectorized alloca, insert it. 
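// Illustrative example (hypothetical values): a memset of the byte 0xAB over
// elements [1,3) of a <4 x i32> alloca splats the byte to an i32 0xABABABAB
// (getIntegerSplat multiplies the zero-extended byte by all-ones/0xFF, i.e.
// 0x01010101), widens it to <2 x i32> with getVectorSplat, and blends the
// result into the loaded old vector value.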
assert(ElementTy == ScalarTy); unsigned BeginIndex = getIndex(NewBeginOffset); unsigned EndIndex = getIndex(NewEndOffset); assert(EndIndex > BeginIndex && "Empty vector!"); unsigned NumElements = EndIndex - BeginIndex; assert(NumElements <= VecTy->getNumElements() && "Too many elements!"); Value *Splat = getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ElementTy) / 8); Splat = convertValue(DL, IRB, Splat, ElementTy); if (NumElements > 1) Splat = getVectorSplat(Splat, NumElements); Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload"); V = insertVector(IRB, Old, Splat, BeginIndex, "vec"); } else if (IntTy) { // If this is a memset on an alloca where we can widen stores, insert the // set integer. assert(!II.isVolatile()); uint64_t Size = NewEndOffset - NewBeginOffset; V = getIntegerSplat(II.getValue(), Size); if (IntTy && (BeginOffset != NewAllocaBeginOffset || EndOffset != NewAllocaBeginOffset)) { Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload"); Old = convertValue(DL, IRB, Old, IntTy); uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; V = insertInteger(DL, IRB, Old, V, Offset, "insert"); } else { assert(V->getType() == IntTy && "Wrong type for an alloca wide integer!"); } V = convertValue(DL, IRB, V, AllocaTy); } else { // Established these invariants above. assert(NewBeginOffset == NewAllocaBeginOffset); assert(NewEndOffset == NewAllocaEndOffset); V = getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ScalarTy) / 8); if (VectorType *AllocaVecTy = dyn_cast<VectorType>(AllocaTy)) V = getVectorSplat(V, AllocaVecTy->getNumElements()); V = convertValue(DL, IRB, V, AllocaTy); } Value *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(), II.isVolatile()); (void)New; DEBUG(dbgs() << " to: " << *New << "\n"); return !II.isVolatile(); } bool visitMemTransferInst(MemTransferInst &II) { // Rewriting of memory transfer instructions can be a bit tricky. We break // them into two categories: split intrinsics and unsplit intrinsics. DEBUG(dbgs() << " original: " << II << "\n"); bool IsDest = &II.getRawDestUse() == OldUse; assert((IsDest && II.getRawDest() == OldPtr) || (!IsDest && II.getRawSource() == OldPtr)); unsigned SliceAlign = getSliceAlign(); // For unsplit intrinsics, we simply modify the source and destination // pointers in place. This isn't just an optimization, it is a matter of // correctness. With unsplit intrinsics we may be dealing with transfers // within a single alloca before SROA ran, or with transfers that have // a variable length. We may also be dealing with memmove instead of // memcpy, and so simply updating the pointers is the necessary for us to // update both source and dest of a single call. if (!IsSplittable) { Value *AdjustedPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); if (IsDest) II.setDest(AdjustedPtr); else II.setSource(AdjustedPtr); if (II.getAlignment() > SliceAlign) { Type *CstTy = II.getAlignmentCst()->getType(); II.setAlignment( ConstantInt::get(CstTy, MinAlign(II.getAlignment(), SliceAlign))); } DEBUG(dbgs() << " to: " << II << "\n"); deleteIfTriviallyDead(OldPtr); return false; } // For split transfer intrinsics we have an incredibly useful assurance: // the source and destination do not reside within the same alloca, and at // least one of them does not escape. This means that we can replace // memmove with memcpy, and we don't need to worry about all manner of // downsides to splitting and transforming the operations. 
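// For instance (hypothetical case): a split memcpy whose covered range maps
// exactly onto a promotable <4 x i32> partition is rewritten further below as
// a plain load of the source and a store to the destination rather than being
// kept as an intrinsic.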
// If this doesn't map cleanly onto the alloca type, and that type isn't // a single value type, just emit a memcpy. bool EmitMemCpy = !VecTy && !IntTy && (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset || SliceSize != DL.getTypeStoreSize(NewAI.getAllocatedType()) || !NewAI.getAllocatedType()->isSingleValueType()); // If we're just going to emit a memcpy, the alloca hasn't changed, and the // size hasn't been shrunk based on analysis of the viable range, this is // a no-op. if (EmitMemCpy && &OldAI == &NewAI) { // Ensure the start lines up. assert(NewBeginOffset == BeginOffset); // Rewrite the size as needed. if (NewEndOffset != EndOffset) II.setLength(ConstantInt::get(II.getLength()->getType(), NewEndOffset - NewBeginOffset)); return false; } // Record this instruction for deletion. Pass.DeadInsts.insert(&II); // Strip all inbounds GEPs and pointer casts to try to dig out any root // alloca that should be re-examined after rewriting this instruction. Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest(); if (AllocaInst *AI = dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets())) { assert(AI != &OldAI && AI != &NewAI && "Splittable transfers cannot reach the same alloca on both ends."); Pass.Worklist.insert(AI); } Type *OtherPtrTy = OtherPtr->getType(); unsigned OtherAS = OtherPtrTy->getPointerAddressSpace(); // Compute the relative offset for the other pointer within the transfer. unsigned IntPtrWidth = DL.getPointerSizeInBits(OtherAS); APInt OtherOffset(IntPtrWidth, NewBeginOffset - BeginOffset); unsigned OtherAlign = MinAlign(II.getAlignment() ? II.getAlignment() : 1, OtherOffset.zextOrTrunc(64).getZExtValue()); if (EmitMemCpy) { // Compute the other pointer, folding as much as possible to produce // a single, simple GEP in most cases. OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy, OtherPtr->getName() + "."); Value *OurPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); Type *SizeTy = II.getLength()->getType(); Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset); CallInst *New = IRB.CreateMemCpy( IsDest ? OurPtr : OtherPtr, IsDest ? OtherPtr : OurPtr, Size, MinAlign(SliceAlign, OtherAlign), II.isVolatile()); (void)New; DEBUG(dbgs() << " to: " << *New << "\n"); return false; } bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset && NewEndOffset == NewAllocaEndOffset; uint64_t Size = NewEndOffset - NewBeginOffset; unsigned BeginIndex = VecTy ? getIndex(NewBeginOffset) : 0; unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0; unsigned NumElements = EndIndex - BeginIndex; IntegerType *SubIntTy = IntTy ? Type::getIntNTy(IntTy->getContext(), Size * 8) : nullptr; // Reset the other pointer type to match the register type we're going to // use, but using the address space of the original other pointer. 
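// E.g. (hypothetical types): copying elements [2,4) of a <4 x float>
// partition uses OtherPtrTy == <2 x float>*, while copying 4 bytes of an
// i64-widened partition uses SubIntTy == i32 and OtherPtrTy == i32*.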
if (VecTy && !IsWholeAlloca) { if (NumElements == 1) OtherPtrTy = VecTy->getElementType(); else OtherPtrTy = VectorType::get(VecTy->getElementType(), NumElements); OtherPtrTy = OtherPtrTy->getPointerTo(OtherAS); } else if (IntTy && !IsWholeAlloca) { OtherPtrTy = SubIntTy->getPointerTo(OtherAS); } else { OtherPtrTy = NewAllocaTy->getPointerTo(OtherAS); } Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy, OtherPtr->getName() + "."); unsigned SrcAlign = OtherAlign; Value *DstPtr = &NewAI; unsigned DstAlign = SliceAlign; if (!IsDest) { std::swap(SrcPtr, DstPtr); std::swap(SrcAlign, DstAlign); } Value *Src; if (VecTy && !IsWholeAlloca && !IsDest) { Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load"); Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec"); } else if (IntTy && !IsWholeAlloca && !IsDest) { Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "load"); Src = convertValue(DL, IRB, Src, IntTy); uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract"); } else { Src = IRB.CreateAlignedLoad(SrcPtr, SrcAlign, II.isVolatile(), "copyload"); } if (VecTy && !IsWholeAlloca && IsDest) { Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload"); Src = insertVector(IRB, Old, Src, BeginIndex, "vec"); } else if (IntTy && !IsWholeAlloca && IsDest) { Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), "oldload"); Old = convertValue(DL, IRB, Old, IntTy); uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset; Src = insertInteger(DL, IRB, Old, Src, Offset, "insert"); Src = convertValue(DL, IRB, Src, NewAllocaTy); } StoreInst *Store = cast<StoreInst>( IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile())); (void)Store; DEBUG(dbgs() << " to: " << *Store << "\n"); return !II.isVolatile(); } bool visitIntrinsicInst(IntrinsicInst &II) { assert(II.getIntrinsicID() == Intrinsic::lifetime_start || II.getIntrinsicID() == Intrinsic::lifetime_end); DEBUG(dbgs() << " original: " << II << "\n"); assert(II.getArgOperand(1) == OldPtr); // Record this instruction for deletion. Pass.DeadInsts.insert(&II); ConstantInt *Size = ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()), NewEndOffset - NewBeginOffset); Value *Ptr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); Value *New; if (II.getIntrinsicID() == Intrinsic::lifetime_start) New = IRB.CreateLifetimeStart(Ptr, Size); else New = IRB.CreateLifetimeEnd(Ptr, Size); (void)New; DEBUG(dbgs() << " to: " << *New << "\n"); return true; } bool visitPHINode(PHINode &PN) { DEBUG(dbgs() << " original: " << PN << "\n"); assert(BeginOffset >= NewAllocaBeginOffset && "PHIs are unsplittable"); assert(EndOffset <= NewAllocaEndOffset && "PHIs are unsplittable"); // We would like to compute a new pointer in only one place, but have it be // as local as possible to the PHI. To do that, we re-use the location of // the old pointer, which necessarily must be in the right position to // dominate the PHI. IRBuilderTy PtrBuilder(IRB); if (isa<PHINode>(OldPtr)) PtrBuilder.SetInsertPoint(OldPtr->getParent()->getFirstInsertionPt()); else PtrBuilder.SetInsertPoint(OldPtr); PtrBuilder.SetCurrentDebugLocation(OldPtr->getDebugLoc()); Value *NewPtr = getNewAllocaSlicePtr(PtrBuilder, OldPtr->getType()); // Replace the operands which were using the old pointer. 
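// (std::replace is used because the old pointer can be the incoming value on
// several predecessor edges of the PHI; every matching operand must point at
// the new slice pointer.)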
std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr); DEBUG(dbgs() << " to: " << PN << "\n"); deleteIfTriviallyDead(OldPtr); // PHIs can't be promoted on their own, but often can be speculated. We // check the speculation outside of the rewriter so that we see the // fully-rewritten alloca. PHIUsers.insert(&PN); return true; } bool visitSelectInst(SelectInst &SI) { DEBUG(dbgs() << " original: " << SI << "\n"); assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) && "Pointer isn't an operand!"); assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable"); assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable"); Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType()); // Replace the operands which were using the old pointer. if (SI.getOperand(1) == OldPtr) SI.setOperand(1, NewPtr); if (SI.getOperand(2) == OldPtr) SI.setOperand(2, NewPtr); DEBUG(dbgs() << " to: " << SI << "\n"); deleteIfTriviallyDead(OldPtr); // Selects can't be promoted on their own, but often can be speculated. We // check the speculation outside of the rewriter so that we see the // fully-rewritten alloca. SelectUsers.insert(&SI); return true; } }; } namespace { /// \brief Visitor to rewrite aggregate loads and stores as scalar. /// /// This pass aggressively rewrites all aggregate loads and stores on /// a particular pointer (or any pointer derived from it which we can identify) /// with scalar loads and stores. class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> { // Befriend the base class so it can delegate to private visit methods. friend class llvm::InstVisitor<AggLoadStoreRewriter, bool>; const DataLayout &DL; const bool SkipHLSLMat; // HLSL Change - not sroa matrix type. /// Queue of pointer uses to analyze and potentially rewrite. SmallVector<Use *, 8> Queue; /// Set to prevent us from cycling with phi nodes and loops. SmallPtrSet<User *, 8> Visited; /// The current pointer use being rewritten. This is used to dig up the used /// value (as opposed to the user). Use *U; public: AggLoadStoreRewriter(const DataLayout &DL, const bool SkipHLSLMat) // HLSL Change - not sroa matrix type. : DL(DL), SkipHLSLMat(SkipHLSLMat) {} /// Rewrite loads and stores through a pointer and all pointers derived from /// it. bool rewrite(Instruction &I) { DEBUG(dbgs() << " Rewriting FCA loads and stores...\n"); enqueueUsers(I); bool Changed = false; while (!Queue.empty()) { U = Queue.pop_back_val(); Changed |= visit(cast<Instruction>(U->getUser())); } return Changed; } private: /// Enqueue all the users of the given instruction for further processing. /// This uses a set to de-duplicate users. void enqueueUsers(Instruction &I) { for (Use &U : I.uses()) if (Visited.insert(U.getUser()).second) Queue.push_back(&U); } // Conservative default is to not rewrite anything. bool visitInstruction(Instruction &I) { return false; } /// \brief Generic recursive split emission class. template <typename Derived> class OpSplitter { protected: /// The builder used to form new instructions. IRBuilderTy IRB; /// The indices which to be used with insert- or extractvalue to select the /// appropriate value within the aggregate. SmallVector<unsigned, 4> Indices; /// The indices to a GEP instruction which will move Ptr to the correct slot /// within the aggregate. SmallVector<Value *, 4> GEPIndices; /// The base pointer of the original op, used as a base for GEPing the /// split operations. 
Value *Ptr; /// Initialize the splitter with an insertion point, Ptr and start with a /// single zero GEP index. OpSplitter(Instruction *InsertionPoint, Value *Ptr) : IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr) {} public: /// \brief Generic recursive split emission routine. /// /// This method recursively splits an aggregate op (load or store) into /// scalar or vector ops. It splits recursively until it hits a single value /// and emits that single value operation via the template argument. /// /// The logic of this routine relies on GEPs and insertvalue and /// extractvalue all operating with the same fundamental index list, merely /// formatted differently (GEPs need actual values). /// /// \param Ty The type being split recursively into smaller ops. /// \param Agg The aggregate value being built up or stored, depending on /// whether this is splitting a load or a store respectively. void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) { if (Ty->isSingleValueType()) return static_cast<Derived *>(this)->emitFunc(Ty, Agg, Name); if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { unsigned OldSize = Indices.size(); (void)OldSize; for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size; ++Idx) { assert(Indices.size() == OldSize && "Did not return to the old size"); Indices.push_back(Idx); GEPIndices.push_back(IRB.getInt32(Idx)); emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx)); GEPIndices.pop_back(); Indices.pop_back(); } return; } if (StructType *STy = dyn_cast<StructType>(Ty)) { unsigned OldSize = Indices.size(); (void)OldSize; for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size; ++Idx) { assert(Indices.size() == OldSize && "Did not return to the old size"); Indices.push_back(Idx); GEPIndices.push_back(IRB.getInt32(Idx)); emitSplitOps(STy->getElementType(Idx), Agg, Name + "." + Twine(Idx)); GEPIndices.pop_back(); Indices.pop_back(); } return; } llvm_unreachable("Only arrays and structs are aggregate loadable types"); } }; struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> { LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr) : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr) {} /// Emit a leaf load of a single value. This is called at the leaves of the /// recursive emission to actually load values. void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) { assert(Ty->isSingleValueType()); // Load the single value and insert it using the indices. Value *GEP = IRB.CreateInBoundsGEP(nullptr, Ptr, GEPIndices, Name + ".gep"); Value *Load = IRB.CreateLoad(GEP, Name + ".load"); Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert"); DEBUG(dbgs() << " to: " << *Load << "\n"); } }; bool visitLoadInst(LoadInst &LI) { assert(LI.getPointerOperand() == *U); if (!LI.isSimple() || LI.getType()->isSingleValueType()) return false; // HLSL Change Begin - not sroa matrix type. if (SkipHLSLType(LI.getType(), SkipHLSLMat)) return false; // HLSL Change End. // We have an aggregate being loaded, split it apart. DEBUG(dbgs() << " original: " << LI << "\n"); LoadOpSplitter Splitter(&LI, *U); Value *V = UndefValue::get(LI.getType()); Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca"); LI.replaceAllUsesWith(V); LI.eraseFromParent(); return true; } struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> { StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr) : OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr) {} /// Emit a leaf store of a single value. 
This is called at the leaves of the /// recursive emission to actually produce stores. void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) { assert(Ty->isSingleValueType()); // Extract the single value and store it using the indices. Value *Store = IRB.CreateStore( IRB.CreateExtractValue(Agg, Indices, Name + ".extract"), IRB.CreateInBoundsGEP(nullptr, Ptr, GEPIndices, Name + ".gep")); (void)Store; DEBUG(dbgs() << " to: " << *Store << "\n"); } }; bool visitStoreInst(StoreInst &SI) { if (!SI.isSimple() || SI.getPointerOperand() != *U) return false; Value *V = SI.getValueOperand(); if (V->getType()->isSingleValueType()) return false; // HLSL Change Begin - not sroa matrix type. if (SkipHLSLType(V->getType(), SkipHLSLMat)) return false; // HLSL Change End. // We have an aggregate being stored, split it apart. DEBUG(dbgs() << " original: " << SI << "\n"); StoreOpSplitter Splitter(&SI, *U); Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca"); SI.eraseFromParent(); return true; } bool visitBitCastInst(BitCastInst &BC) { // HLSL Change Begin - not sroa matrix type. if (SkipHLSLType(BC.getType(), SkipHLSLMat) || SkipHLSLType(BC.getSrcTy(), SkipHLSLMat)) { return false; } // HLSL Change End. enqueueUsers(BC); return false; } bool visitGetElementPtrInst(GetElementPtrInst &GEPI) { enqueueUsers(GEPI); return false; } bool visitPHINode(PHINode &PN) { enqueueUsers(PN); return false; } bool visitSelectInst(SelectInst &SI) { enqueueUsers(SI); return false; } }; } /// \brief Strip aggregate type wrapping. /// /// This removes no-op aggregate types wrapping an underlying type. It will /// strip as many layers of types as it can without changing either the type /// size or the allocated size. static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) { if (Ty->isSingleValueType()) return Ty; uint64_t AllocSize = DL.getTypeAllocSize(Ty); uint64_t TypeSize = DL.getTypeSizeInBits(Ty); Type *InnerTy; if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) { InnerTy = ArrTy->getElementType(); } else if (StructType *STy = dyn_cast<StructType>(Ty)) { const StructLayout *SL = DL.getStructLayout(STy); unsigned Index = SL->getElementContainingOffset(0); InnerTy = STy->getElementType(Index); } else { return Ty; } if (AllocSize > DL.getTypeAllocSize(InnerTy) || TypeSize > DL.getTypeSizeInBits(InnerTy)) return Ty; return stripAggregateTypeWrapping(DL, InnerTy); } /// \brief Try to find a partition of the aggregate type passed in for a given /// offset and size. /// /// This recurses through the aggregate type and tries to compute a subtype /// based on the offset and size. When the offset and size span a sub-section /// of an array, it will even compute a new array type for that sub-section, /// and the same for structs. /// /// Note that this routine is very strict and tries to find a partition of the /// type which produces the *exact* right offset and size. It is not forgiving /// when the size or offset cause either end of type-based partition to be off. /// Also, this is a best-effort routine. It is reasonable to give up and not /// return a type if necessary. static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset, uint64_t Size) { if (Offset == 0 && DL.getTypeAllocSize(Ty) == Size) return stripAggregateTypeWrapping(DL, Ty); if (Offset > DL.getTypeAllocSize(Ty) || (DL.getTypeAllocSize(Ty) - Offset) < Size) return nullptr; if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) { // We can't partition pointers... 
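// For the non-pointer cases below, an illustrative request (hypothetical):
// Offset == 4, Size == 8 against [12 x i8] skips four one-byte elements and
// yields [8 x i8]; the same request against { i32, i32, i32 } yields the
// sub-struct { i32, i32 }.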
if (SeqTy->isPointerTy()) return nullptr; Type *ElementTy = SeqTy->getElementType(); uint64_t ElementSize = DL.getTypeAllocSize(ElementTy); uint64_t NumSkippedElements = Offset / ElementSize; if (ArrayType *ArrTy = dyn_cast<ArrayType>(SeqTy)) { if (NumSkippedElements >= ArrTy->getNumElements()) return nullptr; } else if (VectorType *VecTy = dyn_cast<VectorType>(SeqTy)) { if (NumSkippedElements >= VecTy->getNumElements()) return nullptr; } Offset -= NumSkippedElements * ElementSize; // First check if we need to recurse. if (Offset > 0 || Size < ElementSize) { // Bail if the partition ends in a different array element. if ((Offset + Size) > ElementSize) return nullptr; // Recurse through the element type trying to peel off offset bytes. return getTypePartition(DL, ElementTy, Offset, Size); } assert(Offset == 0); if (Size == ElementSize) return stripAggregateTypeWrapping(DL, ElementTy); assert(Size > ElementSize); uint64_t NumElements = Size / ElementSize; if (NumElements * ElementSize != Size) return nullptr; return ArrayType::get(ElementTy, NumElements); } StructType *STy = dyn_cast<StructType>(Ty); if (!STy) return nullptr; const StructLayout *SL = DL.getStructLayout(STy); if (Offset >= SL->getSizeInBytes()) return nullptr; uint64_t EndOffset = Offset + Size; if (EndOffset > SL->getSizeInBytes()) return nullptr; unsigned Index = SL->getElementContainingOffset(Offset); Offset -= SL->getElementOffset(Index); Type *ElementTy = STy->getElementType(Index); uint64_t ElementSize = DL.getTypeAllocSize(ElementTy); if (Offset >= ElementSize) return nullptr; // The offset points into alignment padding. // See if any partition must be contained by the element. if (Offset > 0 || Size < ElementSize) { if ((Offset + Size) > ElementSize) return nullptr; return getTypePartition(DL, ElementTy, Offset, Size); } assert(Offset == 0); if (Size == ElementSize) return stripAggregateTypeWrapping(DL, ElementTy); StructType::element_iterator EI = STy->element_begin() + Index, EE = STy->element_end(); if (EndOffset < SL->getSizeInBytes()) { unsigned EndIndex = SL->getElementContainingOffset(EndOffset); if (Index == EndIndex) return nullptr; // Within a single element and its padding. // Don't try to form "natural" types if the elements don't line up with the // expected size. // FIXME: We could potentially recurse down through the last element in the // sub-struct to find a natural end point. if (SL->getElementOffset(EndIndex) != EndOffset) return nullptr; assert(Index < EndIndex); EE = STy->element_begin() + EndIndex; } // Try to build up a sub-structure. StructType *SubTy = StructType::get(STy->getContext(), makeArrayRef(EI, EE), STy->isPacked()); const StructLayout *SubSL = DL.getStructLayout(SubTy); if (Size != SubSL->getSizeInBytes()) return nullptr; // The sub-struct doesn't have quite the size needed. return SubTy; } /// \brief Pre-split loads and stores to simplify rewriting. /// /// We want to break up the splittable load+store pairs as much as /// possible. This is important to do as a preprocessing step, as once we /// start rewriting the accesses to partitions of the alloca we lose the /// necessary information to correctly split apart paired loads and stores /// which both point into this alloca. 
The case to consider is something like /// the following: /// /// %a = alloca [12 x i8] /// %gep1 = getelementptr [12 x i8]* %a, i32 0, i32 0 /// %gep2 = getelementptr [12 x i8]* %a, i32 0, i32 4 /// %gep3 = getelementptr [12 x i8]* %a, i32 0, i32 8 /// %iptr1 = bitcast i8* %gep1 to i64* /// %iptr2 = bitcast i8* %gep2 to i64* /// %fptr1 = bitcast i8* %gep1 to float* /// %fptr2 = bitcast i8* %gep2 to float* /// %fptr3 = bitcast i8* %gep3 to float* /// store float 0.0, float* %fptr1 /// store float 1.0, float* %fptr2 /// %v = load i64* %iptr1 /// store i64 %v, i64* %iptr2 /// %f1 = load float* %fptr2 /// %f2 = load float* %fptr3 /// /// Here we want to form 3 partitions of the alloca, each 4 bytes large, and /// promote everything so we recover the 2 SSA values that should have been /// there all along. /// /// \returns true if any changes are made. bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) { DEBUG(dbgs() << "Pre-splitting loads and stores\n"); // Track the loads and stores which are candidates for pre-splitting here, in // the order they first appear during the partition scan. These give stable // iteration order and a basis for tracking which loads and stores we // actually split. SmallVector<LoadInst *, 4> Loads; SmallVector<StoreInst *, 4> Stores; // We need to accumulate the splits required of each load or store where we // can find them via a direct lookup. This is important to cross-check loads // and stores against each other. We also track the slice so that we can kill // all the slices that end up split. struct SplitOffsets { Slice *S; std::vector<uint64_t> Splits; }; SmallDenseMap<Instruction *, SplitOffsets, 8> SplitOffsetsMap; // Track loads out of this alloca which cannot, for any reason, be pre-split. // This is important as we also cannot pre-split stores of those loads! // FIXME: This is all pretty gross. It means that we can be more aggressive // in pre-splitting when the load feeding the store happens to come from // a separate alloca. Put another way, the effectiveness of SROA would be // decreased by a frontend which just concatenated all of its local allocas // into one big flat alloca. But defeating such patterns is exactly the job // SROA is tasked with! Sadly, to not have this discrepancy we would have // change store pre-splitting to actually force pre-splitting of the load // that feeds it *and all stores*. That makes pre-splitting much harder, but // maybe it would make it more principled? SmallPtrSet<LoadInst *, 8> UnsplittableLoads; DEBUG(dbgs() << " Searching for candidate loads and stores\n"); for (auto &P : AS.partitions()) { for (Slice &S : P) { Instruction *I = cast<Instruction>(S.getUse()->getUser()); if (!S.isSplittable() ||S.endOffset() <= P.endOffset()) { // If this was a load we have to track that it can't participate in any // pre-splitting! if (auto *LI = dyn_cast<LoadInst>(I)) UnsplittableLoads.insert(LI); continue; } assert(P.endOffset() > S.beginOffset() && "Empty or backwards partition!"); // Determine if this is a pre-splittable slice. if (auto *LI = dyn_cast<LoadInst>(I)) { assert(!LI->isVolatile() && "Cannot split volatile loads!"); // The load must be used exclusively to store into other pointers for // us to be able to arbitrarily pre-split it. The stores must also be // simple to avoid changing semantics. 
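// Illustrative example (hypothetical IR): a load whose only users are simple
// stores qualifies; a load that is also passed to a call (or otherwise used)
// fails the lambda below and is recorded in UnsplittableLoads.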
auto IsLoadSimplyStored = [](LoadInst *LI) { for (User *LU : LI->users()) { auto *SI = dyn_cast<StoreInst>(LU); if (!SI || !SI->isSimple()) return false; } return true; }; if (!IsLoadSimplyStored(LI)) { UnsplittableLoads.insert(LI); continue; } Loads.push_back(LI); } else if (auto *SI = dyn_cast<StoreInst>(S.getUse()->getUser())) { if (!SI || S.getUse() != &SI->getOperandUse(SI->getPointerOperandIndex())) continue; auto *StoredLoad = dyn_cast<LoadInst>(SI->getValueOperand()); if (!StoredLoad || !StoredLoad->isSimple()) continue; assert(!SI->isVolatile() && "Cannot split volatile stores!"); Stores.push_back(SI); } else { // Other uses cannot be pre-split. continue; } // Record the initial split. DEBUG(dbgs() << " Candidate: " << *I << "\n"); auto &Offsets = SplitOffsetsMap[I]; assert(Offsets.Splits.empty() && "Should not have splits the first time we see an instruction!"); Offsets.S = &S; Offsets.Splits.push_back(P.endOffset() - S.beginOffset()); } // Now scan the already split slices, and add a split for any of them which // we're going to pre-split. for (Slice *S : P.splitSliceTails()) { auto SplitOffsetsMapI = SplitOffsetsMap.find(cast<Instruction>(S->getUse()->getUser())); if (SplitOffsetsMapI == SplitOffsetsMap.end()) continue; auto &Offsets = SplitOffsetsMapI->second; assert(Offsets.S == S && "Found a mismatched slice!"); assert(!Offsets.Splits.empty() && "Cannot have an empty set of splits on the second partition!"); assert(Offsets.Splits.back() == P.beginOffset() - Offsets.S->beginOffset() && "Previous split does not end where this one begins!"); // Record each split. The last partition's end isn't needed as the size // of the slice dictates that. if (S->endOffset() > P.endOffset()) Offsets.Splits.push_back(P.endOffset() - Offsets.S->beginOffset()); } } // We may have split loads where some of their stores are split stores. For // such loads and stores, we can only pre-split them if their splits exactly // match relative to their starting offset. We have to verify this prior to // any rewriting. Stores.erase( std::remove_if(Stores.begin(), Stores.end(), [&UnsplittableLoads, &SplitOffsetsMap](StoreInst *SI) { // Lookup the load we are storing in our map of split // offsets. auto *LI = cast<LoadInst>(SI->getValueOperand()); // If it was completely unsplittable, then we're done, // and this store can't be pre-split. if (UnsplittableLoads.count(LI)) return true; auto LoadOffsetsI = SplitOffsetsMap.find(LI); if (LoadOffsetsI == SplitOffsetsMap.end()) return false; // Unrelated loads are definitely safe. auto &LoadOffsets = LoadOffsetsI->second; // Now lookup the store's offsets. auto &StoreOffsets = SplitOffsetsMap[SI]; // If the relative offsets of each split in the load and // store match exactly, then we can split them and we // don't need to remove them here. if (LoadOffsets.Splits == StoreOffsets.Splits) return false; DEBUG(dbgs() << " Mismatched splits for load and store:\n" << " " << *LI << "\n" << " " << *SI << "\n"); // We've found a store and load that we need to split // with mismatched relative splits. Just give up on them // and remove both instructions from our list of // candidates. UnsplittableLoads.insert(LI); return true; }), Stores.end()); // Now we have to go *back* through all the stores, because a later store may // have caused an earlier store's load to become unsplittable and if it is // unsplittable for the later store, then we can't rely on it being split in // the earlier store either.
Stores.erase(std::remove_if(Stores.begin(), Stores.end(), [&UnsplittableLoads](StoreInst *SI) { auto *LI = cast<LoadInst>(SI->getValueOperand()); return UnsplittableLoads.count(LI); }), Stores.end()); // Once we've established all the loads that can't be split for some reason, // filter any that made it into our list out. Loads.erase(std::remove_if(Loads.begin(), Loads.end(), [&UnsplittableLoads](LoadInst *LI) { return UnsplittableLoads.count(LI); }), Loads.end()); // If no loads or stores are left, there is no pre-splitting to be done for // this alloca. if (Loads.empty() && Stores.empty()) return false; // From here on, we can't fail and will be building new accesses, so rig up // an IR builder. IRBuilderTy IRB(&AI); // Collect the new slices which we will merge into the alloca slices. SmallVector<Slice, 4> NewSlices; // Track any allocas we end up splitting loads and stores for so we iterate // on them. SmallPtrSet<AllocaInst *, 4> ResplitPromotableAllocas; // At this point, we have collected all of the loads and stores we can // pre-split, and the specific splits needed for them. We actually do the // splitting in a specific order in order to handle when one of the loads in // the value operand to one of the stores. // // First, we rewrite all of the split loads, and just accumulate each split // load in a parallel structure. We also build the slices for them and append // them to the alloca slices. SmallDenseMap<LoadInst *, std::vector<LoadInst *>, 1> SplitLoadsMap; std::vector<LoadInst *> SplitLoads; const DataLayout &DL = AI.getModule()->getDataLayout(); for (LoadInst *LI : Loads) { SplitLoads.clear(); IntegerType *Ty = cast<IntegerType>(LI->getType()); uint64_t LoadSize = Ty->getBitWidth() / 8; assert(LoadSize > 0 && "Cannot have a zero-sized integer load!"); auto &Offsets = SplitOffsetsMap[LI]; assert(LoadSize == Offsets.S->endOffset() - Offsets.S->beginOffset() && "Slice size should always match load size exactly!"); uint64_t BaseOffset = Offsets.S->beginOffset(); assert(BaseOffset + LoadSize > BaseOffset && "Cannot represent alloca access size using 64-bit integers!"); Instruction *BasePtr = cast<Instruction>(LI->getPointerOperand()); IRB.SetInsertPoint(BasicBlock::iterator(LI)); DEBUG(dbgs() << " Splitting load: " << *LI << "\n"); uint64_t PartOffset = 0, PartSize = Offsets.Splits.front(); int Idx = 0, Size = Offsets.Splits.size(); for (;;) { auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8); auto *PartPtrTy = PartTy->getPointerTo(LI->getPointerAddressSpace()); LoadInst *PLoad = IRB.CreateAlignedLoad( getAdjustedPtr(IRB, DL, BasePtr, APInt(DL.getPointerSizeInBits(), PartOffset), PartPtrTy, BasePtr->getName() + "."), getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false, LI->getName()); // Append this load onto the list of split loads so we can find it later // to rewrite the stores. SplitLoads.push_back(PLoad); // Now build a new slice for the alloca. NewSlices.push_back( Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize, &PLoad->getOperandUse(PLoad->getPointerOperandIndex()), /*IsSplittable*/ false)); DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset() << ", " << NewSlices.back().endOffset() << "): " << *PLoad << "\n"); // See if we've handled all the splits. if (Idx >= Size) break; // Setup the next partition. PartOffset = Offsets.Splits[Idx]; ++Idx; PartSize = (Idx < Size ? 
Offsets.Splits[Idx] : LoadSize) - PartOffset; } // Now that we have the split loads, do the slow walk over all uses of the // load and rewrite them as split stores, or save the split loads to use // below if the store is going to be split there anyways. bool DeferredStores = false; for (User *LU : LI->users()) { StoreInst *SI = cast<StoreInst>(LU); if (!Stores.empty() && SplitOffsetsMap.count(SI)) { DeferredStores = true; DEBUG(dbgs() << " Deferred splitting of store: " << *SI << "\n"); continue; } Value *StoreBasePtr = SI->getPointerOperand(); IRB.SetInsertPoint(BasicBlock::iterator(SI)); DEBUG(dbgs() << " Splitting store of load: " << *SI << "\n"); for (int Idx = 0, Size = SplitLoads.size(); Idx < Size; ++Idx) { LoadInst *PLoad = SplitLoads[Idx]; uint64_t PartOffset = Idx == 0 ? 0 : Offsets.Splits[Idx - 1]; auto *PartPtrTy = PLoad->getType()->getPointerTo(SI->getPointerAddressSpace()); StoreInst *PStore = IRB.CreateAlignedStore( PLoad, getAdjustedPtr(IRB, DL, StoreBasePtr, APInt(DL.getPointerSizeInBits(), PartOffset), PartPtrTy, StoreBasePtr->getName() + "."), getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false); (void)PStore; DEBUG(dbgs() << " +" << PartOffset << ":" << *PStore << "\n"); } // We want to immediately iterate on any allocas impacted by splitting // this store, and we have to track any promotable alloca (indicated by // a direct store) as needing to be resplit because it is no longer // promotable. if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(StoreBasePtr)) { ResplitPromotableAllocas.insert(OtherAI); Worklist.insert(OtherAI); } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>( StoreBasePtr->stripInBoundsOffsets())) { Worklist.insert(OtherAI); } // Mark the original store as dead. DeadInsts.insert(SI); } // Save the split loads if there are deferred stores among the users. if (DeferredStores) SplitLoadsMap.insert(std::make_pair(LI, std::move(SplitLoads))); // Mark the original load as dead and kill the original slice. DeadInsts.insert(LI); Offsets.S->kill(); } // Second, we rewrite all of the split stores. At this point, we know that // all loads from this alloca have been split already. For stores of such // loads, we can simply look up the pre-existing split loads. For stores of // other loads, we split those loads first and then write split stores of // them. for (StoreInst *SI : Stores) { auto *LI = cast<LoadInst>(SI->getValueOperand()); IntegerType *Ty = cast<IntegerType>(LI->getType()); uint64_t StoreSize = Ty->getBitWidth() / 8; assert(StoreSize > 0 && "Cannot have a zero-sized integer store!"); auto &Offsets = SplitOffsetsMap[SI]; assert(StoreSize == Offsets.S->endOffset() - Offsets.S->beginOffset() && "Slice size should always match load size exactly!"); uint64_t BaseOffset = Offsets.S->beginOffset(); assert(BaseOffset + StoreSize > BaseOffset && "Cannot represent alloca access size using 64-bit integers!"); Value *LoadBasePtr = LI->getPointerOperand(); Instruction *StoreBasePtr = cast<Instruction>(SI->getPointerOperand()); DEBUG(dbgs() << " Splitting store: " << *SI << "\n"); // Check whether we have an already split load. 
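// (If the stored value is a load that was already pre-split in the first
// pass above, its pieces are found in SplitLoadsMap and reused; otherwise
// fresh split loads are emitted right before the original load.)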
auto SplitLoadsMapI = SplitLoadsMap.find(LI); std::vector<LoadInst *> *SplitLoads = nullptr; if (SplitLoadsMapI != SplitLoadsMap.end()) { SplitLoads = &SplitLoadsMapI->second; assert(SplitLoads->size() == Offsets.Splits.size() + 1 && "Too few split loads for the number of splits in the store!"); } else { DEBUG(dbgs() << " of load: " << *LI << "\n"); } uint64_t PartOffset = 0, PartSize = Offsets.Splits.front(); int Idx = 0, Size = Offsets.Splits.size(); for (;;) { auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8); auto *PartPtrTy = PartTy->getPointerTo(SI->getPointerAddressSpace()); // Either lookup a split load or create one. LoadInst *PLoad; if (SplitLoads) { PLoad = (*SplitLoads)[Idx]; } else { IRB.SetInsertPoint(BasicBlock::iterator(LI)); PLoad = IRB.CreateAlignedLoad( getAdjustedPtr(IRB, DL, LoadBasePtr, APInt(DL.getPointerSizeInBits(), PartOffset), PartPtrTy, LoadBasePtr->getName() + "."), getAdjustedAlignment(LI, PartOffset, DL), /*IsVolatile*/ false, LI->getName()); } // And store this partition. IRB.SetInsertPoint(BasicBlock::iterator(SI)); StoreInst *PStore = IRB.CreateAlignedStore( PLoad, getAdjustedPtr(IRB, DL, StoreBasePtr, APInt(DL.getPointerSizeInBits(), PartOffset), PartPtrTy, StoreBasePtr->getName() + "."), getAdjustedAlignment(SI, PartOffset, DL), /*IsVolatile*/ false); // Now build a new slice for the alloca. NewSlices.push_back( Slice(BaseOffset + PartOffset, BaseOffset + PartOffset + PartSize, &PStore->getOperandUse(PStore->getPointerOperandIndex()), /*IsSplittable*/ false)); DEBUG(dbgs() << " new slice [" << NewSlices.back().beginOffset() << ", " << NewSlices.back().endOffset() << "): " << *PStore << "\n"); if (!SplitLoads) { DEBUG(dbgs() << " of split load: " << *PLoad << "\n"); } // See if we've finished all the splits. if (Idx >= Size) break; // Setup the next partition. PartOffset = Offsets.Splits[Idx]; ++Idx; PartSize = (Idx < Size ? Offsets.Splits[Idx] : StoreSize) - PartOffset; } // We want to immediately iterate on any allocas impacted by splitting // this load, which is only relevant if it isn't a load of this alloca and // thus we didn't already split the loads above. We also have to keep track // of any promotable allocas we split loads on as they can no longer be // promoted. if (!SplitLoads) { if (AllocaInst *OtherAI = dyn_cast<AllocaInst>(LoadBasePtr)) { assert(OtherAI != &AI && "We can't re-split our own alloca!"); ResplitPromotableAllocas.insert(OtherAI); Worklist.insert(OtherAI); } else if (AllocaInst *OtherAI = dyn_cast<AllocaInst>( LoadBasePtr->stripInBoundsOffsets())) { assert(OtherAI != &AI && "We can't re-split our own alloca!"); Worklist.insert(OtherAI); } } // Mark the original store as dead now that we've split it up and kill its // slice. Note that we leave the original load in place unless this store // was its only use. It may in turn be split up if it is an alloca load // for some other alloca, but it may be a normal load. This may introduce // redundant loads, but where those can be merged the rest of the optimizer // should handle the merging, and this uncovers SSA splits which is more // important. In practice, the original loads will almost always be fully // split and removed eventually, and the splits will be merged by any // trivial CSE, including instcombine. if (LI->hasOneUse()) { assert(*LI->user_begin() == SI && "Single use isn't this store!"); DeadInsts.insert(LI); } DeadInsts.insert(SI); Offsets.S->kill(); } // Remove the killed slices that have been pre-split.
AS.erase(std::remove_if(AS.begin(), AS.end(), [](const Slice &S) { return S.isDead(); }), AS.end()); // Insert our new slices. This will sort and merge them into the sorted // sequence. AS.insert(NewSlices); DEBUG(dbgs() << " Pre-split slices:\n"); #ifndef NDEBUG for (auto I = AS.begin(), E = AS.end(); I != E; ++I) DEBUG(AS.print(dbgs(), I, " ")); #endif // Finally, don't try to promote any allocas that new require re-splitting. // They have already been added to the worklist above. PromotableAllocas.erase( std::remove_if( PromotableAllocas.begin(), PromotableAllocas.end(), [&](AllocaInst *AI) { return ResplitPromotableAllocas.count(AI); }), PromotableAllocas.end()); return true; } /// \brief Rewrite an alloca partition's users. /// /// This routine drives both of the rewriting goals of the SROA pass. It tries /// to rewrite uses of an alloca partition to be conducive for SSA value /// promotion. If the partition needs a new, more refined alloca, this will /// build that new alloca, preserving as much type information as possible, and /// rewrite the uses of the old alloca to point at the new one and have the /// appropriate new offsets. It also evaluates how successful the rewrite was /// at enabling promotion and if it was successful queues the alloca to be /// promoted. AllocaInst *SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS, AllocaSlices::Partition &P) { // Try to compute a friendly type for this partition of the alloca. This // won't always succeed, in which case we fall back to a legal integer type // or an i8 array of an appropriate size. Type *SliceTy = nullptr; const DataLayout &DL = AI.getModule()->getDataLayout(); if (Type *CommonUseTy = findCommonType(P.begin(), P.end(), P.endOffset())) if (DL.getTypeAllocSize(CommonUseTy) >= P.size()) SliceTy = CommonUseTy; if (!SliceTy) if (Type *TypePartitionTy = getTypePartition(DL, AI.getAllocatedType(), P.beginOffset(), P.size())) SliceTy = TypePartitionTy; if ((!SliceTy || (SliceTy->isArrayTy() && SliceTy->getArrayElementType()->isIntegerTy())) && DL.isLegalInteger(P.size() * 8)) SliceTy = Type::getIntNTy(*C, P.size() * 8); if (!SliceTy) SliceTy = ArrayType::get(Type::getInt8Ty(*C), P.size()); assert(DL.getTypeAllocSize(SliceTy) >= P.size()); bool IsIntegerPromotable = isIntegerWideningViable(P, SliceTy, DL); VectorType *VecTy = IsIntegerPromotable ? nullptr : isVectorPromotionViable(P, DL); if (VecTy) SliceTy = VecTy; // Check for the case where we're going to rewrite to a new alloca of the // exact same type as the original, and with the same access offsets. In that // case, re-use the existing alloca, but still run through the rewriter to // perform phi and select speculation. AllocaInst *NewAI; if (SliceTy == AI.getAllocatedType()) { assert(P.beginOffset() == 0 && "Non-zero begin offset but same alloca type"); NewAI = &AI; // FIXME: We should be able to bail at this point with "nothing changed". // FIXME: We might want to defer PHI speculation until after here. // FIXME: return nullptr; } else { unsigned Alignment = AI.getAlignment(); if (!Alignment) { // The minimum alignment which users can rely on when the explicit // alignment is omitted or zero is that required by the ABI for this // type. Alignment = DL.getABITypeAlignment(AI.getAllocatedType()); } Alignment = MinAlign(Alignment, P.beginOffset()); // If we will get at least this much alignment from the type alone, leave // the alloca's alignment unconstrained. 
if (Alignment <= DL.getABITypeAlignment(SliceTy)) Alignment = 0; NewAI = new AllocaInst( SliceTy, nullptr, Alignment, AI.getName() + ".sroa." + Twine(P.begin() - AS.begin()), &AI); ++NumNewAllocas; } DEBUG(dbgs() << "Rewriting alloca partition " << "[" << P.beginOffset() << "," << P.endOffset() << ") to: " << *NewAI << "\n"); // Track the high watermark on the worklist as it is only relevant for // promoted allocas. We will reset it to this point if the alloca is not in // fact scheduled for promotion. unsigned PPWOldSize = PostPromotionWorklist.size(); unsigned NumUses = 0; SmallPtrSet<PHINode *, 8> PHIUsers; SmallPtrSet<SelectInst *, 8> SelectUsers; AllocaSliceRewriter Rewriter(DL, AS, *this, AI, *NewAI, P.beginOffset(), P.endOffset(), IsIntegerPromotable, VecTy, PHIUsers, SelectUsers); bool Promotable = true; for (Slice *S : P.splitSliceTails()) { Promotable &= Rewriter.visit(S); ++NumUses; } for (Slice &S : P) { Promotable &= Rewriter.visit(&S); ++NumUses; } NumAllocaPartitionUses += NumUses; MaxUsesPerAllocaPartition = std::max<unsigned>(NumUses, MaxUsesPerAllocaPartition); // Now that we've processed all the slices in the new partition, check if any // PHIs or Selects would block promotion. for (SmallPtrSetImpl<PHINode *>::iterator I = PHIUsers.begin(), E = PHIUsers.end(); I != E; ++I) if (!isSafePHIToSpeculate(**I)) { Promotable = false; PHIUsers.clear(); SelectUsers.clear(); break; } for (SmallPtrSetImpl<SelectInst *>::iterator I = SelectUsers.begin(), E = SelectUsers.end(); I != E; ++I) if (!isSafeSelectToSpeculate(**I)) { Promotable = false; PHIUsers.clear(); SelectUsers.clear(); break; } if (Promotable) { if (PHIUsers.empty() && SelectUsers.empty()) { // Promote the alloca. PromotableAllocas.push_back(NewAI); } else { // If we have either PHIs or Selects to speculate, add them to those // worklists and re-queue the new alloca so that we promote in on the // next iteration. for (PHINode *PHIUser : PHIUsers) SpeculatablePHIs.insert(PHIUser); for (SelectInst *SelectUser : SelectUsers) SpeculatableSelects.insert(SelectUser); Worklist.insert(NewAI); } } else { // If we can't promote the alloca, iterate on it to check for new // refinements exposed by splitting the current alloca. Don't iterate on an // alloca which didn't actually change and didn't get promoted. if (NewAI != &AI) Worklist.insert(NewAI); // Drop any post-promotion work items if promotion didn't happen. while (PostPromotionWorklist.size() > PPWOldSize) PostPromotionWorklist.pop_back(); } return NewAI; } /// \brief Walks the slices of an alloca and form partitions based on them, /// rewriting each of their uses. bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) { if (AS.begin() == AS.end()) return false; unsigned NumPartitions = 0; bool Changed = false; const DataLayout &DL = AI.getModule()->getDataLayout(); // First try to pre-split loads and stores. Changed |= presplitLoadsAndStores(AI, AS); // Now that we have identified any pre-splitting opportunities, mark any // splittable (non-whole-alloca) loads and stores as unsplittable. If we fail // to split these during pre-splitting, we want to force them to be // rewritten into a partition. bool IsSorted = true; for (Slice &S : AS) { if (!S.isSplittable()) continue; // FIXME: We currently leave whole-alloca splittable loads and stores. This // used to be the only splittable loads and stores and we need to be // confident that the above handling of splittable loads and stores is // completely sufficient before we forcibly disable the remaining handling. 
if (S.beginOffset() == 0 && S.endOffset() >= DL.getTypeAllocSize(AI.getAllocatedType())) continue; if (isa<LoadInst>(S.getUse()->getUser()) || isa<StoreInst>(S.getUse()->getUser())) { S.makeUnsplittable(); IsSorted = false; } } if (!IsSorted) std::sort(AS.begin(), AS.end()); /// \brief Describes the allocas introduced by rewritePartition /// in order to migrate the debug info. struct Piece { AllocaInst *Alloca; uint64_t Offset; uint64_t Size; Piece(AllocaInst *AI, uint64_t O, uint64_t S) : Alloca(AI), Offset(O), Size(S) {} }; SmallVector<Piece, 4> Pieces; // Rewrite each partition. for (auto &P : AS.partitions()) { if (AllocaInst *NewAI = rewritePartition(AI, AS, P)) { Changed = true; if (NewAI != &AI) { uint64_t SizeOfByte = 8; uint64_t AllocaSize = DL.getTypeSizeInBits(NewAI->getAllocatedType()); // Don't include any padding. uint64_t Size = std::min(AllocaSize, P.size() * SizeOfByte); Pieces.push_back(Piece(NewAI, P.beginOffset() * SizeOfByte, Size)); } } ++NumPartitions; } NumAllocaPartitions += NumPartitions; MaxPartitionsPerAlloca = std::max<unsigned>(NumPartitions, MaxPartitionsPerAlloca); // Migrate debug information from the old alloca to the new alloca(s) // and the individial partitions. if (DbgDeclareInst *DbgDecl = FindAllocaDbgDeclare(&AI)) { auto *Var = DbgDecl->getVariable(); auto *Expr = DbgDecl->getExpression(); DIBuilder DIB(*AI.getParent()->getParent()->getParent(), /*AllowUnresolved*/ false); bool IsSplit = Pieces.size() > 1; // HLSL Change Begins // Take into account debug stride in extra metadata std::vector<hlsl::DxilDIArrayDim> ArrayDims; unsigned FirstFragmentOffsetInBits = 0; if (!hlsl::DxilMDHelper::GetVariableDebugLayout(DbgDecl, FirstFragmentOffsetInBits, ArrayDims) && Expr->isBitPiece()) { FirstFragmentOffsetInBits = Expr->getBitPieceOffset(); } unsigned FragmentSizeInBits = DL.getTypeAllocSizeInBits(AI.getAllocatedType()); for (const hlsl::DxilDIArrayDim& ArrayDim : ArrayDims) { assert(FragmentSizeInBits % ArrayDim.NumElements == 0); FragmentSizeInBits /= ArrayDim.NumElements; } // HLSL Change Ends for (auto Piece : Pieces) { // Create a piece expression describing the new partition or reuse AI's // expression if there is only one partition. auto *PieceExpr = Expr; if (IsSplit || Expr->isBitPiece()) { #if 0 // HLSL Change - Handle Strides // If this alloca is already a scalar replacement of a larger aggregate, // Piece.Offset describes the offset inside the scalar. uint64_t Offset = Expr->isBitPiece() ? Expr->getBitPieceOffset() : 0; uint64_t Start = Offset + Piece.Offset; uint64_t Size = Piece.Size; if (Expr->isBitPiece()) { uint64_t AbsEnd = Expr->getBitPieceOffset() + Expr->getBitPieceSize(); if (Start >= AbsEnd) // No need to describe a SROAed padding. 
continue; Size = std::min(Size, AbsEnd - Start); } // HLSL Change Begins #else // Find the fragment from the original user variable in which this piece falls uint64_t PieceFragmentIndex = Piece.Offset / FragmentSizeInBits; // Compute the offset in the original user variable uint64_t StartInFragment = Piece.Offset % FragmentSizeInBits; uint64_t Start = FirstFragmentOffsetInBits + Piece.Offset % FragmentSizeInBits; for (auto ArrayDimIter = ArrayDims.rbegin(); ArrayDimIter != ArrayDims.rend(); ++ArrayDimIter) { Start += ArrayDimIter->StrideInBits * (PieceFragmentIndex % ArrayDimIter->NumElements); PieceFragmentIndex /= ArrayDimIter->NumElements; } uint64_t Size = std::min<uint64_t>(Piece.Size, FragmentSizeInBits - StartInFragment); #endif // HLSL Change Ends PieceExpr = DIB.createBitPieceExpression(Start, Size); } // Remove any existing dbg.declare intrinsic describing the same alloca. if (DbgDeclareInst *OldDDI = FindAllocaDbgDeclare(Piece.Alloca)) OldDDI->eraseFromParent(); DIB.insertDeclare(Piece.Alloca, Var, PieceExpr, DbgDecl->getDebugLoc(), &AI); } } return Changed; } /// \brief Clobber a use with undef, deleting the used value if it becomes dead. void SROA::clobberUse(Use &U) { Value *OldV = U; // Replace the use with an undef value. U = UndefValue::get(OldV->getType()); // Check for this making an instruction dead. We have to garbage collect // all the dead instructions to ensure the uses of any alloca end up being // minimal. if (Instruction *OldI = dyn_cast<Instruction>(OldV)) if (isInstructionTriviallyDead(OldI)) { DeadInsts.insert(OldI); } } /// \brief Analyze an alloca for SROA. /// /// This analyzes the alloca to ensure we can reason about it, builds /// the slices of the alloca, and then hands it off to be split and /// rewritten as needed. bool SROA::runOnAlloca(AllocaInst &AI) { DEBUG(dbgs() << "SROA alloca: " << AI << "\n"); ++NumAllocasAnalyzed; // Special case dead allocas, as they're trivial. if (AI.use_empty()) { AI.eraseFromParent(); return true; } const DataLayout &DL = AI.getModule()->getDataLayout(); // HLSL Change Begin // This passes only deals with byte-sized types. // We can have i1 allocas for a bool return value when compiling without optimizations // If we let this run, it'll get turned into an i8, which is invalid dxil. if (AI.getAllocatedType()->isIntegerTy(1)) return false; // HLSL Change End // Skip alloca forms that this analysis can't handle. if (AI.isArrayAllocation() || !AI.getAllocatedType()->isSized() || hlsl::dxilutil::IsHLSLObjectType( AI.getAllocatedType()) || // HLSL Change - not sroa resource type. // HLSL Change Begin - not sroa matrix type. SkipHLSLType(AI.getAllocatedType(), SkipHLSLMat) || // HLSL Change End. DL.getTypeAllocSize(AI.getAllocatedType()) == 0) return false; bool Changed = false; // First, split any FCA loads and stores touching this alloca to promote // better splitting and promotion opportunities. AggLoadStoreRewriter AggRewriter(DL, SkipHLSLMat); Changed |= AggRewriter.rewrite(AI); // Build the slices using a recursive instruction-visiting builder. AllocaSlices AS(DL, AI, SkipHLSLMat); DEBUG(AS.print(dbgs())); if (AS.isEscaped()) return Changed; // Delete all the dead users of this alloca before splitting and rewriting it. for (Instruction *DeadUser : AS.getDeadUsers()) { // Free up everything used by this instruction. for (Use &DeadOp : DeadUser->operands()) clobberUse(DeadOp); // Now replace the uses of this instruction. DeadUser->replaceAllUsesWith(UndefValue::get(DeadUser->getType())); // And mark it for deletion. 
DeadInsts.insert(DeadUser); Changed = true; } for (Use *DeadOp : AS.getDeadOperands()) { clobberUse(*DeadOp); Changed = true; } // No slices to split. Leave the dead alloca for a later pass to clean up. if (AS.begin() == AS.end()) return Changed; Changed |= splitAlloca(AI, AS); DEBUG(dbgs() << " Speculating PHIs\n"); while (!SpeculatablePHIs.empty()) speculatePHINodeLoads(*SpeculatablePHIs.pop_back_val()); DEBUG(dbgs() << " Speculating Selects\n"); while (!SpeculatableSelects.empty()) speculateSelectInstLoads(*SpeculatableSelects.pop_back_val()); return Changed; } /// \brief Delete the dead instructions accumulated in this run. /// /// Recursively deletes the dead instructions we've accumulated. This is done /// at the very end to maximize locality of the recursive delete and to /// minimize the problems of invalidated instruction pointers as such pointers /// are used heavily in the intermediate stages of the algorithm. /// /// We also record the alloca instructions deleted here so that they aren't /// subsequently handed to mem2reg to promote. void SROA::deleteDeadInstructions( SmallPtrSetImpl<AllocaInst *> &DeletedAllocas) { while (!DeadInsts.empty()) { Instruction *I = DeadInsts.pop_back_val(); DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n"); // HLSL Change Begins // If the instruction is an alloca, find the possible dbg.declare connected // to it, and remove it too. We must do this before calling RAUW or we will // not be able to find it. if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) { DeletedAllocas.insert(AI); if (DbgDeclareInst *DbgDecl = FindAllocaDbgDeclare(AI)) DbgDecl->eraseFromParent(); } // HLSL Change Ends I->replaceAllUsesWith(UndefValue::get(I->getType())); for (Use &Operand : I->operands()) if (Instruction *U = dyn_cast<Instruction>(Operand)) { // Zero out the operand and see if it becomes trivially dead. Operand = nullptr; if (isInstructionTriviallyDead(U)) DeadInsts.insert(U); } #if 0 // HLSL Change - blocked moved before replaceAllUsesWith if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) { DeletedAllocas.insert(AI); if (DbgDeclareInst *DbgDecl = FindAllocaDbgDeclare(AI)) DbgDecl->eraseFromParent(); } #endif // HLSL Change ++NumDeleted; I->eraseFromParent(); } } static void enqueueUsersInWorklist(Instruction &I, SmallVectorImpl<Instruction *> &Worklist, SmallPtrSetImpl<Instruction *> &Visited) { for (User *U : I.users()) if (Visited.insert(cast<Instruction>(U)).second) Worklist.push_back(cast<Instruction>(U)); } /// \brief Promote the allocas, using the best available technique. /// /// This attempts to promote whatever allocas have been identified as viable in /// the PromotableAllocas list. If that list is empty, there is nothing to do. /// If there is a domtree available, we attempt to promote using the full power /// of mem2reg. Otherwise, we build and use the AllocaPromoter above which is /// based on the SSAUpdater utilities. This function returns whether any /// promotion occurred. bool SROA::promoteAllocas(Function &F) { if (PromotableAllocas.empty()) return false; NumPromoted += PromotableAllocas.size(); if (DT && !ForceSSAUpdater) { DEBUG(dbgs() << "Promoting allocas with mem2reg...\n"); PromoteMemToReg(PromotableAllocas, *DT, nullptr, AC); PromotableAllocas.clear(); return true; } DEBUG(dbgs() << "Promoting allocas with SSAUpdater...\n"); SSAUpdater SSA; DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false); SmallVector<Instruction *, 64> Insts; // We need a worklist to walk the uses of each alloca. 
SmallVector<Instruction *, 8> Worklist; SmallPtrSet<Instruction *, 8> Visited; SmallVector<Instruction *, 32> DeadInsts; for (unsigned Idx = 0, Size = PromotableAllocas.size(); Idx != Size; ++Idx) { AllocaInst *AI = PromotableAllocas[Idx]; Insts.clear(); Worklist.clear(); Visited.clear(); enqueueUsersInWorklist(*AI, Worklist, Visited); while (!Worklist.empty()) { Instruction *I = Worklist.pop_back_val(); // FIXME: Currently the SSAUpdater infrastructure doesn't reason about // lifetime intrinsics and so we strip them (and the bitcasts+GEPs // leading to them) here. Eventually it should use them to optimize the // scalar values produced. if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { assert(II->getIntrinsicID() == Intrinsic::lifetime_start || II->getIntrinsicID() == Intrinsic::lifetime_end); II->eraseFromParent(); continue; } // Push the loads and stores we find onto the list. SROA will already // have validated that all loads and stores are viable candidates for // promotion. if (LoadInst *LI = dyn_cast<LoadInst>(I)) { assert(LI->getType() == AI->getAllocatedType()); Insts.push_back(LI); continue; } if (StoreInst *SI = dyn_cast<StoreInst>(I)) { assert(SI->getValueOperand()->getType() == AI->getAllocatedType()); Insts.push_back(SI); continue; } // For everything else, we know that only no-op bitcasts and GEPs will // make it this far, just recurse through them and recall them for later // removal. DeadInsts.push_back(I); enqueueUsersInWorklist(*I, Worklist, Visited); } AllocaPromoter(Insts, SSA, *AI, DIB).run(Insts); while (!DeadInsts.empty()) DeadInsts.pop_back_val()->eraseFromParent(); AI->eraseFromParent(); } PromotableAllocas.clear(); return true; } // HLSL Change - run SROA more than once if updated. bool SROA::runOnFunctionImp(Function &F) { if (skipOptnoneFunction(F)) return false; DEBUG(dbgs() << "SROA function: " << F.getName() << "\n"); C = &F.getContext(); DominatorTreeWrapperPass *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>(); DT = DTWP ? &DTWP->getDomTree() : nullptr; AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); BasicBlock &EntryBB = F.getEntryBlock(); for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end()); I != E; ++I) { if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) Worklist.insert(AI); } bool Changed = false; // A set of deleted alloca instruction pointers which should be removed from // the list of promotable allocas. SmallPtrSet<AllocaInst *, 4> DeletedAllocas; do { while (!Worklist.empty()) { Changed |= runOnAlloca(*Worklist.pop_back_val()); deleteDeadInstructions(DeletedAllocas); // Remove the deleted allocas from various lists so that we don't try to // continue processing them. if (!DeletedAllocas.empty()) { auto IsInSet = [&](AllocaInst *AI) { return DeletedAllocas.count(AI); }; Worklist.remove_if(IsInSet); PostPromotionWorklist.remove_if(IsInSet); PromotableAllocas.erase(std::remove_if(PromotableAllocas.begin(), PromotableAllocas.end(), IsInSet), PromotableAllocas.end()); DeletedAllocas.clear(); } } Changed |= promoteAllocas(F); Worklist = PostPromotionWorklist; PostPromotionWorklist.clear(); } while (!Worklist.empty()); return Changed; } // HLSL Change Begin. // In some case, alloca fail to optimized early will be ready to optimize after // other alloca is optimized. bool SROA::runOnFunction(Function &F) { unsigned count = 0; const unsigned kMaxCount = 3; while ((count++) < kMaxCount) { if (!runOnFunctionImp(F)) break; } return count > 1; } // HLSL Change End. 
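// --- Illustrative sketch (not part of the original pass) --------------------
// A stand-alone model of the bounded fixed-point driver used by
// SROA::runOnFunction above: rerun a transform step until it reports no
// change or an iteration cap is hit. The function name and the Step callback
// are hypothetical stand-ins for runOnFunctionImp; this is a sketch of the
// control flow only, under the assumption that Step returns true iff it
// changed something.
#include <functional>

namespace sroa_sketch {

inline bool runToFixedPoint(const std::function<bool()> &Step,
                            unsigned MaxIterations = 3) {
  unsigned Count = 0;
  while (Count < MaxIterations) {
    ++Count;
    if (!Step()) // Nothing changed; we have converged.
      break;
  }
  // Matches the original driver: the result is "changed" exactly when the
  // first iteration reported a change (Count only advances past 1 in that
  // case).
  return Count > 1;
}

} // namespace sroa_sketch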
void SROA::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AssumptionCacheTracker>();
  if (RequiresDomTree)
    AU.addRequired<DominatorTreeWrapperPass>();
  AU.setPreservesCFG();
}
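// --- Illustrative sketch (not part of the original pass) --------------------
// Stand-alone model of the PartOffset/PartSize arithmetic used when
// pre-splitting loads and stores (see presplitLoadsAndStores above): given a
// total access size in bytes and a sorted list of interior split offsets,
// produce the (offset, size) of each part. The names below are hypothetical
// and exist only for this sketch.
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

namespace sroa_sketch {

inline std::vector<std::pair<uint64_t, uint64_t>>
computeParts(uint64_t AccessSize, const std::vector<uint64_t> &Splits) {
  std::vector<std::pair<uint64_t, uint64_t>> Parts;
  uint64_t PartOffset = 0;
  for (size_t Idx = 0, Size = Splits.size(); Idx <= Size; ++Idx) {
    // The last part runs to the end of the access; interior parts run to the
    // next split offset, exactly as in the rewrite loops above.
    uint64_t PartEnd = (Idx < Size) ? Splits[Idx] : AccessSize;
    assert(PartEnd > PartOffset && "split offsets must be strictly increasing");
    Parts.push_back({PartOffset, PartEnd - PartOffset});
    PartOffset = PartEnd;
  }
  return Parts;
}

} // namespace sroa_sketch

// Example: computeParts(8, {4}) yields {{0,4},{4,4}}, i.e. an 8-byte access
// split at byte 4 becomes two 4-byte parts, each later rewritten as its own
// narrower load or store.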
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/SimplifyCFGPass.cpp
//===- SimplifyCFGPass.cpp - CFG Simplification Pass ----------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements dead code elimination and basic block merging, along // with a collection of other peephole control flow optimizations. For example: // // * Removes basic blocks with no predecessors. // * Merges a basic block into its predecessor if there is only one and the // predecessor only has one successor. // * Eliminates PHI nodes for basic blocks with a single predecessor. // * Eliminates a basic block that only contains an unconditional branch. // * Changes invoke instructions to nounwind functions to be calls. // * Change things like "if (x) if (y)" into "if (x&y)". // * etc.. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar/SimplifyCFG.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/IR/Attributes.h" #include "llvm/IR/CFG.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Scalar.h" using namespace llvm; #define DEBUG_TYPE "simplifycfg" #if 0 // HLSL Change Starts static cl::opt<unsigned> UserBonusInstThreshold("bonus-inst-threshold", cl::Hidden, cl::init(1), cl::desc("Control the number of bonus instructions (default = 1)")); #else unsigned UserBonusInstThreshold = 1; #endif STATISTIC(NumSimpl, "Number of blocks simplified"); /// If we have more than one empty (other than phi node) return blocks, /// merge them together to promote recursive block merging. static bool mergeEmptyReturnBlocks(Function &F) { bool Changed = false; BasicBlock *RetBlock = nullptr; // Scan all the blocks in the function, looking for empty return blocks. for (Function::iterator BBI = F.begin(), E = F.end(); BBI != E; ) { BasicBlock &BB = *BBI++; // Only look at return blocks. ReturnInst *Ret = dyn_cast<ReturnInst>(BB.getTerminator()); if (!Ret) continue; // Only look at the block if it is empty or the only other thing in it is a // single PHI node that is the operand to the return. if (Ret != &BB.front()) { // Check for something else in the block. BasicBlock::iterator I = Ret; --I; // Skip over debug info. while (isa<DbgInfoIntrinsic>(I) && I != BB.begin()) --I; if (!isa<DbgInfoIntrinsic>(I) && (!isa<PHINode>(I) || I != BB.begin() || Ret->getNumOperands() == 0 || Ret->getOperand(0) != I)) continue; } // If this is the first returning block, remember it and keep going. if (!RetBlock) { RetBlock = &BB; continue; } // Otherwise, we found a duplicate return block. Merge the two. Changed = true; // Case when there is no input to the return or when the returned values // agree is trivial. Note that they can't agree if there are phis in the // blocks. if (Ret->getNumOperands() == 0 || Ret->getOperand(0) == cast<ReturnInst>(RetBlock->getTerminator())->getOperand(0)) { BB.replaceAllUsesWith(RetBlock); BB.eraseFromParent(); continue; } // If the canonical return block has no PHI node, create one now. 
PHINode *RetBlockPHI = dyn_cast<PHINode>(RetBlock->begin()); if (!RetBlockPHI) { Value *InVal = cast<ReturnInst>(RetBlock->getTerminator())->getOperand(0); pred_iterator PB = pred_begin(RetBlock), PE = pred_end(RetBlock); RetBlockPHI = PHINode::Create(Ret->getOperand(0)->getType(), std::distance(PB, PE), "merge", &RetBlock->front()); for (pred_iterator PI = PB; PI != PE; ++PI) RetBlockPHI->addIncoming(InVal, *PI); RetBlock->getTerminator()->setOperand(0, RetBlockPHI); } // Turn BB into a block that just unconditionally branches to the return // block. This handles the case when the two return blocks have a common // predecessor but that return different things. RetBlockPHI->addIncoming(Ret->getOperand(0), &BB); BB.getTerminator()->eraseFromParent(); BranchInst::Create(RetBlock, &BB); } return Changed; } /// Call SimplifyCFG on all the blocks in the function, /// iterating until no more changes are made. static bool iterativelySimplifyCFG(Function &F, const TargetTransformInfo &TTI, AssumptionCache *AC, unsigned BonusInstThreshold) { bool Changed = false; bool LocalChange = true; while (LocalChange) { LocalChange = false; // Loop over all of the basic blocks and remove them if they are unneeded. for (Function::iterator BBIt = F.begin(); BBIt != F.end(); ) { if (SimplifyCFG(BBIt++, TTI, BonusInstThreshold, AC)) { LocalChange = true; ++NumSimpl; } } Changed |= LocalChange; } return Changed; } static bool simplifyFunctionCFG(Function &F, const TargetTransformInfo &TTI, AssumptionCache *AC, int BonusInstThreshold) { bool EverChanged = removeUnreachableBlocks(F); EverChanged |= mergeEmptyReturnBlocks(F); EverChanged |= iterativelySimplifyCFG(F, TTI, AC, BonusInstThreshold); // If neither pass changed anything, we're done. if (!EverChanged) return false; // iterativelySimplifyCFG can (rarely) make some loops dead. If this happens, // removeUnreachableBlocks is needed to nuke them, which means we should // iterate between the two optimizations. We structure the code like this to // avoid rerunning iterativelySimplifyCFG if the second pass of // removeUnreachableBlocks doesn't do anything. if (!removeUnreachableBlocks(F)) return true; do { EverChanged = iterativelySimplifyCFG(F, TTI, AC, BonusInstThreshold); EverChanged |= removeUnreachableBlocks(F); } while (EverChanged); return true; } SimplifyCFGPass::SimplifyCFGPass() : BonusInstThreshold(UserBonusInstThreshold) {} SimplifyCFGPass::SimplifyCFGPass(int BonusInstThreshold) : BonusInstThreshold(BonusInstThreshold) {} PreservedAnalyses SimplifyCFGPass::run(Function &F, AnalysisManager<Function> *AM) { auto &TTI = AM->getResult<TargetIRAnalysis>(F); auto &AC = AM->getResult<AssumptionAnalysis>(F); if (!simplifyFunctionCFG(F, TTI, &AC, BonusInstThreshold)) return PreservedAnalyses::none(); return PreservedAnalyses::all(); } namespace { struct CFGSimplifyPass : public FunctionPass { static char ID; // Pass identification, replacement for typeid unsigned BonusInstThreshold; std::function<bool(const Function &)> PredicateFtor; CFGSimplifyPass(int T = -1, std::function<bool(const Function &)> Ftor = nullptr) : FunctionPass(ID), PredicateFtor(Ftor) { BonusInstThreshold = (T == -1) ? 
UserBonusInstThreshold : unsigned(T); initializeCFGSimplifyPassPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function &F) override { if (PredicateFtor && !PredicateFtor(F)) return false; if (skipOptnoneFunction(F)) return false; AssumptionCache *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); const TargetTransformInfo &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); return simplifyFunctionCFG(F, TTI, AC, BonusInstThreshold); } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<AssumptionCacheTracker>(); AU.addRequired<TargetTransformInfoWrapperPass>(); } }; } char CFGSimplifyPass::ID = 0; INITIALIZE_PASS_BEGIN(CFGSimplifyPass, "simplifycfg", "Simplify the CFG", false, false) INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_END(CFGSimplifyPass, "simplifycfg", "Simplify the CFG", false, false) // Public interface to the CFGSimplification pass FunctionPass * llvm::createCFGSimplificationPass(int Threshold, std::function<bool(const Function &)> Ftor) { return new CFGSimplifyPass(Threshold, Ftor); }
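// --- Illustrative sketch (not part of this pass) -----------------------------
// A simplified, self-contained model of the driver logic in
// simplifyFunctionCFG above: run two cleanup steps once and, if anything
// changed, keep alternating them until a whole round makes no further change.
// StepA and StepB are hypothetical stand-ins for removeUnreachableBlocks and
// iterativelySimplifyCFG; the real pass additionally short-circuits when the
// extra removeUnreachableBlocks call after the first round does nothing.
#include <functional>

namespace simplifycfg_sketch {

inline bool alternateToFixedPoint(const std::function<bool()> &StepA,
                                  const std::function<bool()> &StepB) {
  bool EverChanged = StepA();
  EverChanged |= StepB();
  if (!EverChanged)
    return false; // Neither step did anything; nothing to iterate on.
  // Each step may expose more work for the other (e.g. simplification can
  // make a loop dead), so iterate until a full round is a no-op, mirroring
  // the do/while loop in the pass.
  bool Changed;
  do {
    Changed = StepB();
    Changed |= StepA();
  } while (Changed);
  return true;
}

} // namespace simplifycfg_sketch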
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
//===----------------------- AlignmentFromAssumptions.cpp -----------------===// // Set Load/Store Alignments From Assumptions // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements a ScalarEvolution-based transformation to set // the alignments of load, stores and memory intrinsics based on the truth // expressions of assume intrinsics. The primary motivation is to handle // complex alignment assumptions that apply to vector loads and stores that // appear after vectorization and unrolling. // //===----------------------------------------------------------------------===// #define AA_NAME "alignment-from-assumptions" #define DEBUG_TYPE AA_NAME #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/ScalarEvolutionExpressions.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/Constant.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/Module.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; STATISTIC(NumLoadAlignChanged, "Number of loads changed by alignment assumptions"); STATISTIC(NumStoreAlignChanged, "Number of stores changed by alignment assumptions"); STATISTIC(NumMemIntAlignChanged, "Number of memory intrinsics changed by alignment assumptions"); namespace { struct AlignmentFromAssumptions : public FunctionPass { static char ID; // Pass identification, replacement for typeid AlignmentFromAssumptions() : FunctionPass(ID) { initializeAlignmentFromAssumptionsPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function &F) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<AssumptionCacheTracker>(); AU.addRequired<ScalarEvolution>(); AU.addRequired<DominatorTreeWrapperPass>(); AU.setPreservesCFG(); AU.addPreserved<LoopInfoWrapperPass>(); AU.addPreserved<DominatorTreeWrapperPass>(); AU.addPreserved<ScalarEvolution>(); } // For memory transfers, we need a common alignment for both the source and // destination. If we have a new alignment for only one operand of a transfer // instruction, save it in these maps. If we reach the other operand through // another assumption later, then we may change the alignment at that point. 
DenseMap<MemTransferInst *, unsigned> NewDestAlignments, NewSrcAlignments; ScalarEvolution *SE; DominatorTree *DT; bool extractAlignmentInfo(CallInst *I, Value *&AAPtr, const SCEV *&AlignSCEV, const SCEV *&OffSCEV); bool processAssumption(CallInst *I); }; } char AlignmentFromAssumptions::ID = 0; static const char aip_name[] = "Alignment from assumptions"; INITIALIZE_PASS_BEGIN(AlignmentFromAssumptions, AA_NAME, aip_name, false, false) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) INITIALIZE_PASS_END(AlignmentFromAssumptions, AA_NAME, aip_name, false, false) FunctionPass *llvm::createAlignmentFromAssumptionsPass() { return new AlignmentFromAssumptions(); } // Given an expression for the (constant) alignment, AlignSCEV, and an // expression for the displacement between a pointer and the aligned address, // DiffSCEV, compute the alignment of the displaced pointer if it can be reduced // to a constant. Using SCEV to compute alignment handles the case where // DiffSCEV is a recurrence with constant start such that the aligned offset // is constant. e.g. {16,+,32} % 32 -> 16. static unsigned getNewAlignmentDiff(const SCEV *DiffSCEV, const SCEV *AlignSCEV, ScalarEvolution *SE) { // DiffUnits = Diff % int64_t(Alignment) const SCEV *DiffAlignDiv = SE->getUDivExpr(DiffSCEV, AlignSCEV); const SCEV *DiffAlign = SE->getMulExpr(DiffAlignDiv, AlignSCEV); const SCEV *DiffUnitsSCEV = SE->getMinusSCEV(DiffAlign, DiffSCEV); DEBUG(dbgs() << "\talignment relative to " << *AlignSCEV << " is " << *DiffUnitsSCEV << " (diff: " << *DiffSCEV << ")\n"); if (const SCEVConstant *ConstDUSCEV = dyn_cast<SCEVConstant>(DiffUnitsSCEV)) { int64_t DiffUnits = ConstDUSCEV->getValue()->getSExtValue(); // If the displacement is an exact multiple of the alignment, then the // displaced pointer has the same alignment as the aligned pointer, so // return the alignment value. if (!DiffUnits) return (unsigned) cast<SCEVConstant>(AlignSCEV)->getValue()->getSExtValue(); // If the displacement is not an exact multiple, but the remainder is a // constant, then return this remainder (but only if it is a power of 2). uint64_t DiffUnitsAbs = std::abs(DiffUnits); if (isPowerOf2_64(DiffUnitsAbs)) return (unsigned) DiffUnitsAbs; } return 0; } // There is an address given by an offset OffSCEV from AASCEV which has an // alignment AlignSCEV. Use that information, if possible, to compute a new // alignment for Ptr. static unsigned getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV, const SCEV *OffSCEV, Value *Ptr, ScalarEvolution *SE) { const SCEV *PtrSCEV = SE->getSCEV(Ptr); const SCEV *DiffSCEV = SE->getMinusSCEV(PtrSCEV, AASCEV); // On 32-bit platforms, DiffSCEV might now have type i32 -- we've always // sign-extended OffSCEV to i64, so make sure they agree again. DiffSCEV = SE->getNoopOrSignExtend(DiffSCEV, OffSCEV->getType()); // What we really want to know is the overall offset to the aligned // address. This address is displaced by the provided offset. 
DiffSCEV = SE->getMinusSCEV(DiffSCEV, OffSCEV); DEBUG(dbgs() << "AFI: alignment of " << *Ptr << " relative to " << *AlignSCEV << " and offset " << *OffSCEV << " using diff " << *DiffSCEV << "\n"); unsigned NewAlignment = getNewAlignmentDiff(DiffSCEV, AlignSCEV, SE); DEBUG(dbgs() << "\tnew alignment: " << NewAlignment << "\n"); if (NewAlignment) { return NewAlignment; } else if (const SCEVAddRecExpr *DiffARSCEV = dyn_cast<SCEVAddRecExpr>(DiffSCEV)) { // The relative offset to the alignment assumption did not yield a constant, // but we should try harder: if we assume that a is 32-byte aligned, then in // for (i = 0; i < 1024; i += 4) r += a[i]; not all of the loads from a are // 32-byte aligned, but instead alternate between 32 and 16-byte alignment. // As a result, the new alignment will not be a constant, but can still // be improved over the default (of 4) to 16. const SCEV *DiffStartSCEV = DiffARSCEV->getStart(); const SCEV *DiffIncSCEV = DiffARSCEV->getStepRecurrence(*SE); DEBUG(dbgs() << "\ttrying start/inc alignment using start " << *DiffStartSCEV << " and inc " << *DiffIncSCEV << "\n"); // Now compute the new alignment using the displacement to the value in the // first iteration, and also the alignment using the per-iteration delta. // If these are the same, then use that answer. Otherwise, use the smaller // one, but only if it divides the larger one. NewAlignment = getNewAlignmentDiff(DiffStartSCEV, AlignSCEV, SE); unsigned NewIncAlignment = getNewAlignmentDiff(DiffIncSCEV, AlignSCEV, SE); DEBUG(dbgs() << "\tnew start alignment: " << NewAlignment << "\n"); DEBUG(dbgs() << "\tnew inc alignment: " << NewIncAlignment << "\n"); if (!NewAlignment || !NewIncAlignment) { return 0; } else if (NewAlignment > NewIncAlignment) { if (NewAlignment % NewIncAlignment == 0) { DEBUG(dbgs() << "\tnew start/inc alignment: " << NewIncAlignment << "\n"); return NewIncAlignment; } } else if (NewIncAlignment > NewAlignment) { if (NewIncAlignment % NewAlignment == 0) { DEBUG(dbgs() << "\tnew start/inc alignment: " << NewAlignment << "\n"); return NewAlignment; } } else if (NewIncAlignment == NewAlignment) { DEBUG(dbgs() << "\tnew start/inc alignment: " << NewAlignment << "\n"); return NewAlignment; } } return 0; } bool AlignmentFromAssumptions::extractAlignmentInfo(CallInst *I, Value *&AAPtr, const SCEV *&AlignSCEV, const SCEV *&OffSCEV) { // An alignment assume must be a statement about the least-significant // bits of the pointer being zero, possibly with some offset. ICmpInst *ICI = dyn_cast<ICmpInst>(I->getArgOperand(0)); if (!ICI) return false; // This must be an expression of the form: x & m == 0. if (ICI->getPredicate() != ICmpInst::ICMP_EQ) return false; // Swap things around so that the RHS is 0. Value *CmpLHS = ICI->getOperand(0); Value *CmpRHS = ICI->getOperand(1); const SCEV *CmpLHSSCEV = SE->getSCEV(CmpLHS); const SCEV *CmpRHSSCEV = SE->getSCEV(CmpRHS); if (CmpLHSSCEV->isZero()) std::swap(CmpLHS, CmpRHS); else if (!CmpRHSSCEV->isZero()) return false; BinaryOperator *CmpBO = dyn_cast<BinaryOperator>(CmpLHS); if (!CmpBO || CmpBO->getOpcode() != Instruction::And) return false; // Swap things around so that the right operand of the and is a constant // (the mask); we cannot deal with variable masks. 
Value *AndLHS = CmpBO->getOperand(0); Value *AndRHS = CmpBO->getOperand(1); const SCEV *AndLHSSCEV = SE->getSCEV(AndLHS); const SCEV *AndRHSSCEV = SE->getSCEV(AndRHS); if (isa<SCEVConstant>(AndLHSSCEV)) { std::swap(AndLHS, AndRHS); std::swap(AndLHSSCEV, AndRHSSCEV); } const SCEVConstant *MaskSCEV = dyn_cast<SCEVConstant>(AndRHSSCEV); if (!MaskSCEV) return false; // The mask must have some trailing ones (otherwise the condition is // trivial and tells us nothing about the alignment of the left operand). unsigned TrailingOnes = MaskSCEV->getValue()->getValue().countTrailingOnes(); if (!TrailingOnes) return false; // Cap the alignment at the maximum with which LLVM can deal (and make sure // we don't overflow the shift). uint64_t Alignment; TrailingOnes = std::min(TrailingOnes, unsigned(sizeof(unsigned) * CHAR_BIT - 1)); Alignment = std::min(1u << TrailingOnes, +Value::MaximumAlignment); Type *Int64Ty = Type::getInt64Ty(I->getParent()->getParent()->getContext()); AlignSCEV = SE->getConstant(Int64Ty, Alignment); // The LHS might be a ptrtoint instruction, or it might be the pointer // with an offset. AAPtr = nullptr; OffSCEV = nullptr; if (PtrToIntInst *PToI = dyn_cast<PtrToIntInst>(AndLHS)) { AAPtr = PToI->getPointerOperand(); OffSCEV = SE->getConstant(Int64Ty, 0); } else if (const SCEVAddExpr* AndLHSAddSCEV = dyn_cast<SCEVAddExpr>(AndLHSSCEV)) { // Try to find the ptrtoint; subtract it and the rest is the offset. for (SCEVAddExpr::op_iterator J = AndLHSAddSCEV->op_begin(), JE = AndLHSAddSCEV->op_end(); J != JE; ++J) if (const SCEVUnknown *OpUnk = dyn_cast<SCEVUnknown>(*J)) if (PtrToIntInst *PToI = dyn_cast<PtrToIntInst>(OpUnk->getValue())) { AAPtr = PToI->getPointerOperand(); OffSCEV = SE->getMinusSCEV(AndLHSAddSCEV, *J); break; } } if (!AAPtr) return false; // Sign extend the offset to 64 bits (so that it is like all of the other // expressions). unsigned OffSCEVBits = OffSCEV->getType()->getPrimitiveSizeInBits(); if (OffSCEVBits < 64) OffSCEV = SE->getSignExtendExpr(OffSCEV, Int64Ty); else if (OffSCEVBits > 64) return false; AAPtr = AAPtr->stripPointerCasts(); return true; } bool AlignmentFromAssumptions::processAssumption(CallInst *ACall) { Value *AAPtr; const SCEV *AlignSCEV, *OffSCEV; if (!extractAlignmentInfo(ACall, AAPtr, AlignSCEV, OffSCEV)) return false; const SCEV *AASCEV = SE->getSCEV(AAPtr); // Apply the assumption to all other users of the specified pointer. SmallPtrSet<Instruction *, 32> Visited; SmallVector<Instruction*, 16> WorkList; for (User *J : AAPtr->users()) { if (J == ACall) continue; if (Instruction *K = dyn_cast<Instruction>(J)) if (isValidAssumeForContext(ACall, K, DT)) WorkList.push_back(K); } while (!WorkList.empty()) { Instruction *J = WorkList.pop_back_val(); if (LoadInst *LI = dyn_cast<LoadInst>(J)) { unsigned NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV, LI->getPointerOperand(), SE); if (NewAlignment > LI->getAlignment()) { LI->setAlignment(NewAlignment); ++NumLoadAlignChanged; } } else if (StoreInst *SI = dyn_cast<StoreInst>(J)) { unsigned NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV, SI->getPointerOperand(), SE); if (NewAlignment > SI->getAlignment()) { SI->setAlignment(NewAlignment); ++NumStoreAlignChanged; } } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(J)) { unsigned NewDestAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MI->getDest(), SE); // For memory transfers, we need a common alignment for both the // source and destination. 
If we have a new alignment for this // instruction, but only for one operand, save it. If we reach the // other operand through another assumption later, then we may // change the alignment at that point. if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { unsigned NewSrcAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MTI->getSource(), SE); DenseMap<MemTransferInst *, unsigned>::iterator DI = NewDestAlignments.find(MTI); unsigned AltDestAlignment = (DI == NewDestAlignments.end()) ? 0 : DI->second; DenseMap<MemTransferInst *, unsigned>::iterator SI = NewSrcAlignments.find(MTI); unsigned AltSrcAlignment = (SI == NewSrcAlignments.end()) ? 0 : SI->second; DEBUG(dbgs() << "\tmem trans: " << NewDestAlignment << " " << AltDestAlignment << " " << NewSrcAlignment << " " << AltSrcAlignment << "\n"); // Of these four alignments, pick the largest possible... unsigned NewAlignment = 0; if (NewDestAlignment <= std::max(NewSrcAlignment, AltSrcAlignment)) NewAlignment = std::max(NewAlignment, NewDestAlignment); if (AltDestAlignment <= std::max(NewSrcAlignment, AltSrcAlignment)) NewAlignment = std::max(NewAlignment, AltDestAlignment); if (NewSrcAlignment <= std::max(NewDestAlignment, AltDestAlignment)) NewAlignment = std::max(NewAlignment, NewSrcAlignment); if (AltSrcAlignment <= std::max(NewDestAlignment, AltDestAlignment)) NewAlignment = std::max(NewAlignment, AltSrcAlignment); if (NewAlignment > MI->getAlignment()) { MI->setAlignment(ConstantInt::get(Type::getInt32Ty( MI->getParent()->getContext()), NewAlignment)); ++NumMemIntAlignChanged; } NewDestAlignments.insert(std::make_pair(MTI, NewDestAlignment)); NewSrcAlignments.insert(std::make_pair(MTI, NewSrcAlignment)); } else if (NewDestAlignment > MI->getAlignment()) { assert((!isa<MemIntrinsic>(MI) || isa<MemSetInst>(MI)) && "Unknown memory intrinsic"); MI->setAlignment(ConstantInt::get(Type::getInt32Ty( MI->getParent()->getContext()), NewDestAlignment)); ++NumMemIntAlignChanged; } } // Now that we've updated that use of the pointer, look for other uses of // the pointer to update. Visited.insert(J); for (User *UJ : J->users()) { Instruction *K = cast<Instruction>(UJ); if (!Visited.count(K) && isValidAssumeForContext(ACall, K, DT)) WorkList.push_back(K); } } return true; } bool AlignmentFromAssumptions::runOnFunction(Function &F) { bool Changed = false; auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); SE = &getAnalysis<ScalarEvolution>(); DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); NewDestAlignments.clear(); NewSrcAlignments.clear(); for (auto &AssumeVH : AC.assumptions()) if (AssumeVH) Changed |= processAssumption(cast<CallInst>(AssumeVH)); return Changed; }
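// --- Illustrative sketch (not part of the original pass) --------------------
// Plain-integer model of the two alignment computations above. The names are
// hypothetical and the real pass works on SCEV expressions rather than raw
// integers, so this is only a sketch of the arithmetic.
#include <cstdint>
#include <cstdlib>

namespace align_sketch {

inline bool isPowerOfTwo(uint64_t X) { return X && (X & (X - 1)) == 0; }

// Mirrors extractAlignmentInfo: a mask with N trailing ones in
// "(ptr + off) & mask == 0" proves 2^N-byte alignment (capped so the shift
// and the resulting value stay representable).
inline uint64_t alignmentFromMask(uint64_t Mask, uint64_t MaxAlignment) {
  unsigned TrailingOnes = 0;
  while (TrailingOnes < 63 && (Mask & (1ULL << TrailingOnes)))
    ++TrailingOnes;
  if (TrailingOnes == 0)
    return 0; // The mask says nothing about the low bits.
  uint64_t Align = 1ULL << TrailingOnes;
  return Align < MaxAlignment ? Align : MaxAlignment;
}

// Mirrors getNewAlignmentDiff for the constant case: a displacement that is a
// multiple of the known alignment preserves it; otherwise a power-of-two
// remainder is itself a (smaller) provable alignment, and anything else
// proves nothing.
inline uint64_t newAlignmentFromDiff(int64_t Diff, uint64_t Alignment) {
  uint64_t Rem = static_cast<uint64_t>(std::llabs(Diff)) % Alignment;
  if (Rem == 0)
    return Alignment;
  return isPowerOfTwo(Rem) ? Rem : 0;
}

} // namespace align_sketch

// Example: newAlignmentFromDiff(16, 32) == 16, matching the "{16,+,32} % 32
// -> 16" case mentioned in the comment on getNewAlignmentDiff.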
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/StructurizeCFG.cpp
//===-- StructurizeCFG.cpp ------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/PostOrderIterator.h" #include "llvm/ADT/SCCIterator.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/RegionInfo.h" #include "llvm/Analysis/RegionIterator.h" #include "llvm/Analysis/RegionPass.h" #include "llvm/IR/Module.h" #include "llvm/IR/PatternMatch.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/SSAUpdater.h" using namespace llvm; using namespace llvm::PatternMatch; #define DEBUG_TYPE "structurizecfg" namespace { // Definition of the complex types used in this pass. typedef std::pair<BasicBlock *, Value *> BBValuePair; typedef SmallVector<RegionNode*, 8> RNVector; typedef SmallVector<BasicBlock*, 8> BBVector; typedef SmallVector<BranchInst*, 8> BranchVector; typedef SmallVector<BBValuePair, 2> BBValueVector; typedef SmallPtrSet<BasicBlock *, 8> BBSet; typedef MapVector<PHINode *, BBValueVector> PhiMap; typedef MapVector<BasicBlock *, BBVector> BB2BBVecMap; typedef DenseMap<DomTreeNode *, unsigned> DTN2UnsignedMap; typedef DenseMap<BasicBlock *, PhiMap> BBPhiMap; typedef DenseMap<BasicBlock *, Value *> BBPredicates; typedef DenseMap<BasicBlock *, BBPredicates> PredMap; typedef DenseMap<BasicBlock *, BasicBlock*> BB2BBMap; // The name for newly created blocks. static const char *const FlowBlockName = "Flow"; /// @brief Find the nearest common dominator for multiple BasicBlocks /// /// Helper class for StructurizeCFG /// TODO: Maybe move into common code class NearestCommonDominator { DominatorTree *DT; DTN2UnsignedMap IndexMap; BasicBlock *Result; unsigned ResultIndex; bool ExplicitMentioned; public: /// \brief Start a new query NearestCommonDominator(DominatorTree *DomTree) { DT = DomTree; Result = nullptr; } /// \brief Add BB to the resulting dominator void addBlock(BasicBlock *BB, bool Remember = true) { DomTreeNode *Node = DT->getNode(BB); if (!Result) { unsigned Numbering = 0; for (;Node;Node = Node->getIDom()) IndexMap[Node] = ++Numbering; Result = BB; ResultIndex = 1; ExplicitMentioned = Remember; return; } for (;Node;Node = Node->getIDom()) if (IndexMap.count(Node)) break; else IndexMap[Node] = 0; assert(Node && "Dominator tree invalid!"); unsigned Numbering = IndexMap[Node]; if (Numbering > ResultIndex) { Result = Node->getBlock(); ResultIndex = Numbering; ExplicitMentioned = Remember && (Result == BB); } else if (Numbering == ResultIndex) { ExplicitMentioned |= Remember; } } /// \brief Is "Result" one of the BBs added with "Remember" = True? bool wasResultExplicitMentioned() { return ExplicitMentioned; } /// \brief Get the query result BasicBlock *getResult() { return Result; } }; /// @brief Transforms the control flow graph on one single entry/exit region /// at a time. 
/// /// After the transform all "If"/"Then"/"Else" style control flow looks like /// this: /// /// \verbatim /// 1 /// || /// | | /// 2 | /// | / /// |/ /// 3 /// || Where: /// | | 1 = "If" block, calculates the condition /// 4 | 2 = "Then" subregion, runs if the condition is true /// | / 3 = "Flow" blocks, newly inserted flow blocks, rejoins the flow /// |/ 4 = "Else" optional subregion, runs if the condition is false /// 5 5 = "End" block, also rejoins the control flow /// \endverbatim /// /// Control flow is expressed as a branch where the true exit goes into the /// "Then"/"Else" region, while the false exit skips the region /// The condition for the optional "Else" region is expressed as a PHI node. /// The incomming values of the PHI node are true for the "If" edge and false /// for the "Then" edge. /// /// Additionally to that even complicated loops look like this: /// /// \verbatim /// 1 /// || /// | | /// 2 ^ Where: /// | / 1 = "Entry" block /// |/ 2 = "Loop" optional subregion, with all exits at "Flow" block /// 3 3 = "Flow" block, with back edge to entry block /// | /// \endverbatim /// /// The back edge of the "Flow" block is always on the false side of the branch /// while the true side continues the general flow. So the loop condition /// consist of a network of PHI nodes where the true incoming values expresses /// breaks and the false values expresses continue states. class StructurizeCFG : public RegionPass { Type *Boolean; ConstantInt *BoolTrue; ConstantInt *BoolFalse; UndefValue *BoolUndef; Function *Func; Region *ParentRegion; DominatorTree *DT; LoopInfo *LI; RNVector Order; BBSet Visited; BBPhiMap DeletedPhis; BB2BBVecMap AddedPhis; PredMap Predicates; BranchVector Conditions; BB2BBMap Loops; PredMap LoopPreds; BranchVector LoopConds; RegionNode *PrevNode; void orderNodes(); void analyzeLoops(RegionNode *N); Value *invert(Value *Condition); Value *buildCondition(BranchInst *Term, unsigned Idx, bool Invert); void gatherPredicates(RegionNode *N); void collectInfos(); void insertConditions(bool Loops); void delPhiValues(BasicBlock *From, BasicBlock *To); void addPhiValues(BasicBlock *From, BasicBlock *To); void setPhiValues(); void killTerminator(BasicBlock *BB); void changeExit(RegionNode *Node, BasicBlock *NewExit, bool IncludeDominator); BasicBlock *getNextFlow(BasicBlock *Dominator); BasicBlock *needPrefix(bool NeedEmpty); BasicBlock *needPostfix(BasicBlock *Flow, bool ExitUseAllowed); void setPrevNode(BasicBlock *BB); bool dominatesPredicates(BasicBlock *BB, RegionNode *Node); bool isPredictableTrue(RegionNode *Node); void wireFlow(bool ExitUseAllowed, BasicBlock *LoopEnd); void handleLoops(bool ExitUseAllowed, BasicBlock *LoopEnd); void createFlow(); void rebuildSSA(); public: static char ID; StructurizeCFG() : RegionPass(ID) { initializeStructurizeCFGPass(*PassRegistry::getPassRegistry()); } using Pass::doInitialization; bool doInitialization(Region *R, RGPassManager &RGM) override; bool runOnRegion(Region *R, RGPassManager &RGM) override; StringRef getPassName() const override { return "Structurize control flow"; } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequiredID(LowerSwitchID); AU.addRequired<DominatorTreeWrapperPass>(); AU.addRequired<LoopInfoWrapperPass>(); AU.addPreserved<DominatorTreeWrapperPass>(); RegionPass::getAnalysisUsage(AU); } }; } // end anonymous namespace char StructurizeCFG::ID = 0; INITIALIZE_PASS_BEGIN(StructurizeCFG, "structurizecfg", "Structurize the CFG", false, false) INITIALIZE_PASS_DEPENDENCY(LowerSwitch) 
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(RegionInfoPass) INITIALIZE_PASS_END(StructurizeCFG, "structurizecfg", "Structurize the CFG", false, false) /// \brief Initialize the types and constants used in the pass bool StructurizeCFG::doInitialization(Region *R, RGPassManager &RGM) { LLVMContext &Context = R->getEntry()->getContext(); Boolean = Type::getInt1Ty(Context); BoolTrue = ConstantInt::getTrue(Context); BoolFalse = ConstantInt::getFalse(Context); BoolUndef = UndefValue::get(Boolean); return false; } /// \brief Build up the general order of nodes void StructurizeCFG::orderNodes() { RNVector TempOrder; ReversePostOrderTraversal<Region*> RPOT(ParentRegion); TempOrder.append(RPOT.begin(), RPOT.end()); std::map<Loop*, unsigned> LoopBlocks; // The reverse post-order traversal of the list gives us an ordering close // to what we want. The only problem with it is that sometimes backedges // for outer loops will be visited before backedges for inner loops. for (RegionNode *RN : TempOrder) { BasicBlock *BB = RN->getEntry(); Loop *Loop = LI->getLoopFor(BB); if (!LoopBlocks.count(Loop)) { LoopBlocks[Loop] = 1; continue; } LoopBlocks[Loop]++; } unsigned CurrentLoopDepth = 0; Loop *CurrentLoop = nullptr; BBSet TempVisited; for (RNVector::iterator I = TempOrder.begin(), E = TempOrder.end(); I != E; ++I) { BasicBlock *BB = (*I)->getEntry(); unsigned LoopDepth = LI->getLoopDepth(BB); if (std::find(Order.begin(), Order.end(), *I) != Order.end()) continue; if (LoopDepth < CurrentLoopDepth) { // Make sure we have visited all blocks in this loop before moving back to // the outer loop. RNVector::iterator LoopI = I; while(LoopBlocks[CurrentLoop]) { LoopI++; BasicBlock *LoopBB = (*LoopI)->getEntry(); if (LI->getLoopFor(LoopBB) == CurrentLoop) { LoopBlocks[CurrentLoop]--; Order.push_back(*LoopI); } } } CurrentLoop = LI->getLoopFor(BB); if (CurrentLoop) { LoopBlocks[CurrentLoop]--; } CurrentLoopDepth = LoopDepth; Order.push_back(*I); } // This pass originally used a post-order traversal and then operated on // the list in reverse. Now that we are using a reverse post-order traversal // rather than re-working the whole pass to operate on the list in order, // we just reverse the list and continue to operate on it in reverse. 
std::reverse(Order.begin(), Order.end()); } /// \brief Determine the end of the loops void StructurizeCFG::analyzeLoops(RegionNode *N) { if (N->isSubRegion()) { // Test for exit as back edge BasicBlock *Exit = N->getNodeAs<Region>()->getExit(); if (Visited.count(Exit)) Loops[Exit] = N->getEntry(); } else { // Test for sucessors as back edge BasicBlock *BB = N->getNodeAs<BasicBlock>(); BranchInst *Term = cast<BranchInst>(BB->getTerminator()); for (unsigned i = 0, e = Term->getNumSuccessors(); i != e; ++i) { BasicBlock *Succ = Term->getSuccessor(i); if (Visited.count(Succ)) { Loops[Succ] = BB; } } } } /// \brief Invert the given condition Value *StructurizeCFG::invert(Value *Condition) { // First: Check if it's a constant if (Condition == BoolTrue) return BoolFalse; if (Condition == BoolFalse) return BoolTrue; if (Condition == BoolUndef) return BoolUndef; // Second: If the condition is already inverted, return the original value if (match(Condition, m_Not(m_Value(Condition)))) return Condition; if (Instruction *Inst = dyn_cast<Instruction>(Condition)) { // Third: Check all the users for an invert BasicBlock *Parent = Inst->getParent(); for (User *U : Condition->users()) if (Instruction *I = dyn_cast<Instruction>(U)) if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition)))) return I; // Last option: Create a new instruction return BinaryOperator::CreateNot(Condition, "", Parent->getTerminator()); } if (Argument *Arg = dyn_cast<Argument>(Condition)) { BasicBlock &EntryBlock = Arg->getParent()->getEntryBlock(); return BinaryOperator::CreateNot(Condition, Arg->getName() + ".inv", EntryBlock.getTerminator()); } llvm_unreachable("Unhandled condition to invert"); } /// \brief Build the condition for one edge Value *StructurizeCFG::buildCondition(BranchInst *Term, unsigned Idx, bool Invert) { Value *Cond = Invert ? 
BoolFalse : BoolTrue; if (Term->isConditional()) { Cond = Term->getCondition(); if (Idx != (unsigned)Invert) Cond = invert(Cond); } return Cond; } /// \brief Analyze the predecessors of each block and build up predicates void StructurizeCFG::gatherPredicates(RegionNode *N) { RegionInfo *RI = ParentRegion->getRegionInfo(); BasicBlock *BB = N->getEntry(); BBPredicates &Pred = Predicates[BB]; BBPredicates &LPred = LoopPreds[BB]; for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) { // Ignore it if it's a branch from outside into our region entry if (!ParentRegion->contains(*PI)) continue; Region *R = RI->getRegionFor(*PI); if (R == ParentRegion) { // It's a top level block in our region BranchInst *Term = cast<BranchInst>((*PI)->getTerminator()); for (unsigned i = 0, e = Term->getNumSuccessors(); i != e; ++i) { BasicBlock *Succ = Term->getSuccessor(i); if (Succ != BB) continue; if (Visited.count(*PI)) { // Normal forward edge if (Term->isConditional()) { // Try to treat it like an ELSE block BasicBlock *Other = Term->getSuccessor(!i); if (Visited.count(Other) && !Loops.count(Other) && !Pred.count(Other) && !Pred.count(*PI)) { Pred[Other] = BoolFalse; Pred[*PI] = BoolTrue; continue; } } Pred[*PI] = buildCondition(Term, i, false); } else { // Back edge LPred[*PI] = buildCondition(Term, i, true); } } } else { // It's an exit from a sub region while (R->getParent() != ParentRegion) R = R->getParent(); // Edge from inside a subregion to its entry, ignore it if (*R == *N) continue; BasicBlock *Entry = R->getEntry(); if (Visited.count(Entry)) Pred[Entry] = BoolTrue; else LPred[Entry] = BoolFalse; } } } /// \brief Collect various loop and predicate infos void StructurizeCFG::collectInfos() { // Reset predicate Predicates.clear(); // and loop infos Loops.clear(); LoopPreds.clear(); // Reset the visited nodes Visited.clear(); for (RNVector::reverse_iterator OI = Order.rbegin(), OE = Order.rend(); OI != OE; ++OI) { DEBUG(dbgs() << "Visiting: " << ((*OI)->isSubRegion() ? "SubRegion with entry: " : "") << (*OI)->getEntry()->getName() << " Loop Depth: " << LI->getLoopDepth((*OI)->getEntry()) << "\n"); // Analyze all the conditions leading to a node gatherPredicates(*OI); // Remember that we've seen this node Visited.insert((*OI)->getEntry()); // Find the last back edges analyzeLoops(*OI); } } /// \brief Insert the missing branch conditions void StructurizeCFG::insertConditions(bool Loops) { BranchVector &Conds = Loops ? LoopConds : Conditions; Value *Default = Loops ? BoolTrue : BoolFalse; SSAUpdater PhiInserter; for (BranchInst *Term : Conds) { assert(Term->isConditional()); BasicBlock *Parent = Term->getParent(); BasicBlock *SuccTrue = Term->getSuccessor(0); BasicBlock *SuccFalse = Term->getSuccessor(1); PhiInserter.Initialize(Boolean, ""); PhiInserter.AddAvailableValue(&Func->getEntryBlock(), Default); PhiInserter.AddAvailableValue(Loops ? SuccFalse : Parent, Default); BBPredicates &Preds = Loops ? 
LoopPreds[SuccFalse] : Predicates[SuccTrue]; NearestCommonDominator Dominator(DT); Dominator.addBlock(Parent, false); Value *ParentValue = nullptr; for (BBPredicates::iterator PI = Preds.begin(), PE = Preds.end(); PI != PE; ++PI) { if (PI->first == Parent) { ParentValue = PI->second; break; } PhiInserter.AddAvailableValue(PI->first, PI->second); Dominator.addBlock(PI->first); } if (ParentValue) { Term->setCondition(ParentValue); } else { if (!Dominator.wasResultExplicitMentioned()) PhiInserter.AddAvailableValue(Dominator.getResult(), Default); Term->setCondition(PhiInserter.GetValueInMiddleOfBlock(Parent)); } } } /// \brief Remove all PHI values coming from "From" into "To" and remember /// them in DeletedPhis void StructurizeCFG::delPhiValues(BasicBlock *From, BasicBlock *To) { PhiMap &Map = DeletedPhis[To]; for (BasicBlock::iterator I = To->begin(), E = To->end(); I != E && isa<PHINode>(*I);) { PHINode &Phi = cast<PHINode>(*I++); while (Phi.getBasicBlockIndex(From) != -1) { Value *Deleted = Phi.removeIncomingValue(From, false); Map[&Phi].push_back(std::make_pair(From, Deleted)); } } } /// \brief Add a dummy PHI value as soon as we knew the new predecessor void StructurizeCFG::addPhiValues(BasicBlock *From, BasicBlock *To) { for (BasicBlock::iterator I = To->begin(), E = To->end(); I != E && isa<PHINode>(*I);) { PHINode &Phi = cast<PHINode>(*I++); Value *Undef = UndefValue::get(Phi.getType()); Phi.addIncoming(Undef, From); } AddedPhis[To].push_back(From); } /// \brief Add the real PHI value as soon as everything is set up void StructurizeCFG::setPhiValues() { SSAUpdater Updater; for (BB2BBVecMap::iterator AI = AddedPhis.begin(), AE = AddedPhis.end(); AI != AE; ++AI) { BasicBlock *To = AI->first; BBVector &From = AI->second; if (!DeletedPhis.count(To)) continue; PhiMap &Map = DeletedPhis[To]; for (PhiMap::iterator PI = Map.begin(), PE = Map.end(); PI != PE; ++PI) { PHINode *Phi = PI->first; Value *Undef = UndefValue::get(Phi->getType()); Updater.Initialize(Phi->getType(), ""); Updater.AddAvailableValue(&Func->getEntryBlock(), Undef); Updater.AddAvailableValue(To, Undef); NearestCommonDominator Dominator(DT); Dominator.addBlock(To, false); for (BBValueVector::iterator VI = PI->second.begin(), VE = PI->second.end(); VI != VE; ++VI) { Updater.AddAvailableValue(VI->first, VI->second); Dominator.addBlock(VI->first); } if (!Dominator.wasResultExplicitMentioned()) Updater.AddAvailableValue(Dominator.getResult(), Undef); for (BBVector::iterator FI = From.begin(), FE = From.end(); FI != FE; ++FI) { int Idx = Phi->getBasicBlockIndex(*FI); assert(Idx != -1); Phi->setIncomingValue(Idx, Updater.GetValueAtEndOfBlock(*FI)); } } DeletedPhis.erase(To); } assert(DeletedPhis.empty()); } /// \brief Remove phi values from all successors and then remove the terminator. 
void StructurizeCFG::killTerminator(BasicBlock *BB) { TerminatorInst *Term = BB->getTerminator(); if (!Term) return; for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI) { delPhiValues(BB, *SI); } Term->eraseFromParent(); } /// \brief Let node exit(s) point to NewExit void StructurizeCFG::changeExit(RegionNode *Node, BasicBlock *NewExit, bool IncludeDominator) { if (Node->isSubRegion()) { Region *SubRegion = Node->getNodeAs<Region>(); BasicBlock *OldExit = SubRegion->getExit(); BasicBlock *Dominator = nullptr; // Find all the edges from the sub region to the exit for (pred_iterator I = pred_begin(OldExit), E = pred_end(OldExit); I != E;) { BasicBlock *BB = *I++; if (!SubRegion->contains(BB)) continue; // Modify the edges to point to the new exit delPhiValues(BB, OldExit); BB->getTerminator()->replaceUsesOfWith(OldExit, NewExit); addPhiValues(BB, NewExit); // Find the new dominator (if requested) if (IncludeDominator) { if (!Dominator) Dominator = BB; else Dominator = DT->findNearestCommonDominator(Dominator, BB); } } // Change the dominator (if requested) if (Dominator) DT->changeImmediateDominator(NewExit, Dominator); // Update the region info SubRegion->replaceExit(NewExit); } else { BasicBlock *BB = Node->getNodeAs<BasicBlock>(); killTerminator(BB); BranchInst::Create(NewExit, BB); addPhiValues(BB, NewExit); if (IncludeDominator) DT->changeImmediateDominator(NewExit, BB); } } /// \brief Create a new flow node and update dominator tree and region info BasicBlock *StructurizeCFG::getNextFlow(BasicBlock *Dominator) { LLVMContext &Context = Func->getContext(); BasicBlock *Insert = Order.empty() ? ParentRegion->getExit() : Order.back()->getEntry(); BasicBlock *Flow = BasicBlock::Create(Context, FlowBlockName, Func, Insert); DT->addNewBlock(Flow, Dominator); ParentRegion->getRegionInfo()->setRegionFor(Flow, ParentRegion); return Flow; } /// \brief Create a new or reuse the previous node as flow node BasicBlock *StructurizeCFG::needPrefix(bool NeedEmpty) { BasicBlock *Entry = PrevNode->getEntry(); if (!PrevNode->isSubRegion()) { killTerminator(Entry); if (!NeedEmpty || Entry->getFirstInsertionPt() == Entry->end()) return Entry; } // create a new flow node BasicBlock *Flow = getNextFlow(Entry); // and wire it up changeExit(PrevNode, Flow, true); PrevNode = ParentRegion->getBBNode(Flow); return Flow; } /// \brief Returns the region exit if possible, otherwise just a new flow node BasicBlock *StructurizeCFG::needPostfix(BasicBlock *Flow, bool ExitUseAllowed) { if (Order.empty() && ExitUseAllowed) { BasicBlock *Exit = ParentRegion->getExit(); DT->changeImmediateDominator(Exit, Flow); addPhiValues(Flow, Exit); return Exit; } return getNextFlow(Flow); } /// \brief Set the previous node void StructurizeCFG::setPrevNode(BasicBlock *BB) { PrevNode = ParentRegion->contains(BB) ? ParentRegion->getBBNode(BB) : nullptr; } /// \brief Does BB dominate all the predicates of Node ? bool StructurizeCFG::dominatesPredicates(BasicBlock *BB, RegionNode *Node) { BBPredicates &Preds = Predicates[Node->getEntry()]; for (BBPredicates::iterator PI = Preds.begin(), PE = Preds.end(); PI != PE; ++PI) { if (!DT->dominates(BB, PI->first)) return false; } return true; } /// \brief Can we predict that this node will always be called? 
bool StructurizeCFG::isPredictableTrue(RegionNode *Node) { BBPredicates &Preds = Predicates[Node->getEntry()]; bool Dominated = false; // Regionentry is always true if (!PrevNode) return true; for (BBPredicates::iterator I = Preds.begin(), E = Preds.end(); I != E; ++I) { if (I->second != BoolTrue) return false; if (!Dominated && DT->dominates(I->first, PrevNode->getEntry())) Dominated = true; } // TODO: The dominator check is too strict return Dominated; } /// Take one node from the order vector and wire it up void StructurizeCFG::wireFlow(bool ExitUseAllowed, BasicBlock *LoopEnd) { RegionNode *Node = Order.pop_back_val(); Visited.insert(Node->getEntry()); if (isPredictableTrue(Node)) { // Just a linear flow if (PrevNode) { changeExit(PrevNode, Node->getEntry(), true); } PrevNode = Node; } else { // Insert extra prefix node (or reuse last one) BasicBlock *Flow = needPrefix(false); // Insert extra postfix node (or use exit instead) BasicBlock *Entry = Node->getEntry(); BasicBlock *Next = needPostfix(Flow, ExitUseAllowed); // let it point to entry and next block Conditions.push_back(BranchInst::Create(Entry, Next, BoolUndef, Flow)); addPhiValues(Flow, Entry); DT->changeImmediateDominator(Entry, Flow); PrevNode = Node; while (!Order.empty() && !Visited.count(LoopEnd) && dominatesPredicates(Entry, Order.back())) { handleLoops(false, LoopEnd); } changeExit(PrevNode, Next, false); setPrevNode(Next); } } void StructurizeCFG::handleLoops(bool ExitUseAllowed, BasicBlock *LoopEnd) { RegionNode *Node = Order.back(); BasicBlock *LoopStart = Node->getEntry(); if (!Loops.count(LoopStart)) { wireFlow(ExitUseAllowed, LoopEnd); return; } if (!isPredictableTrue(Node)) LoopStart = needPrefix(true); LoopEnd = Loops[Node->getEntry()]; wireFlow(false, LoopEnd); while (!Visited.count(LoopEnd)) { handleLoops(false, LoopEnd); } // If the start of the loop is the entry block, we can't branch to it so // insert a new dummy entry block. Function *LoopFunc = LoopStart->getParent(); if (LoopStart == &LoopFunc->getEntryBlock()) { LoopStart->setName("entry.orig"); BasicBlock *NewEntry = BasicBlock::Create(LoopStart->getContext(), "entry", LoopFunc, LoopStart); BranchInst::Create(LoopStart, NewEntry); } // Create an extra loop end node LoopEnd = needPrefix(false); BasicBlock *Next = needPostfix(LoopEnd, ExitUseAllowed); LoopConds.push_back(BranchInst::Create(Next, LoopStart, BoolUndef, LoopEnd)); addPhiValues(LoopEnd, LoopStart); setPrevNode(Next); } /// After this function control flow looks like it should be, but /// branches and PHI nodes only have undefined conditions. void StructurizeCFG::createFlow() { BasicBlock *Exit = ParentRegion->getExit(); bool EntryDominatesExit = DT->dominates(ParentRegion->getEntry(), Exit); DeletedPhis.clear(); AddedPhis.clear(); Conditions.clear(); LoopConds.clear(); PrevNode = nullptr; Visited.clear(); while (!Order.empty()) { handleLoops(EntryDominatesExit, nullptr); } if (PrevNode) changeExit(PrevNode, Exit, EntryDominatesExit); else assert(EntryDominatesExit); } /// Handle a rare case where the disintegrated nodes instructions /// no longer dominate all their uses. 
/// Not sure if this is really necessary.
void StructurizeCFG::rebuildSSA() {
  SSAUpdater Updater;
  for (auto *BB : ParentRegion->blocks())
    for (BasicBlock::iterator II = BB->begin(), IE = BB->end();
         II != IE; ++II) {
      bool Initialized = false;
      for (auto I = II->use_begin(), E = II->use_end(); I != E;) {
        Use &U = *I++;
        Instruction *User = cast<Instruction>(U.getUser());
        if (User->getParent() == BB) {
          continue;
        } else if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
          if (UserPN->getIncomingBlock(U) == BB)
            continue;
        }

        if (DT->dominates(II, User))
          continue;

        if (!Initialized) {
          Value *Undef = UndefValue::get(II->getType());
          Updater.Initialize(II->getType(), "");
          Updater.AddAvailableValue(&Func->getEntryBlock(), Undef);
          Updater.AddAvailableValue(BB, II);
          Initialized = true;
        }
        Updater.RewriteUseAfterInsertions(U);
      }
    }
}

/// \brief Run the transformation for each region found
bool StructurizeCFG::runOnRegion(Region *R, RGPassManager &RGM) {
  if (R->isTopLevelRegion())
    return false;

  Func = R->getEntry()->getParent();
  ParentRegion = R;

  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();

  orderNodes();
  collectInfos();
  createFlow();
  insertConditions(false);
  insertConditions(true);
  setPhiValues();
  rebuildSSA();

  // Cleanup
  Order.clear();
  Visited.clear();
  DeletedPhis.clear();
  AddedPhis.clear();
  Predicates.clear();
  Conditions.clear();
  Loops.clear();
  LoopPreds.clear();
  LoopConds.clear();

  return true;
}

/// \brief Create the pass
Pass *llvm::createStructurizeCFGPass() {
  return new StructurizeCFG();
}
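// Minimal usage sketch (illustrative, not part of the original file): the
// structurizer is a RegionPass, so the usual way to exercise it is through the
// legacy pass manager, which schedules the region/dominator/loop analyses it
// requires. The helper function name below is hypothetical.
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Transforms/Scalar.h"

static void runStructurizeCFGOnModule(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  PM.add(llvm::createStructurizeCFGPass()); // required analyses are scheduled
                                            // implicitly by the pass manager
  PM.run(M);
}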
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/DxilEliminateVector.cpp
/////////////////////////////////////////////////////////////////////////////// // // // DxilEliminateVector.cpp // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // A pass to remove vector instructions, especially in situations where // // optimizations are turned off. // // // /////////////////////////////////////////////////////////////////////////////// #include "llvm/IR/Dominators.h" #include "llvm/IR/Instructions.h" #include "llvm/Pass.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/PromoteMemToReg.h" #include "llvm/IR/DIBuilder.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/Analysis/DxilValueCache.h" #include <vector> using namespace llvm; namespace { class DxilEliminateVector : public FunctionPass { public: static char ID; DxilEliminateVector() : FunctionPass(ID) { initializeDxilEliminateVectorPass(*PassRegistry::getPassRegistry()); } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<DxilValueCache>(); AU.addRequired<DominatorTreeWrapperPass>(); AU.setPreservesAll(); // DxilValueCache is safe. CFG is not changed, so DT // is okay. } bool TryRewriteDebugInfoForVector(InsertElementInst *IE); bool runOnFunction(Function &F) override; StringRef getPassName() const override { return "Dxil Eliminate Vector"; } }; char DxilEliminateVector::ID; } // namespace static MetadataAsValue *GetAsMetadata(Instruction *I) { if (auto *L = LocalAsMetadata::getIfExists(I)) { if (auto *DINode = MetadataAsValue::getIfExists(I->getContext(), L)) { return DINode; } } return nullptr; } static bool IsZeroInitializer(Value *V) { Constant *C = dyn_cast<Constant>(V); return C && C->isZeroValue(); } static bool CollectVectorElements(Value *V, SmallVector<Value *, 4> &Elements) { if (InsertElementInst *IE = dyn_cast<InsertElementInst>(V)) { Value *Vec = IE->getOperand(0); Value *Element = IE->getOperand(1); Value *Index = IE->getOperand(2); if (!isa<UndefValue>(Vec) && !IsZeroInitializer(Vec)) { if (!CollectVectorElements(Vec, Elements)) return false; } ConstantInt *ConstIndex = dyn_cast<ConstantInt>(Index); if (!ConstIndex) return false; uint64_t IdxValue = ConstIndex->getLimitedValue(); if (IdxValue < 4) { if (Elements.size() <= IdxValue) Elements.resize(IdxValue + 1); Elements[IdxValue] = Element; } return true; } return false; } bool DxilEliminateVector::TryRewriteDebugInfoForVector(InsertElementInst *IE) { // If this is not ever used as meta-data, there's no debug MetadataAsValue *DebugI = GetAsMetadata(IE); if (!DebugI) return false; // Collect @dbg.value instructions SmallVector<DbgValueInst *, 4> DbgValueInsts; for (User *U : DebugI->users()) { if (DbgValueInst *DbgValueI = dyn_cast<DbgValueInst>(U)) { DbgValueInsts.push_back(DbgValueI); } } if (!DbgValueInsts.size()) return false; SmallVector<Value *, 4> Elements; if (!CollectVectorElements(IE, Elements)) return false; DIBuilder DIB(*IE->getModule()); const DataLayout &DL = IE->getModule()->getDataLayout(); // Go through the elements and create @dbg.value with bit-piece // expressions for them. 
  bool Changed = false;

  for (DbgValueInst *DVI : DbgValueInsts) {
    DIExpression *ParentExpr = DVI->getExpression();
    unsigned BitpieceOffset = 0;
    if (ParentExpr->isBitPiece())
      BitpieceOffset = ParentExpr->getBitPieceOffset();

    for (unsigned i = 0; i < Elements.size(); i++) {
      if (!Elements[i])
        continue;

      unsigned ElementSize = DL.getTypeSizeInBits(Elements[i]->getType());
      unsigned ElementAlign = DL.getTypeAllocSizeInBits(Elements[i]->getType());

      DIExpression *Expr = DIB.createBitPieceExpression(
          BitpieceOffset + i * ElementAlign, ElementSize);

      DIB.insertDbgValueIntrinsic(Elements[i], 0, DVI->getVariable(), Expr,
                                  DVI->getDebugLoc(), DVI);
      Changed = true;
    }

    DVI->eraseFromParent();
  }

  return Changed;
}

bool DxilEliminateVector::runOnFunction(Function &F) {
  auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  DxilValueCache *DVC = &getAnalysis<DxilValueCache>();

  std::vector<Instruction *> VectorInsts;
  std::vector<AllocaInst *> VectorAllocas;

  // Collect the vector insts and allocas.
  for (auto &BB : F) {
    for (auto &I : BB)
      if (isa<InsertElementInst>(&I) || isa<ExtractElementInst>(&I))
        VectorInsts.push_back(&I);
      else if (AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
        if (AI->getAllocatedType()->isVectorTy() && llvm::isAllocaPromotable(AI))
          VectorAllocas.push_back(AI);
      }
  }

  if (!VectorInsts.size())
    return false;

  bool Changed = false;

  // Promote the allocas if they exist. They could very well exist
  // because of the 'precise' qualifier.
  if (VectorAllocas.size()) {
    PromoteMemToReg(VectorAllocas, *DT);
    Changed = true;
  }

  // Iteratively try to remove them, until all are gone or we are unable to
  // do it anymore.
  unsigned Attempts = VectorInsts.size();
  for (unsigned i = 0; i < Attempts; i++) {
    bool LocalChange = false;
    for (unsigned j = 0; j < VectorInsts.size();) {
      auto *I = VectorInsts[j];
      bool Remove = false;

      if (InsertElementInst *IE = dyn_cast<InsertElementInst>(I)) {
        TryRewriteDebugInfoForVector(IE);
      }

      if (Value *V = DVC->GetValue(I, DT)) {
        I->replaceAllUsesWith(V);
        Remove = true;
      } else if (I->user_empty()) {
        Remove = true;
      }

      // Do the remove
      if (Remove) {
        LocalChange = true;
        I->eraseFromParent();
        VectorInsts.erase(VectorInsts.begin() + j);
      } else {
        j++;
      }
    }
    Changed |= LocalChange;
    if (!LocalChange)
      break;
  }

  return Changed;
}

Pass *llvm::createDxilEliminateVectorPass() { return new DxilEliminateVector(); }

INITIALIZE_PASS(DxilEliminateVector, "dxil-elim-vector",
                "Dxil Eliminate Vectors", false, false)
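// Illustrative example (assumed IR, not taken from a test case): when a vector
// is built with insertelement and described by a single llvm.dbg.value, the
// rewrite in TryRewriteDebugInfoForVector replaces that vector-level dbg.value
// with one bit-piece dbg.value per recovered element. Conceptually,
//
//   %v0 = insertelement <2 x float> undef, float %x, i32 0
//   %v1 = insertelement <2 x float> %v0,   float %y, i32 1
//   call void @llvm.dbg.value(metadata <2 x float> %v1, i64 0,
//                             metadata !var, metadata !expr)
//
// becomes
//
//   call void @llvm.dbg.value(metadata float %x, i64 0, metadata !var,
//                             metadata !DIExpression(DW_OP_bit_piece, 0, 32))
//   call void @llvm.dbg.value(metadata float %y, i64 0, metadata !var,
//                             metadata !DIExpression(DW_OP_bit_piece, 32, 32))
//
// after which runOnFunction can delete the insertelement chain once
// DxilValueCache or dead-use cleanup removes its remaining users.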
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
//===- MergedLoadStoreMotion.cpp - merge and hoist/sink load/stores -------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // //! \file //! \brief This pass performs merges of loads and stores on both sides of a // diamond (hammock). It hoists the loads and sinks the stores. // // The algorithm iteratively hoists two loads to the same address out of a // diamond (hammock) and merges them into a single load in the header. Similar // it sinks and merges two stores to the tail block (footer). The algorithm // iterates over the instructions of one side of the diamond and attempts to // find a matching load/store on the other side. It hoists / sinks when it // thinks it safe to do so. This optimization helps with eg. hiding load // latencies, triggering if-conversion, and reducing static code size. // //===----------------------------------------------------------------------===// // // // Example: // Diamond shaped code before merge: // // header: // br %cond, label %if.then, label %if.else // + + // + + // + + // if.then: if.else: // %lt = load %addr_l %le = load %addr_l // <use %lt> <use %le> // <...> <...> // store %st, %addr_s store %se, %addr_s // br label %if.end br label %if.end // + + // + + // + + // if.end ("footer"): // <...> // // Diamond shaped code after merge: // // header: // %l = load %addr_l // br %cond, label %if.then, label %if.else // + + // + + // + + // if.then: if.else: // <use %l> <use %l> // <...> <...> // br label %if.end br label %if.end // + + // + + // + + // if.end ("footer"): // %s.sink = phi [%st, if.then], [%se, if.else] // <...> // store %s.sink, %addr_s // <...> // // //===----------------------- TODO -----------------------------------------===// // // 1) Generalize to regions other than diamonds // 2) Be more aggressive merging memory operations // Note that both changes require register pressure control // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/CFG.h" #include "llvm/Analysis/Loads.h" #include "llvm/Analysis/MemoryBuiltins.h" #include "llvm/Analysis/MemoryDependenceAnalysis.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/IR/Metadata.h" #include "llvm/IR/PatternMatch.h" #include "llvm/Support/Allocator.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/SSAUpdater.h" #include "llvm/Transforms/Utils/Local.h" #include <vector> using namespace llvm; #define DEBUG_TYPE "mldst-motion" //===----------------------------------------------------------------------===// // MergedLoadStoreMotion Pass //===----------------------------------------------------------------------===// namespace { class MergedLoadStoreMotion : public FunctionPass { AliasAnalysis *AA; MemoryDependenceAnalysis *MD; public: static char ID; // Pass identification, replacement for typeid explicit MergedLoadStoreMotion(void) : FunctionPass(ID), MD(nullptr), MagicCompileTimeControl(250) { initializeMergedLoadStoreMotionPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function &F) override; private: // 
This transformation requires dominator postdominator info void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<TargetLibraryInfoWrapperPass>(); AU.addRequired<AliasAnalysis>(); AU.addPreserved<MemoryDependenceAnalysis>(); AU.addPreserved<AliasAnalysis>(); } // Helper routines /// /// \brief Remove instruction from parent and update memory dependence /// analysis. /// void removeInstruction(Instruction *Inst); BasicBlock *getDiamondTail(BasicBlock *BB); bool isDiamondHead(BasicBlock *BB); // Routines for hoisting loads bool isLoadHoistBarrierInRange(const Instruction& Start, const Instruction& End, LoadInst* LI); LoadInst *canHoistFromBlock(BasicBlock *BB, LoadInst *LI); void hoistInstruction(BasicBlock *BB, Instruction *HoistCand, Instruction *ElseInst); bool isSafeToHoist(Instruction *I) const; bool hoistLoad(BasicBlock *BB, LoadInst *HoistCand, LoadInst *ElseInst); bool mergeLoads(BasicBlock *BB); // Routines for sinking stores StoreInst *canSinkFromBlock(BasicBlock *BB, StoreInst *SI); PHINode *getPHIOperand(BasicBlock *BB, StoreInst *S0, StoreInst *S1); bool isStoreSinkBarrierInRange(const Instruction &Start, const Instruction &End, MemoryLocation Loc); bool sinkStore(BasicBlock *BB, StoreInst *SinkCand, StoreInst *ElseInst); bool mergeStores(BasicBlock *BB); // The mergeLoad/Store algorithms could have Size0 * Size1 complexity, // where Size0 and Size1 are the #instructions on the two sides of // the diamond. The constant chosen here is arbitrary. Compiler Time // Control is enforced by the check Size0 * Size1 < MagicCompileTimeControl. const int MagicCompileTimeControl; }; char MergedLoadStoreMotion::ID = 0; } /// /// \brief createMergedLoadStoreMotionPass - The public interface to this file. /// FunctionPass *llvm::createMergedLoadStoreMotionPass() { return new MergedLoadStoreMotion(); } INITIALIZE_PASS_BEGIN(MergedLoadStoreMotion, "mldst-motion", "MergedLoadStoreMotion", false, false) INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis) INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_END(MergedLoadStoreMotion, "mldst-motion", "MergedLoadStoreMotion", false, false) /// /// \brief Remove instruction from parent and update memory dependence analysis. /// void MergedLoadStoreMotion::removeInstruction(Instruction *Inst) { // Notify the memory dependence analysis. if (MD) { MD->removeInstruction(Inst); if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) MD->invalidateCachedPointerInfo(LI->getPointerOperand()); if (Inst->getType()->getScalarType()->isPointerTy()) { MD->invalidateCachedPointerInfo(Inst); } } Inst->eraseFromParent(); } /// /// \brief Return tail block of a diamond. 
/// BasicBlock *MergedLoadStoreMotion::getDiamondTail(BasicBlock *BB) { assert(isDiamondHead(BB) && "Basic block is not head of a diamond"); BranchInst *BI = (BranchInst *)(BB->getTerminator()); BasicBlock *Succ0 = BI->getSuccessor(0); BasicBlock *Tail = Succ0->getTerminator()->getSuccessor(0); return Tail; } /// /// \brief True when BB is the head of a diamond (hammock) /// bool MergedLoadStoreMotion::isDiamondHead(BasicBlock *BB) { if (!BB) return false; if (!isa<BranchInst>(BB->getTerminator())) return false; if (BB->getTerminator()->getNumSuccessors() != 2) return false; BranchInst *BI = (BranchInst *)(BB->getTerminator()); BasicBlock *Succ0 = BI->getSuccessor(0); BasicBlock *Succ1 = BI->getSuccessor(1); if (!Succ0->getSinglePredecessor() || Succ0->getTerminator()->getNumSuccessors() != 1) return false; if (!Succ1->getSinglePredecessor() || Succ1->getTerminator()->getNumSuccessors() != 1) return false; BasicBlock *Tail = Succ0->getTerminator()->getSuccessor(0); // Ignore triangles. if (Succ1->getTerminator()->getSuccessor(0) != Tail) return false; return true; } /// /// \brief True when instruction is a hoist barrier for a load /// /// Whenever an instruction could possibly modify the value /// being loaded or protect against the load from happening /// it is considered a hoist barrier. /// bool MergedLoadStoreMotion::isLoadHoistBarrierInRange(const Instruction& Start, const Instruction& End, LoadInst* LI) { MemoryLocation Loc = MemoryLocation::get(LI); return AA->canInstructionRangeModRef(Start, End, Loc, AliasAnalysis::Mod); } /// /// \brief Decide if a load can be hoisted /// /// When there is a load in \p BB to the same address as \p LI /// and it can be hoisted from \p BB, return that load. /// Otherwise return Null. /// LoadInst *MergedLoadStoreMotion::canHoistFromBlock(BasicBlock *BB1, LoadInst *Load0) { for (BasicBlock::iterator BBI = BB1->begin(), BBE = BB1->end(); BBI != BBE; ++BBI) { Instruction *Inst = BBI; // Only merge and hoist loads when their result in used only in BB if (!isa<LoadInst>(Inst) || Inst->isUsedOutsideOfBlock(BB1)) continue; LoadInst *Load1 = dyn_cast<LoadInst>(Inst); BasicBlock *BB0 = Load0->getParent(); MemoryLocation Loc0 = MemoryLocation::get(Load0); MemoryLocation Loc1 = MemoryLocation::get(Load1); if (AA->isMustAlias(Loc0, Loc1) && Load0->isSameOperationAs(Load1) && !isLoadHoistBarrierInRange(BB1->front(), *Load1, Load1) && !isLoadHoistBarrierInRange(BB0->front(), *Load0, Load0)) { return Load1; } } return nullptr; } /// /// \brief Merge two equivalent instructions \p HoistCand and \p ElseInst into /// \p BB /// /// BB is the head of a diamond /// void MergedLoadStoreMotion::hoistInstruction(BasicBlock *BB, Instruction *HoistCand, Instruction *ElseInst) { DEBUG(dbgs() << " Hoist Instruction into BB \n"; BB->dump(); dbgs() << "Instruction Left\n"; HoistCand->dump(); dbgs() << "\n"; dbgs() << "Instruction Right\n"; ElseInst->dump(); dbgs() << "\n"); // Hoist the instruction. assert(HoistCand->getParent() != BB); // Intersect optional metadata. HoistCand->intersectOptionalDataWith(ElseInst); combineMetadata(HoistCand, ElseInst, None); // HLSL Change: Preserve DXIL metadata // Prepend point for instruction insert Instruction *HoistPt = BB->getTerminator(); // Merged instruction Instruction *HoistedInst = HoistCand->clone(); // Hoist instruction. HoistedInst->insertBefore(HoistPt); HoistCand->replaceAllUsesWith(HoistedInst); removeInstruction(HoistCand); // Replace the else block instruction. 
ElseInst->replaceAllUsesWith(HoistedInst); removeInstruction(ElseInst); } /// /// \brief Return true if no operand of \p I is defined in I's parent block /// bool MergedLoadStoreMotion::isSafeToHoist(Instruction *I) const { BasicBlock *Parent = I->getParent(); for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { Instruction *Instr = dyn_cast<Instruction>(I->getOperand(i)); if (Instr && Instr->getParent() == Parent) return false; } return true; } /// /// \brief Merge two equivalent loads and GEPs and hoist into diamond head /// bool MergedLoadStoreMotion::hoistLoad(BasicBlock *BB, LoadInst *L0, LoadInst *L1) { // Only one definition? Instruction *A0 = dyn_cast<Instruction>(L0->getPointerOperand()); Instruction *A1 = dyn_cast<Instruction>(L1->getPointerOperand()); if (A0 && A1 && A0->isIdenticalTo(A1) && isSafeToHoist(A0) && A0->hasOneUse() && (A0->getParent() == L0->getParent()) && A1->hasOneUse() && (A1->getParent() == L1->getParent()) && isa<GetElementPtrInst>(A0)) { DEBUG(dbgs() << "Hoist Instruction into BB \n"; BB->dump(); dbgs() << "Instruction Left\n"; L0->dump(); dbgs() << "\n"; dbgs() << "Instruction Right\n"; L1->dump(); dbgs() << "\n"); hoistInstruction(BB, A0, A1); hoistInstruction(BB, L0, L1); return true; } else return false; } /// /// \brief Try to hoist two loads to same address into diamond header /// /// Starting from a diamond head block, iterate over the instructions in one /// successor block and try to match a load in the second successor. /// bool MergedLoadStoreMotion::mergeLoads(BasicBlock *BB) { bool MergedLoads = false; assert(isDiamondHead(BB)); BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator()); BasicBlock *Succ0 = BI->getSuccessor(0); BasicBlock *Succ1 = BI->getSuccessor(1); // #Instructions in Succ1 for Compile Time Control // int Size1 = Succ1->size(); // HLSL Change int Size1 = Succ1->compute_size_no_dbg(); // HLSL Change int NLoads = 0; for (BasicBlock::iterator BBI = Succ0->begin(), BBE = Succ0->end(); BBI != BBE;) { Instruction *I = BBI; ++BBI; // Only move non-simple (atomic, volatile) loads. LoadInst *L0 = dyn_cast<LoadInst>(I); if (!L0 || !L0->isSimple() || L0->isUsedOutsideOfBlock(Succ0)) continue; ++NLoads; if (NLoads * Size1 >= MagicCompileTimeControl) break; if (LoadInst *L1 = canHoistFromBlock(Succ1, L0)) { bool Res = hoistLoad(BB, L0, L1); MergedLoads |= Res; // Don't attempt to hoist above loads that had not been hoisted. if (!Res) break; } } return MergedLoads; } /// /// \brief True when instruction is a sink barrier for a store /// located in Loc /// /// Whenever an instruction could possibly read or modify the /// value being stored or protect against the store from /// happening it is considered a sink barrier. /// bool MergedLoadStoreMotion::isStoreSinkBarrierInRange(const Instruction &Start, const Instruction &End, MemoryLocation Loc) { return AA->canInstructionRangeModRef(Start, End, Loc, AliasAnalysis::ModRef); } /// /// \brief Check if \p BB contains a store to the same address as \p SI /// /// \return The store in \p when it is safe to sink. Otherwise return Null. /// StoreInst *MergedLoadStoreMotion::canSinkFromBlock(BasicBlock *BB1, StoreInst *Store0) { DEBUG(dbgs() << "can Sink? 
: "; Store0->dump(); dbgs() << "\n"); BasicBlock *BB0 = Store0->getParent(); for (BasicBlock::reverse_iterator RBI = BB1->rbegin(), RBE = BB1->rend(); RBI != RBE; ++RBI) { Instruction *Inst = &*RBI; if (!isa<StoreInst>(Inst)) continue; StoreInst *Store1 = cast<StoreInst>(Inst); MemoryLocation Loc0 = MemoryLocation::get(Store0); MemoryLocation Loc1 = MemoryLocation::get(Store1); if (AA->isMustAlias(Loc0, Loc1) && Store0->isSameOperationAs(Store1) && !isStoreSinkBarrierInRange(*(std::next(BasicBlock::iterator(Store1))), BB1->back(), Loc1) && !isStoreSinkBarrierInRange(*(std::next(BasicBlock::iterator(Store0))), BB0->back(), Loc0)) { return Store1; } } return nullptr; } /// /// \brief Create a PHI node in BB for the operands of S0 and S1 /// PHINode *MergedLoadStoreMotion::getPHIOperand(BasicBlock *BB, StoreInst *S0, StoreInst *S1) { // Create a phi if the values mismatch. PHINode *NewPN = 0; Value *Opd1 = S0->getValueOperand(); Value *Opd2 = S1->getValueOperand(); if (Opd1 != Opd2) { NewPN = PHINode::Create(Opd1->getType(), 2, Opd2->getName() + ".sink", BB->begin()); NewPN->addIncoming(Opd1, S0->getParent()); NewPN->addIncoming(Opd2, S1->getParent()); if (NewPN->getType()->getScalarType()->isPointerTy()) { // AA needs to be informed when a PHI-use of the pointer value is added for (unsigned I = 0, E = NewPN->getNumIncomingValues(); I != E; ++I) { unsigned J = PHINode::getOperandNumForIncomingValue(I); AA->addEscapingUse(NewPN->getOperandUse(J)); } if (MD) MD->invalidateCachedPointerInfo(NewPN); } } return NewPN; } /// /// \brief Merge two stores to same address and sink into \p BB /// /// Also sinks GEP instruction computing the store address /// bool MergedLoadStoreMotion::sinkStore(BasicBlock *BB, StoreInst *S0, StoreInst *S1) { // Only one definition? Instruction *A0 = dyn_cast<Instruction>(S0->getPointerOperand()); Instruction *A1 = dyn_cast<Instruction>(S1->getPointerOperand()); if (A0 && A1 && A0->isIdenticalTo(A1) && A0->hasOneUse() && (A0->getParent() == S0->getParent()) && A1->hasOneUse() && (A1->getParent() == S1->getParent()) && isa<GetElementPtrInst>(A0)) { DEBUG(dbgs() << "Sink Instruction into BB \n"; BB->dump(); dbgs() << "Instruction Left\n"; S0->dump(); dbgs() << "\n"; dbgs() << "Instruction Right\n"; S1->dump(); dbgs() << "\n"); // Hoist the instruction. BasicBlock::iterator InsertPt = BB->getFirstInsertionPt(); // Intersect optional metadata. S0->intersectOptionalDataWith(S1); combineMetadata(S0, S1, None); // HLSL Change: Preserve DXIL metadata // Create the new store to be inserted at the join point. StoreInst *SNew = (StoreInst *)(S0->clone()); Instruction *ANew = A0->clone(); SNew->insertBefore(InsertPt); ANew->insertBefore(SNew); assert(S0->getParent() == A0->getParent()); assert(S1->getParent() == A1->getParent()); PHINode *NewPN = getPHIOperand(BB, S0, S1); // New PHI operand? Use it. if (NewPN) SNew->setOperand(0, NewPN); removeInstruction(S0); removeInstruction(S1); A0->replaceAllUsesWith(ANew); removeInstruction(A0); A1->replaceAllUsesWith(ANew); removeInstruction(A1); return true; } return false; } /// /// \brief True when two stores are equivalent and can sink into the footer /// /// Starting from a diamond tail block, iterate over the instructions in one /// predecessor block and try to match a store in the second predecessor. 
/// bool MergedLoadStoreMotion::mergeStores(BasicBlock *T) { bool MergedStores = false; assert(T && "Footer of a diamond cannot be empty"); pred_iterator PI = pred_begin(T), E = pred_end(T); assert(PI != E); BasicBlock *Pred0 = *PI; ++PI; BasicBlock *Pred1 = *PI; ++PI; // tail block of a diamond/hammock? if (Pred0 == Pred1) return false; // No. if (PI != E) return false; // No. More than 2 predecessors. // #Instructions in Succ1 for Compile Time Control // int Size1 = Succ1->size(); // HLSL Change int Size1 = Pred1->compute_size_no_dbg(); // HLSL Change int NStores = 0; for (BasicBlock::reverse_iterator RBI = Pred0->rbegin(), RBE = Pred0->rend(); RBI != RBE;) { Instruction *I = &*RBI; ++RBI; // Sink move non-simple (atomic, volatile) stores if (!isa<StoreInst>(I)) continue; StoreInst *S0 = (StoreInst *)I; if (!S0->isSimple()) continue; ++NStores; if (NStores * Size1 >= MagicCompileTimeControl) break; if (StoreInst *S1 = canSinkFromBlock(Pred1, S0)) { bool Res = sinkStore(T, S0, S1); MergedStores |= Res; // Don't attempt to sink below stores that had to stick around // But after removal of a store and some of its feeding // instruction search again from the beginning since the iterator // is likely stale at this point. if (!Res) break; else { RBI = Pred0->rbegin(); RBE = Pred0->rend(); DEBUG(dbgs() << "Search again\n"; Instruction *I = &*RBI; I->dump()); } } } return MergedStores; } /// /// \brief Run the transformation for each function /// bool MergedLoadStoreMotion::runOnFunction(Function &F) { MD = getAnalysisIfAvailable<MemoryDependenceAnalysis>(); AA = &getAnalysis<AliasAnalysis>(); bool Changed = false; DEBUG(dbgs() << "Instruction Merger\n"); // Merge unconditional branches, allowing PRE to catch more // optimization opportunities. for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE;) { BasicBlock *BB = FI++; // Hoist equivalent loads and sink stores // outside diamonds when possible if (isDiamondHead(BB)) { Changed |= mergeLoads(BB); Changed |= mergeStores(getDiamondTail(BB)); } } return Changed; }
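// Illustrative example (assumed IR, not from a test case): sinkStore() also
// sinks a matching one-use GEP that computes the store address, which the
// diagram at the top of this file does not show. Roughly,
//
//   if.then:
//     %a0 = getelementptr i32, i32* %p, i64 1
//     store i32 %x, i32* %a0
//   if.else:
//     %a1 = getelementptr i32, i32* %p, i64 1
//     store i32 %y, i32* %a1
//
// becomes, at the head of if.end:
//
//   %s.sink = phi i32 [ %x, %if.then ], [ %y, %if.else ]
//   %a0.new = getelementptr i32, i32* %p, i64 1
//   store i32 %s.sink, i32* %a0.new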
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/ConstantHoisting.cpp
//===- ConstantHoisting.cpp - Prepare code for expensive constants --------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass identifies expensive constants to hoist and coalesces them to // better prepare it for SelectionDAG-based code generation. This works around // the limitations of the basic-block-at-a-time approach. // // First it scans all instructions for integer constants and calculates its // cost. If the constant can be folded into the instruction (the cost is // TCC_Free) or the cost is just a simple operation (TCC_BASIC), then we don't // consider it expensive and leave it alone. This is the default behavior and // the default implementation of getIntImmCost will always return TCC_Free. // // If the cost is more than TCC_BASIC, then the integer constant can't be folded // into the instruction and it might be beneficial to hoist the constant. // Similar constants are coalesced to reduce register pressure and // materialization code. // // When a constant is hoisted, it is also hidden behind a bitcast to force it to // be live-out of the basic block. Otherwise the constant would be just // duplicated and each basic block would have its own copy in the SelectionDAG. // The SelectionDAG recognizes such constants as opaque and doesn't perform // certain transformations on them, which would create a new expensive constant. // // This optimization is only applied to integer constants in instructions and // simple (this means not nested) constant cast expressions. For example: // %0 = load i64* inttoptr (i64 big_constant to i64*) //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/Pass.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include <tuple> using namespace llvm; #define DEBUG_TYPE "consthoist" STATISTIC(NumConstantsHoisted, "Number of constants hoisted"); STATISTIC(NumConstantsRebased, "Number of constants rebased"); namespace { struct ConstantUser; struct RebasedConstantInfo; typedef SmallVector<ConstantUser, 8> ConstantUseListType; typedef SmallVector<RebasedConstantInfo, 4> RebasedConstantListType; /// \brief Keeps track of the user of a constant and the operand index where the /// constant is used. struct ConstantUser { Instruction *Inst; unsigned OpndIdx; ConstantUser(Instruction *Inst, unsigned Idx) : Inst(Inst), OpndIdx(Idx) { } }; /// \brief Keeps track of a constant candidate and its uses. struct ConstantCandidate { ConstantUseListType Uses; ConstantInt *ConstInt; unsigned CumulativeCost; ConstantCandidate(ConstantInt *ConstInt) : ConstInt(ConstInt), CumulativeCost(0) { } /// \brief Add the user to the use list and update the cost. void addUser(Instruction *Inst, unsigned Idx, unsigned Cost) { CumulativeCost += Cost; Uses.push_back(ConstantUser(Inst, Idx)); } }; /// \brief This represents a constant that has been rebased with respect to a /// base constant. The difference to the base constant is recorded in Offset. 
struct RebasedConstantInfo { ConstantUseListType Uses; Constant *Offset; RebasedConstantInfo(ConstantUseListType &&Uses, Constant *Offset) : Uses(std::move(Uses)), Offset(Offset) { } }; /// \brief A base constant and all its rebased constants. struct ConstantInfo { ConstantInt *BaseConstant; RebasedConstantListType RebasedConstants; }; /// \brief The constant hoisting pass. class ConstantHoisting : public FunctionPass { typedef DenseMap<ConstantInt *, unsigned> ConstCandMapType; typedef std::vector<ConstantCandidate> ConstCandVecType; const TargetTransformInfo *TTI; DominatorTree *DT; BasicBlock *Entry; /// Keeps track of constant candidates found in the function. ConstCandVecType ConstCandVec; /// Keep track of cast instructions we already cloned. SmallDenseMap<Instruction *, Instruction *> ClonedCastMap; /// These are the final constants we decided to hoist. SmallVector<ConstantInfo, 8> ConstantVec; public: static char ID; // Pass identification, replacement for typeid ConstantHoisting() : FunctionPass(ID), TTI(nullptr), DT(nullptr), Entry(nullptr) { initializeConstantHoistingPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function &Fn) override; StringRef getPassName() const override { return "Constant Hoisting"; } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesCFG(); AU.addRequired<DominatorTreeWrapperPass>(); AU.addRequired<TargetTransformInfoWrapperPass>(); } private: /// \brief Initialize the pass. void setup(Function &Fn) { DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(Fn); Entry = &Fn.getEntryBlock(); } /// \brief Cleanup. void cleanup() { ConstantVec.clear(); ClonedCastMap.clear(); ConstCandVec.clear(); TTI = nullptr; DT = nullptr; Entry = nullptr; } Instruction *findMatInsertPt(Instruction *Inst, unsigned Idx = ~0U) const; Instruction *findConstantInsertionPoint(const ConstantInfo &ConstInfo) const; void collectConstantCandidates(ConstCandMapType &ConstCandMap, Instruction *Inst, unsigned Idx, ConstantInt *ConstInt); void collectConstantCandidates(ConstCandMapType &ConstCandMap, Instruction *Inst); void collectConstantCandidates(Function &Fn); void findAndMakeBaseConstant(ConstCandVecType::iterator S, ConstCandVecType::iterator E); void findBaseConstants(); void emitBaseConstants(Instruction *Base, Constant *Offset, const ConstantUser &ConstUser); bool emitBaseConstants(); void deleteDeadCastInst() const; bool optimizeConstants(Function &Fn); }; } char ConstantHoisting::ID = 0; INITIALIZE_PASS_BEGIN(ConstantHoisting, "consthoist", "Constant Hoisting", false, false) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) INITIALIZE_PASS_END(ConstantHoisting, "consthoist", "Constant Hoisting", false, false) FunctionPass *llvm::createConstantHoistingPass() { return new ConstantHoisting(); } /// \brief Perform the constant hoisting optimization for the given function. 
bool ConstantHoisting::runOnFunction(Function &Fn) { if (skipOptnoneFunction(Fn)) return false; DEBUG(dbgs() << "********** Begin Constant Hoisting **********\n"); DEBUG(dbgs() << "********** Function: " << Fn.getName() << '\n'); setup(Fn); bool MadeChange = optimizeConstants(Fn); if (MadeChange) { DEBUG(dbgs() << "********** Function after Constant Hoisting: " << Fn.getName() << '\n'); DEBUG(dbgs() << Fn); } DEBUG(dbgs() << "********** End Constant Hoisting **********\n"); cleanup(); return MadeChange; } /// \brief Find the constant materialization insertion point. Instruction *ConstantHoisting::findMatInsertPt(Instruction *Inst, unsigned Idx) const { // If the operand is a cast instruction, then we have to materialize the // constant before the cast instruction. if (Idx != ~0U) { Value *Opnd = Inst->getOperand(Idx); if (auto CastInst = dyn_cast<Instruction>(Opnd)) if (CastInst->isCast()) return CastInst; } // The simple and common case. This also includes constant expressions. if (!isa<PHINode>(Inst) && !isa<LandingPadInst>(Inst)) return Inst; // We can't insert directly before a phi node or landing pad. Insert before // the terminator of the incoming or dominating block. assert(Entry != Inst->getParent() && "PHI or landing pad in entry block!"); if (Idx != ~0U && isa<PHINode>(Inst)) return cast<PHINode>(Inst)->getIncomingBlock(Idx)->getTerminator(); BasicBlock *IDom = DT->getNode(Inst->getParent())->getIDom()->getBlock(); return IDom->getTerminator(); } /// \brief Find an insertion point that dominates all uses. Instruction *ConstantHoisting:: findConstantInsertionPoint(const ConstantInfo &ConstInfo) const { assert(!ConstInfo.RebasedConstants.empty() && "Invalid constant info entry."); // Collect all basic blocks. SmallPtrSet<BasicBlock *, 8> BBs; for (auto const &RCI : ConstInfo.RebasedConstants) for (auto const &U : RCI.Uses) BBs.insert(findMatInsertPt(U.Inst, U.OpndIdx)->getParent()); if (BBs.count(Entry)) return &Entry->front(); while (BBs.size() >= 2) { BasicBlock *BB, *BB1, *BB2; BB1 = *BBs.begin(); BB2 = *std::next(BBs.begin()); BB = DT->findNearestCommonDominator(BB1, BB2); if (BB == Entry) return &Entry->front(); BBs.erase(BB1); BBs.erase(BB2); BBs.insert(BB); } assert((BBs.size() == 1) && "Expected only one element."); Instruction &FirstInst = (*BBs.begin())->front(); return findMatInsertPt(&FirstInst); } /// \brief Record constant integer ConstInt for instruction Inst at operand /// index Idx. /// /// The operand at index Idx is not necessarily the constant integer itself. It /// could also be a cast instruction or a constant expression that uses the // constant integer. void ConstantHoisting::collectConstantCandidates(ConstCandMapType &ConstCandMap, Instruction *Inst, unsigned Idx, ConstantInt *ConstInt) { unsigned Cost; // Ask the target about the cost of materializing the constant for the given // instruction and operand index. if (auto IntrInst = dyn_cast<IntrinsicInst>(Inst)) Cost = TTI->getIntImmCost(IntrInst->getIntrinsicID(), Idx, ConstInt->getValue(), ConstInt->getType()); else Cost = TTI->getIntImmCost(Inst->getOpcode(), Idx, ConstInt->getValue(), ConstInt->getType()); // Ignore cheap integer constants. 
if (Cost > TargetTransformInfo::TCC_Basic) { ConstCandMapType::iterator Itr; bool Inserted; std::tie(Itr, Inserted) = ConstCandMap.insert(std::make_pair(ConstInt, 0)); if (Inserted) { ConstCandVec.push_back(ConstantCandidate(ConstInt)); Itr->second = ConstCandVec.size() - 1; } ConstCandVec[Itr->second].addUser(Inst, Idx, Cost); DEBUG(if (isa<ConstantInt>(Inst->getOperand(Idx))) dbgs() << "Collect constant " << *ConstInt << " from " << *Inst << " with cost " << Cost << '\n'; else dbgs() << "Collect constant " << *ConstInt << " indirectly from " << *Inst << " via " << *Inst->getOperand(Idx) << " with cost " << Cost << '\n'; ); } } /// \brief Scan the instruction for expensive integer constants and record them /// in the constant candidate vector. void ConstantHoisting::collectConstantCandidates(ConstCandMapType &ConstCandMap, Instruction *Inst) { // Skip all cast instructions. They are visited indirectly later on. if (Inst->isCast()) return; // Can't handle inline asm. Skip it. if (auto Call = dyn_cast<CallInst>(Inst)) if (isa<InlineAsm>(Call->getCalledValue())) return; // Scan all operands. for (unsigned Idx = 0, E = Inst->getNumOperands(); Idx != E; ++Idx) { Value *Opnd = Inst->getOperand(Idx); // Visit constant integers. if (auto ConstInt = dyn_cast<ConstantInt>(Opnd)) { collectConstantCandidates(ConstCandMap, Inst, Idx, ConstInt); continue; } // Visit cast instructions that have constant integers. if (auto CastInst = dyn_cast<Instruction>(Opnd)) { // Only visit cast instructions, which have been skipped. All other // instructions should have already been visited. if (!CastInst->isCast()) continue; if (auto *ConstInt = dyn_cast<ConstantInt>(CastInst->getOperand(0))) { // Pretend the constant is directly used by the instruction and ignore // the cast instruction. collectConstantCandidates(ConstCandMap, Inst, Idx, ConstInt); continue; } } // Visit constant expressions that have constant integers. if (auto ConstExpr = dyn_cast<ConstantExpr>(Opnd)) { // Only visit constant cast expressions. if (!ConstExpr->isCast()) continue; if (auto ConstInt = dyn_cast<ConstantInt>(ConstExpr->getOperand(0))) { // Pretend the constant is directly used by the instruction and ignore // the constant expression. collectConstantCandidates(ConstCandMap, Inst, Idx, ConstInt); continue; } } } // end of for all operands } /// \brief Collect all integer constants in the function that cannot be folded /// into an instruction itself. void ConstantHoisting::collectConstantCandidates(Function &Fn) { ConstCandMapType ConstCandMap; for (Function::iterator BB : Fn) for (BasicBlock::iterator Inst : *BB) collectConstantCandidates(ConstCandMap, Inst); } /// \brief Find the base constant within the given range and rebase all other /// constants with respect to the base constant. void ConstantHoisting::findAndMakeBaseConstant(ConstCandVecType::iterator S, ConstCandVecType::iterator E) { auto MaxCostItr = S; unsigned NumUses = 0; // Use the constant that has the maximum cost as base constant. for (auto ConstCand = S; ConstCand != E; ++ConstCand) { NumUses += ConstCand->Uses.size(); if (ConstCand->CumulativeCost > MaxCostItr->CumulativeCost) MaxCostItr = ConstCand; } // Don't hoist constants that have only one use. if (NumUses <= 1) return; ConstantInfo ConstInfo; ConstInfo.BaseConstant = MaxCostItr->ConstInt; Type *Ty = ConstInfo.BaseConstant->getType(); // Rebase the constants with respect to the base constant. 
for (auto ConstCand = S; ConstCand != E; ++ConstCand) { APInt Diff = ConstCand->ConstInt->getValue() - ConstInfo.BaseConstant->getValue(); Constant *Offset = Diff == 0 ? nullptr : ConstantInt::get(Ty, Diff); ConstInfo.RebasedConstants.push_back( RebasedConstantInfo(std::move(ConstCand->Uses), Offset)); } ConstantVec.push_back(std::move(ConstInfo)); } /// \brief Finds and combines constant candidates that can be easily /// rematerialized with an add from a common base constant. void ConstantHoisting::findBaseConstants() { // Sort the constants by value and type. This invalidates the mapping! std::sort(ConstCandVec.begin(), ConstCandVec.end(), [](const ConstantCandidate &LHS, const ConstantCandidate &RHS) { if (LHS.ConstInt->getType() != RHS.ConstInt->getType()) return LHS.ConstInt->getType()->getBitWidth() < RHS.ConstInt->getType()->getBitWidth(); return LHS.ConstInt->getValue().ult(RHS.ConstInt->getValue()); }); // Simple linear scan through the sorted constant candidate vector for viable // merge candidates. auto MinValItr = ConstCandVec.begin(); for (auto CC = std::next(ConstCandVec.begin()), E = ConstCandVec.end(); CC != E; ++CC) { if (MinValItr->ConstInt->getType() == CC->ConstInt->getType()) { // Check if the constant is in range of an add with immediate. APInt Diff = CC->ConstInt->getValue() - MinValItr->ConstInt->getValue(); if ((Diff.getBitWidth() <= 64) && TTI->isLegalAddImmediate(Diff.getSExtValue())) continue; } // We either have now a different constant type or the constant is not in // range of an add with immediate anymore. findAndMakeBaseConstant(MinValItr, CC); // Start a new base constant search. MinValItr = CC; } // Finalize the last base constant search. findAndMakeBaseConstant(MinValItr, ConstCandVec.end()); } /// \brief Updates the operand at Idx in instruction Inst with the result of /// instruction Mat. If the instruction is a PHI node then special /// handling for duplicate values form the same incomming basic block is /// required. /// \return The update will always succeed, but the return value indicated if /// Mat was used for the update or not. static bool updateOperand(Instruction *Inst, unsigned Idx, Instruction *Mat) { if (auto PHI = dyn_cast<PHINode>(Inst)) { // Check if any previous operand of the PHI node has the same incoming basic // block. This is a very odd case that happens when the incoming basic block // has a switch statement. In this case use the same value as the previous // operand(s), otherwise we will fail verification due to different values. // The values are actually the same, but the variable names are different // and the verifier doesn't like that. BasicBlock *IncomingBB = PHI->getIncomingBlock(Idx); for (unsigned i = 0; i < Idx; ++i) { if (PHI->getIncomingBlock(i) == IncomingBB) { Value *IncomingVal = PHI->getIncomingValue(i); Inst->setOperand(Idx, IncomingVal); return false; } } } Inst->setOperand(Idx, Mat); return true; } /// \brief Emit materialization code for all rebased constants and update their /// users. 
void ConstantHoisting::emitBaseConstants(Instruction *Base, Constant *Offset, const ConstantUser &ConstUser) { Instruction *Mat = Base; if (Offset) { Instruction *InsertionPt = findMatInsertPt(ConstUser.Inst, ConstUser.OpndIdx); Mat = BinaryOperator::Create(Instruction::Add, Base, Offset, "const_mat", InsertionPt); DEBUG(dbgs() << "Materialize constant (" << *Base->getOperand(0) << " + " << *Offset << ") in BB " << Mat->getParent()->getName() << '\n' << *Mat << '\n'); Mat->setDebugLoc(ConstUser.Inst->getDebugLoc()); } Value *Opnd = ConstUser.Inst->getOperand(ConstUser.OpndIdx); // Visit constant integer. if (isa<ConstantInt>(Opnd)) { DEBUG(dbgs() << "Update: " << *ConstUser.Inst << '\n'); if (!updateOperand(ConstUser.Inst, ConstUser.OpndIdx, Mat) && Offset) Mat->eraseFromParent(); DEBUG(dbgs() << "To : " << *ConstUser.Inst << '\n'); return; } // Visit cast instruction. if (auto CastInst = dyn_cast<Instruction>(Opnd)) { assert(CastInst->isCast() && "Expected an cast instruction!"); // Check if we already have visited this cast instruction before to avoid // unnecessary cloning. Instruction *&ClonedCastInst = ClonedCastMap[CastInst]; if (!ClonedCastInst) { ClonedCastInst = CastInst->clone(); ClonedCastInst->setOperand(0, Mat); ClonedCastInst->insertAfter(CastInst); // Use the same debug location as the original cast instruction. ClonedCastInst->setDebugLoc(CastInst->getDebugLoc()); DEBUG(dbgs() << "Clone instruction: " << *CastInst << '\n' << "To : " << *ClonedCastInst << '\n'); } DEBUG(dbgs() << "Update: " << *ConstUser.Inst << '\n'); updateOperand(ConstUser.Inst, ConstUser.OpndIdx, ClonedCastInst); DEBUG(dbgs() << "To : " << *ConstUser.Inst << '\n'); return; } // Visit constant expression. if (auto ConstExpr = dyn_cast<ConstantExpr>(Opnd)) { Instruction *ConstExprInst = ConstExpr->getAsInstruction(); ConstExprInst->setOperand(0, Mat); ConstExprInst->insertBefore(findMatInsertPt(ConstUser.Inst, ConstUser.OpndIdx)); // Use the same debug location as the instruction we are about to update. ConstExprInst->setDebugLoc(ConstUser.Inst->getDebugLoc()); DEBUG(dbgs() << "Create instruction: " << *ConstExprInst << '\n' << "From : " << *ConstExpr << '\n'); DEBUG(dbgs() << "Update: " << *ConstUser.Inst << '\n'); if (!updateOperand(ConstUser.Inst, ConstUser.OpndIdx, ConstExprInst)) { ConstExprInst->eraseFromParent(); if (Offset) Mat->eraseFromParent(); } DEBUG(dbgs() << "To : " << *ConstUser.Inst << '\n'); return; } } /// \brief Hoist and hide the base constant behind a bitcast and emit /// materialization code for derived constants. bool ConstantHoisting::emitBaseConstants() { bool MadeChange = false; for (auto const &ConstInfo : ConstantVec) { // Hoist and hide the base constant behind a bitcast. Instruction *IP = findConstantInsertionPoint(ConstInfo); IntegerType *Ty = ConstInfo.BaseConstant->getType(); Instruction *Base = new BitCastInst(ConstInfo.BaseConstant, Ty, "const", IP); DEBUG(dbgs() << "Hoist constant (" << *ConstInfo.BaseConstant << ") to BB " << IP->getParent()->getName() << '\n' << *Base << '\n'); NumConstantsHoisted++; // Emit materialization code for all rebased constants. for (auto const &RCI : ConstInfo.RebasedConstants) { NumConstantsRebased++; for (auto const &U : RCI.Uses) emitBaseConstants(Base, RCI.Offset, U); } // Use the same debug location as the last user of the constant. 
assert(!Base->use_empty() && "The use list is empty!?"); assert(isa<Instruction>(Base->user_back()) && "All uses should be instructions."); Base->setDebugLoc(cast<Instruction>(Base->user_back())->getDebugLoc()); // Correct for base constant, which we counted above too. NumConstantsRebased--; MadeChange = true; } return MadeChange; } /// \brief Check all cast instructions we made a copy of and remove them if they /// have no more users. void ConstantHoisting::deleteDeadCastInst() const { for (auto const &I : ClonedCastMap) if (I.first->use_empty()) I.first->eraseFromParent(); } /// \brief Optimize expensive integer constants in the given function. bool ConstantHoisting::optimizeConstants(Function &Fn) { // Collect all constant candidates. collectConstantCandidates(Fn); // There are no constant candidates to worry about. if (ConstCandVec.empty()) return false; // Combine constants that can be easily materialized with an add from a common // base constant. findBaseConstants(); // There are no constants to emit. if (ConstantVec.empty()) return false; // Finally hoist the base constant and emit materialization code for dependent // constants. bool MadeChange = emitBaseConstants(); // Cleanup dead instructions. deleteDeadCastInst(); return MadeChange; }
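// Illustrative example (assumed IR, and assuming a target whose TTI reports
// these immediates as more expensive than TCC_Basic): two constants that
// differ by a small, legal add-immediate end up sharing one materialization.
// Following the placeholder style used in the file header, conceptually
//
//   %r1 = add i64 %a, <big_constant>
//   %r2 = add i64 %b, <big_constant + 16>
//
// becomes
//
//   %const     = bitcast i64 <big_constant> to i64
//   %r1        = add i64 %a, %const
//   %const_mat = add i64 %const, 16
//   %r2        = add i64 %b, %const_mat
//
// The bitcast keeps the base constant opaque to SelectionDAG so it is not
// re-folded into each user, and the "const_mat" add mirrors the name used in
// emitBaseConstants() above.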
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/MemCpyOptimizer.cpp
//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass performs various transformations related to eliminating memcpy // calls, or transforming sets of stores into memset's. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/MemoryDependenceAnalysis.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/GetElementPtrTypeIterator.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/Local.h" #include <list> using namespace llvm; #define DEBUG_TYPE "memcpyopt" STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted"); STATISTIC(NumMemSetInfer, "Number of memsets inferred"); STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy"); STATISTIC(NumCpyToSet, "Number of memcpys converted to memset"); static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, bool &VariableIdxFound, const DataLayout &DL) { // Skip over the first indices. gep_type_iterator GTI = gep_type_begin(GEP); for (unsigned i = 1; i != Idx; ++i, ++GTI) /*skip along*/; // Compute the offset implied by the rest of the indices. int64_t Offset = 0; for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) { ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i)); if (!OpC) return VariableIdxFound = true; if (OpC->isZero()) continue; // No offset. // Handle struct indices, which add their field offset to the pointer. if (StructType *STy = dyn_cast<StructType>(*GTI)) { Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue()); continue; } // Otherwise, we have a sequential type like an array or vector. Multiply // the index by the ElementSize. uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()); Offset += Size*OpC->getSExtValue(); } return Offset; } /// IsPointerOffset - Return true if Ptr1 is provably equal to Ptr2 plus a /// constant offset, and return that constant offset. For example, Ptr1 might /// be &A[42], and Ptr2 might be &A[40]. In this case offset would be -8. static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset, const DataLayout &DL) { Ptr1 = Ptr1->stripPointerCasts(); Ptr2 = Ptr2->stripPointerCasts(); // Handle the trivial case first. if (Ptr1 == Ptr2) { Offset = 0; return true; } GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1); GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2); bool VariableIdxFound = false; // If one pointer is a GEP and the other isn't, then see if the GEP is a // constant offset from the base, as in "P" and "gep P, 1". 
if (GEP1 && !GEP2 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) { Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, DL); return !VariableIdxFound; } if (GEP2 && !GEP1 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) { Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, DL); return !VariableIdxFound; } // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical // base. After that base, they may have some number of common (and // potentially variable) indices. After that they handle some constant // offset, which determines their offset from each other. At this point, we // handle no other case. if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0)) return false; // Skip any common indices and track the GEP types. unsigned Idx = 1; for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx) if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx)) break; int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, DL); int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, DL); if (VariableIdxFound) return false; Offset = Offset2-Offset1; return true; } /// MemsetRange - Represents a range of memset'd bytes with the ByteVal value. /// This allows us to analyze stores like: /// store 0 -> P+1 /// store 0 -> P+0 /// store 0 -> P+3 /// store 0 -> P+2 /// which sometimes happens with stores to arrays of structs etc. When we see /// the first store, we make a range [1, 2). The second store extends the range /// to [0, 2). The third makes a new range [2, 3). The fourth store joins the /// two ranges into [0, 3) which is memset'able. namespace { struct MemsetRange { // Start/End - A semi range that describes the span that this range covers. // The range is closed at the start and open at the end: [Start, End). int64_t Start, End; /// StartPtr - The getelementptr instruction that points to the start of the /// range. Value *StartPtr; /// Alignment - The known alignment of the first store. unsigned Alignment; /// TheStores - The actual stores that make up this range. SmallVector<Instruction*, 16> TheStores; bool isProfitableToUseMemset(const DataLayout &DL) const; }; } // end anon namespace bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const { // If we found more than 4 stores to merge or 16 bytes, use memset. if (TheStores.size() >= 4 || End-Start >= 16) return true; // If there is nothing to merge, don't do anything. if (TheStores.size() < 2) return false; // If any of the stores are a memset, then it is always good to extend the // memset. for (unsigned i = 0, e = TheStores.size(); i != e; ++i) if (!isa<StoreInst>(TheStores[i])) return true; // Assume that the code generator is capable of merging pairs of stores // together if it wants to. if (TheStores.size() == 2) return false; // If we have fewer than 8 stores, it can still be worthwhile to do this. // For example, merging 4 i8 stores into an i32 store is useful almost always. // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the // memset will be split into 2 32-bit stores anyway) and doing so can // pessimize the llvm optimizer. // // Since we don't have perfect knowledge here, make some assumptions: assume // the maximum GPR width is the same size as the largest legal integer // size. If so, check to see whether we will end up actually reducing the // number of stores used. 
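// Worked example (assuming MaxIntSize below works out to 8, i.e. a 64-bit GPR
// measured in bytes): an i32 store plus two i16 stores covering 8 contiguous
// bytes give Bytes = 8, NumPointerStores = 1 and NumByteStores = 0, and since
// 3 > 1 the range is deemed profitable; larger runs never reach this point
// because the size checks above already accepted them.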
unsigned Bytes = unsigned(End-Start); unsigned MaxIntSize = DL.getLargestLegalIntTypeSize(); if (MaxIntSize == 0) MaxIntSize = 1; unsigned NumPointerStores = Bytes / MaxIntSize; // Assume the remaining bytes if any are done a byte at a time. unsigned NumByteStores = Bytes - NumPointerStores * MaxIntSize; // If we will reduce the # stores (according to this heuristic), do the // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32 // etc. return TheStores.size() > NumPointerStores+NumByteStores; } namespace { class MemsetRanges { /// Ranges - A sorted list of the memset ranges. We use std::list here /// because each element is relatively large and expensive to copy. std::list<MemsetRange> Ranges; typedef std::list<MemsetRange>::iterator range_iterator; const DataLayout &DL; public: MemsetRanges(const DataLayout &DL) : DL(DL) {} typedef std::list<MemsetRange>::const_iterator const_iterator; const_iterator begin() const { return Ranges.begin(); } const_iterator end() const { return Ranges.end(); } bool empty() const { return Ranges.empty(); } void addInst(int64_t OffsetFromFirst, Instruction *Inst) { if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) addStore(OffsetFromFirst, SI); else addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst)); } void addStore(int64_t OffsetFromFirst, StoreInst *SI) { int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType()); addRange(OffsetFromFirst, StoreSize, SI->getPointerOperand(), SI->getAlignment(), SI); } void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) { int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue(); addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getAlignment(), MSI); } void addRange(int64_t Start, int64_t Size, Value *Ptr, unsigned Alignment, Instruction *Inst); }; } // end anon namespace /// addRange - Add a new store to the MemsetRanges data structure. This adds a /// new range for the specified store at the specified offset, merging into /// existing ranges as appropriate. /// /// Do a linear search of the ranges to see if this can be joined and/or to /// find the insertion point in the list. We keep the ranges sorted for /// simplicity here. This is a linear search of a linked list, which is ugly, /// however the number of ranges is limited, so this won't get crazy slow. void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr, unsigned Alignment, Instruction *Inst) { int64_t End = Start+Size; range_iterator I = Ranges.begin(), E = Ranges.end(); while (I != E && Start > I->End) ++I; // We now know that I == E, in which case we didn't find anything to merge // with, or that Start <= I->End. If End < I->Start or I == E, then we need // to insert a new range. Handle this now. if (I == E || End < I->Start) { MemsetRange &R = *Ranges.insert(I, MemsetRange()); R.Start = Start; R.End = End; R.StartPtr = Ptr; R.Alignment = Alignment; R.TheStores.push_back(Inst); return; } // This store overlaps with I, add it. I->TheStores.push_back(Inst); // At this point, we may have an interval that completely contains our store. // If so, just add it to the interval and return. if (I->Start <= Start && I->End >= End) return; // Now we know that Start <= I->End and End >= I->Start so the range overlaps // but is not entirely contained within the range. // See if the range extends the start of the range. In this case, it couldn't // possibly cause it to join the prior range, because otherwise we would have // stopped on *it*. 
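// For example (illustrative): with existing ranges [0, 4) and [10, 12),
// addRange(4, 4, ...) is adjacent to the first range (Start == I->End), so the
// first range grows to [0, 8); addRange(20, 2, ...) walks past both ranges and
// inserts a fresh [20, 22) at the end; addRange(2, 10, ...) would extend the
// first range and then swallow the second, leaving a single [0, 12).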
if (Start < I->Start) { I->Start = Start; I->StartPtr = Ptr; I->Alignment = Alignment; } // Now we know that Start <= I->End and Start >= I->Start (so the startpoint // is in or right at the end of I), and that End >= I->Start. Extend I out to // End. if (End > I->End) { I->End = End; range_iterator NextI = I; while (++NextI != E && End >= NextI->Start) { // Merge the range in. I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end()); if (NextI->End > I->End) I->End = NextI->End; Ranges.erase(NextI); NextI = I; } } } //===----------------------------------------------------------------------===// // MemCpyOpt Pass //===----------------------------------------------------------------------===// namespace { class MemCpyOpt : public FunctionPass { MemoryDependenceAnalysis *MD; TargetLibraryInfo *TLI; public: static char ID; // Pass identification, replacement for typeid MemCpyOpt() : FunctionPass(ID) { initializeMemCpyOptPass(*PassRegistry::getPassRegistry()); MD = nullptr; TLI = nullptr; } bool runOnFunction(Function &F) override; private: // This transformation requires dominator postdominator info void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesCFG(); AU.addRequired<AssumptionCacheTracker>(); AU.addRequired<DominatorTreeWrapperPass>(); AU.addRequired<MemoryDependenceAnalysis>(); AU.addRequired<AliasAnalysis>(); AU.addRequired<TargetLibraryInfoWrapperPass>(); AU.addPreserved<AliasAnalysis>(); AU.addPreserved<MemoryDependenceAnalysis>(); } // Helper functions bool processStore(StoreInst *SI, BasicBlock::iterator &BBI); bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI); bool processMemCpy(MemCpyInst *M); bool processMemMove(MemMoveInst *M); bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc, uint64_t cpyLen, unsigned cpyAlign, CallInst *C); bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep); bool processMemSetMemCpyDependence(MemCpyInst *M, MemSetInst *MDep); bool performMemCpyToMemSetOptzn(MemCpyInst *M, MemSetInst *MDep); bool processByValArgument(CallSite CS, unsigned ArgNo); Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr, Value *ByteVal); bool iterateOnFunction(Function &F); }; char MemCpyOpt::ID = 0; } // createMemCpyOptPass - The public interface to this file... FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); } INITIALIZE_PASS_BEGIN(MemCpyOpt, "memcpyopt", "MemCpy Optimization", false, false) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis) INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization", false, false) /// tryMergingIntoMemset - When scanning forward over instructions, we look for /// some other patterns to fold away. In particular, this looks for stores to /// neighboring locations of memory. If it sees enough consecutive ones, it /// attempts to merge them together into a memcpy/memset. Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst, Value *StartPtr, Value *ByteVal) { const DataLayout &DL = StartInst->getModule()->getDataLayout(); // Okay, so we now have a single store that can be splatable. Scan to find // all subsequent stores of the same value to offset from the same pointer. // Join these together into ranges, so we can decide whether contiguous blocks // are stored. 
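// End-to-end (illustrative): a source-level run such as p[0] = 0; p[1] = 0;
// p[2] = 0; p[3] = 0;, lowered as four adjacent byte stores of the same
// splattable value, is collected into a single range here and replaced below
// by one call to the memset intrinsic.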
MemsetRanges Ranges(DL); BasicBlock::iterator BI = StartInst; for (++BI; !isa<TerminatorInst>(BI); ++BI) { if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) { // If the instruction is readnone, ignore it, otherwise bail out. We // don't even allow readonly here because we don't want something like: // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A). if (BI->mayWriteToMemory() || BI->mayReadFromMemory()) break; continue; } if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) { // If this is a store, see if we can merge it in. if (!NextStore->isSimple()) break; // Check to see if this stored value is of the same byte-splattable value. if (ByteVal != isBytewiseValue(NextStore->getOperand(0))) break; // Check to see if this store is to a constant offset from the start ptr. int64_t Offset; if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset, DL)) break; Ranges.addStore(Offset, NextStore); } else { MemSetInst *MSI = cast<MemSetInst>(BI); if (MSI->isVolatile() || ByteVal != MSI->getValue() || !isa<ConstantInt>(MSI->getLength())) break; // Check to see if this store is to a constant offset from the start ptr. int64_t Offset; if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, DL)) break; Ranges.addMemSet(Offset, MSI); } } // If we have no ranges, then we just had a single store with nothing that // could be merged in. This is a very common case of course. if (Ranges.empty()) return nullptr; // If we had at least one store that could be merged in, add the starting // store as well. We try to avoid this unless there is at least something // interesting as a small compile-time optimization. Ranges.addInst(0, StartInst); // If we create any memsets, we put it right before the first instruction that // isn't part of the memset block. This ensure that the memset is dominated // by any addressing instruction needed by the start of the block. IRBuilder<> Builder(BI); // Now that we have full information about ranges, loop over the ranges and // emit memset's for anything big enough to be worthwhile. Instruction *AMemSet = nullptr; for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end(); I != E; ++I) { const MemsetRange &Range = *I; if (Range.TheStores.size() == 1) continue; // If it is profitable to lower this range to memset, do so now. if (!Range.isProfitableToUseMemset(DL)) continue; // Otherwise, we do want to transform this! Create a new memset. // Get the starting pointer of the block. StartPtr = Range.StartPtr; // Determine alignment unsigned Alignment = Range.Alignment; if (Alignment == 0) { Type *EltType = cast<PointerType>(StartPtr->getType())->getElementType(); Alignment = DL.getABITypeAlignment(EltType); } AMemSet = Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment); DEBUG(dbgs() << "Replace stores:\n"; for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i) dbgs() << *Range.TheStores[i] << '\n'; dbgs() << "With: " << *AMemSet << '\n'); if (!Range.TheStores.empty()) AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc()); // Zap all the stores. 
for (SmallVectorImpl<Instruction *>::const_iterator SI = Range.TheStores.begin(), SE = Range.TheStores.end(); SI != SE; ++SI) { MD->removeInstruction(*SI); (*SI)->eraseFromParent(); } ++NumMemSetInfer; } return AMemSet; } bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) { if (!SI->isSimple()) return false; const DataLayout &DL = SI->getModule()->getDataLayout(); // Detect cases where we're performing call slot forwarding, but // happen to be using a load-store pair to implement it, rather than // a memcpy. if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) { if (LI->isSimple() && LI->hasOneUse() && LI->getParent() == SI->getParent()) { MemDepResult ldep = MD->getDependency(LI); CallInst *C = nullptr; if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst())) C = dyn_cast<CallInst>(ldep.getInst()); if (C) { // Check that nothing touches the dest of the "copy" between // the call and the store. AliasAnalysis &AA = getAnalysis<AliasAnalysis>(); MemoryLocation StoreLoc = MemoryLocation::get(SI); for (BasicBlock::iterator I = --BasicBlock::iterator(SI), E = C; I != E; --I) { if (AA.getModRefInfo(&*I, StoreLoc) != AliasAnalysis::NoModRef) { C = nullptr; break; } } } if (C) { unsigned storeAlign = SI->getAlignment(); if (!storeAlign) storeAlign = DL.getABITypeAlignment(SI->getOperand(0)->getType()); unsigned loadAlign = LI->getAlignment(); if (!loadAlign) loadAlign = DL.getABITypeAlignment(LI->getType()); bool changed = performCallSlotOptzn( LI, SI->getPointerOperand()->stripPointerCasts(), LI->getPointerOperand()->stripPointerCasts(), DL.getTypeStoreSize(SI->getOperand(0)->getType()), std::min(storeAlign, loadAlign), C); if (changed) { MD->removeInstruction(SI); SI->eraseFromParent(); MD->removeInstruction(LI); LI->eraseFromParent(); ++NumMemCpyInstr; return true; } } } } // There are two cases that are interesting for this code to handle: memcpy // and memset. Right now we only handle memset. // Ensure that the value being stored is something that can be memset'able a // byte at a time like "0" or "-1" or any width, as well as things like // 0xA0A0A0A0 and 0.0. if (Value *ByteVal = isBytewiseValue(SI->getOperand(0))) if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(), ByteVal)) { BBI = I; // Don't invalidate iterator. return true; } return false; } bool MemCpyOpt::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) { // See if there is another memset or store neighboring this memset which // allows us to widen out the memset to do a single larger store. if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile()) if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(), MSI->getValue())) { BBI = I; // Don't invalidate iterator. return true; } return false; } /// performCallSlotOptzn - takes a memcpy and a call that it depends on, /// and checks for the possibility of a call slot optimization by having /// the call write its result directly into the destination of the memcpy. bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy, Value *cpyDest, Value *cpySrc, uint64_t cpyLen, unsigned cpyAlign, CallInst *C) { // The general transformation to keep in mind is // // call @func(..., src, ...) // memcpy(dest, src, ...) // // -> // // memcpy(dest, src, ...) // call @func(..., dest, ...) // // Since moving the memcpy is technically awkward, we additionally check that // src only holds uninitialized values at the moment of the call, meaning that // the memcpy can be discarded rather than moved. 
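// A typical shape of the pattern (illustrative IR; the names and the sret
// attribute are just one common way this arises):
//
//   %tmp = alloca %struct.S
//   call void @g(%struct.S* sret %tmp)   ; callee fills in %tmp
//   ...memcpy(%dst <- %tmp, sizeof(%struct.S))...
//
// If all the checks below pass, the call is rewritten to take %dst (suitably
// casted) in place of %tmp, and the now-dead memcpy (or load/store pair) is
// removed by our caller.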
// Deliberately get the source and destination with bitcasts stripped away, // because we'll need to do type comparisons based on the underlying type. CallSite CS(C); // Require that src be an alloca. This simplifies the reasoning considerably. AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc); if (!srcAlloca) return false; ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize()); if (!srcArraySize) return false; const DataLayout &DL = cpy->getModule()->getDataLayout(); uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) * srcArraySize->getZExtValue(); if (cpyLen < srcSize) return false; // Check that accessing the first srcSize bytes of dest will not cause a // trap. Otherwise the transform is invalid since it might cause a trap // to occur earlier than it otherwise would. if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) { // The destination is an alloca. Check it is larger than srcSize. ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize()); if (!destArraySize) return false; uint64_t destSize = DL.getTypeAllocSize(A->getAllocatedType()) * destArraySize->getZExtValue(); if (destSize < srcSize) return false; } else if (Argument *A = dyn_cast<Argument>(cpyDest)) { if (A->getDereferenceableBytes() < srcSize) { // If the destination is an sret parameter then only accesses that are // outside of the returned struct type can trap. if (!A->hasStructRetAttr()) return false; Type *StructTy = cast<PointerType>(A->getType())->getElementType(); if (!StructTy->isSized()) { // The call may never return and hence the copy-instruction may never // be executed, and therefore it's not safe to say "the destination // has at least <cpyLen> bytes, as implied by the copy-instruction", return false; } uint64_t destSize = DL.getTypeAllocSize(StructTy); if (destSize < srcSize) return false; } } else { return false; } // Check that dest points to memory that is at least as aligned as src. unsigned srcAlign = srcAlloca->getAlignment(); if (!srcAlign) srcAlign = DL.getABITypeAlignment(srcAlloca->getAllocatedType()); bool isDestSufficientlyAligned = srcAlign <= cpyAlign; // If dest is not aligned enough and we can't increase its alignment then // bail out. if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest)) return false; // Check that src is not accessed except via the call and the memcpy. This // guarantees that it holds only undefined values when passed in (so the final // memcpy can be dropped), that it is not read or written between the call and // the memcpy, and that writing beyond the end of it is undefined. SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(), srcAlloca->user_end()); while (!srcUseList.empty()) { User *U = srcUseList.pop_back_val(); if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) { for (User *UU : U->users()) srcUseList.push_back(UU); continue; } if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) { if (!G->hasAllZeroIndices()) return false; for (User *UU : U->users()) srcUseList.push_back(UU); continue; } if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U)) if (IT->getIntrinsicID() == Intrinsic::lifetime_start || IT->getIntrinsicID() == Intrinsic::lifetime_end) continue; if (U != C && U != cpy) return false; } // Check that src isn't captured by the called function since the // transformation can cause aliasing issues in that case. 
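// For example, if the callee stashed its pointer argument in a global, then
// after rewriting the argument from the alloca to the memcpy destination,
// later code could reach the destination through that global -- something the
// original program could never do.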
for (unsigned i = 0, e = CS.arg_size(); i != e; ++i) if (CS.getArgument(i) == cpySrc && !CS.doesNotCapture(i)) return false; // Since we're changing the parameter to the callsite, we need to make sure // that what would be the new parameter dominates the callsite. DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest)) if (!DT.dominates(cpyDestInst, C)) return false; // In addition to knowing that the call does not access src in some // unexpected manner, for example via a global, which we deduce from // the use analysis, we also need to know that it does not sneakily // access dest. We rely on AA to figure this out for us. AliasAnalysis &AA = getAnalysis<AliasAnalysis>(); AliasAnalysis::ModRefResult MR = AA.getModRefInfo(C, cpyDest, srcSize); // If necessary, perform additional analysis. if (MR != AliasAnalysis::NoModRef) MR = AA.callCapturesBefore(C, cpyDest, srcSize, &DT); if (MR != AliasAnalysis::NoModRef) return false; // All the checks have passed, so do the transformation. bool changedArgument = false; for (unsigned i = 0; i < CS.arg_size(); ++i) if (CS.getArgument(i)->stripPointerCasts() == cpySrc) { Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(), cpyDest->getName(), C); changedArgument = true; if (CS.getArgument(i)->getType() == Dest->getType()) CS.setArgument(i, Dest); else CS.setArgument(i, CastInst::CreatePointerCast(Dest, CS.getArgument(i)->getType(), Dest->getName(), C)); } if (!changedArgument) return false; // If the destination wasn't sufficiently aligned then increase its alignment. if (!isDestSufficientlyAligned) { assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!"); cast<AllocaInst>(cpyDest)->setAlignment(srcAlign); } // Drop any cached information about the call, because we may have changed // its dependence information by changing its parameter. MD->removeInstruction(C); // Update AA metadata // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be // handled here, but combineMetadata doesn't support them yet unsigned KnownIDs[] = { LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope, LLVMContext::MD_noalias, }; combineMetadata(C, cpy, KnownIDs); // Remove the memcpy. MD->removeInstruction(cpy); ++NumMemCpyInstr; return true; } /// processMemCpyMemCpyDependence - We've found that the (upward scanning) /// memory dependence of memcpy 'M' is the memcpy 'MDep'. Try to simplify M to /// copy from MDep's input if we can. /// bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep) { // We can only transforms memcpy's where the dest of one is the source of the // other. if (M->getSource() != MDep->getDest() || MDep->isVolatile()) return false; // If dep instruction is reading from our current input, then it is a noop // transfer and substituting the input won't change this instruction. Just // ignore the input and let someone else zap MDep. This handles cases like: // memcpy(a <- a) // memcpy(b <- a) if (M->getSource() == MDep->getSource()) return false; // Second, the length of the memcpy's must be the same, or the preceding one // must be larger than the following one. 
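// Illustrative IR (sizes hypothetical, MDep length >= M length as required
// below):
//
//   memcpy(%b <- %a, 64)   ; MDep
//   memcpy(%c <- %b, 32)   ; M
// =>
//   memcpy(%b <- %a, 64)
//   memcpy(%c <- %a, 32)   ; M now reads from MDep's source
//
// The first memcpy is left in place; if %b ends up otherwise unused, DSE can
// delete it later.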
ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength()); ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength()); if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue()) return false; AliasAnalysis &AA = getAnalysis<AliasAnalysis>(); // Verify that the copied-from memory doesn't change in between the two // transfers. For example, in: // memcpy(a <- b) // *b = 42; // memcpy(c <- a) // It would be invalid to transform the second memcpy into memcpy(c <- b). // // TODO: If the code between M and MDep is transparent to the destination "c", // then we could still perform the xform by moving M up to the first memcpy. // // NOTE: This is conservative, it will stop on any read from the source loc, // not just the defining memcpy. MemDepResult SourceDep = MD->getPointerDependencyFrom( MemoryLocation::getForSource(MDep), false, M, M->getParent()); if (!SourceDep.isClobber() || SourceDep.getInst() != MDep) return false; // If the dest of the second might alias the source of the first, then the // source and dest might overlap. We still want to eliminate the intermediate // value, but we have to generate a memmove instead of memcpy. bool UseMemMove = false; if (!AA.isNoAlias(MemoryLocation::getForDest(M), MemoryLocation::getForSource(MDep))) UseMemMove = true; // If all checks passed, then we can transform M. // Make sure to use the lesser of the alignment of the source and the dest // since we're changing where we're reading from, but don't want to increase // the alignment past what can be read from or written to. // TODO: Is this worth it if we're creating a less aligned memcpy? For // example we could be moving from movaps -> movq on x86. unsigned Align = std::min(MDep->getAlignment(), M->getAlignment()); IRBuilder<> Builder(M); if (UseMemMove) Builder.CreateMemMove(M->getRawDest(), MDep->getRawSource(), M->getLength(), Align, M->isVolatile()); else Builder.CreateMemCpy(M->getRawDest(), MDep->getRawSource(), M->getLength(), Align, M->isVolatile()); // Remove the instruction we're replacing. MD->removeInstruction(M); M->eraseFromParent(); ++NumMemCpyInstr; return true; } /// We've found that the (upward scanning) memory dependence of \p MemCpy is /// \p MemSet. Try to simplify \p MemSet to only set the trailing bytes that /// weren't copied over by \p MemCpy. /// /// In other words, transform: /// \code /// memset(dst, c, dst_size); /// memcpy(dst, src, src_size); /// \endcode /// into: /// \code /// memcpy(dst, src, src_size); /// memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size); /// \endcode bool MemCpyOpt::processMemSetMemCpyDependence(MemCpyInst *MemCpy, MemSetInst *MemSet) { // We can only transform memset/memcpy with the same destination. if (MemSet->getDest() != MemCpy->getDest()) return false; // Check that there are no other dependencies on the memset destination. MemDepResult DstDepInfo = MD->getPointerDependencyFrom( MemoryLocation::getForDest(MemSet), false, MemCpy, MemCpy->getParent()); if (DstDepInfo.getInst() != MemSet) return false; // Use the same i8* dest as the memcpy, killing the memset dest if different. Value *Dest = MemCpy->getRawDest(); Value *DestSize = MemSet->getLength(); Value *SrcSize = MemCpy->getLength(); // By default, create an unaligned memset. unsigned Align = 1; // If Dest is aligned, and SrcSize is constant, use the minimum alignment // of the sum. 
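// Worked example for the length computed below: with dst_size == 16 and
// src_size == 10 the new memset becomes memset(dst + 10, c, 6); when
// src_size >= dst_size the select yields a zero length and the trailing
// memset degenerates to a no-op.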
const unsigned DestAlign = std::max(MemSet->getAlignment(), MemCpy->getAlignment()); if (DestAlign > 1) if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize)) Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign); IRBuilder<> Builder(MemCpy); // If the sizes have different types, zext the smaller one. if (DestSize->getType() != SrcSize->getType()) { if (DestSize->getType()->getIntegerBitWidth() > SrcSize->getType()->getIntegerBitWidth()) SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType()); else DestSize = Builder.CreateZExt(DestSize, SrcSize->getType()); } Value *MemsetLen = Builder.CreateSelect(Builder.CreateICmpULE(DestSize, SrcSize), ConstantInt::getNullValue(DestSize->getType()), Builder.CreateSub(DestSize, SrcSize)); Builder.CreateMemSet(Builder.CreateGEP(Dest, SrcSize), MemSet->getOperand(1), MemsetLen, Align); MD->removeInstruction(MemSet); MemSet->eraseFromParent(); return true; } /// Transform memcpy to memset when its source was just memset. /// In other words, turn: /// \code /// memset(dst1, c, dst1_size); /// memcpy(dst2, dst1, dst2_size); /// \endcode /// into: /// \code /// memset(dst1, c, dst1_size); /// memset(dst2, c, dst2_size); /// \endcode /// When dst2_size <= dst1_size. /// /// The \p MemCpy must have a Constant length. bool MemCpyOpt::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy, MemSetInst *MemSet) { // This only makes sense on memcpy(..., memset(...), ...). if (MemSet->getRawDest() != MemCpy->getRawSource()) return false; ConstantInt *CopySize = cast<ConstantInt>(MemCpy->getLength()); ConstantInt *MemSetSize = dyn_cast<ConstantInt>(MemSet->getLength()); // Make sure the memcpy doesn't read any more than what the memset wrote. // Don't worry about sizes larger than i64. if (!MemSetSize || CopySize->getZExtValue() > MemSetSize->getZExtValue()) return false; IRBuilder<> Builder(MemCpy); Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1), CopySize, MemCpy->getAlignment()); return true; } /// processMemCpy - perform simplification of memcpy's. If we have memcpy A /// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite /// B to be a memcpy from X to Z (or potentially a memmove, depending on /// circumstances). This allows later passes to remove the first memcpy /// altogether. bool MemCpyOpt::processMemCpy(MemCpyInst *M) { // We can only optimize non-volatile memcpy's. if (M->isVolatile()) return false; // If the source and destination of the memcpy are the same, then zap it. if (M->getSource() == M->getDest()) { MD->removeInstruction(M); M->eraseFromParent(); return false; } // If copying from a constant, try to turn the memcpy into a memset. if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource())) if (GV->isConstant() && GV->hasDefinitiveInitializer()) if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) { IRBuilder<> Builder(M); Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(), M->getAlignment(), false); MD->removeInstruction(M); M->eraseFromParent(); ++NumCpyToSet; return true; } MemDepResult DepInfo = MD->getDependency(M); // Try to turn a partially redundant memset + memcpy into // memcpy + smaller memset. We don't need the memcpy size for this. if (DepInfo.isClobber()) if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst())) if (processMemSetMemCpyDependence(M, MDep)) return true; // The optimizations after this point require the memcpy size. 
ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength()); if (!CopySize) return false; // There are four possible optimizations we can do for memcpy: // a) memcpy-memcpy xform which exposes redundance for DSE. // b) call-memcpy xform for return slot optimization. // c) memcpy from freshly alloca'd space or space that has just started its // lifetime copies undefined data, and we can therefore eliminate the // memcpy in favor of the data that was already at the destination. // d) memcpy from a just-memset'd source can be turned into memset. if (DepInfo.isClobber()) { if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) { if (performCallSlotOptzn(M, M->getDest(), M->getSource(), CopySize->getZExtValue(), M->getAlignment(), C)) { MD->removeInstruction(M); M->eraseFromParent(); return true; } } } MemoryLocation SrcLoc = MemoryLocation::getForSource(M); MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(SrcLoc, true, M, M->getParent()); if (SrcDepInfo.isClobber()) { if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst())) return processMemCpyMemCpyDependence(M, MDep); } else if (SrcDepInfo.isDef()) { Instruction *I = SrcDepInfo.getInst(); bool hasUndefContents = false; if (isa<AllocaInst>(I)) { hasUndefContents = true; } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { if (II->getIntrinsicID() == Intrinsic::lifetime_start) if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0))) if (LTSize->getZExtValue() >= CopySize->getZExtValue()) hasUndefContents = true; } if (hasUndefContents) { MD->removeInstruction(M); M->eraseFromParent(); ++NumMemCpyInstr; return true; } } if (SrcDepInfo.isClobber()) if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst())) if (performMemCpyToMemSetOptzn(M, MDep)) { MD->removeInstruction(M); M->eraseFromParent(); ++NumCpyToSet; return true; } return false; } /// processMemMove - Transforms memmove calls to memcpy calls when the src/dst /// are guaranteed not to alias. bool MemCpyOpt::processMemMove(MemMoveInst *M) { AliasAnalysis &AA = getAnalysis<AliasAnalysis>(); if (!TLI->has(LibFunc::memmove)) return false; // See if the pointers alias. if (!AA.isNoAlias(MemoryLocation::getForDest(M), MemoryLocation::getForSource(M))) return false; DEBUG(dbgs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n"); // If not, then we know we can transform this. Module *Mod = M->getParent()->getParent()->getParent(); Type *ArgTys[3] = { M->getRawDest()->getType(), M->getRawSource()->getType(), M->getLength()->getType() }; M->setCalledFunction(Intrinsic::getDeclaration(Mod, Intrinsic::memcpy, ArgTys)); // MemDep may have over conservative information about this instruction, just // conservatively flush it from the cache. MD->removeInstruction(M); ++NumMoveToCpy; return true; } /// processByValArgument - This is called on every byval argument in call sites. bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) { const DataLayout &DL = CS.getCaller()->getParent()->getDataLayout(); // Find out what feeds this byval argument. Value *ByValArg = CS.getArgument(ArgNo); Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType(); uint64_t ByValSize = DL.getTypeAllocSize(ByValTy); MemDepResult DepInfo = MD->getPointerDependencyFrom( MemoryLocation(ByValArg, ByValSize), true, CS.getInstruction(), CS.getInstruction()->getParent()); if (!DepInfo.isClobber()) return false; // If the byval argument isn't fed by a memcpy, ignore it. 
If it is fed by // a memcpy, see if we can byval from the source of the memcpy instead of the // result. MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst()); if (!MDep || MDep->isVolatile() || ByValArg->stripPointerCasts() != MDep->getDest()) return false; // The length of the memcpy must be larger or equal to the size of the byval. ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength()); if (!C1 || C1->getValue().getZExtValue() < ByValSize) return false; // Get the alignment of the byval. If the call doesn't specify the alignment, // then it is some target specific value that we can't know. unsigned ByValAlign = CS.getParamAlignment(ArgNo+1); if (ByValAlign == 0) return false; // If it is greater than the memcpy, then we check to see if we can force the // source of the memcpy to the alignment we need. If we fail, we bail out. AssumptionCache &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache( *CS->getParent()->getParent()); DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); if (MDep->getAlignment() < ByValAlign && getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL, CS.getInstruction(), &AC, &DT) < ByValAlign) return false; // Verify that the copied-from memory doesn't change in between the memcpy and // the byval call. // memcpy(a <- b) // *b = 42; // foo(*a) // It would be invalid to transform the second memcpy into foo(*b). // // NOTE: This is conservative, it will stop on any read from the source loc, // not just the defining memcpy. MemDepResult SourceDep = MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false, CS.getInstruction(), MDep->getParent()); if (!SourceDep.isClobber() || SourceDep.getInst() != MDep) return false; Value *TmpCast = MDep->getSource(); if (MDep->getSource()->getType() != ByValArg->getType()) TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(), "tmpcast", CS.getInstruction()); DEBUG(dbgs() << "MemCpyOpt: Forwarding memcpy to byval:\n" << " " << *MDep << "\n" << " " << *CS.getInstruction() << "\n"); // Otherwise we're good! Update the byval argument. CS.setArgument(ArgNo, TmpCast); ++NumMemCpyInstr; return true; } /// iterateOnFunction - Executes one iteration of MemCpyOpt. bool MemCpyOpt::iterateOnFunction(Function &F) { bool MadeChange = false; // Walk all instruction in the function. for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) { for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) { // Avoid invalidating the iterator. Instruction *I = BI++; bool RepeatInstruction = false; if (StoreInst *SI = dyn_cast<StoreInst>(I)) MadeChange |= processStore(SI, BI); else if (MemSetInst *M = dyn_cast<MemSetInst>(I)) RepeatInstruction = processMemSet(M, BI); else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I)) RepeatInstruction = processMemCpy(M); else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I)) RepeatInstruction = processMemMove(M); else if (auto CS = CallSite(I)) { for (unsigned i = 0, e = CS.arg_size(); i != e; ++i) if (CS.isByValArgument(i)) MadeChange |= processByValArgument(CS, i); } // Reprocess the instruction if desired. if (RepeatInstruction) { if (BI != BB->begin()) --BI; MadeChange = true; } } } return MadeChange; } // MemCpyOpt::runOnFunction - This is the main transformation entry point for a // function. 
// bool MemCpyOpt::runOnFunction(Function &F) { if (skipOptnoneFunction(F)) return false; bool MadeChange = false; MD = &getAnalysis<MemoryDependenceAnalysis>(); TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); // If we don't have at least memset and memcpy, there is little point of doing // anything here. These are required by a freestanding implementation, so if // even they are disabled, there is no point in trying hard. if (!TLI->has(LibFunc::memset) || !TLI->has(LibFunc::memcpy)) return false; while (1) { if (!iterateOnFunction(F)) break; MadeChange = true; } MD = nullptr; return MadeChange; }
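// To exercise just this pass in isolation (assuming the usual legacy
// pass-manager "opt" driver of this LLVM version):
//
//   opt -memcpyopt -S input.ll -o output.ll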
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/Scalar.cpp
//===-- Scalar.cpp --------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements common infrastructure for libLLVMScalarOpts.a, which // implements several scalar transformations over the LLVM intermediate // representation, including the C bindings for that library. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm-c/Initialization.h" #include "llvm-c/Transforms/Scalar.h" #include "llvm/Analysis/Passes.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Verifier.h" #include "llvm/InitializePasses.h" #include "llvm/IR/LegacyPassManager.h" using namespace llvm; /// initializeScalarOptsPasses - Initialize all passes linked into the /// ScalarOpts library. void llvm::initializeScalarOpts(PassRegistry &Registry) { initializeADCEPass(Registry); initializeBDCEPass(Registry); initializeAlignmentFromAssumptionsPass(Registry); initializeSampleProfileLoaderPass(Registry); initializeConstantHoistingPass(Registry); initializeConstantPropagationPass(Registry); initializeCorrelatedValuePropagationPass(Registry); initializeDCEPass(Registry); initializeDeadInstEliminationPass(Registry); initializeDynamicIndexingVectorToArrayPass(Registry); // HLSL Change initializeScalarizerPass(Registry); initializeDSEPass(Registry); initializeGVNPass(Registry); initializeEarlyCSELegacyPassPass(Registry); initializeFlattenCFGPassPass(Registry); initializeInductiveRangeCheckEliminationPass(Registry); initializeIndVarSimplifyPass(Registry); initializeJumpThreadingPass(Registry); initializeLICMPass(Registry); initializeLoopDeletionPass(Registry); initializeLoopAccessAnalysisPass(Registry); initializeLoopInstSimplifyPass(Registry); initializeLoopInterchangePass(Registry); initializeLoopRotatePass(Registry); initializeLoopStrengthReducePass(Registry); initializeLoopRerollPass(Registry); initializeLoopUnrollPass(Registry); initializeLoopUnswitchPass(Registry); initializeLoopIdiomRecognizePass(Registry); initializeLowerAtomicPass(Registry); initializeLowerExpectIntrinsicPass(Registry); initializeMemCpyOptPass(Registry); initializeMergedLoadStoreMotionPass(Registry); initializeNaryReassociatePass(Registry); initializePartiallyInlineLibCallsPass(Registry); initializeReassociatePass(Registry); initializeRegToMemPass(Registry); initializeRegToMemHlslPass(Registry); // HLSL Change initializeRewriteStatepointsForGCPass(Registry); initializeSCCPPass(Registry); initializeIPSCCPPass(Registry); initializeSROAPass(Registry); initializeSROA_DTPass(Registry); initializeSROA_SSAUpPass(Registry); initializeCFGSimplifyPassPass(Registry); initializeStructurizeCFGPass(Registry); initializeSinkingPass(Registry); initializeTailCallElimPass(Registry); initializeSeparateConstOffsetFromGEPPass(Registry); initializeSpeculativeExecutionPass(Registry); initializeStraightLineStrengthReducePass(Registry); initializeLoadCombinePass(Registry); initializePlaceBackedgeSafepointsImplPass(Registry); initializePlaceSafepointsPass(Registry); initializeFloat2IntPass(Registry); initializeLoopDistributePass(Registry); } void LLVMInitializeScalarOpts(LLVMPassRegistryRef R) { initializeScalarOpts(*unwrap(R)); } void LLVMAddAggressiveDCEPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createAggressiveDCEPass()); } void 
LLVMAddBitTrackingDCEPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createBitTrackingDCEPass()); } void LLVMAddAlignmentFromAssumptionsPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createAlignmentFromAssumptionsPass()); } void LLVMAddCFGSimplificationPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createCFGSimplificationPass()); } void LLVMAddDeadStoreEliminationPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createDeadStoreEliminationPass()); } void LLVMAddScalarizerPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createScalarizerPass()); } void LLVMAddGVNPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createGVNPass()); } void LLVMAddMergedLoadStoreMotionPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createMergedLoadStoreMotionPass()); } void LLVMAddIndVarSimplifyPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createIndVarSimplifyPass()); } void LLVMAddInstructionCombiningPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createInstructionCombiningPass()); } void LLVMAddJumpThreadingPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createJumpThreadingPass()); } void LLVMAddLICMPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createLICMPass()); } void LLVMAddLoopDeletionPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createLoopDeletionPass()); } void LLVMAddLoopIdiomPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createLoopIdiomPass()); } void LLVMAddLoopRotatePass(LLVMPassManagerRef PM) { unwrap(PM)->add(createLoopRotatePass()); } void LLVMAddLoopRerollPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createLoopRerollPass()); } void LLVMAddLoopUnrollPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createLoopUnrollPass()); } void LLVMAddLoopUnswitchPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createLoopUnswitchPass()); } void LLVMAddMemCpyOptPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createMemCpyOptPass()); } void LLVMAddPartiallyInlineLibCallsPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createPartiallyInlineLibCallsPass()); } void LLVMAddLowerSwitchPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createLowerSwitchPass()); } void LLVMAddPromoteMemoryToRegisterPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createPromoteMemoryToRegisterPass()); } void LLVMAddReassociatePass(LLVMPassManagerRef PM) { unwrap(PM)->add(createReassociatePass()); } void LLVMAddSCCPPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createSCCPPass()); } void LLVMAddScalarReplAggregatesPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createScalarReplAggregatesPass()); } void LLVMAddScalarReplAggregatesPassSSA(LLVMPassManagerRef PM) { unwrap(PM)->add(createScalarReplAggregatesPass(-1, false)); } void LLVMAddScalarReplAggregatesPassWithThreshold(LLVMPassManagerRef PM, int Threshold) { unwrap(PM)->add(createScalarReplAggregatesPass(Threshold)); } void LLVMAddSimplifyLibCallsPass(LLVMPassManagerRef PM) { // NOTE: The simplify-libcalls pass has been removed. 
} void LLVMAddTailCallEliminationPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createTailCallEliminationPass()); } void LLVMAddConstantPropagationPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createConstantPropagationPass()); } void LLVMAddDemoteMemoryToRegisterPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createDemoteRegisterToMemoryPass()); } // HLSL Change start void LLVMAddDemoteMemoryToRegisterHlslPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createDemoteRegisterToMemoryHlslPass()); } // HLSL Change end void LLVMAddVerifierPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createVerifierPass()); } void LLVMAddCorrelatedValuePropagationPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createCorrelatedValuePropagationPass()); } void LLVMAddEarlyCSEPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createEarlyCSEPass()); } void LLVMAddTypeBasedAliasAnalysisPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createTypeBasedAliasAnalysisPass()); } void LLVMAddScopedNoAliasAAPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createScopedNoAliasAAPass()); } void LLVMAddBasicAliasAnalysisPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createBasicAliasAnalysisPass()); } void LLVMAddLowerExpectIntrinsicPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createLowerExpectIntrinsicPass()); }
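// Example use of these C bindings from client code (illustrative; error
// handling and module/context setup omitted):
//
//   #include "llvm-c/Core.h"
//   #include "llvm-c/Transforms/Scalar.h"
//
//   LLVMPassManagerRef PM = LLVMCreatePassManager();
//   LLVMAddEarlyCSEPass(PM);
//   LLVMAddGVNPass(PM);
//   LLVMAddMemCpyOptPass(PM);
//   LLVMRunPassManager(PM, M);          // M is an existing LLVMModuleRef
//   LLVMDisposePassManager(PM);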
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Scalar/LoopRerollPass.cpp
//===-- LoopReroll.cpp - Loop rerolling pass ------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass implements a simple loop reroller. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/AliasSetTracker.h" #include "llvm/Analysis/LoopPass.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/ScalarEvolutionExpander.h" #include "llvm/Analysis/ScalarEvolutionExpressions.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/LoopUtils.h" using namespace llvm; #define DEBUG_TYPE "loop-reroll" STATISTIC(NumRerolledLoops, "Number of rerolled loops"); #if 0 // HLSL Change Starts - option pending static cl::opt<unsigned> MaxInc("max-reroll-increment", cl::init(2048), cl::Hidden, cl::desc("The maximum increment for loop rerolling")); static cl::opt<unsigned> NumToleratedFailedMatches("reroll-num-tolerated-failed-matches", cl::init(400), cl::Hidden, cl::desc("The maximum number of failures to tolerate" " during fuzzy matching. (default: 400)")); #else static const unsigned MaxInc = 2048; static const unsigned NumToleratedFailedMatches = 400; #endif // HLSL Change Ends // This loop re-rolling transformation aims to transform loops like this: // // int foo(int a); // void bar(int *x) { // for (int i = 0; i < 500; i += 3) { // foo(i); // foo(i+1); // foo(i+2); // } // } // // into a loop like this: // // void bar(int *x) { // for (int i = 0; i < 500; ++i) // foo(i); // } // // It does this by looking for loops that, besides the latch code, are composed // of isomorphic DAGs of instructions, with each DAG rooted at some increment // to the induction variable, and where each DAG is isomorphic to the DAG // rooted at the induction variable (excepting the sub-DAGs which root the // other induction-variable increments). In other words, we're looking for loop // bodies of the form: // // %iv = phi [ (preheader, ...), (body, %iv.next) ] // f(%iv) // %iv.1 = add %iv, 1 <-- a root increment // f(%iv.1) // %iv.2 = add %iv, 2 <-- a root increment // f(%iv.2) // %iv.scale_m_1 = add %iv, scale-1 <-- a root increment // f(%iv.scale_m_1) // ... // %iv.next = add %iv, scale // %cmp = icmp(%iv, ...) // br %cmp, header, exit // // where each f(i) is a set of instructions that, collectively, are a function // only of i (and other loop-invariant values). 
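// A concrete instance of the shape above, with scale = 3 (illustrative only):
//
//   %iv = phi i32 [ 0, %preheader ], [ %iv.next, %body ]
//   call void @foo(i32 %iv)
//   %iv.1 = add i32 %iv, 1
//   call void @foo(i32 %iv.1)
//   %iv.2 = add i32 %iv, 2
//   call void @foo(i32 %iv.2)
//   %iv.next = add i32 %iv, 3
//   %cmp = icmp slt i32 %iv.next, 500
//   br i1 %cmp, label %body, label %exit
//
// Rerolling rewrites this so the body makes a single call to @foo(%iv), the
// increment becomes 1, and the loop bound is adjusted to keep the total work
// the same.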
// // As a special case, we can also reroll loops like this: // // int foo(int); // void bar(int *x) { // for (int i = 0; i < 500; ++i) { // x[3*i] = foo(0); // x[3*i+1] = foo(0); // x[3*i+2] = foo(0); // } // } // // into this: // // void bar(int *x) { // for (int i = 0; i < 1500; ++i) // x[i] = foo(0); // } // // in which case, we're looking for inputs like this: // // %iv = phi [ (preheader, ...), (body, %iv.next) ] // %scaled.iv = mul %iv, scale // f(%scaled.iv) // %scaled.iv.1 = add %scaled.iv, 1 // f(%scaled.iv.1) // %scaled.iv.2 = add %scaled.iv, 2 // f(%scaled.iv.2) // %scaled.iv.scale_m_1 = add %scaled.iv, scale-1 // f(%scaled.iv.scale_m_1) // ... // %iv.next = add %iv, 1 // %cmp = icmp(%iv, ...) // br %cmp, header, exit namespace { enum IterationLimits { /// The maximum number of iterations that we'll try and reroll. This /// has to be less than 25 in order to fit into a SmallBitVector. IL_MaxRerollIterations = 16, /// The bitvector index used by loop induction variables and other /// instructions that belong to all iterations. IL_All, IL_End }; class LoopReroll : public LoopPass { public: static char ID; // Pass ID, replacement for typeid LoopReroll() : LoopPass(ID) { initializeLoopRerollPass(*PassRegistry::getPassRegistry()); } bool runOnLoop(Loop *L, LPPassManager &LPM) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<AliasAnalysis>(); AU.addRequired<LoopInfoWrapperPass>(); AU.addPreserved<LoopInfoWrapperPass>(); AU.addRequired<DominatorTreeWrapperPass>(); AU.addPreserved<DominatorTreeWrapperPass>(); AU.addRequired<ScalarEvolution>(); AU.addRequired<TargetLibraryInfoWrapperPass>(); } protected: AliasAnalysis *AA; LoopInfo *LI; ScalarEvolution *SE; TargetLibraryInfo *TLI; DominatorTree *DT; typedef SmallVector<Instruction *, 16> SmallInstructionVector; typedef SmallSet<Instruction *, 16> SmallInstructionSet; // A chain of isomorphic instructions, indentified by a single-use PHI, // representing a reduction. Only the last value may be used outside the // loop. struct SimpleLoopReduction { SimpleLoopReduction(Instruction *P, Loop *L) : Valid(false), Instructions(1, P) { assert(isa<PHINode>(P) && "First reduction instruction must be a PHI"); add(L); } bool valid() const { return Valid; } Instruction *getPHI() const { assert(Valid && "Using invalid reduction"); return Instructions.front(); } Instruction *getReducedValue() const { assert(Valid && "Using invalid reduction"); return Instructions.back(); } Instruction *get(size_t i) const { assert(Valid && "Using invalid reduction"); return Instructions[i+1]; } Instruction *operator [] (size_t i) const { return get(i); } // The size, ignoring the initial PHI. size_t size() const { assert(Valid && "Using invalid reduction"); return Instructions.size()-1; } typedef SmallInstructionVector::iterator iterator; typedef SmallInstructionVector::const_iterator const_iterator; iterator begin() { assert(Valid && "Using invalid reduction"); return std::next(Instructions.begin()); } const_iterator begin() const { assert(Valid && "Using invalid reduction"); return std::next(Instructions.begin()); } iterator end() { return Instructions.end(); } const_iterator end() const { return Instructions.end(); } protected: bool Valid; SmallInstructionVector Instructions; void add(Loop *L); }; // The set of all reductions, and state tracking of possible reductions // during loop instruction processing. struct ReductionTracker { typedef SmallVector<SimpleLoopReduction, 16> SmallReductionVector; // Add a new possible reduction. 
void addSLR(SimpleLoopReduction &SLR) { PossibleReds.push_back(SLR); } // Setup to track possible reductions corresponding to the provided // rerolling scale. Only reductions with a number of non-PHI instructions // that is divisible by the scale are considered. Three instructions sets // are filled in: // - A set of all possible instructions in eligible reductions. // - A set of all PHIs in eligible reductions // - A set of all reduced values (last instructions) in eligible // reductions. void restrictToScale(uint64_t Scale, SmallInstructionSet &PossibleRedSet, SmallInstructionSet &PossibleRedPHISet, SmallInstructionSet &PossibleRedLastSet) { PossibleRedIdx.clear(); PossibleRedIter.clear(); Reds.clear(); for (unsigned i = 0, e = PossibleReds.size(); i != e; ++i) if (PossibleReds[i].size() % Scale == 0) { PossibleRedLastSet.insert(PossibleReds[i].getReducedValue()); PossibleRedPHISet.insert(PossibleReds[i].getPHI()); PossibleRedSet.insert(PossibleReds[i].getPHI()); PossibleRedIdx[PossibleReds[i].getPHI()] = i; for (Instruction *J : PossibleReds[i]) { PossibleRedSet.insert(J); PossibleRedIdx[J] = i; } } } // The functions below are used while processing the loop instructions. // Are the two instructions both from reductions, and furthermore, from // the same reduction? bool isPairInSame(Instruction *J1, Instruction *J2) { DenseMap<Instruction *, int>::iterator J1I = PossibleRedIdx.find(J1); if (J1I != PossibleRedIdx.end()) { DenseMap<Instruction *, int>::iterator J2I = PossibleRedIdx.find(J2); if (J2I != PossibleRedIdx.end() && J1I->second == J2I->second) return true; } return false; } // The two provided instructions, the first from the base iteration, and // the second from iteration i, form a matched pair. If these are part of // a reduction, record that fact. void recordPair(Instruction *J1, Instruction *J2, unsigned i) { if (PossibleRedIdx.count(J1)) { assert(PossibleRedIdx.count(J2) && "Recording reduction vs. non-reduction instruction?"); PossibleRedIter[J1] = 0; PossibleRedIter[J2] = i; int Idx = PossibleRedIdx[J1]; assert(Idx == PossibleRedIdx[J2] && "Recording pair from different reductions?"); Reds.insert(Idx); } } // The functions below can be called after we've finished processing all // instructions in the loop, and we know which reductions were selected. // Is the provided instruction the PHI of a reduction selected for // rerolling? bool isSelectedPHI(Instruction *J) { if (!isa<PHINode>(J)) return false; for (DenseSet<int>::iterator RI = Reds.begin(), RIE = Reds.end(); RI != RIE; ++RI) { int i = *RI; if (cast<Instruction>(J) == PossibleReds[i].getPHI()) return true; } return false; } bool validateSelected(); void replaceSelected(); protected: // The vector of all possible reductions (for any scale). SmallReductionVector PossibleReds; DenseMap<Instruction *, int> PossibleRedIdx; DenseMap<Instruction *, int> PossibleRedIter; DenseSet<int> Reds; }; // A DAGRootSet models an induction variable being used in a rerollable // loop. For example, // // x[i*3+0] = y1 // x[i*3+1] = y2 // x[i*3+2] = y3 // // Base instruction -> i*3 // +---+----+ // / | \ // ST[y1] +1 +2 <-- Roots // | | // ST[y2] ST[y3] // // There may be multiple DAGRoots, for example: // // x[i*2+0] = ... (1) // x[i*2+1] = ... (1) // x[i*2+4] = ... (2) // x[i*2+5] = ... (2) // x[(i+1234)*2+5678] = ... (3) // x[(i+1234)*2+5679] = ... (3) // // The loop will be rerolled by adding a new loop induction variable, // one for the Base instruction in each DAGRootSet. 
// struct DAGRootSet { Instruction *BaseInst; SmallInstructionVector Roots; // The instructions between IV and BaseInst (but not including BaseInst). SmallInstructionSet SubsumedInsts; }; // The set of all DAG roots, and state tracking of all roots // for a particular induction variable. struct DAGRootTracker { DAGRootTracker(LoopReroll *Parent, Loop *L, Instruction *IV, ScalarEvolution *SE, AliasAnalysis *AA, TargetLibraryInfo *TLI) : Parent(Parent), L(L), SE(SE), AA(AA), TLI(TLI), IV(IV) {} /// Stage 1: Find all the DAG roots for the induction variable. bool findRoots(); /// Stage 2: Validate if the found roots are valid. bool validate(ReductionTracker &Reductions); /// Stage 3: Assuming validate() returned true, perform the /// replacement. /// @param IterCount The maximum iteration count of L. void replace(const SCEV *IterCount); protected: typedef MapVector<Instruction*, SmallBitVector> UsesTy; bool findRootsRecursive(Instruction *IVU, SmallInstructionSet SubsumedInsts); bool findRootsBase(Instruction *IVU, SmallInstructionSet SubsumedInsts); bool collectPossibleRoots(Instruction *Base, std::map<int64_t,Instruction*> &Roots); bool collectUsedInstructions(SmallInstructionSet &PossibleRedSet); void collectInLoopUserSet(const SmallInstructionVector &Roots, const SmallInstructionSet &Exclude, const SmallInstructionSet &Final, DenseSet<Instruction *> &Users); void collectInLoopUserSet(Instruction *Root, const SmallInstructionSet &Exclude, const SmallInstructionSet &Final, DenseSet<Instruction *> &Users); UsesTy::iterator nextInstr(int Val, UsesTy &In, const SmallInstructionSet &Exclude, UsesTy::iterator *StartI=nullptr); bool isBaseInst(Instruction *I); bool isRootInst(Instruction *I); bool instrDependsOn(Instruction *I, UsesTy::iterator Start, UsesTy::iterator End); LoopReroll *Parent; // Members of Parent, replicated here for brevity. Loop *L; ScalarEvolution *SE; AliasAnalysis *AA; TargetLibraryInfo *TLI; // The loop induction variable. Instruction *IV; // Loop step amount. uint64_t Inc; // Loop reroll count; if Inc == 1, this records the scaling applied // to the indvar: a[i*2+0] = ...; a[i*2+1] = ... ; // If Inc is not 1, Scale = Inc. uint64_t Scale; // The roots themselves. SmallVector<DAGRootSet,16> RootSets; // All increment instructions for IV. SmallInstructionVector LoopIncs; // Map of all instructions in the loop (in order) to the iterations // they are used in (or specially, IL_All for instructions // used in the loop increment mechanism). UsesTy Uses; }; void collectPossibleIVs(Loop *L, SmallInstructionVector &PossibleIVs); void collectPossibleReductions(Loop *L, ReductionTracker &Reductions); bool reroll(Instruction *IV, Loop *L, BasicBlock *Header, const SCEV *IterCount, ReductionTracker &Reductions); }; } char LoopReroll::ID = 0; INITIALIZE_PASS_BEGIN(LoopReroll, "loop-reroll", "Reroll loops", false, false) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) INITIALIZE_PASS_END(LoopReroll, "loop-reroll", "Reroll loops", false, false) Pass *llvm::createLoopRerollPass() { return new LoopReroll; } // Returns true if the provided instruction is used outside the given loop. // This operates like Instruction::isUsedOutsideOfBlock, but considers PHIs in // non-loop blocks to be outside the loop. 
static bool hasUsesOutsideLoop(Instruction *I, Loop *L) { for (User *U : I->users()) { if (!L->contains(cast<Instruction>(U))) return true; } return false; } // Collect the list of loop induction variables with respect to which it might // be possible to reroll the loop. void LoopReroll::collectPossibleIVs(Loop *L, SmallInstructionVector &PossibleIVs) { BasicBlock *Header = L->getHeader(); for (BasicBlock::iterator I = Header->begin(), IE = Header->getFirstInsertionPt(); I != IE; ++I) { if (!isa<PHINode>(I)) continue; if (!I->getType()->isIntegerTy()) continue; if (const SCEVAddRecExpr *PHISCEV = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(I))) { if (PHISCEV->getLoop() != L) continue; if (!PHISCEV->isAffine()) continue; if (const SCEVConstant *IncSCEV = dyn_cast<SCEVConstant>(PHISCEV->getStepRecurrence(*SE))) { if (!IncSCEV->getValue()->getValue().isStrictlyPositive()) continue; if (IncSCEV->getValue()->uge(MaxInc)) continue; DEBUG(dbgs() << "LRR: Possible IV: " << *I << " = " << *PHISCEV << "\n"); PossibleIVs.push_back(I); } } } } // Add the remainder of the reduction-variable chain to the instruction vector // (the initial PHINode has already been added). If successful, the object is // marked as valid. void LoopReroll::SimpleLoopReduction::add(Loop *L) { assert(!Valid && "Cannot add to an already-valid chain"); // The reduction variable must be a chain of single-use instructions // (including the PHI), except for the last value (which is used by the PHI // and also outside the loop). Instruction *C = Instructions.front(); if (C->user_empty()) return; do { C = cast<Instruction>(*C->user_begin()); if (C->hasOneUse()) { if (!C->isBinaryOp()) return; if (!(isa<PHINode>(Instructions.back()) || C->isSameOperationAs(Instructions.back()))) return; Instructions.push_back(C); } } while (C->hasOneUse()); if (Instructions.size() < 2 || !C->isSameOperationAs(Instructions.back()) || C->use_empty()) return; // C is now the (potential) last instruction in the reduction chain. for (User *U : C->users()) { // The only in-loop user can be the initial PHI. if (L->contains(cast<Instruction>(U))) if (cast<Instruction>(U) != Instructions.front()) return; } Instructions.push_back(C); Valid = true; } // Collect the vector of possible reduction variables. void LoopReroll::collectPossibleReductions(Loop *L, ReductionTracker &Reductions) { BasicBlock *Header = L->getHeader(); for (BasicBlock::iterator I = Header->begin(), IE = Header->getFirstInsertionPt(); I != IE; ++I) { if (!isa<PHINode>(I)) continue; if (!I->getType()->isSingleValueType()) continue; SimpleLoopReduction SLR(I, L); if (!SLR.valid()) continue; DEBUG(dbgs() << "LRR: Possible reduction: " << *I << " (with " << SLR.size() << " chained instructions)\n"); Reductions.addSLR(SLR); } } // Collect the set of all users of the provided root instruction. This set of // users contains not only the direct users of the root instruction, but also // all users of those users, and so on. There are two exceptions: // // 1. Instructions in the set of excluded instructions are never added to the // use set (even if they are users). This is used, for example, to exclude // including root increments in the use set of the primary IV. // // 2. Instructions in the set of final instructions are added to the use set // if they are users, but their users are not added. This is used, for // example, to prevent a reduction update from forcing all later reduction // updates into the use set. 
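// (Hypothetical sketch: collecting the users of a root %t with
//    %p = getelementptr ..., %t
//    %v = load %p
//    %r.next = fadd %r, %v     ; in Final (a reduction update)
//  adds %p, %v and %r.next to the use set, but the users of %r.next are
//  not followed, and anything in Exclude, e.g. the other root increments,
//  is never added even when it is a user.)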
void LoopReroll::DAGRootTracker::collectInLoopUserSet( Instruction *Root, const SmallInstructionSet &Exclude, const SmallInstructionSet &Final, DenseSet<Instruction *> &Users) { SmallInstructionVector Queue(1, Root); while (!Queue.empty()) { Instruction *I = Queue.pop_back_val(); if (!Users.insert(I).second) continue; if (!Final.count(I)) for (Use &U : I->uses()) { Instruction *User = cast<Instruction>(U.getUser()); if (PHINode *PN = dyn_cast<PHINode>(User)) { // Ignore "wrap-around" uses to PHIs of this loop's header. if (PN->getIncomingBlock(U) == L->getHeader()) continue; } if (L->contains(User) && !Exclude.count(User)) { Queue.push_back(User); } } // We also want to collect single-user "feeder" values. for (User::op_iterator OI = I->op_begin(), OIE = I->op_end(); OI != OIE; ++OI) { if (Instruction *Op = dyn_cast<Instruction>(*OI)) if (Op->hasOneUse() && L->contains(Op) && !Exclude.count(Op) && !Final.count(Op)) Queue.push_back(Op); } } } // Collect all of the users of all of the provided root instructions (combined // into a single set). void LoopReroll::DAGRootTracker::collectInLoopUserSet( const SmallInstructionVector &Roots, const SmallInstructionSet &Exclude, const SmallInstructionSet &Final, DenseSet<Instruction *> &Users) { for (SmallInstructionVector::const_iterator I = Roots.begin(), IE = Roots.end(); I != IE; ++I) collectInLoopUserSet(*I, Exclude, Final, Users); } static bool isSimpleLoadStore(Instruction *I) { if (LoadInst *LI = dyn_cast<LoadInst>(I)) return LI->isSimple(); if (StoreInst *SI = dyn_cast<StoreInst>(I)) return SI->isSimple(); if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) return !MI->isVolatile(); return false; } /// Return true if IVU is a "simple" arithmetic operation. /// This is used for narrowing the search space for DAGRoots; only arithmetic /// and GEPs can be part of a DAGRoot. static bool isSimpleArithmeticOp(User *IVU) { if (Instruction *I = dyn_cast<Instruction>(IVU)) { switch (I->getOpcode()) { default: return false; case Instruction::Add: case Instruction::Sub: case Instruction::Mul: case Instruction::Shl: case Instruction::AShr: case Instruction::LShr: case Instruction::GetElementPtr: case Instruction::Trunc: case Instruction::ZExt: case Instruction::SExt: return true; } } return false; } static bool isLoopIncrement(User *U, Instruction *IV) { BinaryOperator *BO = dyn_cast<BinaryOperator>(U); if (!BO || BO->getOpcode() != Instruction::Add) return false; for (auto *UU : BO->users()) { PHINode *PN = dyn_cast<PHINode>(UU); if (PN && PN == IV) return true; } return false; } bool LoopReroll::DAGRootTracker:: collectPossibleRoots(Instruction *Base, std::map<int64_t,Instruction*> &Roots) { SmallInstructionVector BaseUsers; for (auto *I : Base->users()) { ConstantInt *CI = nullptr; if (isLoopIncrement(I, IV)) { LoopIncs.push_back(cast<Instruction>(I)); continue; } // The root nodes must be either GEPs, ORs or ADDs. if (auto *BO = dyn_cast<BinaryOperator>(I)) { if (BO->getOpcode() == Instruction::Add || BO->getOpcode() == Instruction::Or) CI = dyn_cast<ConstantInt>(BO->getOperand(1)); } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) { Value *LastOperand = GEP->getOperand(GEP->getNumOperands()-1); CI = dyn_cast<ConstantInt>(LastOperand); } if (!CI) { if (Instruction *II = dyn_cast<Instruction>(I)) { BaseUsers.push_back(II); continue; } else { DEBUG(dbgs() << "LRR: Aborting due to non-instruction: " << *I << "\n"); return false; } } int64_t V = CI->getValue().getSExtValue(); if (Roots.find(V) != Roots.end()) // No duplicates, please. 
return false; // FIXME: Add support for negative values. if (V < 0) { DEBUG(dbgs() << "LRR: Aborting due to negative value: " << V << "\n"); return false; } Roots[V] = cast<Instruction>(I); } if (Roots.empty()) return false; // If we found non-loop-inc, non-root users of Base, assume they are // for the zeroth root index. This is because "add %a, 0" gets optimized // away. if (BaseUsers.size()) { if (Roots.find(0) != Roots.end()) { DEBUG(dbgs() << "LRR: Multiple roots found for base - aborting!\n"); return false; } Roots[0] = Base; } // Calculate the number of users of the base, or lowest indexed, iteration. unsigned NumBaseUses = BaseUsers.size(); if (NumBaseUses == 0) NumBaseUses = Roots.begin()->second->getNumUses(); // Check that every node has the same number of users. for (auto &KV : Roots) { if (KV.first == 0) continue; if (KV.second->getNumUses() != NumBaseUses) { DEBUG(dbgs() << "LRR: Aborting - Root and Base #users not the same: " << "#Base=" << NumBaseUses << ", #Root=" << KV.second->getNumUses() << "\n"); return false; } } return true; } bool LoopReroll::DAGRootTracker:: findRootsRecursive(Instruction *I, SmallInstructionSet SubsumedInsts) { // Does the user look like it could be part of a root set? // All its users must be simple arithmetic ops. if (I->getNumUses() > IL_MaxRerollIterations) return false; if ((I->getOpcode() == Instruction::Mul || I->getOpcode() == Instruction::PHI) && I != IV && findRootsBase(I, SubsumedInsts)) return true; SubsumedInsts.insert(I); for (User *V : I->users()) { Instruction *I = dyn_cast<Instruction>(V); if (std::find(LoopIncs.begin(), LoopIncs.end(), I) != LoopIncs.end()) continue; if (!I || !isSimpleArithmeticOp(I) || !findRootsRecursive(I, SubsumedInsts)) return false; } return true; } bool LoopReroll::DAGRootTracker:: findRootsBase(Instruction *IVU, SmallInstructionSet SubsumedInsts) { // The base instruction needs to be a multiply so // that we can erase it. if (IVU->getOpcode() != Instruction::Mul && IVU->getOpcode() != Instruction::PHI) return false; std::map<int64_t, Instruction*> V; if (!collectPossibleRoots(IVU, V)) return false; // If we didn't get a root for index zero, then IVU must be // subsumed. if (V.find(0) == V.end()) SubsumedInsts.insert(IVU); // Partition the vector into monotonically increasing indexes. DAGRootSet DRS; DRS.BaseInst = nullptr; for (auto &KV : V) { if (!DRS.BaseInst) { DRS.BaseInst = KV.second; DRS.SubsumedInsts = SubsumedInsts; } else if (DRS.Roots.empty()) { DRS.Roots.push_back(KV.second); } else if (V.find(KV.first - 1) != V.end()) { DRS.Roots.push_back(KV.second); } else { // Linear sequence terminated. RootSets.push_back(DRS); DRS.BaseInst = KV.second; DRS.SubsumedInsts = SubsumedInsts; DRS.Roots.clear(); } } RootSets.push_back(DRS); return true; } bool LoopReroll::DAGRootTracker::findRoots() { const SCEVAddRecExpr *RealIVSCEV = cast<SCEVAddRecExpr>(SE->getSCEV(IV)); Inc = cast<SCEVConstant>(RealIVSCEV->getOperand(1))-> getValue()->getZExtValue(); assert(RootSets.empty() && "Unclean state!"); if (Inc == 1) { for (auto *IVU : IV->users()) { if (isLoopIncrement(IVU, IV)) LoopIncs.push_back(cast<Instruction>(IVU)); } if (!findRootsRecursive(IV, SmallInstructionSet())) return false; LoopIncs.push_back(IV); } else { if (!findRootsBase(IV, SmallInstructionSet())) return false; } // Ensure all sets have the same size. 
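// (Hypothetical example: with Scale == 3, a root set built from x[i*3+0],
// x[i*3+1], x[i*3+2] and another built from y[i*3+6], y[i*3+7], y[i*3+8]
// each have two roots plus a base, so their sizes match; the step of each
// base's recurrence must also be three times the in-iteration stride,
// which is checked below.)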
if (RootSets.empty()) { DEBUG(dbgs() << "LRR: Aborting because no root sets found!\n"); return false; } for (auto &V : RootSets) { if (V.Roots.empty() || V.Roots.size() != RootSets[0].Roots.size()) { DEBUG(dbgs() << "LRR: Aborting because not all root sets have the same size\n"); return false; } } // And ensure all loop iterations are consecutive. We rely on std::map // providing ordered traversal. for (auto &V : RootSets) { const auto *ADR = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(V.BaseInst)); if (!ADR) return false; // Consider a DAGRootSet with N-1 roots (so N different values including // BaseInst). // Define d = Roots[0] - BaseInst, which should be the same as // Roots[I] - Roots[I-1] for all I in [1..N). // Define D = BaseInst@J - BaseInst@J-1, where "@J" means the value at the // loop iteration J. // // Now, For the loop iterations to be consecutive: // D = d * N unsigned N = V.Roots.size() + 1; const SCEV *StepSCEV = SE->getMinusSCEV(SE->getSCEV(V.Roots[0]), ADR); const SCEV *ScaleSCEV = SE->getConstant(StepSCEV->getType(), N); if (ADR->getStepRecurrence(*SE) != SE->getMulExpr(StepSCEV, ScaleSCEV)) { DEBUG(dbgs() << "LRR: Aborting because iterations are not consecutive\n"); return false; } } Scale = RootSets[0].Roots.size() + 1; if (Scale > IL_MaxRerollIterations) { DEBUG(dbgs() << "LRR: Aborting - too many iterations found. " << "#Found=" << Scale << ", #Max=" << IL_MaxRerollIterations << "\n"); return false; } DEBUG(dbgs() << "LRR: Successfully found roots: Scale=" << Scale << "\n"); return true; } bool LoopReroll::DAGRootTracker::collectUsedInstructions(SmallInstructionSet &PossibleRedSet) { // Populate the MapVector with all instructions in the block, in order first, // so we can iterate over the contents later in perfect order. for (auto &I : *L->getHeader()) { Uses[&I].resize(IL_End); } SmallInstructionSet Exclude; for (auto &DRS : RootSets) { Exclude.insert(DRS.Roots.begin(), DRS.Roots.end()); Exclude.insert(DRS.SubsumedInsts.begin(), DRS.SubsumedInsts.end()); Exclude.insert(DRS.BaseInst); } Exclude.insert(LoopIncs.begin(), LoopIncs.end()); for (auto &DRS : RootSets) { DenseSet<Instruction*> VBase; collectInLoopUserSet(DRS.BaseInst, Exclude, PossibleRedSet, VBase); for (auto *I : VBase) { Uses[I].set(0); } unsigned Idx = 1; for (auto *Root : DRS.Roots) { DenseSet<Instruction*> V; collectInLoopUserSet(Root, Exclude, PossibleRedSet, V); // While we're here, check the use sets are the same size. if (V.size() != VBase.size()) { DEBUG(dbgs() << "LRR: Aborting - use sets are different sizes\n"); return false; } for (auto *I : V) { Uses[I].set(Idx); } ++Idx; } // Make sure our subsumed instructions are remembered too. for (auto *I : DRS.SubsumedInsts) { Uses[I].set(IL_All); } } // Make sure the loop increments are also accounted for. Exclude.clear(); for (auto &DRS : RootSets) { Exclude.insert(DRS.Roots.begin(), DRS.Roots.end()); Exclude.insert(DRS.SubsumedInsts.begin(), DRS.SubsumedInsts.end()); Exclude.insert(DRS.BaseInst); } DenseSet<Instruction*> V; collectInLoopUserSet(LoopIncs, Exclude, PossibleRedSet, V); for (auto *I : V) { Uses[I].set(IL_All); } return true; } /// Get the next instruction in "In" that is a member of set Val. /// Start searching from StartI, and do not return anything in Exclude. /// If StartI is not given, start from In.begin(). LoopReroll::DAGRootTracker::UsesTy::iterator LoopReroll::DAGRootTracker::nextInstr(int Val, UsesTy &In, const SmallInstructionSet &Exclude, UsesTy::iterator *StartI) { UsesTy::iterator I = StartI ? 
*StartI : In.begin(); while (I != In.end() && (I->second.test(Val) == 0 || Exclude.count(I->first) != 0)) ++I; return I; } bool LoopReroll::DAGRootTracker::isBaseInst(Instruction *I) { for (auto &DRS : RootSets) { if (DRS.BaseInst == I) return true; } return false; } bool LoopReroll::DAGRootTracker::isRootInst(Instruction *I) { for (auto &DRS : RootSets) { if (std::find(DRS.Roots.begin(), DRS.Roots.end(), I) != DRS.Roots.end()) return true; } return false; } /// Return true if instruction I depends on any instruction between /// Start and End. bool LoopReroll::DAGRootTracker::instrDependsOn(Instruction *I, UsesTy::iterator Start, UsesTy::iterator End) { for (auto *U : I->users()) { for (auto It = Start; It != End; ++It) if (U == It->first) return true; } return false; } bool LoopReroll::DAGRootTracker::validate(ReductionTracker &Reductions) { // We now need to check for equivalence of the use graph of each root with // that of the primary induction variable (excluding the roots). Our goal // here is not to solve the full graph isomorphism problem, but rather to // catch common cases without a lot of work. As a result, we will assume // that the relative order of the instructions in each unrolled iteration // is the same (although we will not make an assumption about how the // different iterations are intermixed). Note that while the order must be // the same, the instructions may not be in the same basic block. // An array of just the possible reductions for this scale factor. When we // collect the set of all users of some root instructions, these reduction // instructions are treated as 'final' (their uses are not considered). // This is important because we don't want the root use set to search down // the reduction chain. SmallInstructionSet PossibleRedSet; SmallInstructionSet PossibleRedLastSet; SmallInstructionSet PossibleRedPHISet; Reductions.restrictToScale(Scale, PossibleRedSet, PossibleRedPHISet, PossibleRedLastSet); // Populate "Uses" with where each instruction is used. if (!collectUsedInstructions(PossibleRedSet)) return false; // Make sure we mark the reduction PHIs as used in all iterations. for (auto *I : PossibleRedPHISet) { Uses[I].set(IL_All); } // Make sure all instructions in the loop are in one and only one // set. for (auto &KV : Uses) { if (KV.second.count() != 1) { DEBUG(dbgs() << "LRR: Aborting - instruction is not used in 1 iteration: " << *KV.first << " (#uses=" << KV.second.count() << ")\n"); return false; } } DEBUG( for (auto &KV : Uses) { dbgs() << "LRR: " << KV.second.find_first() << "\t" << *KV.first << "\n"; } ); for (unsigned Iter = 1; Iter < Scale; ++Iter) { // In addition to regular aliasing information, we need to look for // instructions from later (future) iterations that have side effects // preventing us from reordering them past other instructions with side // effects. bool FutureSideEffects = false; AliasSetTracker AST(*AA); // The map between instructions in f(%iv.(i+1)) and f(%iv). DenseMap<Value *, Value *> BaseMap; // Compare iteration Iter to the base. SmallInstructionSet Visited; auto BaseIt = nextInstr(0, Uses, Visited); auto RootIt = nextInstr(Iter, Uses, Visited); auto LastRootIt = Uses.begin(); while (BaseIt != Uses.end() && RootIt != Uses.end()) { Instruction *BaseInst = BaseIt->first; Instruction *RootInst = RootIt->first; // Skip over the IV or root instructions; only match their users. 
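// (E.g., hypothetically, the root %iv.1 = add %iv, 1 itself is never
// matched against anything in the base iteration; only the instructions
// in f(%iv.1) are compared against those in f(%iv).)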
bool Continue = false; if (isBaseInst(BaseInst)) { Visited.insert(BaseInst); BaseIt = nextInstr(0, Uses, Visited); Continue = true; } if (isRootInst(RootInst)) { LastRootIt = RootIt; Visited.insert(RootInst); RootIt = nextInstr(Iter, Uses, Visited); Continue = true; } if (Continue) continue; if (!BaseInst->isSameOperationAs(RootInst)) { // Last chance saloon. We don't try and solve the full isomorphism // problem, but try and at least catch the case where two instructions // *of different types* are round the wrong way. We won't be able to // efficiently tell, given two ADD instructions, which way around we // should match them, but given an ADD and a SUB, we can at least infer // which one is which. // // This should allow us to deal with a greater subset of the isomorphism // problem. It does however change a linear algorithm into a quadratic // one, so limit the number of probes we do. auto TryIt = RootIt; unsigned N = NumToleratedFailedMatches; while (TryIt != Uses.end() && !BaseInst->isSameOperationAs(TryIt->first) && N--) { ++TryIt; TryIt = nextInstr(Iter, Uses, Visited, &TryIt); } if (TryIt == Uses.end() || TryIt == RootIt || instrDependsOn(TryIt->first, RootIt, TryIt)) { DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst << " vs. " << *RootInst << "\n"); return false; } RootIt = TryIt; RootInst = TryIt->first; } // All instructions between the last root and this root // may belong to some other iteration. If they belong to a // future iteration, then they're dangerous to alias with. // // Note that because we allow a limited amount of flexibility in the order // that we visit nodes, LastRootIt might be *before* RootIt, in which // case we've already checked this set of instructions so we shouldn't // do anything. for (; LastRootIt < RootIt; ++LastRootIt) { Instruction *I = LastRootIt->first; if (LastRootIt->second.find_first() < (int)Iter) continue; if (I->mayWriteToMemory()) AST.add(I); // Note: This is specifically guarded by a check on isa<PHINode>, // which while a valid (somewhat arbitrary) micro-optimization, is // needed because otherwise isSafeToSpeculativelyExecute returns // false on PHI nodes. if (!isa<PHINode>(I) && !isSimpleLoadStore(I) && !isSafeToSpeculativelyExecute(I)) // Intervening instructions cause side effects. FutureSideEffects = true; } // Make sure that this instruction, which is in the use set of this // root instruction, does not also belong to the base set or the set of // some other root instruction. if (RootIt->second.count() > 1) { DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst << " vs. " << *RootInst << " (prev. case overlap)\n"); return false; } // Make sure that we don't alias with any instruction in the alias set // tracker. If we do, then we depend on a future iteration, and we // can't reroll. if (RootInst->mayReadFromMemory()) for (auto &K : AST) { if (K.aliasesUnknownInst(RootInst, *AA)) { DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst << " vs. " << *RootInst << " (depends on future store)\n"); return false; } } // If we've past an instruction from a future iteration that may have // side effects, and this instruction might also, then we can't reorder // them, and this matching fails. As an exception, we allow the alias // set tracker to handle regular (simple) load/store dependencies. 
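// (Hypothetical example: if, while skipping over iteration-2 instructions,
// we saw a call that may write memory, FutureSideEffects is set; a
// subsequent matched pair containing another non-simple memory operation
// then fails the match, whereas ordinary loads and stores are still
// allowed and are checked against the AliasSetTracker above instead.)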
if (FutureSideEffects && ((!isSimpleLoadStore(BaseInst) && !isSafeToSpeculativelyExecute(BaseInst)) || (!isSimpleLoadStore(RootInst) && !isSafeToSpeculativelyExecute(RootInst)))) { DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst << " vs. " << *RootInst << " (side effects prevent reordering)\n"); return false; } // For instructions that are part of a reduction, if the operation is // associative, then don't bother matching the operands (because we // already know that the instructions are isomorphic, and the order // within the iteration does not matter). For non-associative reductions, // we do need to match the operands, because we need to reject // out-of-order instructions within an iteration! // For example (assume floating-point addition), we need to reject this: // x += a[i]; x += b[i]; // x += a[i+1]; x += b[i+1]; // x += b[i+2]; x += a[i+2]; bool InReduction = Reductions.isPairInSame(BaseInst, RootInst); if (!(InReduction && BaseInst->isAssociative())) { bool Swapped = false, SomeOpMatched = false; for (unsigned j = 0; j < BaseInst->getNumOperands(); ++j) { Value *Op2 = RootInst->getOperand(j); // If this is part of a reduction (and the operation is not // associatve), then we match all operands, but not those that are // part of the reduction. if (InReduction) if (Instruction *Op2I = dyn_cast<Instruction>(Op2)) if (Reductions.isPairInSame(RootInst, Op2I)) continue; DenseMap<Value *, Value *>::iterator BMI = BaseMap.find(Op2); if (BMI != BaseMap.end()) { Op2 = BMI->second; } else { for (auto &DRS : RootSets) { if (DRS.Roots[Iter-1] == (Instruction*) Op2) { Op2 = DRS.BaseInst; break; } } } if (BaseInst->getOperand(Swapped ? unsigned(!j) : j) != Op2) { // If we've not already decided to swap the matched operands, and // we've not already matched our first operand (note that we could // have skipped matching the first operand because it is part of a // reduction above), and the instruction is commutative, then try // the swapped match. if (!Swapped && BaseInst->isCommutative() && !SomeOpMatched && BaseInst->getOperand(!j) == Op2) { Swapped = true; } else { DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst << " vs. " << *RootInst << " (operand " << j << ")\n"); return false; } } SomeOpMatched = true; } } if ((!PossibleRedLastSet.count(BaseInst) && hasUsesOutsideLoop(BaseInst, L)) || (!PossibleRedLastSet.count(RootInst) && hasUsesOutsideLoop(RootInst, L))) { DEBUG(dbgs() << "LRR: iteration root match failed at " << *BaseInst << " vs. " << *RootInst << " (uses outside loop)\n"); return false; } Reductions.recordPair(BaseInst, RootInst, Iter); BaseMap.insert(std::make_pair(RootInst, BaseInst)); LastRootIt = RootIt; Visited.insert(BaseInst); Visited.insert(RootInst); BaseIt = nextInstr(0, Uses, Visited); RootIt = nextInstr(Iter, Uses, Visited); } assert (BaseIt == Uses.end() && RootIt == Uses.end() && "Mismatched set sizes!"); } DEBUG(dbgs() << "LRR: Matched all iteration increments for " << *IV << "\n"); return true; } void LoopReroll::DAGRootTracker::replace(const SCEV *IterCount) { BasicBlock *Header = L->getHeader(); // Remove instructions associated with non-base iterations. 
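// (For a hypothetical Scale == 3 loop this erases everything marked as
// used only in iterations 1 and 2, e.g. the stores to x[i*3+1] and
// x[i*3+2] and the arithmetic feeding them, leaving a single copy of the
// body driven by the new induction variable created below.)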
for (BasicBlock::reverse_iterator J = Header->rbegin(); J != Header->rend();) { unsigned I = Uses[&*J].find_first(); if (I > 0 && I < IL_All) { Instruction *D = &*J; DEBUG(dbgs() << "LRR: removing: " << *D << "\n"); D->eraseFromParent(); continue; } ++J; } const DataLayout &DL = Header->getModule()->getDataLayout(); // We need to create a new induction variable for each different BaseInst. for (auto &DRS : RootSets) { // Insert the new induction variable. const SCEVAddRecExpr *RealIVSCEV = cast<SCEVAddRecExpr>(SE->getSCEV(DRS.BaseInst)); const SCEV *Start = RealIVSCEV->getStart(); const SCEVAddRecExpr *H = cast<SCEVAddRecExpr> (SE->getAddRecExpr(Start, SE->getConstant(RealIVSCEV->getType(), 1), L, SCEV::FlagAnyWrap)); { // Limit the lifetime of SCEVExpander. SCEVExpander Expander(*SE, DL, "reroll"); Value *NewIV = Expander.expandCodeFor(H, IV->getType(), Header->begin()); for (auto &KV : Uses) { if (KV.second.find_first() == 0) KV.first->replaceUsesOfWith(DRS.BaseInst, NewIV); } if (BranchInst *BI = dyn_cast<BranchInst>(Header->getTerminator())) { // FIXME: Why do we need this check? if (Uses[BI].find_first() == IL_All) { const SCEV *ICSCEV = RealIVSCEV->evaluateAtIteration(IterCount, *SE); // Iteration count SCEV minus 1 const SCEV *ICMinus1SCEV = SE->getMinusSCEV(ICSCEV, SE->getConstant(ICSCEV->getType(), 1)); Value *ICMinus1; // Iteration count minus 1 if (isa<SCEVConstant>(ICMinus1SCEV)) { ICMinus1 = Expander.expandCodeFor(ICMinus1SCEV, NewIV->getType(), BI); } else { BasicBlock *Preheader = L->getLoopPreheader(); if (!Preheader) Preheader = InsertPreheaderForLoop(L, Parent); ICMinus1 = Expander.expandCodeFor(ICMinus1SCEV, NewIV->getType(), Preheader->getTerminator()); } Value *Cond = new ICmpInst(BI, CmpInst::ICMP_EQ, NewIV, ICMinus1, "exitcond"); BI->setCondition(Cond); if (BI->getSuccessor(1) != Header) BI->swapSuccessors(); } } } } SimplifyInstructionsInBlock(Header, TLI); DeleteDeadPHIs(Header, TLI); } // Validate the selected reductions. All iterations must have an isomorphic // part of the reduction chain and, for non-associative reductions, the chain // entries must appear in order. bool LoopReroll::ReductionTracker::validateSelected() { // For a non-associative reduction, the chain entries must appear in order. for (DenseSet<int>::iterator RI = Reds.begin(), RIE = Reds.end(); RI != RIE; ++RI) { int i = *RI; int PrevIter = 0, BaseCount = 0, Count = 0; for (Instruction *J : PossibleReds[i]) { // Note that all instructions in the chain must have been found because // all instructions in the function must have been assigned to some // iteration. int Iter = PossibleRedIter[J]; if (Iter != PrevIter && Iter != PrevIter + 1 && !PossibleReds[i].getReducedValue()->isAssociative()) { DEBUG(dbgs() << "LRR: Out-of-order non-associative reduction: " << J << "\n"); return false; } if (Iter != PrevIter) { if (Count != BaseCount) { DEBUG(dbgs() << "LRR: Iteration " << PrevIter << " reduction use count " << Count << " is not equal to the base use count " << BaseCount << "\n"); return false; } Count = 0; } ++Count; if (Iter == 0) ++BaseCount; PrevIter = Iter; } } return true; } // For all selected reductions, remove all parts except those in the first // iteration (and the PHI). Replace outside uses of the reduced value with uses // of the first-iteration reduced value (in other words, reroll the selected // reductions). void LoopReroll::ReductionTracker::replaceSelected() { // Fixup reductions to refer to the last instruction associated with the // first iteration (not the last). 
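// (Hypothetical sketch: for a selected chain
//    %r1 = fadd %phi, %a0   ; iteration 0
//    %r2 = fadd %r1, %a1    ; iteration 1
//    %r3 = fadd %r2, %a2    ; iteration 2, the reduced value
//  every user of %r3, including the PHI's backedge input and any use after
//  the loop, is redirected to %r1, the last chain entry kept from the
//  first iteration.)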
for (DenseSet<int>::iterator RI = Reds.begin(), RIE = Reds.end(); RI != RIE; ++RI) { int i = *RI; int j = 0; for (int e = PossibleReds[i].size(); j != e; ++j) if (PossibleRedIter[PossibleReds[i][j]] != 0) { --j; break; } // Replace users with the new end-of-chain value. SmallInstructionVector Users; for (User *U : PossibleReds[i].getReducedValue()->users()) { Users.push_back(cast<Instruction>(U)); } for (SmallInstructionVector::iterator J = Users.begin(), JE = Users.end(); J != JE; ++J) (*J)->replaceUsesOfWith(PossibleReds[i].getReducedValue(), PossibleReds[i][j]); } } // Reroll the provided loop with respect to the provided induction variable. // Generally, we're looking for a loop like this: // // %iv = phi [ (preheader, ...), (body, %iv.next) ] // f(%iv) // %iv.1 = add %iv, 1 <-- a root increment // f(%iv.1) // %iv.2 = add %iv, 2 <-- a root increment // f(%iv.2) // %iv.scale_m_1 = add %iv, scale-1 <-- a root increment // f(%iv.scale_m_1) // ... // %iv.next = add %iv, scale // %cmp = icmp(%iv, ...) // br %cmp, header, exit // // Notably, we do not require that f(%iv), f(%iv.1), etc. be isolated groups of // instructions. In other words, the instructions in f(%iv), f(%iv.1), etc. can // be intermixed with eachother. The restriction imposed by this algorithm is // that the relative order of the isomorphic instructions in f(%iv), f(%iv.1), // etc. be the same. // // First, we collect the use set of %iv, excluding the other increment roots. // This gives us f(%iv). Then we iterate over the loop instructions (scale-1) // times, having collected the use set of f(%iv.(i+1)), during which we: // - Ensure that the next unmatched instruction in f(%iv) is isomorphic to // the next unmatched instruction in f(%iv.(i+1)). // - Ensure that both matched instructions don't have any external users // (with the exception of last-in-chain reduction instructions). // - Track the (aliasing) write set, and other side effects, of all // instructions that belong to future iterations that come before the matched // instructions. If the matched instructions read from that write set, then // f(%iv) or f(%iv.(i+1)) has some dependency on instructions in // f(%iv.(j+1)) for some j > i, and we cannot reroll the loop. Similarly, // if any of these future instructions had side effects (could not be // speculatively executed), and so do the matched instructions, when we // cannot reorder those side-effect-producing instructions, and rerolling // fails. // // Finally, we make sure that all loop instructions are either loop increment // roots, belong to simple latch code, parts of validated reductions, part of // f(%iv) or part of some f(%iv.i). If all of that is true (and all reductions // have been validated), then we reroll the loop. bool LoopReroll::reroll(Instruction *IV, Loop *L, BasicBlock *Header, const SCEV *IterCount, ReductionTracker &Reductions) { DAGRootTracker DAGRoots(this, L, IV, SE, AA, TLI); if (!DAGRoots.findRoots()) return false; DEBUG(dbgs() << "LRR: Found all root induction increments for: " << *IV << "\n"); if (!DAGRoots.validate(Reductions)) return false; if (!Reductions.validateSelected()) return false; // At this point, we've validated the rerolling, and we're committed to // making changes! 
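// (After the two calls below, the loop body contains a single copy of
// f(%iv), each replacement induction variable steps by 1, and the backedge
// condition is rewritten so that, in effect, the loop runs Scale times as
// many iterations.)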
Reductions.replaceSelected(); DAGRoots.replace(IterCount); ++NumRerolledLoops; return true; } bool LoopReroll::runOnLoop(Loop *L, LPPassManager &LPM) { if (skipOptnoneFunction(L)) return false; AA = &getAnalysis<AliasAnalysis>(); LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); SE = &getAnalysis<ScalarEvolution>(); TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); BasicBlock *Header = L->getHeader(); DEBUG(dbgs() << "LRR: F[" << Header->getParent()->getName() << "] Loop %" << Header->getName() << " (" << L->getNumBlocks() << " block(s))\n"); bool Changed = false; // For now, we'll handle only single BB loops. if (L->getNumBlocks() > 1) return Changed; if (!SE->hasLoopInvariantBackedgeTakenCount(L)) return Changed; const SCEV *LIBETC = SE->getBackedgeTakenCount(L); const SCEV *IterCount = SE->getAddExpr(LIBETC, SE->getConstant(LIBETC->getType(), 1)); DEBUG(dbgs() << "LRR: iteration count = " << *IterCount << "\n"); // First, we need to find the induction variable with respect to which we can // reroll (there may be several possible options). SmallInstructionVector PossibleIVs; collectPossibleIVs(L, PossibleIVs); if (PossibleIVs.empty()) { DEBUG(dbgs() << "LRR: No possible IVs found\n"); return Changed; } ReductionTracker Reductions; collectPossibleReductions(L, Reductions); // For each possible IV, collect the associated possible set of 'root' nodes // (i+1, i+2, etc.). for (SmallInstructionVector::iterator I = PossibleIVs.begin(), IE = PossibleIVs.end(); I != IE; ++I) if (reroll(*I, L, Header, IterCount, Reductions)) { Changed = true; break; } return Changed; }
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/IPO/DeadArgumentElimination.cpp
//===-- DeadArgumentElimination.cpp - Eliminate dead arguments ------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass deletes dead arguments from internal functions. Dead argument // elimination removes arguments which are directly dead, as well as arguments // only passed into function calls as dead arguments of other functions. This // pass also deletes dead return values in a similar way. // // This pass is often useful as a cleanup pass to run after aggressive // interprocedural passes, which add possibly-dead arguments or return values. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/IPO.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/ADT/StringExtras.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/CallingConv.h" #include "llvm/IR/Constant.h" #include "llvm/IR/DIBuilder.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include <map> #include <set> #include <tuple> using namespace llvm; #define DEBUG_TYPE "deadargelim" STATISTIC(NumArgumentsEliminated, "Number of unread args removed"); STATISTIC(NumRetValsEliminated , "Number of unused return values removed"); STATISTIC(NumArgumentsReplacedWithUndef, "Number of unread args replaced with undef"); namespace { /// DAE - The dead argument elimination pass. /// class DAE : public ModulePass { public: /// Struct that represents (part of) either a return value or a function /// argument. Used so that arguments and return values can be used /// interchangeably. struct RetOrArg { RetOrArg(const Function *F, unsigned Idx, bool IsArg) : F(F), Idx(Idx), IsArg(IsArg) {} const Function *F; unsigned Idx; bool IsArg; /// Make RetOrArg comparable, so we can put it into a map. bool operator<(const RetOrArg &O) const { return std::tie(F, Idx, IsArg) < std::tie(O.F, O.Idx, O.IsArg); } /// Make RetOrArg comparable, so we can easily iterate the multimap. bool operator==(const RetOrArg &O) const { return F == O.F && Idx == O.Idx && IsArg == O.IsArg; } std::string getDescription() const { return (Twine(IsArg ? "Argument #" : "Return value #") + utostr(Idx) + " of function " + F->getName()).str(); } }; /// Liveness enum - During our initial pass over the program, we determine /// that things are either alive or maybe alive. We don't mark anything /// explicitly dead (even if we know they are), since anything not alive /// with no registered uses (in Uses) will never be marked alive and will /// thus become dead in the end. enum Liveness { Live, MaybeLive }; /// Convenience wrapper RetOrArg CreateRet(const Function *F, unsigned Idx) { return RetOrArg(F, Idx, false); } /// Convenience wrapper RetOrArg CreateArg(const Function *F, unsigned Idx) { return RetOrArg(F, Idx, true); } typedef std::multimap<RetOrArg, RetOrArg> UseMap; /// This maps a return value or argument to any MaybeLive return values or /// arguments it uses. This allows the MaybeLive values to be marked live /// when any of its users is marked live. 
/// For example (indices are left out for clarity): /// - Uses[ret F] = ret G /// This means that F calls G, and F returns the value returned by G. /// - Uses[arg F] = ret G /// This means that some function calls G and passes its result as an /// argument to F. /// - Uses[ret F] = arg F /// This means that F returns one of its own arguments. /// - Uses[arg F] = arg G /// This means that G calls F and passes one of its own (G's) arguments /// directly to F. UseMap Uses; typedef std::set<RetOrArg> LiveSet; typedef std::set<const Function*> LiveFuncSet; /// This set contains all values that have been determined to be live. LiveSet LiveValues; /// This set contains all values that are cannot be changed in any way. LiveFuncSet LiveFunctions; typedef SmallVector<RetOrArg, 5> UseVector; // Map each LLVM function to corresponding metadata with debug info. If // the function is replaced with another one, we should patch the pointer // to LLVM function in metadata. // As the code generation for module is finished (and DIBuilder is // finalized) we assume that subprogram descriptors won't be changed, and // they are stored in map for short duration anyway. DenseMap<const Function *, DISubprogram *> FunctionDIs; protected: // DAH uses this to specify a different ID. explicit DAE(char &ID) : ModulePass(ID) {} public: static char ID; // Pass identification, replacement for typeid DAE() : ModulePass(ID) { initializeDAEPass(*PassRegistry::getPassRegistry()); } bool runOnModule(Module &M) override; virtual bool ShouldHackArguments() const { return false; } private: Liveness MarkIfNotLive(RetOrArg Use, UseVector &MaybeLiveUses); Liveness SurveyUse(const Use *U, UseVector &MaybeLiveUses, unsigned RetValNum = -1U); Liveness SurveyUses(const Value *V, UseVector &MaybeLiveUses); void SurveyFunction(const Function &F); void MarkValue(const RetOrArg &RA, Liveness L, const UseVector &MaybeLiveUses); void MarkLive(const RetOrArg &RA); void MarkLive(const Function &F); void PropagateLiveness(const RetOrArg &RA); bool RemoveDeadStuffFromFunction(Function *F); bool DeleteDeadVarargs(Function &Fn); bool RemoveDeadArgumentsFromCallers(Function &Fn); }; } char DAE::ID = 0; INITIALIZE_PASS(DAE, "deadargelim", "Dead Argument Elimination", false, false) namespace { /// DAH - DeadArgumentHacking pass - Same as dead argument elimination, but /// deletes arguments to functions which are external. This is only for use /// by bugpoint. struct DAH : public DAE { static char ID; DAH() : DAE(ID) {} bool ShouldHackArguments() const override { return true; } }; } char DAH::ID = 0; INITIALIZE_PASS(DAH, "deadarghaX0r", "Dead Argument Hacking (BUGPOINT USE ONLY; DO NOT USE)", false, false) /// createDeadArgEliminationPass - This pass removes arguments from functions /// which are not used by the body of the function. /// ModulePass *llvm::createDeadArgEliminationPass() { return new DAE(); } ModulePass *llvm::createDeadArgHackingPass() { return new DAH(); } /// DeleteDeadVarargs - If this is an function that takes a ... list, and if /// llvm.vastart is never called, the varargs list is dead for the function. bool DAE::DeleteDeadVarargs(Function &Fn) { assert(Fn.getFunctionType()->isVarArg() && "Function isn't varargs!"); if (Fn.isDeclaration() || !Fn.hasLocalLinkage()) return false; // Ensure that the function is only directly called. if (Fn.hasAddressTaken()) return false; // Okay, we know we can transform this function if safe. Scan its body // looking for calls marked musttail or calls to llvm.vastart. 
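// (Hypothetical illustration: an internal function
//    define internal void @log(i8* %fmt, ...) { ... }
// that never calls llvm.vastart and contains no musttail calls can be
// rewritten to
//    define internal void @log(i8* %fmt)
// and each direct caller is rewritten to pass only the fixed arguments,
// dropping any variadic operands and their attributes.)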
for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) { for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) { CallInst *CI = dyn_cast<CallInst>(I); if (!CI) continue; if (CI->isMustTailCall()) return false; if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI)) { if (II->getIntrinsicID() == Intrinsic::vastart) return false; } } } // If we get here, there are no calls to llvm.vastart in the function body, // remove the "..." and adjust all the calls. // Start by computing a new prototype for the function, which is the same as // the old function, but doesn't have isVarArg set. FunctionType *FTy = Fn.getFunctionType(); std::vector<Type*> Params(FTy->param_begin(), FTy->param_end()); FunctionType *NFTy = FunctionType::get(FTy->getReturnType(), Params, false); unsigned NumArgs = Params.size(); // Create the new function body and insert it into the module... Function *NF = Function::Create(NFTy, Fn.getLinkage()); NF->copyAttributesFrom(&Fn); Fn.getParent()->getFunctionList().insert(&Fn, NF); NF->takeName(&Fn); // Loop over all of the callers of the function, transforming the call sites // to pass in a smaller number of arguments into the new function. // std::vector<Value*> Args; for (Value::user_iterator I = Fn.user_begin(), E = Fn.user_end(); I != E; ) { CallSite CS(*I++); if (!CS) continue; Instruction *Call = CS.getInstruction(); // Pass all the same arguments. Args.assign(CS.arg_begin(), CS.arg_begin() + NumArgs); // Drop any attributes that were on the vararg arguments. AttributeSet PAL = CS.getAttributes(); if (!PAL.isEmpty() && PAL.getSlotIndex(PAL.getNumSlots() - 1) > NumArgs) { SmallVector<AttributeSet, 8> AttributesVec; for (unsigned i = 0; PAL.getSlotIndex(i) <= NumArgs; ++i) AttributesVec.push_back(PAL.getSlotAttributes(i)); if (PAL.hasAttributes(AttributeSet::FunctionIndex)) AttributesVec.push_back(AttributeSet::get(Fn.getContext(), PAL.getFnAttributes())); PAL = AttributeSet::get(Fn.getContext(), AttributesVec); } Instruction *New; if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) { New = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(), Args, "", Call); cast<InvokeInst>(New)->setCallingConv(CS.getCallingConv()); cast<InvokeInst>(New)->setAttributes(PAL); } else { New = CallInst::Create(NF, Args, "", Call); cast<CallInst>(New)->setCallingConv(CS.getCallingConv()); cast<CallInst>(New)->setAttributes(PAL); if (cast<CallInst>(Call)->isTailCall()) cast<CallInst>(New)->setTailCall(); } New->setDebugLoc(Call->getDebugLoc()); Args.clear(); if (!Call->use_empty()) Call->replaceAllUsesWith(New); New->takeName(Call); // Finally, remove the old call from the program, reducing the use-count of // F. Call->eraseFromParent(); } // Since we have now created the new function, splice the body of the old // function right into the new function, leaving the old rotting hulk of the // function empty. NF->getBasicBlockList().splice(NF->begin(), Fn.getBasicBlockList()); // Loop over the argument list, transferring uses of the old arguments over to // the new arguments, also transferring over the names as well. While we're at // it, remove the dead arguments from the DeadArguments list. // for (Function::arg_iterator I = Fn.arg_begin(), E = Fn.arg_end(), I2 = NF->arg_begin(); I != E; ++I, ++I2) { // Move the name and users over to the new version. I->replaceAllUsesWith(I2); I2->takeName(I); } // Patch the pointer to LLVM function in debug info descriptor. 
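// (If Fn had an associated DISubprogram, it must now describe NF instead;
// otherwise the debug info would keep pointing at the function that is
// about to be erased.)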
auto DI = FunctionDIs.find(&Fn); if (DI != FunctionDIs.end()) { DISubprogram *SP = DI->second; SP->replaceFunction(NF); // Ensure the map is updated so it can be reused on non-varargs argument // eliminations of the same function. FunctionDIs.erase(DI); FunctionDIs[NF] = SP; } // Fix up any BlockAddresses that refer to the function. Fn.replaceAllUsesWith(ConstantExpr::getBitCast(NF, Fn.getType())); // Delete the bitcast that we just created, so that NF does not // appear to be address-taken. NF->removeDeadConstantUsers(); // Finally, nuke the old function. Fn.eraseFromParent(); return true; } /// RemoveDeadArgumentsFromCallers - Checks if the given function has any /// arguments that are unused, and changes the caller parameters to be undefined /// instead. bool DAE::RemoveDeadArgumentsFromCallers(Function &Fn) { // We cannot change the arguments if this TU does not define the function or // if the linker may choose a function body from another TU, even if the // nominal linkage indicates that other copies of the function have the same // semantics. In the below example, the dead load from %p may not have been // eliminated from the linker-chosen copy of f, so replacing %p with undef // in callers may introduce undefined behavior. // // define linkonce_odr void @f(i32* %p) { // %v = load i32 %p // ret void // } if (!Fn.isStrongDefinitionForLinker()) return false; // Functions with local linkage should already have been handled, except the // fragile (variadic) ones which we can improve here. if (Fn.hasLocalLinkage() && !Fn.getFunctionType()->isVarArg()) return false; if (Fn.use_empty()) return false; SmallVector<unsigned, 8> UnusedArgs; for (Function::arg_iterator I = Fn.arg_begin(), E = Fn.arg_end(); I != E; ++I) { Argument *Arg = I; if (Arg->use_empty() && !Arg->hasByValOrInAllocaAttr()) UnusedArgs.push_back(Arg->getArgNo()); } if (UnusedArgs.empty()) return false; bool Changed = false; for (Use &U : Fn.uses()) { CallSite CS(U.getUser()); if (!CS || !CS.isCallee(&U)) continue; // Now go through all unused args and replace them with "undef". for (unsigned I = 0, E = UnusedArgs.size(); I != E; ++I) { unsigned ArgNo = UnusedArgs[I]; Value *Arg = CS.getArgument(ArgNo); CS.setArgument(ArgNo, UndefValue::get(Arg->getType())); ++NumArgumentsReplacedWithUndef; Changed = true; } } return Changed; } /// Convenience function that returns the number of return values. It returns 0 /// for void functions and 1 for functions not returning a struct. It returns /// the number of struct elements for functions returning a struct. static unsigned NumRetVals(const Function *F) { Type *RetTy = F->getReturnType(); if (RetTy->isVoidTy()) return 0; else if (StructType *STy = dyn_cast<StructType>(RetTy)) return STy->getNumElements(); else if (ArrayType *ATy = dyn_cast<ArrayType>(RetTy)) return ATy->getNumElements(); else return 1; } /// Returns the sub-type a function will return at a given Idx. Should /// correspond to the result type of an ExtractValue instruction executed with /// just that one Idx (i.e. only top-level structure is considered). static Type *getRetComponentType(const Function *F, unsigned Idx) { Type *RetTy = F->getReturnType(); assert(!RetTy->isVoidTy() && "void type has no subtype"); if (StructType *STy = dyn_cast<StructType>(RetTy)) return STy->getElementType(Idx); else if (ArrayType *ATy = dyn_cast<ArrayType>(RetTy)) return ATy->getElementType(); else return RetTy; } /// MarkIfNotLive - This checks Use for liveness in LiveValues. 
If Use is not /// live, it adds Use to the MaybeLiveUses argument. Returns the determined /// liveness of Use. DAE::Liveness DAE::MarkIfNotLive(RetOrArg Use, UseVector &MaybeLiveUses) { // We're live if our use or its Function is already marked as live. if (LiveFunctions.count(Use.F) || LiveValues.count(Use)) return Live; // We're maybe live otherwise, but remember that we must become live if // Use becomes live. MaybeLiveUses.push_back(Use); return MaybeLive; } /// SurveyUse - This looks at a single use of an argument or return value /// and determines if it should be alive or not. Adds this use to MaybeLiveUses /// if it causes the used value to become MaybeLive. /// /// RetValNum is the return value number to use when this use is used in a /// return instruction. This is used in the recursion, you should always leave /// it at 0. DAE::Liveness DAE::SurveyUse(const Use *U, UseVector &MaybeLiveUses, unsigned RetValNum) { const User *V = U->getUser(); if (const ReturnInst *RI = dyn_cast<ReturnInst>(V)) { // The value is returned from a function. It's only live when the // function's return value is live. We use RetValNum here, for the case // that U is really a use of an insertvalue instruction that uses the // original Use. const Function *F = RI->getParent()->getParent(); if (RetValNum != -1U) { RetOrArg Use = CreateRet(F, RetValNum); // We might be live, depending on the liveness of Use. return MarkIfNotLive(Use, MaybeLiveUses); } else { DAE::Liveness Result = MaybeLive; for (unsigned i = 0; i < NumRetVals(F); ++i) { RetOrArg Use = CreateRet(F, i); // We might be live, depending on the liveness of Use. If any // sub-value is live, then the entire value is considered live. This // is a conservative choice, and better tracking is possible. DAE::Liveness SubResult = MarkIfNotLive(Use, MaybeLiveUses); if (Result != Live) Result = SubResult; } return Result; } } if (const InsertValueInst *IV = dyn_cast<InsertValueInst>(V)) { if (U->getOperandNo() != InsertValueInst::getAggregateOperandIndex() && IV->hasIndices()) // The use we are examining is inserted into an aggregate. Our liveness // depends on all uses of that aggregate, but if it is used as a return // value, only index at which we were inserted counts. RetValNum = *IV->idx_begin(); // Note that if we are used as the aggregate operand to the insertvalue, // we don't change RetValNum, but do survey all our uses. Liveness Result = MaybeLive; for (const Use &UU : IV->uses()) { Result = SurveyUse(&UU, MaybeLiveUses, RetValNum); if (Result == Live) break; } return Result; } if (auto CS = ImmutableCallSite(V)) { const Function *F = CS.getCalledFunction(); if (F) { // Used in a direct call. // Find the argument number. We know for sure that this use is an // argument, since if it was the function argument this would be an // indirect call and the we know can't be looking at a value of the // label type (for the invoke instruction). unsigned ArgNo = CS.getArgumentNo(U); if (ArgNo >= F->getFunctionType()->getNumParams()) // The value is passed in through a vararg! Must be live. return Live; assert(CS.getArgument(ArgNo) == CS->getOperand(U->getOperandNo()) && "Argument is not where we expected it"); // Value passed to a normal call. It's only live when the corresponding // argument to the called function turns out live. RetOrArg Use = CreateArg(F, ArgNo); return MarkIfNotLive(Use, MaybeLiveUses); } } // Used in any other way? Value must be live. 
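// (E.g. stored to memory, used by a comparison or PHI, or passed through
// an indirect call; all of these are conservatively treated as live.)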
return Live; } /// SurveyUses - This looks at all the uses of the given value /// Returns the Liveness deduced from the uses of this value. /// /// Adds all uses that cause the result to be MaybeLive to MaybeLiveRetUses. If /// the result is Live, MaybeLiveUses might be modified but its content should /// be ignored (since it might not be complete). DAE::Liveness DAE::SurveyUses(const Value *V, UseVector &MaybeLiveUses) { // Assume it's dead (which will only hold if there are no uses at all..). Liveness Result = MaybeLive; // Check each use. for (const Use &U : V->uses()) { Result = SurveyUse(&U, MaybeLiveUses); if (Result == Live) break; } return Result; } // SurveyFunction - This performs the initial survey of the specified function, // checking out whether or not it uses any of its incoming arguments or whether // any callers use the return value. This fills in the LiveValues set and Uses // map. // // We consider arguments of non-internal functions to be intrinsically alive as // well as arguments to functions which have their "address taken". // void DAE::SurveyFunction(const Function &F) { // Functions with inalloca parameters are expecting args in a particular // register and memory layout. if (F.getAttributes().hasAttrSomewhere(Attribute::InAlloca)) { MarkLive(F); return; } unsigned RetCount = NumRetVals(&F); // Assume all return values are dead typedef SmallVector<Liveness, 5> RetVals; RetVals RetValLiveness(RetCount, MaybeLive); typedef SmallVector<UseVector, 5> RetUses; // These vectors map each return value to the uses that make it MaybeLive, so // we can add those to the Uses map if the return value really turns out to be // MaybeLive. Initialized to a list of RetCount empty lists. RetUses MaybeLiveRetUses(RetCount); for (Function::const_iterator BB = F.begin(), E = F.end(); BB != E; ++BB) if (const ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) if (RI->getNumOperands() != 0 && RI->getOperand(0)->getType() != F.getFunctionType()->getReturnType()) { // We don't support old style multiple return values. MarkLive(F); return; } if (!F.hasLocalLinkage() && (!ShouldHackArguments() || F.isIntrinsic())) { MarkLive(F); return; } DEBUG(dbgs() << "DAE - Inspecting callers for fn: " << F.getName() << "\n"); // Keep track of the number of live retvals, so we can skip checks once all // of them turn out to be live. unsigned NumLiveRetVals = 0; // Loop all uses of the function. for (const Use &U : F.uses()) { // If the function is PASSED IN as an argument, its address has been // taken. ImmutableCallSite CS(U.getUser()); if (!CS || !CS.isCallee(&U)) { MarkLive(F); return; } // If this use is anything other than a call site, the function is alive. const Instruction *TheCall = CS.getInstruction(); if (!TheCall) { // Not a direct call site? MarkLive(F); return; } // If we end up here, we are looking at a direct call to our function. // Now, check how our return value(s) is/are used in this caller. Don't // bother checking return values if all of them are live already. if (NumLiveRetVals == RetCount) continue; // Check all uses of the return value. for (const Use &U : TheCall->uses()) { if (ExtractValueInst *Ext = dyn_cast<ExtractValueInst>(U.getUser())) { // This use uses a part of our return value, survey the uses of // that part and store the results for this index only. 
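// (Hypothetical example: for %r = call {i32, i32} @f() whose only user is
// %x = extractvalue {i32, i32} %r, 0, element 0 inherits the liveness of
// %x's uses while element 1 stays MaybeLive and may later be dropped from
// @f's return type.)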
unsigned Idx = *Ext->idx_begin(); if (RetValLiveness[Idx] != Live) { RetValLiveness[Idx] = SurveyUses(Ext, MaybeLiveRetUses[Idx]); if (RetValLiveness[Idx] == Live) NumLiveRetVals++; } } else { // Used by something else than extractvalue. Survey, but assume that the // result applies to all sub-values. UseVector MaybeLiveAggregateUses; if (SurveyUse(&U, MaybeLiveAggregateUses) == Live) { NumLiveRetVals = RetCount; RetValLiveness.assign(RetCount, Live); break; } else { for (unsigned i = 0; i != RetCount; ++i) { if (RetValLiveness[i] != Live) MaybeLiveRetUses[i].append(MaybeLiveAggregateUses.begin(), MaybeLiveAggregateUses.end()); } } } } } // Now we've inspected all callers, record the liveness of our return values. for (unsigned i = 0; i != RetCount; ++i) MarkValue(CreateRet(&F, i), RetValLiveness[i], MaybeLiveRetUses[i]); DEBUG(dbgs() << "DAE - Inspecting args for fn: " << F.getName() << "\n"); // Now, check all of our arguments. unsigned i = 0; UseVector MaybeLiveArgUses; for (Function::const_arg_iterator AI = F.arg_begin(), E = F.arg_end(); AI != E; ++AI, ++i) { Liveness Result; if (F.getFunctionType()->isVarArg()) { // Variadic functions will already have a va_arg function expanded inside // them, making them potentially very sensitive to ABI changes resulting // from removing arguments entirely, so don't. For example AArch64 handles // register and stack HFAs very differently, and this is reflected in the // IR which has already been generated. Result = Live; } else { // See what the effect of this use is (recording any uses that cause // MaybeLive in MaybeLiveArgUses). Result = SurveyUses(AI, MaybeLiveArgUses); } // Mark the result. MarkValue(CreateArg(&F, i), Result, MaybeLiveArgUses); // Clear the vector again for the next iteration. MaybeLiveArgUses.clear(); } } /// MarkValue - This function marks the liveness of RA depending on L. If L is /// MaybeLive, it also takes all uses in MaybeLiveUses and records them in Uses, /// such that RA will be marked live if any use in MaybeLiveUses gets marked /// live later on. void DAE::MarkValue(const RetOrArg &RA, Liveness L, const UseVector &MaybeLiveUses) { switch (L) { case Live: MarkLive(RA); break; case MaybeLive: { // Note any uses of this value, so this return value can be // marked live whenever one of the uses becomes live. for (UseVector::const_iterator UI = MaybeLiveUses.begin(), UE = MaybeLiveUses.end(); UI != UE; ++UI) Uses.insert(std::make_pair(*UI, RA)); break; } } } /// MarkLive - Mark the given Function as alive, meaning that it cannot be /// changed in any way. Additionally, /// mark any values that are used as this function's parameters or by its return /// values (according to Uses) live as well. void DAE::MarkLive(const Function &F) { DEBUG(dbgs() << "DAE - Intrinsically live fn: " << F.getName() << "\n"); // Mark the function as live. LiveFunctions.insert(&F); // Mark all arguments as live. for (unsigned i = 0, e = F.arg_size(); i != e; ++i) PropagateLiveness(CreateArg(&F, i)); // Mark all return values as live. for (unsigned i = 0, e = NumRetVals(&F); i != e; ++i) PropagateLiveness(CreateRet(&F, i)); } /// MarkLive - Mark the given return value or argument as live. Additionally, /// mark any values that are used by this value (according to Uses) live as /// well. void DAE::MarkLive(const RetOrArg &RA) { if (LiveFunctions.count(RA.F)) return; // Function was already marked Live. if (!LiveValues.insert(RA).second) return; // We were already marked Live. 
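// (Newly live: propagate through Uses so that, e.g., if this is the return
// value of @f and Uses records that it forwards the return value of @g,
// then @g's return value becomes live as well.)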
DEBUG(dbgs() << "DAE - Marking " << RA.getDescription() << " live\n"); PropagateLiveness(RA); } /// PropagateLiveness - Given that RA is a live value, propagate it's liveness /// to any other values it uses (according to Uses). void DAE::PropagateLiveness(const RetOrArg &RA) { // We don't use upper_bound (or equal_range) here, because our recursive call // to ourselves is likely to cause the upper_bound (which is the first value // not belonging to RA) to become erased and the iterator invalidated. UseMap::iterator Begin = Uses.lower_bound(RA); UseMap::iterator E = Uses.end(); UseMap::iterator I; for (I = Begin; I != E && I->first == RA; ++I) MarkLive(I->second); // Erase RA from the Uses map (from the lower bound to wherever we ended up // after the loop). Uses.erase(Begin, I); } // RemoveDeadStuffFromFunction - Remove any arguments and return values from F // that are not in LiveValues. Transform the function and all of the callees of // the function to not have these arguments and return values. // bool DAE::RemoveDeadStuffFromFunction(Function *F) { // Don't modify fully live functions if (LiveFunctions.count(F)) return false; // Start by computing a new prototype for the function, which is the same as // the old function, but has fewer arguments and a different return type. FunctionType *FTy = F->getFunctionType(); std::vector<Type*> Params; // Keep track of if we have a live 'returned' argument bool HasLiveReturnedArg = false; // Set up to build a new list of parameter attributes. SmallVector<AttributeSet, 8> AttributesVec; const AttributeSet &PAL = F->getAttributes(); // Remember which arguments are still alive. SmallVector<bool, 10> ArgAlive(FTy->getNumParams(), false); // Construct the new parameter list from non-dead arguments. Also construct // a new set of parameter attributes to correspond. Skip the first parameter // attribute, since that belongs to the return value. unsigned i = 0; for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I, ++i) { RetOrArg Arg = CreateArg(F, i); if (LiveValues.erase(Arg)) { Params.push_back(I->getType()); ArgAlive[i] = true; // Get the original parameter attributes (skipping the first one, that is // for the return value. if (PAL.hasAttributes(i + 1)) { AttrBuilder B(PAL, i + 1); if (B.contains(Attribute::Returned)) HasLiveReturnedArg = true; AttributesVec. push_back(AttributeSet::get(F->getContext(), Params.size(), B)); } } else { ++NumArgumentsEliminated; DEBUG(dbgs() << "DAE - Removing argument " << i << " (" << I->getName() << ") from " << F->getName() << "\n"); } } // Find out the new return value. Type *RetTy = FTy->getReturnType(); Type *NRetTy = nullptr; unsigned RetCount = NumRetVals(F); // -1 means unused, other numbers are the new index SmallVector<int, 5> NewRetIdxs(RetCount, -1); std::vector<Type*> RetTypes; // If there is a function with a live 'returned' argument but a dead return // value, then there are two possible actions: // 1) Eliminate the return value and take off the 'returned' attribute on the // argument. // 2) Retain the 'returned' attribute and treat the return value (but not the // entire function) as live so that it is not eliminated. // // It's not clear in the general case which option is more profitable because, // even in the absence of explicit uses of the return value, code generation // is free to use the 'returned' attribute to do things like eliding // save/restores of registers across calls. 
Whether or not this happens is // target and ABI-specific as well as depending on the amount of register // pressure, so there's no good way for an IR-level pass to figure this out. // // Fortunately, the only places where 'returned' is currently generated by // the FE are places where 'returned' is basically free and almost always a // performance win, so the second option can just be used always for now. // // This should be revisited if 'returned' is ever applied more liberally. if (RetTy->isVoidTy() || HasLiveReturnedArg) { NRetTy = RetTy; } else { // Look at each of the original return values individually. for (unsigned i = 0; i != RetCount; ++i) { RetOrArg Ret = CreateRet(F, i); if (LiveValues.erase(Ret)) { RetTypes.push_back(getRetComponentType(F, i)); NewRetIdxs[i] = RetTypes.size() - 1; } else { ++NumRetValsEliminated; DEBUG(dbgs() << "DAE - Removing return value " << i << " from " << F->getName() << "\n"); } } if (RetTypes.size() > 1) { // More than one return type? Reduce it down to size. if (StructType *STy = dyn_cast<StructType>(RetTy)) { // Make the new struct packed if we used to return a packed struct // already. NRetTy = StructType::get(STy->getContext(), RetTypes, STy->isPacked()); } else { assert(isa<ArrayType>(RetTy) && "unexpected multi-value return"); NRetTy = ArrayType::get(RetTypes[0], RetTypes.size()); } } else if (RetTypes.size() == 1) // One return type? Just a simple value then, but only if we didn't use to // return a struct with that simple value before. NRetTy = RetTypes.front(); else if (RetTypes.size() == 0) // No return types? Make it void, but only if we didn't use to return {}. NRetTy = Type::getVoidTy(F->getContext()); } assert(NRetTy && "No new return type found?"); // The existing function return attributes. AttributeSet RAttrs = PAL.getRetAttributes(); // Remove any incompatible attributes, but only if we removed all return // values. Otherwise, ensure that we don't have any conflicting attributes // here. Currently, this should not be possible, but special handling might be // required when new return value attributes are added. if (NRetTy->isVoidTy()) RAttrs = RAttrs.removeAttributes(NRetTy->getContext(), AttributeSet::ReturnIndex, AttributeFuncs::typeIncompatible(NRetTy)); else assert(!AttrBuilder(RAttrs, AttributeSet::ReturnIndex). overlaps(AttributeFuncs::typeIncompatible(NRetTy)) && "Return attributes no longer compatible?"); if (RAttrs.hasAttributes(AttributeSet::ReturnIndex)) AttributesVec.push_back(AttributeSet::get(NRetTy->getContext(), RAttrs)); if (PAL.hasAttributes(AttributeSet::FunctionIndex)) AttributesVec.push_back(AttributeSet::get(F->getContext(), PAL.getFnAttributes())); // Reconstruct the AttributesList based on the vector we constructed. AttributeSet NewPAL = AttributeSet::get(F->getContext(), AttributesVec); // Create the new function type based on the recomputed parameters. FunctionType *NFTy = FunctionType::get(NRetTy, Params, FTy->isVarArg()); // No change? if (NFTy == FTy) return false; // Create the new function body and insert it into the module... Function *NF = Function::Create(NFTy, F->getLinkage()); NF->copyAttributesFrom(F); NF->setAttributes(NewPAL); // Insert the new function before the old function, so we won't be processing // it again. F->getParent()->getFunctionList().insert(F, NF); NF->takeName(F); // Loop over all of the callers of the function, transforming the call sites // to pass in a smaller number of arguments into the new function. 
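// (Illustrative sketch, hypothetical IR, not taken from any test: if only the
// first parameter of
//   define internal i32 @f(i32 %used, i32 %dead) { ... }
// survives the liveness analysis, a caller such as
//   %r = call i32 @f(i32 %x, i32 %y)
// is rebuilt below against the shrunken prototype as
//   %r = call i32 @f(i32 %x)
// with the surviving parameter attributes carried over.)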
// std::vector<Value*> Args; while (!F->use_empty()) { CallSite CS(F->user_back()); Instruction *Call = CS.getInstruction(); AttributesVec.clear(); const AttributeSet &CallPAL = CS.getAttributes(); // The call return attributes. AttributeSet RAttrs = CallPAL.getRetAttributes(); // Adjust in case the function was changed to return void. RAttrs = RAttrs.removeAttributes(NRetTy->getContext(), AttributeSet::ReturnIndex, AttributeFuncs::typeIncompatible(NF->getReturnType())); if (RAttrs.hasAttributes(AttributeSet::ReturnIndex)) AttributesVec.push_back(AttributeSet::get(NF->getContext(), RAttrs)); // Declare these outside of the loops, so we can reuse them for the second // loop, which loops the varargs. CallSite::arg_iterator I = CS.arg_begin(); unsigned i = 0; // Loop over those operands, corresponding to the normal arguments to the // original function, and add those that are still alive. for (unsigned e = FTy->getNumParams(); i != e; ++I, ++i) if (ArgAlive[i]) { Args.push_back(*I); // Get original parameter attributes, but skip return attributes. if (CallPAL.hasAttributes(i + 1)) { AttrBuilder B(CallPAL, i + 1); // If the return type has changed, then get rid of 'returned' on the // call site. The alternative is to make all 'returned' attributes on // call sites keep the return value alive just like 'returned' // attributes on function declaration but it's less clearly a win // and this is not an expected case anyway if (NRetTy != RetTy && B.contains(Attribute::Returned)) B.removeAttribute(Attribute::Returned); AttributesVec. push_back(AttributeSet::get(F->getContext(), Args.size(), B)); } } // Push any varargs arguments on the list. Don't forget their attributes. for (CallSite::arg_iterator E = CS.arg_end(); I != E; ++I, ++i) { Args.push_back(*I); if (CallPAL.hasAttributes(i + 1)) { AttrBuilder B(CallPAL, i + 1); AttributesVec. push_back(AttributeSet::get(F->getContext(), Args.size(), B)); } } if (CallPAL.hasAttributes(AttributeSet::FunctionIndex)) AttributesVec.push_back(AttributeSet::get(Call->getContext(), CallPAL.getFnAttributes())); // Reconstruct the AttributesList based on the vector we constructed. AttributeSet NewCallPAL = AttributeSet::get(F->getContext(), AttributesVec); Instruction *New; if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) { New = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(), Args, "", Call); cast<InvokeInst>(New)->setCallingConv(CS.getCallingConv()); cast<InvokeInst>(New)->setAttributes(NewCallPAL); } else { New = CallInst::Create(NF, Args, "", Call); cast<CallInst>(New)->setCallingConv(CS.getCallingConv()); cast<CallInst>(New)->setAttributes(NewCallPAL); if (cast<CallInst>(Call)->isTailCall()) cast<CallInst>(New)->setTailCall(); } New->setDebugLoc(Call->getDebugLoc()); Args.clear(); if (!Call->use_empty()) { if (New->getType() == Call->getType()) { // Return type not changed? Just replace users then. Call->replaceAllUsesWith(New); New->takeName(Call); } else if (New->getType()->isVoidTy()) { // Our return value has uses, but they will get removed later on. // Replace by null for now. if (!Call->getType()->isX86_MMXTy()) Call->replaceAllUsesWith(Constant::getNullValue(Call->getType())); } else { assert((RetTy->isStructTy() || RetTy->isArrayTy()) && "Return type changed, but not into a void. 
The old return type" " must have been a struct or an array!"); Instruction *InsertPt = Call; if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) { BasicBlock::iterator IP = II->getNormalDest()->begin(); while (isa<PHINode>(IP)) ++IP; InsertPt = IP; } // We used to return a struct or array. Instead of doing smart stuff // with all the uses, we will just rebuild it using extract/insertvalue // chaining and let instcombine clean that up. // // Start out building up our return value from undef Value *RetVal = UndefValue::get(RetTy); for (unsigned i = 0; i != RetCount; ++i) if (NewRetIdxs[i] != -1) { Value *V; if (RetTypes.size() > 1) // We are still returning a struct, so extract the value from our // return value V = ExtractValueInst::Create(New, NewRetIdxs[i], "newret", InsertPt); else // We are now returning a single element, so just insert that V = New; // Insert the value at the old position RetVal = InsertValueInst::Create(RetVal, V, i, "oldret", InsertPt); } // Now, replace all uses of the old call instruction with the return // struct we built Call->replaceAllUsesWith(RetVal); New->takeName(Call); } } // Finally, remove the old call from the program, reducing the use-count of // F. Call->eraseFromParent(); } // Since we have now created the new function, splice the body of the old // function right into the new function, leaving the old rotting hulk of the // function empty. NF->getBasicBlockList().splice(NF->begin(), F->getBasicBlockList()); // Loop over the argument list, transferring uses of the old arguments over to // the new arguments, also transferring over the names as well. i = 0; for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(), I2 = NF->arg_begin(); I != E; ++I, ++i) if (ArgAlive[i]) { // If this is a live argument, move the name and users over to the new // version. I->replaceAllUsesWith(I2); I2->takeName(I); ++I2; } else { // If this argument is dead, replace any uses of it with null constants // (these are guaranteed to become unused later on). if (!I->getType()->isX86_MMXTy()) I->replaceAllUsesWith(Constant::getNullValue(I->getType())); } // If we change the return value of the function we must rewrite any return // instructions. Check this now. if (F->getReturnType() != NF->getReturnType()) for (Function::iterator BB = NF->begin(), E = NF->end(); BB != E; ++BB) if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) { Value *RetVal; if (NFTy->getReturnType()->isVoidTy()) { RetVal = nullptr; } else { assert(RetTy->isStructTy() || RetTy->isArrayTy()); // The original return value was a struct or array, insert // extractvalue/insertvalue chains to extract only the values we need // to return and insert them into our new result. // This does generate messy code, but we'll let it to instcombine to // clean that up. Value *OldRet = RI->getOperand(0); // Start out building up our return value from undef RetVal = UndefValue::get(NRetTy); for (unsigned i = 0; i != RetCount; ++i) if (NewRetIdxs[i] != -1) { ExtractValueInst *EV = ExtractValueInst::Create(OldRet, i, "oldret", RI); if (RetTypes.size() > 1) { // We're still returning a struct, so reinsert the value into // our new return value at the new index RetVal = InsertValueInst::Create(RetVal, EV, NewRetIdxs[i], "newret", RI); } else { // We are now only returning a simple value, so just return the // extracted value. RetVal = EV; } } } // Replace the return instruction with one returning the new return // value (possibly 0 if we became void). 
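// (Illustrative sketch, hypothetical IR: if the old return type was
// { i32, i32 } and only element 0 is live, a "ret { i32, i32 } %pair" in the
// moved body ends up as
//   %oldret = extractvalue { i32, i32 } %pair, 0
//   ret i32 %oldret
// once the old return instruction is replaced and erased below.)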
ReturnInst::Create(F->getContext(), RetVal, RI); BB->getInstList().erase(RI); } // Patch the pointer to LLVM function in debug info descriptor. auto DI = FunctionDIs.find(F); if (DI != FunctionDIs.end()) DI->second->replaceFunction(NF); // Now that the old function is dead, delete it. F->eraseFromParent(); return true; } bool DAE::runOnModule(Module &M) { bool Changed = false; // Collect debug info descriptors for functions. FunctionDIs = makeSubprogramMap(M); // First pass: Do a simple check to see if any functions can have their "..." // removed. We can do this if they never call va_start. This loop cannot be // fused with the next loop, because deleting a function invalidates // information computed while surveying other functions. DEBUG(dbgs() << "DAE - Deleting dead varargs\n"); for (Module::iterator I = M.begin(), E = M.end(); I != E; ) { Function &F = *I++; if (F.getFunctionType()->isVarArg()) Changed |= DeleteDeadVarargs(F); } // Second phase:loop through the module, determining which arguments are live. // We assume all arguments are dead unless proven otherwise (allowing us to // determine that dead arguments passed into recursive functions are dead). // DEBUG(dbgs() << "DAE - Determining liveness\n"); for (auto &F : M) SurveyFunction(F); // Now, remove all dead arguments and return values from each function in // turn. for (Module::iterator I = M.begin(), E = M.end(); I != E; ) { // Increment now, because the function will probably get removed (ie. // replaced by a new one). Function *F = I++; Changed |= RemoveDeadStuffFromFunction(F); } // Finally, look for any unused parameters in functions with non-local // linkage and replace the passed in parameters with undef. for (auto &F : M) Changed |= RemoveDeadArgumentsFromCallers(F); return Changed; }
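// (Usage sketch, assuming this pass is registered with the legacy pass manager
// under its usual name "deadargelim": running
//   opt -deadargelim input.ll -S -o output.ll
// exercises the vararg removal, liveness survey, and argument/return-value
// pruning implemented above.)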
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/IPO/MergeFunctions.cpp
//===- MergeFunctions.cpp - Merge identical functions ---------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass looks for equivalent functions that are mergable and folds them. // // Order relation is defined on set of functions. It was made through // special function comparison procedure that returns // 0 when functions are equal, // -1 when Left function is less than right function, and // 1 for opposite case. We need total-ordering, so we need to maintain // four properties on the functions set: // a <= a (reflexivity) // if a <= b and b <= a then a = b (antisymmetry) // if a <= b and b <= c then a <= c (transitivity). // for all a and b: a <= b or b <= a (totality). // // Comparison iterates through each instruction in each basic block. // Functions are kept on binary tree. For each new function F we perform // lookup in binary tree. // In practice it works the following way: // -- We define Function* container class with custom "operator<" (FunctionPtr). // -- "FunctionPtr" instances are stored in std::set collection, so every // std::set::insert operation will give you result in log(N) time. // // When a match is found the functions are folded. If both functions are // overridable, we move the functionality into a new internal function and // leave two overridable thunks to it. // //===----------------------------------------------------------------------===// // // Future work: // // * virtual functions. // // Many functions have their address taken by the virtual function table for // the object they belong to. However, as long as it's only used for a lookup // and call, this is irrelevant, and we'd like to fold such functions. // // * be smarter about bitcasts. // // In order to fold functions, we will sometimes add either bitcast instructions // or bitcast constant expressions. Unfortunately, this can confound further // analysis since the two functions differ where one has a bitcast and the // other doesn't. We should learn to look through bitcasts. // // * Compare complex types with pointer types inside. // * Compare cross-reference cases. // * Compare complex expressions. // // All the three issues above could be described as ability to prove that // fA == fB == fC == fE == fF == fG in example below: // // void fA() { // fB(); // } // void fB() { // fA(); // } // // void fE() { // fF(); // } // void fF() { // fG(); // } // void fG() { // fE(); // } // // Simplest cross-reference case (fA <--> fB) was implemented in previous // versions of MergeFunctions, though it presented only in two function pairs // in test-suite (that counts >50k functions) // Though possibility to detect complex cross-referencing (e.g.: A->B->C->D->A) // could cover much more cases. 
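// As an illustrative example of the case that is already handled, two
// functions such as
//
// int sumA(int *p, int n) {
//   int s = 0;
//   for (int i = 0; i != n; ++i) s += p[i];
//   return s;
// }
// int sumB(int *q, int m) {
//   int s = 0;
//   for (int i = 0; i != m; ++i) s += q[i];
//   return s;
// }
//
// lower to identical IR (parameter names are irrelevant), so one body is kept
// and the other symbol becomes a thunk or alias to it.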
// //===----------------------------------------------------------------------===// #include "llvm/Transforms/IPO.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/FoldingSet.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/Statistic.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/InlineAsm.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/IR/Operator.h" #include "llvm/IR/ValueHandle.h" #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" #include <vector> using namespace llvm; #define DEBUG_TYPE "mergefunc" STATISTIC(NumFunctionsMerged, "Number of functions merged"); STATISTIC(NumThunksWritten, "Number of thunks generated"); STATISTIC(NumAliasesWritten, "Number of aliases generated"); STATISTIC(NumDoubleWeak, "Number of new functions created"); #if 0 // HLSL Change static cl::opt<unsigned> NumFunctionsForSanityCheck( "mergefunc-sanity", cl::desc("How many functions in module could be used for " "MergeFunctions pass sanity check. " "'0' disables this check. Works only with '-debug' key."), cl::init(0), cl::Hidden); #endif namespace { /// FunctionComparator - Compares two functions to determine whether or not /// they will generate machine code with the same behaviour. DataLayout is /// used if available. The comparator always fails conservatively (erring on the /// side of claiming that two functions are different). class FunctionComparator { public: FunctionComparator(const Function *F1, const Function *F2) : FnL(F1), FnR(F2) {} /// Test whether the two functions have equivalent behaviour. int compare(); private: /// Test whether two basic blocks have equivalent behaviour. int compare(const BasicBlock *BBL, const BasicBlock *BBR); /// Constants comparison. /// Its analog to lexicographical comparison between hypothetical numbers /// of next format: /// <bitcastability-trait><raw-bit-contents> /// /// 1. Bitcastability. /// Check whether L's type could be losslessly bitcasted to R's type. /// On this stage method, in case when lossless bitcast is not possible /// method returns -1 or 1, thus also defining which type is greater in /// context of bitcastability. /// Stage 0: If types are equal in terms of cmpTypes, then we can go straight /// to the contents comparison. /// If types differ, remember types comparison result and check /// whether we still can bitcast types. /// Stage 1: Types that satisfies isFirstClassType conditions are always /// greater then others. /// Stage 2: Vector is greater then non-vector. /// If both types are vectors, then vector with greater bitwidth is /// greater. /// If both types are vectors with the same bitwidth, then types /// are bitcastable, and we can skip other stages, and go to contents /// comparison. /// Stage 3: Pointer types are greater than non-pointers. If both types are /// pointers of the same address space - go to contents comparison. /// Different address spaces: pointer with greater address space is /// greater. /// Stage 4: Types are neither vectors, nor pointers. And they differ. /// We don't know how to bitcast them. So, we better don't do it, /// and return types comparison result (so it determines the /// relationship among constants we don't know how to bitcast). 
/// /// Just for clearance, let's see how the set of constants could look /// on single dimension axis: /// /// [NFCT], [FCT, "others"], [FCT, pointers], [FCT, vectors] /// Where: NFCT - Not a FirstClassType /// FCT - FirstClassTyp: /// /// 2. Compare raw contents. /// It ignores types on this stage and only compares bits from L and R. /// Returns 0, if L and R has equivalent contents. /// -1 or 1 if values are different. /// Pretty trivial: /// 2.1. If contents are numbers, compare numbers. /// Ints with greater bitwidth are greater. Ints with same bitwidths /// compared by their contents. /// 2.2. "And so on". Just to avoid discrepancies with comments /// perhaps it would be better to read the implementation itself. /// 3. And again about overall picture. Let's look back at how the ordered set /// of constants will look like: /// [NFCT], [FCT, "others"], [FCT, pointers], [FCT, vectors] /// /// Now look, what could be inside [FCT, "others"], for example: /// [FCT, "others"] = /// [ /// [double 0.1], [double 1.23], /// [i32 1], [i32 2], /// { double 1.0 }, ; StructTyID, NumElements = 1 /// { i32 1 }, ; StructTyID, NumElements = 1 /// { double 1, i32 1 }, ; StructTyID, NumElements = 2 /// { i32 1, double 1 } ; StructTyID, NumElements = 2 /// ] /// /// Let's explain the order. Float numbers will be less than integers, just /// because of cmpType terms: FloatTyID < IntegerTyID. /// Floats (with same fltSemantics) are sorted according to their value. /// Then you can see integers, and they are, like a floats, /// could be easy sorted among each others. /// The structures. Structures are grouped at the tail, again because of their /// TypeID: StructTyID > IntegerTyID > FloatTyID. /// Structures with greater number of elements are greater. Structures with /// greater elements going first are greater. /// The same logic with vectors, arrays and other possible complex types. /// /// Bitcastable constants. /// Let's assume, that some constant, belongs to some group of /// "so-called-equal" values with different types, and at the same time /// belongs to another group of constants with equal types /// and "really" equal values. /// /// Now, prove that this is impossible: /// /// If constant A with type TyA is bitcastable to B with type TyB, then: /// 1. All constants with equal types to TyA, are bitcastable to B. Since /// those should be vectors (if TyA is vector), pointers /// (if TyA is pointer), or else (if TyA equal to TyB), those types should /// be equal to TyB. /// 2. All constants with non-equal, but bitcastable types to TyA, are /// bitcastable to B. /// Once again, just because we allow it to vectors and pointers only. /// This statement could be expanded as below: /// 2.1. All vectors with equal bitwidth to vector A, has equal bitwidth to /// vector B, and thus bitcastable to B as well. /// 2.2. All pointers of the same address space, no matter what they point to, /// bitcastable. So if C is pointer, it could be bitcasted to A and to B. /// So any constant equal or bitcastable to A is equal or bitcastable to B. /// QED. /// /// In another words, for pointers and vectors, we ignore top-level type and /// look at their particular properties (bit-width for vectors, and /// address space for pointers). /// If these properties are equal - compare their contents. int cmpConstants(const Constant *L, const Constant *R); /// Assign or look up previously assigned numbers for the two values, and /// return whether the numbers are equal. Numbers are assigned in the order /// visited. 
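/// (Illustrative example: when comparing "%a = add i32 %x, %y" against
/// "%b = add i32 %p, %q", the operand pairs %x/%p and %y/%q are first seen at
/// the same positions, receive matching serial numbers on their respective
/// sides, and therefore compare equal.)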
/// Comparison order: /// Stage 0: Value that is function itself is always greater then others. /// If left and right values are references to their functions, then /// they are equal. /// Stage 1: Constants are greater than non-constants. /// If both left and right are constants, then the result of /// cmpConstants is used as cmpValues result. /// Stage 2: InlineAsm instances are greater than others. If both left and /// right are InlineAsm instances, InlineAsm* pointers casted to /// integers and compared as numbers. /// Stage 3: For all other cases we compare order we meet these values in /// their functions. If right value was met first during scanning, /// then left value is greater. /// In another words, we compare serial numbers, for more details /// see comments for sn_mapL and sn_mapR. int cmpValues(const Value *L, const Value *R); /// Compare two Instructions for equivalence, similar to /// Instruction::isSameOperationAs but with modifications to the type /// comparison. /// Stages are listed in "most significant stage first" order: /// On each stage below, we do comparison between some left and right /// operation parts. If parts are non-equal, we assign parts comparison /// result to the operation comparison result and exit from method. /// Otherwise we proceed to the next stage. /// Stages: /// 1. Operations opcodes. Compared as numbers. /// 2. Number of operands. /// 3. Operation types. Compared with cmpType method. /// 4. Compare operation subclass optional data as stream of bytes: /// just convert it to integers and call cmpNumbers. /// 5. Compare in operation operand types with cmpType in /// most significant operand first order. /// 6. Last stage. Check operations for some specific attributes. /// For example, for Load it would be: /// 6.1.Load: volatile (as boolean flag) /// 6.2.Load: alignment (as integer numbers) /// 6.3.Load: synch-scope (as integer numbers) /// 6.4.Load: range metadata (as integer numbers) /// On this stage its better to see the code, since its not more than 10-15 /// strings for particular instruction, and could change sometimes. int cmpOperations(const Instruction *L, const Instruction *R) const; /// Compare two GEPs for equivalent pointer arithmetic. /// Parts to be compared for each comparison stage, /// most significant stage first: /// 1. Address space. As numbers. /// 2. Constant offset, (using GEPOperator::accumulateConstantOffset method). /// 3. Pointer operand type (using cmpType method). /// 4. Number of operands. /// 5. Compare operands, using cmpValues method. int cmpGEPs(const GEPOperator *GEPL, const GEPOperator *GEPR); int cmpGEPs(const GetElementPtrInst *GEPL, const GetElementPtrInst *GEPR) { return cmpGEPs(cast<GEPOperator>(GEPL), cast<GEPOperator>(GEPR)); } /// cmpType - compares two types, /// defines total ordering among the types set. /// /// Return values: /// 0 if types are equal, /// -1 if Left is less than Right, /// +1 if Left is greater than Right. /// /// Description: /// Comparison is broken onto stages. Like in lexicographical comparison /// stage coming first has higher priority. /// On each explanation stage keep in mind total ordering properties. /// /// 0. Before comparison we coerce pointer types of 0 address space to /// integer. /// We also don't bother with same type at left and right, so /// just return 0 in this case. /// /// 1. If types are of different kind (different type IDs). /// Return result of type IDs comparison, treating them as numbers. /// 2. 
If types are vectors or integers, compare Type* values as numbers. /// 3. Types has same ID, so check whether they belongs to the next group: /// * Void /// * Float /// * Double /// * X86_FP80 /// * FP128 /// * PPC_FP128 /// * Label /// * Metadata /// If so - return 0, yes - we can treat these types as equal only because /// their IDs are same. /// 4. If Left and Right are pointers, return result of address space /// comparison (numbers comparison). We can treat pointer types of same /// address space as equal. /// 5. If types are complex. /// Then both Left and Right are to be expanded and their element types will /// be checked with the same way. If we get Res != 0 on some stage, return it. /// Otherwise return 0. /// 6. For all other cases put llvm_unreachable. int cmpTypes(Type *TyL, Type *TyR) const; int cmpNumbers(uint64_t L, uint64_t R) const; int cmpAPInts(const APInt &L, const APInt &R) const; int cmpAPFloats(const APFloat &L, const APFloat &R) const; int cmpStrings(StringRef L, StringRef R) const; int cmpAttrs(const AttributeSet L, const AttributeSet R) const; // The two functions undergoing comparison. const Function *FnL, *FnR; /// Assign serial numbers to values from left function, and values from /// right function. /// Explanation: /// Being comparing functions we need to compare values we meet at left and /// right sides. /// Its easy to sort things out for external values. It just should be /// the same value at left and right. /// But for local values (those were introduced inside function body) /// we have to ensure they were introduced at exactly the same place, /// and plays the same role. /// Let's assign serial number to each value when we meet it first time. /// Values that were met at same place will be with same serial numbers. /// In this case it would be good to explain few points about values assigned /// to BBs and other ways of implementation (see below). /// /// 1. Safety of BB reordering. /// It's safe to change the order of BasicBlocks in function. /// Relationship with other functions and serial numbering will not be /// changed in this case. /// As follows from FunctionComparator::compare(), we do CFG walk: we start /// from the entry, and then take each terminator. So it doesn't matter how in /// fact BBs are ordered in function. And since cmpValues are called during /// this walk, the numbering depends only on how BBs located inside the CFG. /// So the answer is - yes. We will get the same numbering. /// /// 2. Impossibility to use dominance properties of values. /// If we compare two instruction operands: first is usage of local /// variable AL from function FL, and second is usage of local variable AR /// from FR, we could compare their origins and check whether they are /// defined at the same place. /// But, we are still not able to compare operands of PHI nodes, since those /// could be operands from further BBs we didn't scan yet. /// So it's impossible to use dominance properties in general. DenseMap<const Value*, int> sn_mapL, sn_mapR; }; class FunctionNode { mutable AssertingVH<Function> F; public: FunctionNode(Function *F) : F(F) {} Function *getFunc() const { return F; } /// Replace the reference to the function F by the function G, assuming their /// implementations are equal. 
void replaceBy(Function *G) const { assert(!(*this < FunctionNode(G)) && !(FunctionNode(G) < *this) && "The two functions must be equal"); F = G; } void release() { F = 0; } bool operator<(const FunctionNode &RHS) const { return (FunctionComparator(F, RHS.getFunc()).compare()) == -1; } }; } int FunctionComparator::cmpNumbers(uint64_t L, uint64_t R) const { if (L < R) return -1; if (L > R) return 1; return 0; } int FunctionComparator::cmpAPInts(const APInt &L, const APInt &R) const { if (int Res = cmpNumbers(L.getBitWidth(), R.getBitWidth())) return Res; if (L.ugt(R)) return 1; if (R.ugt(L)) return -1; return 0; } int FunctionComparator::cmpAPFloats(const APFloat &L, const APFloat &R) const { if (int Res = cmpNumbers((uint64_t)&L.getSemantics(), (uint64_t)&R.getSemantics())) return Res; return cmpAPInts(L.bitcastToAPInt(), R.bitcastToAPInt()); } int FunctionComparator::cmpStrings(StringRef L, StringRef R) const { // Prevent heavy comparison, compare sizes first. if (int Res = cmpNumbers(L.size(), R.size())) return Res; // Compare strings lexicographically only when it is necessary: only when // strings are equal in size. return L.compare(R); } int FunctionComparator::cmpAttrs(const AttributeSet L, const AttributeSet R) const { if (int Res = cmpNumbers(L.getNumSlots(), R.getNumSlots())) return Res; for (unsigned i = 0, e = L.getNumSlots(); i != e; ++i) { AttributeSet::iterator LI = L.begin(i), LE = L.end(i), RI = R.begin(i), RE = R.end(i); for (; LI != LE && RI != RE; ++LI, ++RI) { Attribute LA = *LI; Attribute RA = *RI; if (LA < RA) return -1; if (RA < LA) return 1; } if (LI != LE) return 1; if (RI != RE) return -1; } return 0; } /// Constants comparison: /// 1. Check whether type of L constant could be losslessly bitcasted to R /// type. /// 2. Compare constant contents. /// For more details see declaration comments. int FunctionComparator::cmpConstants(const Constant *L, const Constant *R) { Type *TyL = L->getType(); Type *TyR = R->getType(); // Check whether types are bitcastable. This part is just re-factored // Type::canLosslesslyBitCastTo method, but instead of returning true/false, // we also pack into result which type is "less" for us. int TypesRes = cmpTypes(TyL, TyR); if (TypesRes != 0) { // Types are different, but check whether we can bitcast them. if (!TyL->isFirstClassType()) { if (TyR->isFirstClassType()) return -1; // Neither TyL nor TyR are values of first class type. Return the result // of comparing the types return TypesRes; } if (!TyR->isFirstClassType()) { if (TyL->isFirstClassType()) return 1; return TypesRes; } // Vector -> Vector conversions are always lossless if the two vector types // have the same size, otherwise not. unsigned TyLWidth = 0; unsigned TyRWidth = 0; if (const VectorType *VecTyL = dyn_cast<VectorType>(TyL)) TyLWidth = VecTyL->getBitWidth(); if (const VectorType *VecTyR = dyn_cast<VectorType>(TyR)) TyRWidth = VecTyR->getBitWidth(); if (TyLWidth != TyRWidth) return cmpNumbers(TyLWidth, TyRWidth); // Zero bit-width means neither TyL nor TyR are vectors. if (!TyLWidth) { PointerType *PTyL = dyn_cast<PointerType>(TyL); PointerType *PTyR = dyn_cast<PointerType>(TyR); if (PTyL && PTyR) { unsigned AddrSpaceL = PTyL->getAddressSpace(); unsigned AddrSpaceR = PTyR->getAddressSpace(); if (int Res = cmpNumbers(AddrSpaceL, AddrSpaceR)) return Res; } if (PTyL) return 1; if (PTyR) return -1; // TyL and TyR aren't vectors, nor pointers. We don't know how to // bitcast them. return TypesRes; } } // OK, types are bitcastable, now check constant contents. 
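// (Illustrative example: <4 x i8> zeroinitializer and <2 x i16>
// zeroinitializer have the same bit width, so they are treated as bitcastable
// and reach this point; both are null, so the ordering falls back to the
// type-comparison result computed above.)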
if (L->isNullValue() && R->isNullValue()) return TypesRes; if (L->isNullValue() && !R->isNullValue()) return 1; if (!L->isNullValue() && R->isNullValue()) return -1; if (int Res = cmpNumbers(L->getValueID(), R->getValueID())) return Res; switch (L->getValueID()) { case Value::UndefValueVal: return TypesRes; case Value::ConstantIntVal: { const APInt &LInt = cast<ConstantInt>(L)->getValue(); const APInt &RInt = cast<ConstantInt>(R)->getValue(); return cmpAPInts(LInt, RInt); } case Value::ConstantFPVal: { const APFloat &LAPF = cast<ConstantFP>(L)->getValueAPF(); const APFloat &RAPF = cast<ConstantFP>(R)->getValueAPF(); return cmpAPFloats(LAPF, RAPF); } case Value::ConstantArrayVal: { const ConstantArray *LA = cast<ConstantArray>(L); const ConstantArray *RA = cast<ConstantArray>(R); uint64_t NumElementsL = cast<ArrayType>(TyL)->getNumElements(); uint64_t NumElementsR = cast<ArrayType>(TyR)->getNumElements(); if (int Res = cmpNumbers(NumElementsL, NumElementsR)) return Res; for (uint64_t i = 0; i < NumElementsL; ++i) { if (int Res = cmpConstants(cast<Constant>(LA->getOperand(i)), cast<Constant>(RA->getOperand(i)))) return Res; } return 0; } case Value::ConstantStructVal: { const ConstantStruct *LS = cast<ConstantStruct>(L); const ConstantStruct *RS = cast<ConstantStruct>(R); unsigned NumElementsL = cast<StructType>(TyL)->getNumElements(); unsigned NumElementsR = cast<StructType>(TyR)->getNumElements(); if (int Res = cmpNumbers(NumElementsL, NumElementsR)) return Res; for (unsigned i = 0; i != NumElementsL; ++i) { if (int Res = cmpConstants(cast<Constant>(LS->getOperand(i)), cast<Constant>(RS->getOperand(i)))) return Res; } return 0; } case Value::ConstantVectorVal: { const ConstantVector *LV = cast<ConstantVector>(L); const ConstantVector *RV = cast<ConstantVector>(R); unsigned NumElementsL = cast<VectorType>(TyL)->getNumElements(); unsigned NumElementsR = cast<VectorType>(TyR)->getNumElements(); if (int Res = cmpNumbers(NumElementsL, NumElementsR)) return Res; for (uint64_t i = 0; i < NumElementsL; ++i) { if (int Res = cmpConstants(cast<Constant>(LV->getOperand(i)), cast<Constant>(RV->getOperand(i)))) return Res; } return 0; } case Value::ConstantExprVal: { const ConstantExpr *LE = cast<ConstantExpr>(L); const ConstantExpr *RE = cast<ConstantExpr>(R); unsigned NumOperandsL = LE->getNumOperands(); unsigned NumOperandsR = RE->getNumOperands(); if (int Res = cmpNumbers(NumOperandsL, NumOperandsR)) return Res; for (unsigned i = 0; i < NumOperandsL; ++i) { if (int Res = cmpConstants(cast<Constant>(LE->getOperand(i)), cast<Constant>(RE->getOperand(i)))) return Res; } return 0; } case Value::FunctionVal: case Value::GlobalVariableVal: case Value::GlobalAliasVal: default: // Unknown constant, cast L and R pointers to numbers and compare. return cmpNumbers((uint64_t)L, (uint64_t)R); } } /// cmpType - compares two types, /// defines total ordering among the types set. /// See method declaration comments for more details. int FunctionComparator::cmpTypes(Type *TyL, Type *TyR) const { PointerType *PTyL = dyn_cast<PointerType>(TyL); PointerType *PTyR = dyn_cast<PointerType>(TyR); const DataLayout &DL = FnL->getParent()->getDataLayout(); if (PTyL && PTyL->getAddressSpace() == 0) TyL = DL.getIntPtrType(TyL); if (PTyR && PTyR->getAddressSpace() == 0) TyR = DL.getIntPtrType(TyR); if (TyL == TyR) return 0; if (int Res = cmpNumbers(TyL->getTypeID(), TyR->getTypeID())) return Res; switch (TyL->getTypeID()) { default: llvm_unreachable("Unknown type!"); // Fall through in Release mode. 
case Type::IntegerTyID: case Type::VectorTyID: // TyL == TyR would have returned true earlier. return cmpNumbers((uint64_t)TyL, (uint64_t)TyR); case Type::VoidTyID: case Type::FloatTyID: case Type::DoubleTyID: case Type::X86_FP80TyID: case Type::FP128TyID: case Type::PPC_FP128TyID: case Type::LabelTyID: case Type::MetadataTyID: return 0; case Type::PointerTyID: { assert(PTyL && PTyR && "Both types must be pointers here."); return cmpNumbers(PTyL->getAddressSpace(), PTyR->getAddressSpace()); } case Type::StructTyID: { StructType *STyL = cast<StructType>(TyL); StructType *STyR = cast<StructType>(TyR); if (STyL->getNumElements() != STyR->getNumElements()) return cmpNumbers(STyL->getNumElements(), STyR->getNumElements()); if (STyL->isPacked() != STyR->isPacked()) return cmpNumbers(STyL->isPacked(), STyR->isPacked()); for (unsigned i = 0, e = STyL->getNumElements(); i != e; ++i) { if (int Res = cmpTypes(STyL->getElementType(i), STyR->getElementType(i))) return Res; } return 0; } case Type::FunctionTyID: { FunctionType *FTyL = cast<FunctionType>(TyL); FunctionType *FTyR = cast<FunctionType>(TyR); if (FTyL->getNumParams() != FTyR->getNumParams()) return cmpNumbers(FTyL->getNumParams(), FTyR->getNumParams()); if (FTyL->isVarArg() != FTyR->isVarArg()) return cmpNumbers(FTyL->isVarArg(), FTyR->isVarArg()); if (int Res = cmpTypes(FTyL->getReturnType(), FTyR->getReturnType())) return Res; for (unsigned i = 0, e = FTyL->getNumParams(); i != e; ++i) { if (int Res = cmpTypes(FTyL->getParamType(i), FTyR->getParamType(i))) return Res; } return 0; } case Type::ArrayTyID: { ArrayType *ATyL = cast<ArrayType>(TyL); ArrayType *ATyR = cast<ArrayType>(TyR); if (ATyL->getNumElements() != ATyR->getNumElements()) return cmpNumbers(ATyL->getNumElements(), ATyR->getNumElements()); return cmpTypes(ATyL->getElementType(), ATyR->getElementType()); } } } // Determine whether the two operations are the same except that pointer-to-A // and pointer-to-B are equivalent. This should be kept in sync with // Instruction::isSameOperationAs. // Read method declaration comments for more details. int FunctionComparator::cmpOperations(const Instruction *L, const Instruction *R) const { // Differences from Instruction::isSameOperationAs: // * replace type comparison with calls to isEquivalentType. // * we test for I->hasSameSubclassOptionalData (nuw/nsw/tail) at the top // * because of the above, we don't test for the tail bit on calls later on if (int Res = cmpNumbers(L->getOpcode(), R->getOpcode())) return Res; if (int Res = cmpNumbers(L->getNumOperands(), R->getNumOperands())) return Res; if (int Res = cmpTypes(L->getType(), R->getType())) return Res; if (int Res = cmpNumbers(L->getRawSubclassOptionalData(), R->getRawSubclassOptionalData())) return Res; if (const AllocaInst *AI = dyn_cast<AllocaInst>(L)) { if (int Res = cmpTypes(AI->getAllocatedType(), cast<AllocaInst>(R)->getAllocatedType())) return Res; if (int Res = cmpNumbers(AI->getAlignment(), cast<AllocaInst>(R)->getAlignment())) return Res; } // We have two instructions of identical opcode and #operands. Check to see // if all operands are the same type for (unsigned i = 0, e = L->getNumOperands(); i != e; ++i) { if (int Res = cmpTypes(L->getOperand(i)->getType(), R->getOperand(i)->getType())) return Res; } // Check special state that is a part of some instructions. 
if (const LoadInst *LI = dyn_cast<LoadInst>(L)) { if (int Res = cmpNumbers(LI->isVolatile(), cast<LoadInst>(R)->isVolatile())) return Res; if (int Res = cmpNumbers(LI->getAlignment(), cast<LoadInst>(R)->getAlignment())) return Res; if (int Res = cmpNumbers(LI->getOrdering(), cast<LoadInst>(R)->getOrdering())) return Res; if (int Res = cmpNumbers(LI->getSynchScope(), cast<LoadInst>(R)->getSynchScope())) return Res; return cmpNumbers((uint64_t)LI->getMetadata(LLVMContext::MD_range), (uint64_t)cast<LoadInst>(R)->getMetadata(LLVMContext::MD_range)); } if (const StoreInst *SI = dyn_cast<StoreInst>(L)) { if (int Res = cmpNumbers(SI->isVolatile(), cast<StoreInst>(R)->isVolatile())) return Res; if (int Res = cmpNumbers(SI->getAlignment(), cast<StoreInst>(R)->getAlignment())) return Res; if (int Res = cmpNumbers(SI->getOrdering(), cast<StoreInst>(R)->getOrdering())) return Res; return cmpNumbers(SI->getSynchScope(), cast<StoreInst>(R)->getSynchScope()); } if (const CmpInst *CI = dyn_cast<CmpInst>(L)) return cmpNumbers(CI->getPredicate(), cast<CmpInst>(R)->getPredicate()); if (const CallInst *CI = dyn_cast<CallInst>(L)) { if (int Res = cmpNumbers(CI->getCallingConv(), cast<CallInst>(R)->getCallingConv())) return Res; if (int Res = cmpAttrs(CI->getAttributes(), cast<CallInst>(R)->getAttributes())) return Res; return cmpNumbers( (uint64_t)CI->getMetadata(LLVMContext::MD_range), (uint64_t)cast<CallInst>(R)->getMetadata(LLVMContext::MD_range)); } if (const InvokeInst *CI = dyn_cast<InvokeInst>(L)) { if (int Res = cmpNumbers(CI->getCallingConv(), cast<InvokeInst>(R)->getCallingConv())) return Res; if (int Res = cmpAttrs(CI->getAttributes(), cast<InvokeInst>(R)->getAttributes())) return Res; return cmpNumbers( (uint64_t)CI->getMetadata(LLVMContext::MD_range), (uint64_t)cast<InvokeInst>(R)->getMetadata(LLVMContext::MD_range)); } if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(L)) { ArrayRef<unsigned> LIndices = IVI->getIndices(); ArrayRef<unsigned> RIndices = cast<InsertValueInst>(R)->getIndices(); if (int Res = cmpNumbers(LIndices.size(), RIndices.size())) return Res; for (size_t i = 0, e = LIndices.size(); i != e; ++i) { if (int Res = cmpNumbers(LIndices[i], RIndices[i])) return Res; } } if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(L)) { ArrayRef<unsigned> LIndices = EVI->getIndices(); ArrayRef<unsigned> RIndices = cast<ExtractValueInst>(R)->getIndices(); if (int Res = cmpNumbers(LIndices.size(), RIndices.size())) return Res; for (size_t i = 0, e = LIndices.size(); i != e; ++i) { if (int Res = cmpNumbers(LIndices[i], RIndices[i])) return Res; } } if (const FenceInst *FI = dyn_cast<FenceInst>(L)) { if (int Res = cmpNumbers(FI->getOrdering(), cast<FenceInst>(R)->getOrdering())) return Res; return cmpNumbers(FI->getSynchScope(), cast<FenceInst>(R)->getSynchScope()); } if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(L)) { if (int Res = cmpNumbers(CXI->isVolatile(), cast<AtomicCmpXchgInst>(R)->isVolatile())) return Res; if (int Res = cmpNumbers(CXI->isWeak(), cast<AtomicCmpXchgInst>(R)->isWeak())) return Res; if (int Res = cmpNumbers(CXI->getSuccessOrdering(), cast<AtomicCmpXchgInst>(R)->getSuccessOrdering())) return Res; if (int Res = cmpNumbers(CXI->getFailureOrdering(), cast<AtomicCmpXchgInst>(R)->getFailureOrdering())) return Res; return cmpNumbers(CXI->getSynchScope(), cast<AtomicCmpXchgInst>(R)->getSynchScope()); } if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(L)) { if (int Res = cmpNumbers(RMWI->getOperation(), cast<AtomicRMWInst>(R)->getOperation())) 
return Res; if (int Res = cmpNumbers(RMWI->isVolatile(), cast<AtomicRMWInst>(R)->isVolatile())) return Res; if (int Res = cmpNumbers(RMWI->getOrdering(), cast<AtomicRMWInst>(R)->getOrdering())) return Res; return cmpNumbers(RMWI->getSynchScope(), cast<AtomicRMWInst>(R)->getSynchScope()); } return 0; } // Determine whether two GEP operations perform the same underlying arithmetic. // Read method declaration comments for more details. int FunctionComparator::cmpGEPs(const GEPOperator *GEPL, const GEPOperator *GEPR) { unsigned int ASL = GEPL->getPointerAddressSpace(); unsigned int ASR = GEPR->getPointerAddressSpace(); if (int Res = cmpNumbers(ASL, ASR)) return Res; // When we have target data, we can reduce the GEP down to the value in bytes // added to the address. const DataLayout &DL = FnL->getParent()->getDataLayout(); unsigned BitWidth = DL.getPointerSizeInBits(ASL); APInt OffsetL(BitWidth, 0), OffsetR(BitWidth, 0); if (GEPL->accumulateConstantOffset(DL, OffsetL) && GEPR->accumulateConstantOffset(DL, OffsetR)) return cmpAPInts(OffsetL, OffsetR); if (int Res = cmpNumbers((uint64_t)GEPL->getPointerOperand()->getType(), (uint64_t)GEPR->getPointerOperand()->getType())) return Res; if (int Res = cmpNumbers(GEPL->getNumOperands(), GEPR->getNumOperands())) return Res; for (unsigned i = 0, e = GEPL->getNumOperands(); i != e; ++i) { if (int Res = cmpValues(GEPL->getOperand(i), GEPR->getOperand(i))) return Res; } return 0; } /// Compare two values used by the two functions under pair-wise comparison. If /// this is the first time the values are seen, they're added to the mapping so /// that we will detect mismatches on next use. /// See comments in declaration for more details. int FunctionComparator::cmpValues(const Value *L, const Value *R) { // Catch self-reference case. if (L == FnL) { if (R == FnR) return 0; return -1; } if (R == FnR) { if (L == FnL) return 0; return 1; } const Constant *ConstL = dyn_cast<Constant>(L); const Constant *ConstR = dyn_cast<Constant>(R); if (ConstL && ConstR) { if (L == R) return 0; return cmpConstants(ConstL, ConstR); } if (ConstL) return 1; if (ConstR) return -1; const InlineAsm *InlineAsmL = dyn_cast<InlineAsm>(L); const InlineAsm *InlineAsmR = dyn_cast<InlineAsm>(R); if (InlineAsmL && InlineAsmR) return cmpNumbers((uint64_t)L, (uint64_t)R); if (InlineAsmL) return 1; if (InlineAsmR) return -1; auto LeftSN = sn_mapL.insert(std::make_pair(L, sn_mapL.size())), RightSN = sn_mapR.insert(std::make_pair(R, sn_mapR.size())); return cmpNumbers(LeftSN.first->second, RightSN.first->second); } // Test whether two basic blocks have equivalent behaviour. 
int FunctionComparator::compare(const BasicBlock *BBL, const BasicBlock *BBR) { BasicBlock::const_iterator InstL = BBL->begin(), InstLE = BBL->end(); BasicBlock::const_iterator InstR = BBR->begin(), InstRE = BBR->end(); do { if (int Res = cmpValues(InstL, InstR)) return Res; const GetElementPtrInst *GEPL = dyn_cast<GetElementPtrInst>(InstL); const GetElementPtrInst *GEPR = dyn_cast<GetElementPtrInst>(InstR); if (GEPL && !GEPR) return 1; if (GEPR && !GEPL) return -1; if (GEPL && GEPR) { if (int Res = cmpValues(GEPL->getPointerOperand(), GEPR->getPointerOperand())) return Res; if (int Res = cmpGEPs(GEPL, GEPR)) return Res; } else { if (int Res = cmpOperations(InstL, InstR)) return Res; assert(InstL->getNumOperands() == InstR->getNumOperands()); for (unsigned i = 0, e = InstL->getNumOperands(); i != e; ++i) { Value *OpL = InstL->getOperand(i); Value *OpR = InstR->getOperand(i); if (int Res = cmpValues(OpL, OpR)) return Res; if (int Res = cmpNumbers(OpL->getValueID(), OpR->getValueID())) return Res; // TODO: Already checked in cmpOperation if (int Res = cmpTypes(OpL->getType(), OpR->getType())) return Res; } } ++InstL, ++InstR; } while (InstL != InstLE && InstR != InstRE); if (InstL != InstLE && InstR == InstRE) return 1; if (InstL == InstLE && InstR != InstRE) return -1; return 0; } // Test whether the two functions have equivalent behaviour. int FunctionComparator::compare() { sn_mapL.clear(); sn_mapR.clear(); if (int Res = cmpAttrs(FnL->getAttributes(), FnR->getAttributes())) return Res; if (int Res = cmpNumbers(FnL->hasGC(), FnR->hasGC())) return Res; if (FnL->hasGC()) { if (int Res = cmpNumbers((uint64_t)FnL->getGC(), (uint64_t)FnR->getGC())) return Res; } if (int Res = cmpNumbers(FnL->hasSection(), FnR->hasSection())) return Res; if (FnL->hasSection()) { if (int Res = cmpStrings(FnL->getSection(), FnR->getSection())) return Res; } if (int Res = cmpNumbers(FnL->isVarArg(), FnR->isVarArg())) return Res; // TODO: if it's internal and only used in direct calls, we could handle this // case too. if (int Res = cmpNumbers(FnL->getCallingConv(), FnR->getCallingConv())) return Res; if (int Res = cmpTypes(FnL->getFunctionType(), FnR->getFunctionType())) return Res; assert(FnL->arg_size() == FnR->arg_size() && "Identically typed functions have different numbers of args!"); // Visit the arguments so that they get enumerated in the order they're // passed in. for (Function::const_arg_iterator ArgLI = FnL->arg_begin(), ArgRI = FnR->arg_begin(), ArgLE = FnL->arg_end(); ArgLI != ArgLE; ++ArgLI, ++ArgRI) { if (cmpValues(ArgLI, ArgRI) != 0) llvm_unreachable("Arguments repeat!"); } // We do a CFG-ordered walk since the actual ordering of the blocks in the // linked list is immaterial. Our walk starts at the entry block for both // functions, then takes each block from each terminator in order. As an // artifact, this also means that unreachable blocks are ignored. SmallVector<const BasicBlock *, 8> FnLBBs, FnRBBs; SmallSet<const BasicBlock *, 128> VisitedBBs; // in terms of F1. 
FnLBBs.push_back(&FnL->getEntryBlock()); FnRBBs.push_back(&FnR->getEntryBlock()); VisitedBBs.insert(FnLBBs[0]); while (!FnLBBs.empty()) { const BasicBlock *BBL = FnLBBs.pop_back_val(); const BasicBlock *BBR = FnRBBs.pop_back_val(); if (int Res = cmpValues(BBL, BBR)) return Res; if (int Res = compare(BBL, BBR)) return Res; const TerminatorInst *TermL = BBL->getTerminator(); const TerminatorInst *TermR = BBR->getTerminator(); assert(TermL->getNumSuccessors() == TermR->getNumSuccessors()); for (unsigned i = 0, e = TermL->getNumSuccessors(); i != e; ++i) { if (!VisitedBBs.insert(TermL->getSuccessor(i)).second) continue; FnLBBs.push_back(TermL->getSuccessor(i)); FnRBBs.push_back(TermR->getSuccessor(i)); } } return 0; } namespace { /// MergeFunctions finds functions which will generate identical machine code, /// by considering all pointer types to be equivalent. Once identified, /// MergeFunctions will fold them by replacing a call to one to a call to a /// bitcast of the other. /// class MergeFunctions : public ModulePass { public: static char ID; MergeFunctions() : ModulePass(ID), HasGlobalAliases(false) { initializeMergeFunctionsPass(*PassRegistry::getPassRegistry()); } bool runOnModule(Module &M) override; private: typedef std::set<FunctionNode> FnTreeType; /// A work queue of functions that may have been modified and should be /// analyzed again. std::vector<WeakTrackingVH> Deferred; /// Checks the rules of order relation introduced among functions set. /// Returns true, if sanity check has been passed, and false if failed. bool doSanityCheck(std::vector<WeakTrackingVH> &Worklist); /// Insert a ComparableFunction into the FnTree, or merge it away if it's /// equal to one that's already present. bool insert(Function *NewFunction); /// Remove a Function from the FnTree and queue it up for a second sweep of /// analysis. void remove(Function *F); /// Find the functions that use this Value and remove them from FnTree and /// queue the functions. void removeUsers(Value *V); /// Replace all direct calls of Old with calls of New. Will bitcast New if /// necessary to make types match. void replaceDirectCallers(Function *Old, Function *New); /// Merge two equivalent functions. Upon completion, G may be deleted, or may /// be converted into a thunk. In either case, it should never be visited /// again. void mergeTwoFunctions(Function *F, Function *G); /// Replace G with a thunk or an alias to F. Deletes G. void writeThunkOrAlias(Function *F, Function *G); /// Replace G with a simple tail call to bitcast(F). Also replace direct uses /// of G with bitcast(F). Deletes G. void writeThunk(Function *F, Function *G); /// Replace G with an alias to F. Deletes G. void writeAlias(Function *F, Function *G); /// Replace function F with function G in the function tree. void replaceFunctionInTree(FnTreeType::iterator &IterToF, Function *G); /// The set of all distinct functions. Use the insert() and remove() methods /// to modify it. FnTreeType FnTree; /// Whether or not the target supports global aliases. 
bool HasGlobalAliases; }; } // end anonymous namespace char MergeFunctions::ID = 0; INITIALIZE_PASS(MergeFunctions, "mergefunc", "Merge Functions", false, false) ModulePass *llvm::createMergeFunctionsPass() { return new MergeFunctions(); } bool MergeFunctions::doSanityCheck(std::vector<WeakTrackingVH> &Worklist) { #if 0 // Begin HLSL Change (NumFunctionsForSanityCheck is always zero) if (const unsigned Max = NumFunctionsForSanityCheck) { unsigned TripleNumber = 0; bool Valid = true; dbgs() << "MERGEFUNC-SANITY: Started for first " << Max << " functions.\n"; unsigned i = 0; for (std::vector<WeakTrackingVH>::iterator I = Worklist.begin(), E = Worklist.end(); I != E && i < Max; ++I, ++i) { unsigned j = i; for (std::vector<WeakTrackingVH>::iterator J = I; J != E && j < Max; ++J, ++j) { Function *F1 = cast<Function>(*I); Function *F2 = cast<Function>(*J); int Res1 = FunctionComparator(F1, F2).compare(); int Res2 = FunctionComparator(F2, F1).compare(); // If F1 <= F2, then F2 >= F1, otherwise report failure. if (Res1 != -Res2) { dbgs() << "MERGEFUNC-SANITY: Non-symmetric; triple: " << TripleNumber << "\n"; F1->dump(); F2->dump(); Valid = false; } if (Res1 == 0) continue; unsigned k = j; for (std::vector<WeakTrackingVH>::iterator K = J; K != E && k < Max; ++k, ++K, ++TripleNumber) { if (K == J) continue; Function *F3 = cast<Function>(*K); int Res3 = FunctionComparator(F1, F3).compare(); int Res4 = FunctionComparator(F2, F3).compare(); bool Transitive = true; if (Res1 != 0 && Res1 == Res4) { // F1 > F2, F2 > F3 => F1 > F3 Transitive = Res3 == Res1; } else if (Res3 != 0 && Res3 == -Res4) { // F1 > F3, F3 > F2 => F1 > F2 Transitive = Res3 == Res1; } else if (Res4 != 0 && -Res3 == Res4) { // F2 > F3, F3 > F1 => F2 > F1 Transitive = Res4 == -Res1; } if (!Transitive) { dbgs() << "MERGEFUNC-SANITY: Non-transitive; triple: " << TripleNumber << "\n"; dbgs() << "Res1, Res3, Res4: " << Res1 << ", " << Res3 << ", " << Res4 << "\n"; F1->dump(); F2->dump(); F3->dump(); Valid = false; } } } } dbgs() << "MERGEFUNC-SANITY: " << (Valid ? "Passed." : "Failed.") << "\n"; return Valid; } #endif // End HLSL Change return true; } bool MergeFunctions::runOnModule(Module &M) { bool Changed = false; for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) { if (!I->isDeclaration() && !I->hasAvailableExternallyLinkage()) Deferred.push_back(WeakTrackingVH(I)); } do { std::vector<WeakTrackingVH> Worklist; Deferred.swap(Worklist); DEBUG(doSanityCheck(Worklist)); DEBUG(dbgs() << "size of module: " << M.size() << '\n'); DEBUG(dbgs() << "size of worklist: " << Worklist.size() << '\n'); // Insert only strong functions and merge them. Strong function merging // always deletes one of them. for (std::vector<WeakTrackingVH>::iterator I = Worklist.begin(), E = Worklist.end(); I != E; ++I) { if (!*I) continue; Function *F = cast<Function>(*I); if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage() && !F->mayBeOverridden()) { Changed |= insert(F); } } // Insert only weak functions and merge them. By doing these second we // create thunks to the strong function when possible. When two weak // functions are identical, we create a new strong function with two weak // weak thunks to it which are identical but not mergable. 
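// (Illustrative sketch, hypothetical IR: if @f and @g are identical weak
// definitions of type "void ()", the end result is one private copy of the
// body plus two weak thunks to it, roughly
//   define private void @0() { ... original body ... }
//   define weak void @f() {
//     tail call void @0()
//     ret void
//   }
// and likewise for @g, so either symbol can still be overridden at link
// time.)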
for (std::vector<WeakTrackingVH>::iterator I = Worklist.begin(), E = Worklist.end(); I != E; ++I) { if (!*I) continue; Function *F = cast<Function>(*I); if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage() && F->mayBeOverridden()) { Changed |= insert(F); } } DEBUG(dbgs() << "size of FnTree: " << FnTree.size() << '\n'); } while (!Deferred.empty()); FnTree.clear(); return Changed; } // Replace direct callers of Old with New. void MergeFunctions::replaceDirectCallers(Function *Old, Function *New) { Constant *BitcastNew = ConstantExpr::getBitCast(New, Old->getType()); for (auto UI = Old->use_begin(), UE = Old->use_end(); UI != UE;) { Use *U = &*UI; ++UI; CallSite CS(U->getUser()); if (CS && CS.isCallee(U)) { remove(CS.getInstruction()->getParent()->getParent()); U->set(BitcastNew); } } } // Replace G with an alias to F if possible, or else a thunk to F. Deletes G. void MergeFunctions::writeThunkOrAlias(Function *F, Function *G) { if (HasGlobalAliases && G->hasUnnamedAddr()) { if (G->hasExternalLinkage() || G->hasLocalLinkage() || G->hasWeakLinkage()) { writeAlias(F, G); return; } } writeThunk(F, G); } // Helper for writeThunk, // Selects proper bitcast operation, // but a bit simpler then CastInst::getCastOpcode. static Value *createCast(IRBuilder<false> &Builder, Value *V, Type *DestTy) { Type *SrcTy = V->getType(); if (SrcTy->isStructTy()) { assert(DestTy->isStructTy()); assert(SrcTy->getStructNumElements() == DestTy->getStructNumElements()); Value *Result = UndefValue::get(DestTy); for (unsigned int I = 0, E = SrcTy->getStructNumElements(); I < E; ++I) { Value *Element = createCast( Builder, Builder.CreateExtractValue(V, makeArrayRef(I)), DestTy->getStructElementType(I)); Result = Builder.CreateInsertValue(Result, Element, makeArrayRef(I)); } return Result; } assert(!DestTy->isStructTy()); if (SrcTy->isIntegerTy() && DestTy->isPointerTy()) return Builder.CreateIntToPtr(V, DestTy); else if (SrcTy->isPointerTy() && DestTy->isIntegerTy()) return Builder.CreatePtrToInt(V, DestTy); else return Builder.CreateBitCast(V, DestTy); } // Replace G with a simple tail call to bitcast(F). Also replace direct uses // of G with bitcast(F). Deletes G. void MergeFunctions::writeThunk(Function *F, Function *G) { if (!G->mayBeOverridden()) { // Redirect direct callers of G to F. replaceDirectCallers(G, F); } // If G was internal then we may have replaced all uses of G with F. If so, // stop here and delete G. There's no need for a thunk. if (G->hasLocalLinkage() && G->use_empty()) { G->eraseFromParent(); return; } Function *NewG = Function::Create(G->getFunctionType(), G->getLinkage(), "", G->getParent()); BasicBlock *BB = BasicBlock::Create(F->getContext(), "", NewG); IRBuilder<false> Builder(BB); SmallVector<Value *, 16> Args; unsigned i = 0; FunctionType *FFTy = F->getFunctionType(); for (Function::arg_iterator AI = NewG->arg_begin(), AE = NewG->arg_end(); AI != AE; ++AI) { Args.push_back(createCast(Builder, (Value*)AI, FFTy->getParamType(i))); ++i; } CallInst *CI = Builder.CreateCall(F, Args); CI->setTailCall(); CI->setCallingConv(F->getCallingConv()); if (NewG->getReturnType()->isVoidTy()) { Builder.CreateRetVoid(); } else { Builder.CreateRet(createCast(Builder, CI, NewG->getReturnType())); } NewG->copyAttributesFrom(G); NewG->takeName(G); removeUsers(G); G->replaceAllUsesWith(NewG); G->eraseFromParent(); DEBUG(dbgs() << "writeThunk: " << NewG->getName() << '\n'); ++NumThunksWritten; } // Replace G with an alias to F and delete G. 
void MergeFunctions::writeAlias(Function *F, Function *G) { PointerType *PTy = G->getType(); auto *GA = GlobalAlias::create(PTy, G->getLinkage(), "", F); F->setAlignment(std::max(F->getAlignment(), G->getAlignment())); GA->takeName(G); GA->setVisibility(G->getVisibility()); removeUsers(G); G->replaceAllUsesWith(GA); G->eraseFromParent(); DEBUG(dbgs() << "writeAlias: " << GA->getName() << '\n'); ++NumAliasesWritten; } // Merge two equivalent functions. Upon completion, Function G is deleted. void MergeFunctions::mergeTwoFunctions(Function *F, Function *G) { if (F->mayBeOverridden()) { assert(G->mayBeOverridden()); // Make them both thunks to the same internal function. Function *H = Function::Create(F->getFunctionType(), F->getLinkage(), "", F->getParent()); H->copyAttributesFrom(F); H->takeName(F); removeUsers(F); F->replaceAllUsesWith(H); unsigned MaxAlignment = std::max(G->getAlignment(), H->getAlignment()); if (HasGlobalAliases) { writeAlias(F, G); writeAlias(F, H); } else { writeThunk(F, G); writeThunk(F, H); } F->setAlignment(MaxAlignment); F->setLinkage(GlobalValue::PrivateLinkage); ++NumDoubleWeak; } else { writeThunkOrAlias(F, G); } ++NumFunctionsMerged; } /// Replace function F for function G in the map. void MergeFunctions::replaceFunctionInTree(FnTreeType::iterator &IterToF, Function *G) { Function *F = IterToF->getFunc(); // A total order is already guaranteed otherwise because we process strong // functions before weak functions. assert(((F->mayBeOverridden() && G->mayBeOverridden()) || (!F->mayBeOverridden() && !G->mayBeOverridden())) && "Only change functions if both are strong or both are weak"); (void)F; IterToF->replaceBy(G); } // Insert a ComparableFunction into the FnTree, or merge it away if equal to one // that was already inserted. bool MergeFunctions::insert(Function *NewFunction) { std::pair<FnTreeType::iterator, bool> Result = FnTree.insert(FunctionNode(NewFunction)); if (Result.second) { DEBUG(dbgs() << "Inserting as unique: " << NewFunction->getName() << '\n'); return false; } const FunctionNode &OldF = *Result.first; // Don't merge tiny functions, since it can just end up making the function // larger. // FIXME: Should still merge them if they are unnamed_addr and produce an // alias. if (NewFunction->size() == 1) { if (NewFunction->front().size() <= 2) { DEBUG(dbgs() << NewFunction->getName() << " is to small to bother merging\n"); return false; } } // Impose a total order (by name) on the replacement of functions. This is // important when operating on more than one module independently to prevent // cycles of thunks calling each other when the modules are linked together. // // When one function is weak and the other is strong there is an order imposed // already. We process strong functions before weak functions. if ((OldF.getFunc()->mayBeOverridden() && NewFunction->mayBeOverridden()) || (!OldF.getFunc()->mayBeOverridden() && !NewFunction->mayBeOverridden())) if (OldF.getFunc()->getName() > NewFunction->getName()) { // Swap the two functions. Function *F = OldF.getFunc(); replaceFunctionInTree(Result.first, NewFunction); NewFunction = F; assert(OldF.getFunc() != F && "Must have swapped the functions."); } // Never thunk a strong function to a weak function. assert(!OldF.getFunc()->mayBeOverridden() || NewFunction->mayBeOverridden()); DEBUG(dbgs() << " " << OldF.getFunc()->getName() << " == " << NewFunction->getName() << '\n'); Function *DeleteF = NewFunction; mergeTwoFunctions(OldF.getFunc(), DeleteF); return true; } // Remove a function from FnTree. 
If it was already in FnTree, add // it to Deferred so that we'll look at it in the next round. void MergeFunctions::remove(Function *F) { // We need to make sure we remove F, not a function "equal" to F per the // function equality comparator. FnTreeType::iterator found = FnTree.find(FunctionNode(F)); size_t Erased = 0; if (found != FnTree.end() && found->getFunc() == F) { Erased = 1; FnTree.erase(found); } if (Erased) { DEBUG(dbgs() << "Removed " << F->getName() << " from set and deferred it.\n"); Deferred.emplace_back(F); } } // For each instruction used by the value, remove() the function that contains // the instruction. This should happen right before a call to RAUW. void MergeFunctions::removeUsers(Value *V) { std::vector<Value *> Worklist; Worklist.push_back(V); while (!Worklist.empty()) { Value *V = Worklist.back(); Worklist.pop_back(); for (User *U : V->users()) { if (Instruction *I = dyn_cast<Instruction>(U)) { remove(I->getParent()->getParent()); } else if (isa<GlobalValue>(U)) { // do nothing } else if (Constant *C = dyn_cast<Constant>(U)) { for (User *UU : C->users()) Worklist.push_back(UU); } } } }
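// [Illustrative sketch -- not part of MergeFunctions.cpp] One way client code
// could schedule the pass defined above with the legacy pass manager. The
// module reference M and the helper name are assumptions made for this example.
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/IPO.h"

static void runMergeFunctions(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  // createMergeFunctionsPass() is the factory exposed by this file; identical
  // function bodies are folded into one, and the survivors become thunks or
  // aliases to the kept definition.
  PM.add(llvm::createMergeFunctionsPass());
  PM.run(M); // rewrites M in place
}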
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/IPO/GlobalDCE.cpp
//===-- GlobalDCE.cpp - DCE unreachable internal functions ----------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This transform is designed to eliminate unreachable internal globals from the // program. It uses an aggressive algorithm, searching out globals that are // known to be alive. After it finds all of the globals which are needed, it // deletes whatever is left over. This allows it to delete recursive chunks of // the program which are unreachable. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/IPO.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/Statistic.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/Transforms/Utils/CtorUtils.h" #include "llvm/Transforms/Utils/GlobalStatus.h" #include "llvm/Pass.h" #include <unordered_map> using namespace llvm; #define DEBUG_TYPE "globaldce" STATISTIC(NumAliases , "Number of global aliases removed"); STATISTIC(NumFunctions, "Number of functions removed"); STATISTIC(NumVariables, "Number of global variables removed"); namespace { struct GlobalDCE : public ModulePass { static char ID; // Pass identification, replacement for typeid GlobalDCE() : ModulePass(ID) { initializeGlobalDCEPass(*PassRegistry::getPassRegistry()); } // run - Do the GlobalDCE pass on the specified module, optionally updating // the specified callgraph to reflect the changes. // bool runOnModule(Module &M) override; private: SmallPtrSet<GlobalValue*, 32> AliveGlobals; SmallPtrSet<Constant *, 8> SeenConstants; std::unordered_multimap<Comdat *, GlobalValue *> ComdatMembers; /// GlobalIsNeeded - mark the specific global value as needed, and /// recursively mark anything that it uses as also needed. void GlobalIsNeeded(GlobalValue *GV); void MarkUsedGlobalsAsNeeded(Constant *C); bool RemoveUnusedGlobalValue(GlobalValue &GV); }; } /// Returns true if F contains only a single "ret" instruction. static bool isEmptyFunction(Function *F) { BasicBlock &Entry = F->getEntryBlock(); if (Entry.size() != 1 || !isa<ReturnInst>(Entry.front())) return false; ReturnInst &RI = cast<ReturnInst>(Entry.front()); return RI.getReturnValue() == nullptr; } char GlobalDCE::ID = 0; INITIALIZE_PASS(GlobalDCE, "globaldce", "Dead Global Elimination", false, false) ModulePass *llvm::createGlobalDCEPass() { return new GlobalDCE(); } bool GlobalDCE::runOnModule(Module &M) { bool Changed = false; // Remove empty functions from the global ctors list. Changed |= optimizeGlobalCtorsList(M, isEmptyFunction); // Collect the set of members for each comdat. for (Function &F : M) if (Comdat *C = F.getComdat()) ComdatMembers.insert(std::make_pair(C, &F)); for (GlobalVariable &GV : M.globals()) if (Comdat *C = GV.getComdat()) ComdatMembers.insert(std::make_pair(C, &GV)); for (GlobalAlias &GA : M.aliases()) if (Comdat *C = GA.getComdat()) ComdatMembers.insert(std::make_pair(C, &GA)); // Loop over the module, adding globals which are obviously necessary. 
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) { Changed |= RemoveUnusedGlobalValue(*I); // Functions with external linkage are needed if they have a body if (!I->isDeclaration() && !I->hasAvailableExternallyLinkage()) { if (!I->isDiscardableIfUnused()) GlobalIsNeeded(I); } // HLSL Change Starts - look for instructions that refer to a // variable through a metadata record lookup; currently high-level // does not use these instructions, so the pre-DXIL-gen DCE takes // care of unused resources that might affect signatures. // HLSL Change Ends } for (Module::global_iterator I = M.global_begin(), E = M.global_end(); I != E; ++I) { Changed |= RemoveUnusedGlobalValue(*I); // Externally visible & appending globals are needed, if they have an // initializer. if (!I->isDeclaration() && !I->hasAvailableExternallyLinkage()) { if (!I->isDiscardableIfUnused()) GlobalIsNeeded(I); } } for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end(); I != E; ++I) { Changed |= RemoveUnusedGlobalValue(*I); // Externally visible aliases are needed. if (!I->isDiscardableIfUnused()) { GlobalIsNeeded(I); } } // Now that all globals which are needed are in the AliveGlobals set, we loop // through the program, deleting those which are not alive. // // The first pass is to drop initializers of global variables which are dead. std::vector<GlobalVariable*> DeadGlobalVars; // Keep track of dead globals for (Module::global_iterator I = M.global_begin(), E = M.global_end(); I != E; ++I) if (!AliveGlobals.count(I)) { DeadGlobalVars.push_back(I); // Keep track of dead globals if (I->hasInitializer()) { Constant *Init = I->getInitializer(); I->setInitializer(nullptr); if (isSafeToDestroyConstant(Init)) Init->destroyConstant(); } } // The second pass drops the bodies of functions which are dead... std::vector<Function*> DeadFunctions; for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) if (!AliveGlobals.count(I)) { DeadFunctions.push_back(I); // Keep track of dead globals if (!I->isDeclaration()) I->deleteBody(); } // The third pass drops targets of aliases which are dead... std::vector<GlobalAlias*> DeadAliases; for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end(); I != E; ++I) if (!AliveGlobals.count(I)) { DeadAliases.push_back(I); I->setAliasee(nullptr); } if (!DeadFunctions.empty()) { // Now that all interferences have been dropped, delete the actual objects // themselves. for (unsigned i = 0, e = DeadFunctions.size(); i != e; ++i) { RemoveUnusedGlobalValue(*DeadFunctions[i]); M.CallRemoveGlobalHook(DeadFunctions[i]); // HLSL Change M.getFunctionList().erase(DeadFunctions[i]); } NumFunctions += DeadFunctions.size(); Changed = true; } if (!DeadGlobalVars.empty()) { for (unsigned i = 0, e = DeadGlobalVars.size(); i != e; ++i) { RemoveUnusedGlobalValue(*DeadGlobalVars[i]); M.CallRemoveGlobalHook(DeadGlobalVars[i]); // HLSL Change M.getGlobalList().erase(DeadGlobalVars[i]); } NumVariables += DeadGlobalVars.size(); Changed = true; } // Now delete any dead aliases. if (!DeadAliases.empty()) { for (unsigned i = 0, e = DeadAliases.size(); i != e; ++i) { RemoveUnusedGlobalValue(*DeadAliases[i]); M.getAliasList().erase(DeadAliases[i]); } NumAliases += DeadAliases.size(); Changed = true; } // Make sure that all memory is released AliveGlobals.clear(); SeenConstants.clear(); ComdatMembers.clear(); return Changed; } /// GlobalIsNeeded - the specific global value as needed, and /// recursively mark anything that it uses as also needed. 
void GlobalDCE::GlobalIsNeeded(GlobalValue *G) { // If the global is already in the set, no need to reprocess it. if (!AliveGlobals.insert(G).second) return; if (Comdat *C = G->getComdat()) { for (auto &&CM : make_range(ComdatMembers.equal_range(C))) GlobalIsNeeded(CM.second); } if (GlobalVariable *GV = dyn_cast<GlobalVariable>(G)) { // If this is a global variable, we must make sure to add any global values // referenced by the initializer to the alive set. if (GV->hasInitializer()) MarkUsedGlobalsAsNeeded(GV->getInitializer()); } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(G)) { // The target of a global alias is needed. MarkUsedGlobalsAsNeeded(GA->getAliasee()); } else { // Otherwise this must be a function object. We have to scan the body of // the function looking for constants and global values which are used as // operands. Any operands of these types must be processed to ensure that // any globals used will be marked as needed. Function *F = cast<Function>(G); if (F->hasPrefixData()) MarkUsedGlobalsAsNeeded(F->getPrefixData()); if (F->hasPrologueData()) MarkUsedGlobalsAsNeeded(F->getPrologueData()); if (F->hasPersonalityFn()) MarkUsedGlobalsAsNeeded(F->getPersonalityFn()); for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) for (User::op_iterator U = I->op_begin(), E = I->op_end(); U != E; ++U) if (GlobalValue *GV = dyn_cast<GlobalValue>(*U)) GlobalIsNeeded(GV); else if (Constant *C = dyn_cast<Constant>(*U)) MarkUsedGlobalsAsNeeded(C); } } void GlobalDCE::MarkUsedGlobalsAsNeeded(Constant *C) { if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) return GlobalIsNeeded(GV); // Loop over all of the operands of the constant, adding any globals they // use to the list of needed globals. for (User::op_iterator I = C->op_begin(), E = C->op_end(); I != E; ++I) { // If we've already processed this constant there's no need to do it again. Constant *Op = dyn_cast<Constant>(*I); if (Op && SeenConstants.insert(Op).second) MarkUsedGlobalsAsNeeded(Op); } } // RemoveUnusedGlobalValue - Loop over all of the uses of the specified // GlobalValue, looking for the constant pointer ref that may be pointing to it. // If found, check to see if the constant pointer ref is safe to destroy, and if // so, nuke it. This will reduce the reference count on the global value, which // might make it deader. // bool GlobalDCE::RemoveUnusedGlobalValue(GlobalValue &GV) { if (GV.use_empty()) return false; GV.removeDeadConstantUsers(); return GV.use_empty(); }
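// [Illustrative sketch -- not part of GlobalDCE.cpp] A minimal check of what the
// pass does: build a module whose only function is internal and unreferenced,
// run GlobalDCE over it, and observe that the function disappears. All of the
// names below (module id, function name, helper) are invented for the example.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/IPO.h"

static bool deadInternalFunctionIsRemoved() {
  llvm::LLVMContext Ctx;
  llvm::Module M("globaldce_demo", Ctx);

  // internal void @unused() { ret void } -- no users, so it is never "needed".
  llvm::Function *F = llvm::Function::Create(
      llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx), /*isVarArg=*/false),
      llvm::GlobalValue::InternalLinkage, "unused", &M);
  llvm::IRBuilder<> Builder(llvm::BasicBlock::Create(Ctx, "entry", F));
  Builder.CreateRetVoid();

  llvm::legacy::PassManager PM;
  PM.add(llvm::createGlobalDCEPass());
  PM.run(M);

  return M.getFunction("unused") == nullptr; // expected to be true
}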
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/IPO/IPO.cpp
//===-- IPO.cpp -----------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the common infrastructure (including C bindings) for // libLLVMIPO.a, which implements several transformations over the LLVM // intermediate representation. // //===----------------------------------------------------------------------===// #include "llvm-c/Initialization.h" #include "llvm-c/Transforms/IPO.h" #include "llvm/InitializePasses.h" #include "llvm/IR/LegacyPassManager.h" #include "llvm/Transforms/IPO.h" using namespace llvm; void llvm::initializeIPO(PassRegistry &Registry) { #if 0 // HLSL Change Starts: Disable ArgPromotion initializeArgPromotionPass(Registry); #endif // HLSL Change Ends initializeConstantMergePass(Registry); initializeDAEPass(Registry); initializeDAHPass(Registry); initializeFunctionAttrsPass(Registry); initializeGlobalDCEPass(Registry); initializeGlobalOptPass(Registry); initializeIPCPPass(Registry); initializeAlwaysInlinerPass(Registry); initializeSimpleInlinerPass(Registry); initializeInternalizePassPass(Registry); initializeLoopExtractorPass(Registry); initializeBlockExtractorPassPass(Registry); initializeSingleLoopExtractorPass(Registry); initializeLowerBitSetsPass(Registry); initializeMergeFunctionsPass(Registry); initializePartialInlinerPass(Registry); initializePruneEHPass(Registry); initializeStripDeadPrototypesPassPass(Registry); initializeStripSymbolsPass(Registry); initializeStripDebugDeclarePass(Registry); initializeStripDeadDebugInfoPass(Registry); initializeStripNonDebugSymbolsPass(Registry); initializeBarrierNoopPass(Registry); initializeEliminateAvailableExternallyPass(Registry); } void LLVMInitializeIPO(LLVMPassRegistryRef R) { initializeIPO(*unwrap(R)); } #if 0 // HLSL Change Starts: Disable ArgPromotion void LLVMAddArgumentPromotionPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createArgumentPromotionPass()); } #endif // HLSL Change Ends void LLVMAddConstantMergePass(LLVMPassManagerRef PM) { unwrap(PM)->add(createConstantMergePass()); } void LLVMAddDeadArgEliminationPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createDeadArgEliminationPass()); } void LLVMAddFunctionAttrsPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createFunctionAttrsPass()); } void LLVMAddFunctionInliningPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createFunctionInliningPass()); } void LLVMAddAlwaysInlinerPass(LLVMPassManagerRef PM) { unwrap(PM)->add(llvm::createAlwaysInlinerPass()); } void LLVMAddGlobalDCEPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createGlobalDCEPass()); } void LLVMAddGlobalOptimizerPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createGlobalOptimizerPass()); } void LLVMAddIPConstantPropagationPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createIPConstantPropagationPass()); } void LLVMAddPruneEHPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createPruneEHPass()); } void LLVMAddIPSCCPPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createIPSCCPPass()); } void LLVMAddInternalizePass(LLVMPassManagerRef PM, unsigned AllButMain) { std::vector<const char *> Export; if (AllButMain) Export.push_back("main"); unwrap(PM)->add(createInternalizePass(Export)); } void LLVMAddStripDeadPrototypesPass(LLVMPassManagerRef PM) { unwrap(PM)->add(createStripDeadPrototypesPass()); } void LLVMAddStripSymbolsPass(LLVMPassManagerRef PM) { 
unwrap(PM)->add(createStripSymbolsPass()); }
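// [Illustrative sketch -- not part of IPO.cpp] Driving a few of the C-binding
// entry points defined above from client code. Assumes a valid LLVMModuleRef
// named Mod produced elsewhere; the helper name is hypothetical.
#include "llvm-c/Core.h"
#include "llvm-c/Transforms/IPO.h"

static void runSomeIPOPasses(LLVMModuleRef Mod) {
  LLVMPassManagerRef PM = LLVMCreatePassManager();
  LLVMAddGlobalDCEPass(PM);           // drop unreferenced internal globals
  LLVMAddConstantMergePass(PM);       // merge duplicate module-level constants
  LLVMAddStripDeadPrototypesPass(PM); // remove unused declarations
  LLVMRunPassManager(PM, Mod);        // run the scheduled passes over Mod
  LLVMDisposePassManager(PM);
}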
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/IPO/ArgumentPromotion.cpp
//===-- ArgumentPromotion.cpp - Promote by-reference arguments ------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass promotes "by reference" arguments to be "by value" arguments. In // practice, this means looking for internal functions that have pointer // arguments. If it can prove, through the use of alias analysis, that an // argument is *only* loaded, then it can pass the value into the function // instead of the address of the value. This can cause recursive simplification // of code and lead to the elimination of allocas (especially in C++ template // code like the STL). // // This pass also handles aggregate arguments that are passed into a function, // scalarizing them if the elements of the aggregate are only loaded. Note that // by default it refuses to scalarize aggregates which would require passing in // more than three operands to the function, because passing thousands of // operands for a large array or structure is unprofitable! This limit can be // configured or disabled, however. // // Note that this transformation could also be done for arguments that are only // stored to (returning the value instead), but does not currently. This case // would be best handled when and if LLVM begins supporting multiple return // values from functions. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/IPO.h" #include "llvm/ADT/DepthFirstIterator.h" #include "llvm/ADT/Statistic.h" #include "llvm/ADT/StringExtras.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/CallGraph.h" #include "llvm/Analysis/CallGraphSCCPass.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/CFG.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include <set> using namespace llvm; #define DEBUG_TYPE "argpromotion" STATISTIC(NumArgumentsPromoted , "Number of pointer arguments promoted"); STATISTIC(NumAggregatesPromoted, "Number of aggregate arguments promoted"); STATISTIC(NumByValArgsPromoted , "Number of byval arguments promoted"); STATISTIC(NumArgumentsDead , "Number of dead pointer args eliminated"); namespace { /// ArgPromotion - The 'by reference' to 'by value' argument promotion pass. 
/// struct ArgPromotion : public CallGraphSCCPass { void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<AliasAnalysis>(); CallGraphSCCPass::getAnalysisUsage(AU); } bool runOnSCC(CallGraphSCC &SCC) override; static char ID; // Pass identification, replacement for typeid explicit ArgPromotion(unsigned maxElements = 3) : CallGraphSCCPass(ID), maxElements(maxElements) { initializeArgPromotionPass(*PassRegistry::getPassRegistry()); } /// A vector used to hold the indices of a single GEP instruction typedef std::vector<uint64_t> IndicesVector; private: bool isDenselyPacked(Type *type, const DataLayout &DL); bool canPaddingBeAccessed(Argument *Arg); CallGraphNode *PromoteArguments(CallGraphNode *CGN); bool isSafeToPromoteArgument(Argument *Arg, bool isByVal) const; CallGraphNode *DoPromotion(Function *F, SmallPtrSetImpl<Argument*> &ArgsToPromote, SmallPtrSetImpl<Argument*> &ByValArgsToTransform); using llvm::Pass::doInitialization; bool doInitialization(CallGraph &CG) override; /// The maximum number of elements to expand, or 0 for unlimited. unsigned maxElements; DenseMap<const Function *, DISubprogram *> FunctionDIs; }; } char ArgPromotion::ID = 0; INITIALIZE_PASS_BEGIN(ArgPromotion, "argpromotion", "Promote 'by reference' arguments to scalars", false, false) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass) INITIALIZE_PASS_END(ArgPromotion, "argpromotion", "Promote 'by reference' arguments to scalars", false, false) Pass *llvm::createArgumentPromotionPass(unsigned maxElements) { return new ArgPromotion(maxElements); } bool ArgPromotion::runOnSCC(CallGraphSCC &SCC) { bool Changed = false, LocalChange; do { // Iterate until we stop promoting from this SCC. LocalChange = false; // Attempt to promote arguments from all functions in this SCC. for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) { if (CallGraphNode *CGN = PromoteArguments(*I)) { LocalChange = true; SCC.ReplaceNode(*I, CGN); } } Changed |= LocalChange; // Remember that we changed something. } while (LocalChange); return Changed; } /// \brief Checks if a type could have padding bytes. bool ArgPromotion::isDenselyPacked(Type *type, const DataLayout &DL) { // There is no size information, so be conservative. if (!type->isSized()) return false; // If the alloc size is not equal to the storage size, then there are padding // bytes. For x86_fp80 on x86-64, size: 80 alloc size: 128. if (DL.getTypeSizeInBits(type) != DL.getTypeAllocSizeInBits(type)) return false; if (!isa<CompositeType>(type)) return true; // For homogenous sequential types, check for padding within members. if (SequentialType *seqTy = dyn_cast<SequentialType>(type)) return isa<PointerType>(seqTy) || isDenselyPacked(seqTy->getElementType(), DL); // Check for padding within and between elements of a struct. StructType *StructTy = cast<StructType>(type); const StructLayout *Layout = DL.getStructLayout(StructTy); uint64_t StartPos = 0; for (unsigned i = 0, E = StructTy->getNumElements(); i < E; ++i) { Type *ElTy = StructTy->getElementType(i); if (!isDenselyPacked(ElTy, DL)) return false; if (StartPos != Layout->getElementOffsetInBits(i)) return false; StartPos += DL.getTypeAllocSizeInBits(ElTy); } return true; } /// \brief Checks if the padding bytes of an argument could be accessed. bool ArgPromotion::canPaddingBeAccessed(Argument *arg) { assert(arg->hasByValAttr()); // Track all the pointers to the argument to make sure they are not captured. 
SmallPtrSet<Value *, 16> PtrValues; PtrValues.insert(arg); // Track all of the stores. SmallVector<StoreInst *, 16> Stores; // Scan through the uses recursively to make sure the pointer is always used // sanely. SmallVector<Value *, 16> WorkList; WorkList.insert(WorkList.end(), arg->user_begin(), arg->user_end()); while (!WorkList.empty()) { Value *V = WorkList.back(); WorkList.pop_back(); if (isa<GetElementPtrInst>(V) || isa<PHINode>(V)) { if (PtrValues.insert(V).second) WorkList.insert(WorkList.end(), V->user_begin(), V->user_end()); } else if (StoreInst *Store = dyn_cast<StoreInst>(V)) { Stores.push_back(Store); } else if (!isa<LoadInst>(V)) { return true; } } // Check to make sure the pointers aren't captured for (StoreInst *Store : Stores) if (PtrValues.count(Store->getValueOperand())) return true; return false; } /// PromoteArguments - This method checks the specified function to see if there /// are any promotable arguments and if it is safe to promote the function (for /// example, all callers are direct). If safe to promote some arguments, it /// calls the DoPromotion method. /// CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) { Function *F = CGN->getFunction(); // Make sure that it is local to this module. if (!F || !F->hasLocalLinkage()) return nullptr; // Don't promote arguments for variadic functions. Adding, removing, or // changing non-pack parameters can change the classification of pack // parameters. Frontends encode that classification at the call site in the // IR, while in the callee the classification is determined dynamically based // on the number of registers consumed so far. if (F->isVarArg()) return nullptr; // First check: see if there are any pointer arguments! If not, quick exit. SmallVector<Argument*, 16> PointerArgs; for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I) if (I->getType()->isPointerTy()) PointerArgs.push_back(I); if (PointerArgs.empty()) return nullptr; // Second check: make sure that all callers are direct callers. We can't // transform functions that have indirect callers. Also see if the function // is self-recursive. bool isSelfRecursive = false; for (Use &U : F->uses()) { CallSite CS(U.getUser()); // Must be a direct call. if (CS.getInstruction() == nullptr || !CS.isCallee(&U)) return nullptr; if (CS.getInstruction()->getParent()->getParent() == F) isSelfRecursive = true; } const DataLayout &DL = F->getParent()->getDataLayout(); // Check to see which arguments are promotable. If an argument is promotable, // add it to ArgsToPromote. SmallPtrSet<Argument*, 8> ArgsToPromote; SmallPtrSet<Argument*, 8> ByValArgsToTransform; for (unsigned i = 0, e = PointerArgs.size(); i != e; ++i) { Argument *PtrArg = PointerArgs[i]; Type *AgTy = cast<PointerType>(PtrArg->getType())->getElementType(); // Replace sret attribute with noalias. This reduces register pressure by // avoiding a register copy. 
if (PtrArg->hasStructRetAttr()) { unsigned ArgNo = PtrArg->getArgNo(); F->setAttributes( F->getAttributes() .removeAttribute(F->getContext(), ArgNo + 1, Attribute::StructRet) .addAttribute(F->getContext(), ArgNo + 1, Attribute::NoAlias)); for (Use &U : F->uses()) { CallSite CS(U.getUser()); CS.setAttributes( CS.getAttributes() .removeAttribute(F->getContext(), ArgNo + 1, Attribute::StructRet) .addAttribute(F->getContext(), ArgNo + 1, Attribute::NoAlias)); } } // If this is a byval argument, and if the aggregate type is small, just // pass the elements, which is always safe, if the passed value is densely // packed or if we can prove the padding bytes are never accessed. This does // not apply to inalloca. bool isSafeToPromote = PtrArg->hasByValAttr() && (isDenselyPacked(AgTy, DL) || !canPaddingBeAccessed(PtrArg)); if (isSafeToPromote) { if (StructType *STy = dyn_cast<StructType>(AgTy)) { if (maxElements > 0 && STy->getNumElements() > maxElements) { DEBUG(dbgs() << "argpromotion disable promoting argument '" << PtrArg->getName() << "' because it would require adding more" << " than " << maxElements << " arguments to the function.\n"); continue; } // If all the elements are single-value types, we can promote it. bool AllSimple = true; for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { if (!STy->getElementType(i)->isSingleValueType()) { AllSimple = false; break; } } // Safe to transform, don't even bother trying to "promote" it. // Passing the elements as a scalar will allow scalarrepl to hack on // the new alloca we introduce. if (AllSimple) { ByValArgsToTransform.insert(PtrArg); continue; } } } // If the argument is a recursive type and we're in a recursive // function, we could end up infinitely peeling the function argument. if (isSelfRecursive) { if (StructType *STy = dyn_cast<StructType>(AgTy)) { bool RecursiveType = false; for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { if (STy->getElementType(i) == PtrArg->getType()) { RecursiveType = true; break; } } if (RecursiveType) continue; } } // Otherwise, see if we can promote the pointer to its value. if (isSafeToPromoteArgument(PtrArg, PtrArg->hasByValOrInAllocaAttr())) ArgsToPromote.insert(PtrArg); } // No promotable pointer arguments. if (ArgsToPromote.empty() && ByValArgsToTransform.empty()) return nullptr; return DoPromotion(F, ArgsToPromote, ByValArgsToTransform); } /// AllCallersPassInValidPointerForArgument - Return true if we can prove that /// all callees pass in a valid pointer for the specified function argument. static bool AllCallersPassInValidPointerForArgument(Argument *Arg) { Function *Callee = Arg->getParent(); const DataLayout &DL = Callee->getParent()->getDataLayout(); unsigned ArgNo = Arg->getArgNo(); // Look at all call sites of the function. At this pointer we know we only // have direct callees. for (User *U : Callee->users()) { CallSite CS(U); assert(CS && "Should only have direct calls!"); if (!isDereferenceablePointer(CS.getArgument(ArgNo), DL)) return false; } return true; } /// Returns true if Prefix is a prefix of longer. That means, Longer has a size /// that is greater than or equal to the size of prefix, and each of the /// elements in Prefix is the same as the corresponding elements in Longer. /// /// This means it also returns true when Prefix and Longer are equal! 
static bool IsPrefix(const ArgPromotion::IndicesVector &Prefix, const ArgPromotion::IndicesVector &Longer) { if (Prefix.size() > Longer.size()) return false; return std::equal(Prefix.begin(), Prefix.end(), Longer.begin()); } /// Checks if Indices, or a prefix of Indices, is in Set. static bool PrefixIn(const ArgPromotion::IndicesVector &Indices, std::set<ArgPromotion::IndicesVector> &Set) { std::set<ArgPromotion::IndicesVector>::iterator Low; Low = Set.upper_bound(Indices); if (Low != Set.begin()) Low--; // Low is now the last element smaller than or equal to Indices. This means // it points to a prefix of Indices (possibly Indices itself), if such // prefix exists. // // This load is safe if any prefix of its operands is safe to load. return Low != Set.end() && IsPrefix(*Low, Indices); } /// Mark the given indices (ToMark) as safe in the given set of indices /// (Safe). Marking safe usually means adding ToMark to Safe. However, if there /// is already a prefix of ToMark in Safe, ToMark is implicitly marked safe /// already. Furthermore, any indices that ToMark is itself a prefix of are /// removed from Safe (since they are implicitly safe because of ToMark now). static void MarkIndicesSafe(const ArgPromotion::IndicesVector &ToMark, std::set<ArgPromotion::IndicesVector> &Safe) { std::set<ArgPromotion::IndicesVector>::iterator Low; Low = Safe.upper_bound(ToMark); // Guard against the case where Safe is empty if (Low != Safe.begin()) Low--; // Low is now the last element smaller than or equal to ToMark. This // means it points to a prefix of ToMark (possibly ToMark itself), if // such a prefix exists. if (Low != Safe.end()) { if (IsPrefix(*Low, ToMark)) // If there is already a prefix of these indices (or exactly these // indices) marked as safe, don't bother adding these indices return; // Increment Low, so we can use it as an "insert before" hint ++Low; } // Insert Low = Safe.insert(Low, ToMark); ++Low; // If ToMark is a prefix of longer index list(s), remove those std::set<ArgPromotion::IndicesVector>::iterator End = Safe.end(); while (Low != End && IsPrefix(ToMark, *Low)) { std::set<ArgPromotion::IndicesVector>::iterator Remove = Low; ++Low; Safe.erase(Remove); } } /// isSafeToPromoteArgument - As you might guess from the name of this method, /// it checks to see if it is both safe and useful to promote the argument. /// This method limits promotion of aggregates to only promote up to three /// elements of the aggregate in order to avoid exploding the number of /// arguments passed in. bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg, bool isByValOrInAlloca) const { typedef std::set<IndicesVector> GEPIndicesSet; // Quick exit for unused arguments if (Arg->use_empty()) return true; // We can only promote this argument if all of the uses are loads, or are GEP // instructions (with constant indices) that are subsequently loaded. // // Promoting the argument causes it to be loaded in the caller // unconditionally. This is only safe if we can prove that either the load // would have happened in the callee anyway (i.e., there is a load in the entry // block) or the pointer passed in at every call site is guaranteed to be // valid. // In the former case, invalid loads can happen, but would have happened // anyway; in the latter case, invalid loads won't happen. This prevents us // from introducing an invalid load that wouldn't have happened in the // original code. 
// // This set will contain all sets of indices that are loaded in the entry // block, and thus are safe to unconditionally load in the caller. // // This optimization is also safe for InAlloca parameters, because it verifies // that the address isn't captured. GEPIndicesSet SafeToUnconditionallyLoad; // This set contains all the sets of indices that we are planning to promote. // This makes it possible to limit the number of arguments added. GEPIndicesSet ToPromote; // If the pointer is always valid, any load with first index 0 is valid. if (isByValOrInAlloca || AllCallersPassInValidPointerForArgument(Arg)) SafeToUnconditionallyLoad.insert(IndicesVector(1, 0)); // First, iterate the entry block and mark loads of (geps of) arguments as // safe. BasicBlock *EntryBlock = Arg->getParent()->begin(); // Declare this here so we can reuse it IndicesVector Indices; for (BasicBlock::iterator I = EntryBlock->begin(), E = EntryBlock->end(); I != E; ++I) if (LoadInst *LI = dyn_cast<LoadInst>(I)) { Value *V = LI->getPointerOperand(); if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) { V = GEP->getPointerOperand(); if (V == Arg) { // This load actually loads (part of) Arg? Check the indices then. Indices.reserve(GEP->getNumIndices()); for (User::op_iterator II = GEP->idx_begin(), IE = GEP->idx_end(); II != IE; ++II) if (ConstantInt *CI = dyn_cast<ConstantInt>(*II)) Indices.push_back(CI->getSExtValue()); else // We found a non-constant GEP index for this argument? Bail out // right away, can't promote this argument at all. return false; // Indices checked out, mark them as safe MarkIndicesSafe(Indices, SafeToUnconditionallyLoad); Indices.clear(); } } else if (V == Arg) { // Direct loads are equivalent to a GEP with a single 0 index. MarkIndicesSafe(IndicesVector(1, 0), SafeToUnconditionallyLoad); } } // Now, iterate all uses of the argument to see if there are any uses that are // not (GEP+)loads, or any (GEP+)loads that are not safe to promote. SmallVector<LoadInst*, 16> Loads; IndicesVector Operands; for (Use &U : Arg->uses()) { User *UR = U.getUser(); Operands.clear(); if (LoadInst *LI = dyn_cast<LoadInst>(UR)) { // Don't hack volatile/atomic loads if (!LI->isSimple()) return false; Loads.push_back(LI); // Direct loads are equivalent to a GEP with a zero index and then a load. Operands.push_back(0); } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UR)) { if (GEP->use_empty()) { // Dead GEP's cause trouble later. Just remove them if we run into // them. getAnalysis<AliasAnalysis>().deleteValue(GEP); GEP->eraseFromParent(); // TODO: This runs the above loop over and over again for dead GEPs // Couldn't we just do increment the UI iterator earlier and erase the // use? return isSafeToPromoteArgument(Arg, isByValOrInAlloca); } // Ensure that all of the indices are constants. for (User::op_iterator i = GEP->idx_begin(), e = GEP->idx_end(); i != e; ++i) if (ConstantInt *C = dyn_cast<ConstantInt>(*i)) Operands.push_back(C->getSExtValue()); else return false; // Not a constant operand GEP! // Ensure that the only users of the GEP are load instructions. for (User *GEPU : GEP->users()) if (LoadInst *LI = dyn_cast<LoadInst>(GEPU)) { // Don't hack volatile/atomic loads if (!LI->isSimple()) return false; Loads.push_back(LI); } else { // Other uses than load? return false; } } else { return false; // Not a load or a GEP. } // Now, see if it is safe to promote this load / loads of this GEP. Loading // is safe if Operands, or a prefix of Operands, is marked as safe. 
if (!PrefixIn(Operands, SafeToUnconditionallyLoad)) return false; // See if we are already promoting a load with these indices. If not, check // to make sure that we aren't promoting too many elements. If so, nothing // to do. if (ToPromote.find(Operands) == ToPromote.end()) { if (maxElements > 0 && ToPromote.size() == maxElements) { DEBUG(dbgs() << "argpromotion not promoting argument '" << Arg->getName() << "' because it would require adding more " << "than " << maxElements << " arguments to the function.\n"); // We limit aggregate promotion to only promoting up to a fixed number // of elements of the aggregate. return false; } ToPromote.insert(std::move(Operands)); } } if (Loads.empty()) return true; // No users, this is a dead argument. // Okay, now we know that the argument is only used by load instructions and // it is safe to unconditionally perform all of them. Use alias analysis to // check to see if the pointer is guaranteed to not be modified from entry of // the function to each of the load instructions. // Because there could be several/many load instructions, remember which // blocks we know to be transparent to the load. SmallPtrSet<BasicBlock*, 16> TranspBlocks; AliasAnalysis &AA = getAnalysis<AliasAnalysis>(); for (unsigned i = 0, e = Loads.size(); i != e; ++i) { // Check to see if the load is invalidated from the start of the block to // the load itself. LoadInst *Load = Loads[i]; BasicBlock *BB = Load->getParent(); MemoryLocation Loc = MemoryLocation::get(Load); if (AA.canInstructionRangeModRef(BB->front(), *Load, Loc, AliasAnalysis::Mod)) return false; // Pointer is invalidated! // Now check every path from the entry block to the load for transparency. // To do this, we perform a depth first search on the inverse CFG from the // loading block. for (BasicBlock *P : predecessors(BB)) { for (BasicBlock *TranspBB : inverse_depth_first_ext(P, TranspBlocks)) if (AA.canBasicBlockModify(*TranspBB, Loc)) return false; } } // If the path from the entry of the function to each load is free of // instructions that potentially invalidate the load, we can make the // transformation! return true; } /// DoPromotion - This method actually performs the promotion of the specified /// arguments, and returns the new function. At this point, we know that it's /// safe to do so. CallGraphNode *ArgPromotion::DoPromotion(Function *F, SmallPtrSetImpl<Argument*> &ArgsToPromote, SmallPtrSetImpl<Argument*> &ByValArgsToTransform) { // Start by computing a new prototype for the function, which is the same as // the old function, but has modified arguments. FunctionType *FTy = F->getFunctionType(); std::vector<Type*> Params; typedef std::set<std::pair<Type *, IndicesVector>> ScalarizeTable; // ScalarizedElements - If we are promoting a pointer that has elements // accessed out of it, keep track of which elements are accessed so that we // can add one argument for each. // // Arguments that are directly loaded will have a zero element value here, to // handle cases where there are both a direct load and GEP accesses. // std::map<Argument*, ScalarizeTable> ScalarizedElements; // OriginalLoads - Keep track of a representative load instruction from the // original function so that we can tell the alias analysis implementation // what the new GEP/Load instructions we are inserting look like. // We need to keep the original loads for each argument and the elements // of the argument that are accessed. 
std::map<std::pair<Argument*, IndicesVector>, LoadInst*> OriginalLoads; // Attribute - Keep track of the parameter attributes for the arguments // that we are *not* promoting. For the ones that we do promote, the parameter // attributes are lost SmallVector<AttributeSet, 8> AttributesVec; const AttributeSet &PAL = F->getAttributes(); // Add any return attributes. if (PAL.hasAttributes(AttributeSet::ReturnIndex)) AttributesVec.push_back(AttributeSet::get(F->getContext(), PAL.getRetAttributes())); // First, determine the new argument list unsigned ArgIndex = 1; for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I, ++ArgIndex) { if (ByValArgsToTransform.count(I)) { // Simple byval argument? Just add all the struct element types. Type *AgTy = cast<PointerType>(I->getType())->getElementType(); StructType *STy = cast<StructType>(AgTy); Params.insert(Params.end(), STy->element_begin(), STy->element_end()); ++NumByValArgsPromoted; } else if (!ArgsToPromote.count(I)) { // Unchanged argument Params.push_back(I->getType()); AttributeSet attrs = PAL.getParamAttributes(ArgIndex); if (attrs.hasAttributes(ArgIndex)) { AttrBuilder B(attrs, ArgIndex); AttributesVec. push_back(AttributeSet::get(F->getContext(), Params.size(), B)); } } else if (I->use_empty()) { // Dead argument (which are always marked as promotable) ++NumArgumentsDead; } else { // Okay, this is being promoted. This means that the only uses are loads // or GEPs which are only used by loads // In this table, we will track which indices are loaded from the argument // (where direct loads are tracked as no indices). ScalarizeTable &ArgIndices = ScalarizedElements[I]; for (User *U : I->users()) { Instruction *UI = cast<Instruction>(U); Type *SrcTy; if (LoadInst *L = dyn_cast<LoadInst>(UI)) SrcTy = L->getType(); else SrcTy = cast<GetElementPtrInst>(UI)->getSourceElementType(); IndicesVector Indices; Indices.reserve(UI->getNumOperands() - 1); // Since loads will only have a single operand, and GEPs only a single // non-index operand, this will record direct loads without any indices, // and gep+loads with the GEP indices. for (User::op_iterator II = UI->op_begin() + 1, IE = UI->op_end(); II != IE; ++II) Indices.push_back(cast<ConstantInt>(*II)->getSExtValue()); // GEPs with a single 0 index can be merged with direct loads if (Indices.size() == 1 && Indices.front() == 0) Indices.clear(); ArgIndices.insert(std::make_pair(SrcTy, Indices)); LoadInst *OrigLoad; if (LoadInst *L = dyn_cast<LoadInst>(UI)) OrigLoad = L; else // Take any load, we will use it only to update Alias Analysis OrigLoad = cast<LoadInst>(UI->user_back()); OriginalLoads[std::make_pair(I, Indices)] = OrigLoad; } // Add a parameter to the function for each element passed in. for (ScalarizeTable::iterator SI = ArgIndices.begin(), E = ArgIndices.end(); SI != E; ++SI) { // not allowed to dereference ->begin() if size() is 0 Params.push_back(GetElementPtrInst::getIndexedType( cast<PointerType>(I->getType()->getScalarType())->getElementType(), SI->second)); assert(Params.back()); } if (ArgIndices.size() == 1 && ArgIndices.begin()->second.empty()) ++NumArgumentsPromoted; else ++NumAggregatesPromoted; } } // Add any function attributes. if (PAL.hasAttributes(AttributeSet::FunctionIndex)) AttributesVec.push_back(AttributeSet::get(FTy->getContext(), PAL.getFnAttributes())); Type *RetTy = FTy->getReturnType(); // Construct the new function type using the new arguments. 
FunctionType *NFTy = FunctionType::get(RetTy, Params, FTy->isVarArg()); // Create the new function body and insert it into the module. Function *NF = Function::Create(NFTy, F->getLinkage(), F->getName()); NF->copyAttributesFrom(F); // Patch the pointer to LLVM function in debug info descriptor. auto DI = FunctionDIs.find(F); if (DI != FunctionDIs.end()) { DISubprogram *SP = DI->second; SP->replaceFunction(NF); // Ensure the map is updated so it can be reused on subsequent argument // promotions of the same function. FunctionDIs.erase(DI); FunctionDIs[NF] = SP; } DEBUG(dbgs() << "ARG PROMOTION: Promoting to:" << *NF << "\n" << "From: " << *F); // Recompute the parameter attributes list based on the new arguments for // the function. NF->setAttributes(AttributeSet::get(F->getContext(), AttributesVec)); AttributesVec.clear(); F->getParent()->getFunctionList().insert(F, NF); NF->takeName(F); // Get the alias analysis information that we need to update to reflect our // changes. AliasAnalysis &AA = getAnalysis<AliasAnalysis>(); // Get the callgraph information that we need to update to reflect our // changes. CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph(); // Get a new callgraph node for NF. CallGraphNode *NF_CGN = CG.getOrInsertFunction(NF); // Loop over all of the callers of the function, transforming the call sites // to pass in the loaded pointers. // SmallVector<Value*, 16> Args; while (!F->use_empty()) { CallSite CS(F->user_back()); assert(CS.getCalledFunction() == F); Instruction *Call = CS.getInstruction(); const AttributeSet &CallPAL = CS.getAttributes(); // Add any return attributes. if (CallPAL.hasAttributes(AttributeSet::ReturnIndex)) AttributesVec.push_back(AttributeSet::get(F->getContext(), CallPAL.getRetAttributes())); // Loop over the operands, inserting GEP and loads in the caller as // appropriate. CallSite::arg_iterator AI = CS.arg_begin(); ArgIndex = 1; for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I, ++AI, ++ArgIndex) if (!ArgsToPromote.count(I) && !ByValArgsToTransform.count(I)) { Args.push_back(*AI); // Unmodified argument if (CallPAL.hasAttributes(ArgIndex)) { AttrBuilder B(CallPAL, ArgIndex); AttributesVec. push_back(AttributeSet::get(F->getContext(), Args.size(), B)); } } else if (ByValArgsToTransform.count(I)) { // Emit a GEP and load for each element of the struct. Type *AgTy = cast<PointerType>(I->getType())->getElementType(); StructType *STy = cast<StructType>(AgTy); Value *Idxs[2] = { ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), nullptr }; for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i); Value *Idx = GetElementPtrInst::Create( STy, *AI, Idxs, (*AI)->getName() + "." + Twine(i), Call); // TODO: Tell AA about the new values? Args.push_back(new LoadInst(Idx, Idx->getName()+".val", Call)); } } else if (!I->use_empty()) { // Non-dead argument: insert GEPs and loads as appropriate. ScalarizeTable &ArgIndices = ScalarizedElements[I]; // Store the Value* version of the indices in here, but declare it now // for reuse. 
std::vector<Value*> Ops; for (ScalarizeTable::iterator SI = ArgIndices.begin(), E = ArgIndices.end(); SI != E; ++SI) { Value *V = *AI; LoadInst *OrigLoad = OriginalLoads[std::make_pair(I, SI->second)]; if (!SI->second.empty()) { Ops.reserve(SI->second.size()); Type *ElTy = V->getType(); for (IndicesVector::const_iterator II = SI->second.begin(), IE = SI->second.end(); II != IE; ++II) { // Use i32 to index structs, and i64 for others (pointers/arrays). // This satisfies GEP constraints. Type *IdxTy = (ElTy->isStructTy() ? Type::getInt32Ty(F->getContext()) : Type::getInt64Ty(F->getContext())); Ops.push_back(ConstantInt::get(IdxTy, *II)); // Keep track of the type we're currently indexing. ElTy = cast<CompositeType>(ElTy)->getTypeAtIndex(*II); } // And create a GEP to extract those indices. V = GetElementPtrInst::Create(SI->first, V, Ops, V->getName() + ".idx", Call); Ops.clear(); } // Since we're replacing a load make sure we take the alignment // of the previous load. LoadInst *newLoad = new LoadInst(V, V->getName()+".val", Call); newLoad->setAlignment(OrigLoad->getAlignment()); // Transfer the AA info too. AAMDNodes AAInfo; OrigLoad->getAAMetadata(AAInfo); newLoad->setAAMetadata(AAInfo); Args.push_back(newLoad); } } // Push any varargs arguments on the list. for (; AI != CS.arg_end(); ++AI, ++ArgIndex) { Args.push_back(*AI); if (CallPAL.hasAttributes(ArgIndex)) { AttrBuilder B(CallPAL, ArgIndex); AttributesVec. push_back(AttributeSet::get(F->getContext(), Args.size(), B)); } } // Add any function attributes. if (CallPAL.hasAttributes(AttributeSet::FunctionIndex)) AttributesVec.push_back(AttributeSet::get(Call->getContext(), CallPAL.getFnAttributes())); Instruction *New; if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) { New = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(), Args, "", Call); cast<InvokeInst>(New)->setCallingConv(CS.getCallingConv()); cast<InvokeInst>(New)->setAttributes(AttributeSet::get(II->getContext(), AttributesVec)); } else { New = CallInst::Create(NF, Args, "", Call); cast<CallInst>(New)->setCallingConv(CS.getCallingConv()); cast<CallInst>(New)->setAttributes(AttributeSet::get(New->getContext(), AttributesVec)); if (cast<CallInst>(Call)->isTailCall()) cast<CallInst>(New)->setTailCall(); } New->setDebugLoc(Call->getDebugLoc()); Args.clear(); AttributesVec.clear(); // Update the alias analysis implementation to know that we are replacing // the old call with a new one. AA.replaceWithNewValue(Call, New); // Update the callgraph to know that the callsite has been transformed. CallGraphNode *CalleeNode = CG[Call->getParent()->getParent()]; CalleeNode->replaceCallEdge(CS, CallSite(New), NF_CGN); if (!Call->use_empty()) { Call->replaceAllUsesWith(New); New->takeName(Call); } // Finally, remove the old call from the program, reducing the use-count of // F. Call->eraseFromParent(); } // Since we have now created the new function, splice the body of the old // function right into the new function, leaving the old rotting hulk of the // function empty. NF->getBasicBlockList().splice(NF->begin(), F->getBasicBlockList()); // Loop over the argument list, transferring uses of the old arguments over to // the new arguments, also transferring over the names as well. // for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(), I2 = NF->arg_begin(); I != E; ++I) { if (!ArgsToPromote.count(I) && !ByValArgsToTransform.count(I)) { // If this is an unmodified argument, move the name and users over to the // new version. 
I->replaceAllUsesWith(I2); I2->takeName(I); AA.replaceWithNewValue(I, I2); ++I2; continue; } if (ByValArgsToTransform.count(I)) { // In the callee, we create an alloca, and store each of the new incoming // arguments into the alloca. Instruction *InsertPt = NF->begin()->begin(); // Just add all the struct element types. Type *AgTy = cast<PointerType>(I->getType())->getElementType(); Value *TheAlloca = new AllocaInst(AgTy, nullptr, "", InsertPt); StructType *STy = cast<StructType>(AgTy); Value *Idxs[2] = { ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), nullptr }; for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i); Value *Idx = GetElementPtrInst::Create( AgTy, TheAlloca, Idxs, TheAlloca->getName() + "." + Twine(i), InsertPt); I2->setName(I->getName()+"."+Twine(i)); new StoreInst(I2++, Idx, InsertPt); } // Anything that used the arg should now use the alloca. I->replaceAllUsesWith(TheAlloca); TheAlloca->takeName(I); AA.replaceWithNewValue(I, TheAlloca); // If the alloca is used in a call, we must clear the tail flag since // the callee now uses an alloca from the caller. for (User *U : TheAlloca->users()) { CallInst *Call = dyn_cast<CallInst>(U); if (!Call) continue; Call->setTailCall(false); } continue; } if (I->use_empty()) { AA.deleteValue(I); continue; } // Otherwise, if we promoted this argument, then all users are load // instructions (or GEPs with only load users), and all loads should be // using the new argument that we added. ScalarizeTable &ArgIndices = ScalarizedElements[I]; while (!I->use_empty()) { if (LoadInst *LI = dyn_cast<LoadInst>(I->user_back())) { assert(ArgIndices.begin()->second.empty() && "Load element should sort to front!"); I2->setName(I->getName()+".val"); LI->replaceAllUsesWith(I2); AA.replaceWithNewValue(LI, I2); LI->eraseFromParent(); DEBUG(dbgs() << "*** Promoted load of argument '" << I->getName() << "' in function '" << F->getName() << "'\n"); } else { GetElementPtrInst *GEP = cast<GetElementPtrInst>(I->user_back()); IndicesVector Operands; Operands.reserve(GEP->getNumIndices()); for (User::op_iterator II = GEP->idx_begin(), IE = GEP->idx_end(); II != IE; ++II) Operands.push_back(cast<ConstantInt>(*II)->getSExtValue()); // GEPs with a single 0 index can be merged with direct loads if (Operands.size() == 1 && Operands.front() == 0) Operands.clear(); Function::arg_iterator TheArg = I2; for (ScalarizeTable::iterator It = ArgIndices.begin(); It->second != Operands; ++It, ++TheArg) { assert(It != ArgIndices.end() && "GEP not handled??"); } std::string NewName = I->getName(); for (unsigned i = 0, e = Operands.size(); i != e; ++i) { NewName += "." + utostr(Operands[i]); } NewName += ".val"; TheArg->setName(NewName); DEBUG(dbgs() << "*** Promoted agg argument '" << TheArg->getName() << "' of function '" << NF->getName() << "'\n"); // All of the uses must be load instructions. Replace them all with // the argument specified by ArgNo. while (!GEP->use_empty()) { LoadInst *L = cast<LoadInst>(GEP->user_back()); L->replaceAllUsesWith(TheArg); AA.replaceWithNewValue(L, TheArg); L->eraseFromParent(); } AA.deleteValue(GEP); GEP->eraseFromParent(); } } // Increment I2 past all of the arguments added for this promoted pointer. std::advance(I2, ArgIndices.size()); } // Tell the alias analysis that the old function is about to disappear. AA.replaceWithNewValue(F, NF); NF_CGN->stealCalledFunctionsFrom(CG[F]); // Now that the old function is dead, delete it. 
If there is a dangling // reference to the CallGraphNode, just leave the dead function around for // someone else to nuke. CallGraphNode *CGN = CG[F]; if (CGN->getNumReferences() == 0) delete CG.removeFunctionFromModule(CGN); else F->setLinkage(Function::ExternalLinkage); return NF_CGN; } bool ArgPromotion::doInitialization(CallGraph &CG) { FunctionDIs = makeSubprogramMap(CG.getModule()); return CallGraphSCCPass::doInitialization(CG); }
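// [Illustrative sketch -- not part of ArgumentPromotion.cpp] How the pass could
// be scheduled with the legacy pass manager, assuming the usual LLVM pass and
// analysis registration has already been performed. Note that this fork
// compiles the pass's registration out in IPO.cpp ("Disable ArgPromotion"), so
// this reflects upstream usage only; the helper name and the module reference M
// are assumptions made for the example.
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/IPO.h"

static void promotePointerArguments(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  // maxElements == 3 matches the pass's default: an aggregate argument is only
  // scalarized if doing so adds at most three new parameters.
  PM.add(llvm::createArgumentPromotionPass(/*maxElements=*/3));
  PM.run(M);
}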
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/IPO/PartialInlining.cpp
//===- PartialInlining.cpp - Inline parts of functions --------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass performs partial inlining, typically by inlining an if statement // that surrounds the body of the function. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/IPO.h" #include "llvm/ADT/Statistic.h" #include "llvm/IR/CFG.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Transforms/Utils/Cloning.h" #include "llvm/Transforms/Utils/CodeExtractor.h" using namespace llvm; #define DEBUG_TYPE "partialinlining" STATISTIC(NumPartialInlined, "Number of functions partially inlined"); namespace { struct PartialInliner : public ModulePass { void getAnalysisUsage(AnalysisUsage &AU) const override { } static char ID; // Pass identification, replacement for typeid PartialInliner() : ModulePass(ID) { initializePartialInlinerPass(*PassRegistry::getPassRegistry()); } bool runOnModule(Module& M) override; private: Function* unswitchFunction(Function* F); }; } char PartialInliner::ID = 0; INITIALIZE_PASS(PartialInliner, "partial-inliner", "Partial Inliner", false, false) ModulePass* llvm::createPartialInliningPass() { return new PartialInliner(); } Function* PartialInliner::unswitchFunction(Function* F) { // First, verify that this function is an unswitching candidate... BasicBlock* entryBlock = F->begin(); BranchInst *BR = dyn_cast<BranchInst>(entryBlock->getTerminator()); if (!BR || BR->isUnconditional()) return nullptr; BasicBlock* returnBlock = nullptr; BasicBlock* nonReturnBlock = nullptr; unsigned returnCount = 0; for (BasicBlock *BB : successors(entryBlock)) { if (isa<ReturnInst>(BB->getTerminator())) { returnBlock = BB; returnCount++; } else nonReturnBlock = BB; } if (returnCount != 1) return nullptr; // Clone the function, so that we can hack away on it. ValueToValueMapTy VMap; Function* duplicateFunction = CloneFunction(F, VMap, /*ModuleLevelChanges=*/false); duplicateFunction->setLinkage(GlobalValue::InternalLinkage); F->getParent()->getFunctionList().push_back(duplicateFunction); BasicBlock* newEntryBlock = cast<BasicBlock>(VMap[entryBlock]); BasicBlock* newReturnBlock = cast<BasicBlock>(VMap[returnBlock]); BasicBlock* newNonReturnBlock = cast<BasicBlock>(VMap[nonReturnBlock]); // Go ahead and update all uses to the duplicate, so that we can just // use the inliner functionality when we're done hacking. F->replaceAllUsesWith(duplicateFunction); // Special hackery is needed with PHI nodes that have inputs from more than // one extracted block. For simplicity, just split the PHIs into a two-level // sequence of PHIs, some of which will go in the extracted region, and some // of which will go outside. 
BasicBlock* preReturn = newReturnBlock; newReturnBlock = newReturnBlock->splitBasicBlock( newReturnBlock->getFirstNonPHI()); BasicBlock::iterator I = preReturn->begin(); BasicBlock::iterator Ins = newReturnBlock->begin(); while (I != preReturn->end()) { PHINode* OldPhi = dyn_cast<PHINode>(I); if (!OldPhi) break; PHINode* retPhi = PHINode::Create(OldPhi->getType(), 2, "", Ins); OldPhi->replaceAllUsesWith(retPhi); Ins = newReturnBlock->getFirstNonPHI(); retPhi->addIncoming(I, preReturn); retPhi->addIncoming(OldPhi->getIncomingValueForBlock(newEntryBlock), newEntryBlock); OldPhi->removeIncomingValue(newEntryBlock); ++I; } newEntryBlock->getTerminator()->replaceUsesOfWith(preReturn, newReturnBlock); // Gather up the blocks that we're going to extract. std::vector<BasicBlock*> toExtract; toExtract.push_back(newNonReturnBlock); for (Function::iterator FI = duplicateFunction->begin(), FE = duplicateFunction->end(); FI != FE; ++FI) if (&*FI != newEntryBlock && &*FI != newReturnBlock && &*FI != newNonReturnBlock) toExtract.push_back(FI); // The CodeExtractor needs a dominator tree. DominatorTree DT; DT.recalculate(*duplicateFunction); // Extract the body of the if. Function* extractedFunction = CodeExtractor(toExtract, &DT).extractCodeRegion(); InlineFunctionInfo IFI; // Inline the top-level if test into all callers. std::vector<User *> Users(duplicateFunction->user_begin(), duplicateFunction->user_end()); for (std::vector<User*>::iterator UI = Users.begin(), UE = Users.end(); UI != UE; ++UI) if (CallInst *CI = dyn_cast<CallInst>(*UI)) InlineFunction(CI, IFI); else if (InvokeInst *II = dyn_cast<InvokeInst>(*UI)) InlineFunction(II, IFI); // Ditch the duplicate, since we're done with it, and rewrite all remaining // users (function pointers, etc.) back to the original function. duplicateFunction->replaceAllUsesWith(F); duplicateFunction->eraseFromParent(); ++NumPartialInlined; return extractedFunction; } bool PartialInliner::runOnModule(Module& M) { std::vector<Function*> worklist; worklist.reserve(M.size()); for (Module::iterator FI = M.begin(), FE = M.end(); FI != FE; ++FI) if (!FI->use_empty() && !FI->isDeclaration()) worklist.push_back(&*FI); bool changed = false; while (!worklist.empty()) { Function* currFunc = worklist.back(); worklist.pop_back(); if (currFunc->use_empty()) continue; bool recursive = false; for (User *U : currFunc->users()) if (Instruction* I = dyn_cast<Instruction>(U)) if (I->getParent()->getParent() == currFunc) { recursive = true; break; } if (recursive) continue; if (Function* newFunc = unswitchFunction(currFunc)) { worklist.push_back(newFunc); changed = true; } } return changed; }
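// --- Illustrative sketch, not part of PartialInlining.cpp ---------------------
// A plain C++ analogue of the function shape unswitchFunction() looks for: the
// entry block ends in a conditional branch, exactly one successor returns
// immediately, and the other successor holds the expensive body. Conceptually,
// the pass clones the function, extracts the expensive branch with
// CodeExtractor, and lets InlineFunction inline the cheap guard into every
// caller. The names below (heavy_body, guarded_work) are hypothetical.

#include <cstdio>

// The "non-return block": the part the pass would outline into its own function.
static int heavy_body(int x) {
  int sum = 0;
  for (int i = 0; i < x; ++i)
    sum += i * i;
  return sum;
}

// A partial-inlining candidate. After the transformation, callers effectively
// see only the cheap `x <= 0` test inline, plus a call to the outlined body on
// the slow path.
static int guarded_work(int x) {
  if (x <= 0)
    return 0;             // the single successor that immediately returns
  return heavy_body(x);   // the non-return successor, extracted by the pass
}

int main() {
  std::printf("%d\n", guarded_work(10)); // prints 285
  return 0;
}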
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/IPO/FunctionAttrs.cpp
//===- FunctionAttrs.cpp - Pass which marks functions attributes ----------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements a simple interprocedural pass which walks the // call-graph, looking for functions which do not access or only read // non-local memory, and marking them readnone/readonly. It does the // same with function arguments independently, marking them readonly/ // readnone/nocapture. Finally, well-known library call declarations // are marked with all attributes that are consistent with the // function's standard definition. This pass is implemented as a // bottom-up traversal of the call-graph. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/IPO.h" #include "llvm/ADT/SCCIterator.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/CallGraph.h" #include "llvm/Analysis/CallGraphSCCPass.h" #include "llvm/Analysis/CaptureTracking.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/InstIterator.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/LLVMContext.h" #include "llvm/Analysis/TargetLibraryInfo.h" using namespace llvm; #define DEBUG_TYPE "functionattrs" STATISTIC(NumReadNone, "Number of functions marked readnone"); STATISTIC(NumReadOnly, "Number of functions marked readonly"); STATISTIC(NumNoCapture, "Number of arguments marked nocapture"); STATISTIC(NumReadNoneArg, "Number of arguments marked readnone"); STATISTIC(NumReadOnlyArg, "Number of arguments marked readonly"); STATISTIC(NumNoAlias, "Number of function returns marked noalias"); STATISTIC(NumAnnotated, "Number of attributes added to library functions"); namespace { struct FunctionAttrs : public CallGraphSCCPass { static char ID; // Pass identification, replacement for typeid FunctionAttrs() : CallGraphSCCPass(ID), AA(nullptr) { initializeFunctionAttrsPass(*PassRegistry::getPassRegistry()); } // runOnSCC - Analyze the SCC, performing the transformation if possible. bool runOnSCC(CallGraphSCC &SCC) override; // AddReadAttrs - Deduce readonly/readnone attributes for the SCC. bool AddReadAttrs(const CallGraphSCC &SCC); // AddArgumentAttrs - Deduce nocapture attributes for the SCC. bool AddArgumentAttrs(const CallGraphSCC &SCC); // IsFunctionMallocLike - Does this function allocate new memory? bool IsFunctionMallocLike(Function *F, SmallPtrSet<Function*, 8> &) const; // AddNoAliasAttrs - Deduce noalias attributes for the SCC. bool AddNoAliasAttrs(const CallGraphSCC &SCC); // Utility methods used by inferPrototypeAttributes to add attributes // and maintain annotation statistics. 
void setDoesNotAccessMemory(Function &F) { if (!F.doesNotAccessMemory()) { F.setDoesNotAccessMemory(); ++NumAnnotated; } } void setOnlyReadsMemory(Function &F) { if (!F.onlyReadsMemory()) { F.setOnlyReadsMemory(); ++NumAnnotated; } } void setDoesNotThrow(Function &F) { if (!F.doesNotThrow()) { F.setDoesNotThrow(); ++NumAnnotated; } } void setDoesNotCapture(Function &F, unsigned n) { if (!F.doesNotCapture(n)) { F.setDoesNotCapture(n); ++NumAnnotated; } } void setOnlyReadsMemory(Function &F, unsigned n) { if (!F.onlyReadsMemory(n)) { F.setOnlyReadsMemory(n); ++NumAnnotated; } } void setDoesNotAlias(Function &F, unsigned n) { if (!F.doesNotAlias(n)) { F.setDoesNotAlias(n); ++NumAnnotated; } } // inferPrototypeAttributes - Analyze the name and prototype of the // given function and set any applicable attributes. Returns true // if any attributes were set and false otherwise. bool inferPrototypeAttributes(Function &F); // annotateLibraryCalls - Adds attributes to well-known standard library // call declarations. bool annotateLibraryCalls(const CallGraphSCC &SCC); void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesCFG(); AU.addRequired<AliasAnalysis>(); AU.addRequired<TargetLibraryInfoWrapperPass>(); CallGraphSCCPass::getAnalysisUsage(AU); } private: AliasAnalysis *AA; TargetLibraryInfo *TLI; }; } char FunctionAttrs::ID = 0; INITIALIZE_PASS_BEGIN(FunctionAttrs, "functionattrs", "Deduce function attributes", false, false) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass) INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) INITIALIZE_PASS_END(FunctionAttrs, "functionattrs", "Deduce function attributes", false, false) Pass *llvm::createFunctionAttrsPass() { return new FunctionAttrs(); } /// AddReadAttrs - Deduce readonly/readnone attributes for the SCC. bool FunctionAttrs::AddReadAttrs(const CallGraphSCC &SCC) { SmallPtrSet<Function*, 8> SCCNodes; // Fill SCCNodes with the elements of the SCC. Used for quickly // looking up whether a given CallGraphNode is in this SCC. for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) SCCNodes.insert((*I)->getFunction()); // Check if any of the functions in the SCC read or write memory. If they // write memory then they can't be marked readnone or readonly. bool ReadsMemory = false; for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) { Function *F = (*I)->getFunction(); if (!F || F->hasFnAttribute(Attribute::OptimizeNone)) // External node or node we don't want to optimize - assume it may write // memory and give up. return false; AliasAnalysis::ModRefBehavior MRB = AA->getModRefBehavior(F); if (MRB == AliasAnalysis::DoesNotAccessMemory) // Already perfect! continue; // Definitions with weak linkage may be overridden at linktime with // something that writes memory, so treat them like declarations. if (F->isDeclaration() || F->mayBeOverridden()) { if (!AliasAnalysis::onlyReadsMemory(MRB)) // May write memory. Just give up. return false; ReadsMemory = true; continue; } // Scan the function body for instructions that may read or write memory. for (inst_iterator II = inst_begin(F), E = inst_end(F); II != E; ++II) { Instruction *I = &*II; // Some instructions can be ignored even if they read or write memory. // Detect these now, skipping to the next instruction if one is found. CallSite CS(cast<Value>(I)); if (CS) { // Ignore calls to functions in the same SCC. 
if (CS.getCalledFunction() && SCCNodes.count(CS.getCalledFunction())) continue; AliasAnalysis::ModRefBehavior MRB = AA->getModRefBehavior(CS); // If the call doesn't access arbitrary memory, we may be able to // figure out something. if (AliasAnalysis::onlyAccessesArgPointees(MRB)) { // If the call does access argument pointees, check each argument. if (AliasAnalysis::doesAccessArgPointees(MRB)) // Check whether all pointer arguments point to local memory, and // ignore calls that only access local memory. for (CallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end(); CI != CE; ++CI) { Value *Arg = *CI; if (Arg->getType()->isPointerTy()) { AAMDNodes AAInfo; I->getAAMetadata(AAInfo); MemoryLocation Loc(Arg, MemoryLocation::UnknownSize, AAInfo); if (!AA->pointsToConstantMemory(Loc, /*OrLocal=*/true)) { if (MRB & AliasAnalysis::Mod) // Writes non-local memory. Give up. return false; if (MRB & AliasAnalysis::Ref) // Ok, it reads non-local memory. ReadsMemory = true; } } } continue; } // The call could access any memory. If that includes writes, give up. if (MRB & AliasAnalysis::Mod) return false; // If it reads, note it. if (MRB & AliasAnalysis::Ref) ReadsMemory = true; continue; } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) { // Ignore non-volatile loads from local memory. (Atomic is okay here.) if (!LI->isVolatile()) { MemoryLocation Loc = MemoryLocation::get(LI); if (AA->pointsToConstantMemory(Loc, /*OrLocal=*/true)) continue; } } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) { // Ignore non-volatile stores to local memory. (Atomic is okay here.) if (!SI->isVolatile()) { MemoryLocation Loc = MemoryLocation::get(SI); if (AA->pointsToConstantMemory(Loc, /*OrLocal=*/true)) continue; } } else if (VAArgInst *VI = dyn_cast<VAArgInst>(I)) { // Ignore vaargs on local memory. MemoryLocation Loc = MemoryLocation::get(VI); if (AA->pointsToConstantMemory(Loc, /*OrLocal=*/true)) continue; } // Any remaining instructions need to be taken seriously! Check if they // read or write memory. if (I->mayWriteToMemory()) // Writes memory. Just give up. return false; // If this instruction may read memory, remember that. ReadsMemory |= I->mayReadFromMemory(); } } // Success! Functions in this SCC do not access memory, or only read memory. // Give them the appropriate attribute. bool MadeChange = false; for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) { Function *F = (*I)->getFunction(); if (F->doesNotAccessMemory()) // Already perfect! continue; if (F->onlyReadsMemory() && ReadsMemory) // No change. continue; MadeChange = true; // Clear out any existing attributes. AttrBuilder B; B.addAttribute(Attribute::ReadOnly) .addAttribute(Attribute::ReadNone); F->removeAttributes(AttributeSet::FunctionIndex, AttributeSet::get(F->getContext(), AttributeSet::FunctionIndex, B)); // Add in the new attribute. F->addAttribute(AttributeSet::FunctionIndex, ReadsMemory ? Attribute::ReadOnly : Attribute::ReadNone); if (ReadsMemory) ++NumReadOnly; else ++NumReadNone; } return MadeChange; } namespace { // For a given pointer Argument, this retains a list of Arguments of functions // in the same SCC that the pointer data flows into. We use this to build an // SCC of the arguments. struct ArgumentGraphNode { Argument *Definition; SmallVector<ArgumentGraphNode*, 4> Uses; }; class ArgumentGraph { // We store pointers to ArgumentGraphNode objects, so it's important that // that they not move around upon insert. 
typedef std::map<Argument*, ArgumentGraphNode> ArgumentMapTy; ArgumentMapTy ArgumentMap; // There is no root node for the argument graph, in fact: // void f(int *x, int *y) { if (...) f(x, y); } // is an example where the graph is disconnected. The SCCIterator requires a // single entry point, so we maintain a fake ("synthetic") root node that // uses every node. Because the graph is directed and nothing points into // the root, it will not participate in any SCCs (except for its own). ArgumentGraphNode SyntheticRoot; public: ArgumentGraph() { SyntheticRoot.Definition = nullptr; } typedef SmallVectorImpl<ArgumentGraphNode*>::iterator iterator; iterator begin() { return SyntheticRoot.Uses.begin(); } iterator end() { return SyntheticRoot.Uses.end(); } ArgumentGraphNode *getEntryNode() { return &SyntheticRoot; } ArgumentGraphNode *operator[](Argument *A) { ArgumentGraphNode &Node = ArgumentMap[A]; Node.Definition = A; SyntheticRoot.Uses.push_back(&Node); return &Node; } }; // This tracker checks whether callees are in the SCC, and if so it does not // consider that a capture, instead adding it to the "Uses" list and // continuing with the analysis. struct ArgumentUsesTracker : public CaptureTracker { ArgumentUsesTracker(const SmallPtrSet<Function*, 8> &SCCNodes) : Captured(false), SCCNodes(SCCNodes) {} void tooManyUses() override { Captured = true; } bool captured(const Use *U) override { CallSite CS(U->getUser()); if (!CS.getInstruction()) { Captured = true; return true; } Function *F = CS.getCalledFunction(); if (!F || !SCCNodes.count(F)) { Captured = true; return true; } bool Found = false; Function::arg_iterator AI = F->arg_begin(), AE = F->arg_end(); for (CallSite::arg_iterator PI = CS.arg_begin(), PE = CS.arg_end(); PI != PE; ++PI, ++AI) { if (AI == AE) { assert(F->isVarArg() && "More params than args in non-varargs call"); Captured = true; return true; } if (PI == U) { Uses.push_back(AI); Found = true; break; } } assert(Found && "Capturing call-site captured nothing?"); (void)Found; return false; } bool Captured; // True only if certainly captured (used outside our SCC). SmallVector<Argument*, 4> Uses; // Uses within our SCC. const SmallPtrSet<Function*, 8> &SCCNodes; }; } namespace llvm { template<> struct GraphTraits<ArgumentGraphNode*> { typedef ArgumentGraphNode NodeType; typedef SmallVectorImpl<ArgumentGraphNode*>::iterator ChildIteratorType; static inline NodeType *getEntryNode(NodeType *A) { return A; } static inline ChildIteratorType child_begin(NodeType *N) { return N->Uses.begin(); } static inline ChildIteratorType child_end(NodeType *N) { return N->Uses.end(); } }; template<> struct GraphTraits<ArgumentGraph*> : public GraphTraits<ArgumentGraphNode*> { static NodeType *getEntryNode(ArgumentGraph *AG) { return AG->getEntryNode(); } static ChildIteratorType nodes_begin(ArgumentGraph *AG) { return AG->begin(); } static ChildIteratorType nodes_end(ArgumentGraph *AG) { return AG->end(); } }; } // Returns Attribute::None, Attribute::ReadOnly or Attribute::ReadNone. static Attribute::AttrKind determinePointerReadAttrs(Argument *A, const SmallPtrSet<Argument*, 8> &SCCNodes) { SmallVector<Use*, 32> Worklist; SmallSet<Use*, 32> Visited; int Count = 0; // inalloca arguments are always clobbered by the call. if (A->hasInAllocaAttr()) return Attribute::None; bool IsRead = false; // We don't need to track IsWritten. If A is written to, return immediately. 
for (Use &U : A->uses()) { if (Count++ >= 20) return Attribute::None; Visited.insert(&U); Worklist.push_back(&U); } while (!Worklist.empty()) { Use *U = Worklist.pop_back_val(); Instruction *I = cast<Instruction>(U->getUser()); Value *V = U->get(); switch (I->getOpcode()) { case Instruction::BitCast: case Instruction::GetElementPtr: case Instruction::PHI: case Instruction::Select: case Instruction::AddrSpaceCast: // The original value is not read/written via this if the new value isn't. for (Use &UU : I->uses()) if (Visited.insert(&UU).second) Worklist.push_back(&UU); break; case Instruction::Call: case Instruction::Invoke: { bool Captures = true; if (I->getType()->isVoidTy()) Captures = false; auto AddUsersToWorklistIfCapturing = [&] { if (Captures) for (Use &UU : I->uses()) if (Visited.insert(&UU).second) Worklist.push_back(&UU); }; CallSite CS(I); if (CS.doesNotAccessMemory()) { AddUsersToWorklistIfCapturing(); continue; } Function *F = CS.getCalledFunction(); if (!F) { if (CS.onlyReadsMemory()) { IsRead = true; AddUsersToWorklistIfCapturing(); continue; } return Attribute::None; } Function::arg_iterator AI = F->arg_begin(), AE = F->arg_end(); CallSite::arg_iterator B = CS.arg_begin(), E = CS.arg_end(); for (CallSite::arg_iterator A = B; A != E; ++A, ++AI) { if (A->get() == V) { if (AI == AE) { assert(F->isVarArg() && "More params than args in non-varargs call."); return Attribute::None; } Captures &= !CS.doesNotCapture(A - B); if (SCCNodes.count(AI)) continue; if (!CS.onlyReadsMemory() && !CS.onlyReadsMemory(A - B)) return Attribute::None; if (!CS.doesNotAccessMemory(A - B)) IsRead = true; } } AddUsersToWorklistIfCapturing(); break; } case Instruction::Load: IsRead = true; break; case Instruction::ICmp: case Instruction::Ret: break; default: return Attribute::None; } } return IsRead ? Attribute::ReadOnly : Attribute::ReadNone; } /// AddArgumentAttrs - Deduce nocapture attributes for the SCC. bool FunctionAttrs::AddArgumentAttrs(const CallGraphSCC &SCC) { bool Changed = false; SmallPtrSet<Function*, 8> SCCNodes; // Fill SCCNodes with the elements of the SCC. Used for quickly // looking up whether a given CallGraphNode is in this SCC. for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) { Function *F = (*I)->getFunction(); if (F && !F->isDeclaration() && !F->mayBeOverridden() && !F->hasFnAttribute(Attribute::OptimizeNone)) SCCNodes.insert(F); } ArgumentGraph AG; AttrBuilder B; B.addAttribute(Attribute::NoCapture); // Check each function in turn, determining which pointer arguments are not // captured. for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) { Function *F = (*I)->getFunction(); if (!F || F->hasFnAttribute(Attribute::OptimizeNone)) // External node or function we're trying not to optimize - only a problem // for arguments that we pass to it. continue; // Definitions with weak linkage may be overridden at linktime with // something that captures pointers, so treat them like declarations. if (F->isDeclaration() || F->mayBeOverridden()) continue; // Functions that are readonly (or readnone) and nounwind and don't return // a value can't capture arguments. Don't analyze them. 
if (F->onlyReadsMemory() && F->doesNotThrow() && F->getReturnType()->isVoidTy()) { for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A != E; ++A) { if (A->getType()->isPointerTy() && !A->hasNoCaptureAttr()) { A->addAttr(AttributeSet::get(F->getContext(), A->getArgNo() + 1, B)); ++NumNoCapture; Changed = true; } } continue; } for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A != E; ++A) { if (!A->getType()->isPointerTy()) continue; bool HasNonLocalUses = false; if (!A->hasNoCaptureAttr()) { ArgumentUsesTracker Tracker(SCCNodes); PointerMayBeCaptured(A, &Tracker); if (!Tracker.Captured) { if (Tracker.Uses.empty()) { // If it's trivially not captured, mark it nocapture now. A->addAttr(AttributeSet::get(F->getContext(), A->getArgNo()+1, B)); ++NumNoCapture; Changed = true; } else { // If it's not trivially captured and not trivially not captured, // then it must be calling into another function in our SCC. Save // its particulars for Argument-SCC analysis later. ArgumentGraphNode *Node = AG[A]; for (SmallVectorImpl<Argument*>::iterator UI = Tracker.Uses.begin(), UE = Tracker.Uses.end(); UI != UE; ++UI) { Node->Uses.push_back(AG[*UI]); if (*UI != A) HasNonLocalUses = true; } } } // Otherwise, it's captured. Don't bother doing SCC analysis on it. } if (!HasNonLocalUses && !A->onlyReadsMemory()) { // Can we determine that it's readonly/readnone without doing an SCC? // Note that we don't allow any calls at all here, or else our result // will be dependent on the iteration order through the functions in the // SCC. SmallPtrSet<Argument*, 8> Self; Self.insert(A); Attribute::AttrKind R = determinePointerReadAttrs(A, Self); if (R != Attribute::None) { AttrBuilder B; B.addAttribute(R); A->addAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, B)); Changed = true; R == Attribute::ReadOnly ? ++NumReadOnlyArg : ++NumReadNoneArg; } } } } // The graph we've collected is partial because we stopped scanning for // argument uses once we solved the argument trivially. These partial nodes // show up as ArgumentGraphNode objects with an empty Uses list, and for // these nodes the final decision about whether they capture has already been // made. If the definition doesn't have a 'nocapture' attribute by now, it // captures. for (scc_iterator<ArgumentGraph*> I = scc_begin(&AG); !I.isAtEnd(); ++I) { const std::vector<ArgumentGraphNode *> &ArgumentSCC = *I; if (ArgumentSCC.size() == 1) { if (!ArgumentSCC[0]->Definition) continue; // synthetic root node // eg. "void f(int* x) { if (...) f(x); }" if (ArgumentSCC[0]->Uses.size() == 1 && ArgumentSCC[0]->Uses[0] == ArgumentSCC[0]) { Argument *A = ArgumentSCC[0]->Definition; A->addAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, B)); ++NumNoCapture; Changed = true; } continue; } bool SCCCaptured = false; for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end(); I != E && !SCCCaptured; ++I) { ArgumentGraphNode *Node = *I; if (Node->Uses.empty()) { if (!Node->Definition->hasNoCaptureAttr()) SCCCaptured = true; } } if (SCCCaptured) continue; SmallPtrSet<Argument*, 8> ArgumentSCCNodes; // Fill ArgumentSCCNodes with the elements of the ArgumentSCC. Used for // quickly looking up whether a given Argument is in this ArgumentSCC. 
for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end(); I != E; ++I) { ArgumentSCCNodes.insert((*I)->Definition); } for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end(); I != E && !SCCCaptured; ++I) { ArgumentGraphNode *N = *I; for (SmallVectorImpl<ArgumentGraphNode*>::iterator UI = N->Uses.begin(), UE = N->Uses.end(); UI != UE; ++UI) { Argument *A = (*UI)->Definition; if (A->hasNoCaptureAttr() || ArgumentSCCNodes.count(A)) continue; SCCCaptured = true; break; } } if (SCCCaptured) continue; for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) { Argument *A = ArgumentSCC[i]->Definition; A->addAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, B)); ++NumNoCapture; Changed = true; } // We also want to compute readonly/readnone. With a small number of false // negatives, we can assume that any pointer which is captured isn't going // to be provably readonly or readnone, since by definition we can't // analyze all uses of a captured pointer. // // The false negatives happen when the pointer is captured by a function // that promises readonly/readnone behaviour on the pointer, then the // pointer's lifetime ends before anything that writes to arbitrary memory. // Also, a readonly/readnone pointer may be returned, but returning a // pointer is capturing it. Attribute::AttrKind ReadAttr = Attribute::ReadNone; for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) { Argument *A = ArgumentSCC[i]->Definition; Attribute::AttrKind K = determinePointerReadAttrs(A, ArgumentSCCNodes); if (K == Attribute::ReadNone) continue; if (K == Attribute::ReadOnly) { ReadAttr = Attribute::ReadOnly; continue; } ReadAttr = K; break; } if (ReadAttr != Attribute::None) { AttrBuilder B, R; B.addAttribute(ReadAttr); R.addAttribute(Attribute::ReadOnly) .addAttribute(Attribute::ReadNone); for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) { Argument *A = ArgumentSCC[i]->Definition; // Clear out existing readonly/readnone attributes A->removeAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, R)); A->addAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, B)); ReadAttr == Attribute::ReadOnly ? ++NumReadOnlyArg : ++NumReadNoneArg; Changed = true; } } } return Changed; } /// IsFunctionMallocLike - A function is malloc-like if it returns either null /// or a pointer that doesn't alias any other pointer visible to the caller. bool FunctionAttrs::IsFunctionMallocLike(Function *F, SmallPtrSet<Function*, 8> &SCCNodes) const { SmallSetVector<Value *, 8> FlowsToReturn; for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I) if (ReturnInst *Ret = dyn_cast<ReturnInst>(I->getTerminator())) FlowsToReturn.insert(Ret->getReturnValue()); for (unsigned i = 0; i != FlowsToReturn.size(); ++i) { Value *RetVal = FlowsToReturn[i]; if (Constant *C = dyn_cast<Constant>(RetVal)) { if (!C->isNullValue() && !isa<UndefValue>(C)) return false; continue; } if (isa<Argument>(RetVal)) return false; if (Instruction *RVI = dyn_cast<Instruction>(RetVal)) switch (RVI->getOpcode()) { // Extend the analysis by looking upwards. 
case Instruction::BitCast: case Instruction::GetElementPtr: case Instruction::AddrSpaceCast: FlowsToReturn.insert(RVI->getOperand(0)); continue; case Instruction::Select: { SelectInst *SI = cast<SelectInst>(RVI); FlowsToReturn.insert(SI->getTrueValue()); FlowsToReturn.insert(SI->getFalseValue()); continue; } case Instruction::PHI: { PHINode *PN = cast<PHINode>(RVI); for (Value *IncValue : PN->incoming_values()) FlowsToReturn.insert(IncValue); continue; } // Check whether the pointer came from an allocation. case Instruction::Alloca: break; case Instruction::Call: case Instruction::Invoke: { CallSite CS(RVI); if (CS.paramHasAttr(0, Attribute::NoAlias)) break; if (CS.getCalledFunction() && SCCNodes.count(CS.getCalledFunction())) break; } LLVM_FALLTHROUGH; // HLSL Change default: return false; // Did not come from an allocation. } if (PointerMayBeCaptured(RetVal, false, /*StoreCaptures=*/false)) return false; } return true; } /// AddNoAliasAttrs - Deduce noalias attributes for the SCC. bool FunctionAttrs::AddNoAliasAttrs(const CallGraphSCC &SCC) { SmallPtrSet<Function*, 8> SCCNodes; // Fill SCCNodes with the elements of the SCC. Used for quickly // looking up whether a given CallGraphNode is in this SCC. for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) SCCNodes.insert((*I)->getFunction()); // Check each function in turn, determining which functions return noalias // pointers. for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) { Function *F = (*I)->getFunction(); if (!F || F->hasFnAttribute(Attribute::OptimizeNone)) // External node or node we don't want to optimize - skip it; return false; // Already noalias. if (F->doesNotAlias(0)) continue; // Definitions with weak linkage may be overridden at linktime, so // treat them like declarations. if (F->isDeclaration() || F->mayBeOverridden()) return false; // We annotate noalias return values, which are only applicable to // pointer types. if (!F->getReturnType()->isPointerTy()) continue; if (!IsFunctionMallocLike(F, SCCNodes)) return false; } bool MadeChange = false; for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) { Function *F = (*I)->getFunction(); if (F->doesNotAlias(0) || !F->getReturnType()->isPointerTy()) continue; F->setDoesNotAlias(0); ++NumNoAlias; MadeChange = true; } return MadeChange; } /// inferPrototypeAttributes - Analyze the name and prototype of the /// given function and set any applicable attributes. Returns true /// if any attributes were set and false otherwise. 
bool FunctionAttrs::inferPrototypeAttributes(Function &F) { if (F.hasFnAttribute(Attribute::OptimizeNone)) return false; FunctionType *FTy = F.getFunctionType(); LibFunc::Func TheLibFunc; if (!(TLI->getLibFunc(F.getName(), TheLibFunc) && TLI->has(TheLibFunc))) return false; switch (TheLibFunc) { case LibFunc::strlen: if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) return false; setOnlyReadsMemory(F); setDoesNotThrow(F); setDoesNotCapture(F, 1); break; case LibFunc::strchr: case LibFunc::strrchr: if (FTy->getNumParams() != 2 || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(1)->isIntegerTy()) return false; setOnlyReadsMemory(F); setDoesNotThrow(F); break; case LibFunc::strtol: case LibFunc::strtod: case LibFunc::strtof: case LibFunc::strtoul: case LibFunc::strtoll: case LibFunc::strtold: case LibFunc::strtoull: if (FTy->getNumParams() < 2 || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 1); break; case LibFunc::strcpy: case LibFunc::stpcpy: case LibFunc::strcat: case LibFunc::strncat: case LibFunc::strncpy: case LibFunc::stpncpy: if (FTy->getNumParams() < 2 || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 2); break; case LibFunc::strxfrm: if (FTy->getNumParams() != 3 || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 2); break; case LibFunc::strcmp: //0,1 case LibFunc::strspn: // 0,1 case LibFunc::strncmp: // 0,1 case LibFunc::strcspn: //0,1 case LibFunc::strcoll: //0,1 case LibFunc::strcasecmp: // 0,1 case LibFunc::strncasecmp: // if (FTy->getNumParams() < 2 || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(1)->isPointerTy()) return false; setOnlyReadsMemory(F); setDoesNotThrow(F); setDoesNotCapture(F, 1); setDoesNotCapture(F, 2); break; case LibFunc::strstr: case LibFunc::strpbrk: if (FTy->getNumParams() != 2 || !FTy->getParamType(1)->isPointerTy()) return false; setOnlyReadsMemory(F); setDoesNotThrow(F); setDoesNotCapture(F, 2); break; case LibFunc::strtok: case LibFunc::strtok_r: if (FTy->getNumParams() < 2 || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 2); break; case LibFunc::scanf: if (FTy->getNumParams() < 1 || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setOnlyReadsMemory(F, 1); break; case LibFunc::setbuf: case LibFunc::setvbuf: if (FTy->getNumParams() < 1 || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); break; case LibFunc::strdup: case LibFunc::strndup: if (FTy->getNumParams() < 1 || !FTy->getReturnType()->isPointerTy() || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotAlias(F, 0); setDoesNotCapture(F, 1); setOnlyReadsMemory(F, 1); break; case LibFunc::stat: case LibFunc::statvfs: if (FTy->getNumParams() < 2 || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 1); break; case LibFunc::sscanf: if (FTy->getNumParams() < 2 || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 1); 
setOnlyReadsMemory(F, 2); break; case LibFunc::sprintf: if (FTy->getNumParams() < 2 || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 2); break; case LibFunc::snprintf: if (FTy->getNumParams() != 3 || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(2)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setDoesNotCapture(F, 3); setOnlyReadsMemory(F, 3); break; case LibFunc::setitimer: if (FTy->getNumParams() != 3 || !FTy->getParamType(1)->isPointerTy() || !FTy->getParamType(2)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 2); setDoesNotCapture(F, 3); setOnlyReadsMemory(F, 2); break; case LibFunc::system: if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) return false; // May throw; "system" is a valid pthread cancellation point. setDoesNotCapture(F, 1); setOnlyReadsMemory(F, 1); break; case LibFunc::malloc: if (FTy->getNumParams() != 1 || !FTy->getReturnType()->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotAlias(F, 0); break; case LibFunc::memcmp: if (FTy->getNumParams() != 3 || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(1)->isPointerTy()) return false; setOnlyReadsMemory(F); setDoesNotThrow(F); setDoesNotCapture(F, 1); setDoesNotCapture(F, 2); break; case LibFunc::memchr: case LibFunc::memrchr: if (FTy->getNumParams() != 3) return false; setOnlyReadsMemory(F); setDoesNotThrow(F); break; case LibFunc::modf: case LibFunc::modff: case LibFunc::modfl: if (FTy->getNumParams() < 2 || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 2); break; case LibFunc::memcpy: case LibFunc::memccpy: case LibFunc::memmove: if (FTy->getNumParams() < 2 || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 2); break; case LibFunc::memalign: if (!FTy->getReturnType()->isPointerTy()) return false; setDoesNotAlias(F, 0); break; case LibFunc::mkdir: if (FTy->getNumParams() == 0 || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setOnlyReadsMemory(F, 1); break; case LibFunc::mktime: if (FTy->getNumParams() == 0 || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); break; case LibFunc::realloc: if (FTy->getNumParams() != 2 || !FTy->getParamType(0)->isPointerTy() || !FTy->getReturnType()->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotAlias(F, 0); setDoesNotCapture(F, 1); break; case LibFunc::read: if (FTy->getNumParams() != 3 || !FTy->getParamType(1)->isPointerTy()) return false; // May throw; "read" is a valid pthread cancellation point. 
setDoesNotCapture(F, 2); break; case LibFunc::rewind: if (FTy->getNumParams() < 1 || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); break; case LibFunc::rmdir: case LibFunc::remove: case LibFunc::realpath: if (FTy->getNumParams() < 1 || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setOnlyReadsMemory(F, 1); break; case LibFunc::rename: if (FTy->getNumParams() < 2 || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 1); setOnlyReadsMemory(F, 2); break; case LibFunc::readlink: if (FTy->getNumParams() < 2 || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 1); break; case LibFunc::write: if (FTy->getNumParams() != 3 || !FTy->getParamType(1)->isPointerTy()) return false; // May throw; "write" is a valid pthread cancellation point. setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 2); break; case LibFunc::bcopy: if (FTy->getNumParams() != 3 || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 1); break; case LibFunc::bcmp: if (FTy->getNumParams() != 3 || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setOnlyReadsMemory(F); setDoesNotCapture(F, 1); setDoesNotCapture(F, 2); break; case LibFunc::bzero: if (FTy->getNumParams() != 2 || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); break; case LibFunc::calloc: if (FTy->getNumParams() != 2 || !FTy->getReturnType()->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotAlias(F, 0); break; case LibFunc::chmod: case LibFunc::chown: if (FTy->getNumParams() == 0 || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setOnlyReadsMemory(F, 1); break; case LibFunc::ctermid: case LibFunc::clearerr: case LibFunc::closedir: if (FTy->getNumParams() == 0 || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); break; case LibFunc::atoi: case LibFunc::atol: case LibFunc::atof: case LibFunc::atoll: if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setOnlyReadsMemory(F); setDoesNotCapture(F, 1); break; case LibFunc::access: if (FTy->getNumParams() != 2 || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setOnlyReadsMemory(F, 1); break; case LibFunc::fopen: if (FTy->getNumParams() != 2 || !FTy->getReturnType()->isPointerTy() || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotAlias(F, 0); setDoesNotCapture(F, 1); setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 1); setOnlyReadsMemory(F, 2); break; case LibFunc::fdopen: if (FTy->getNumParams() != 2 || !FTy->getReturnType()->isPointerTy() || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotAlias(F, 0); setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 2); break; case LibFunc::feof: case LibFunc::free: case LibFunc::fseek: case LibFunc::ftell: case LibFunc::fgetc: case LibFunc::fseeko: case LibFunc::ftello: case 
LibFunc::fileno: case LibFunc::fflush: case LibFunc::fclose: case LibFunc::fsetpos: case LibFunc::flockfile: case LibFunc::funlockfile: case LibFunc::ftrylockfile: if (FTy->getNumParams() == 0 || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); break; case LibFunc::ferror: if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setOnlyReadsMemory(F); break; case LibFunc::fputc: case LibFunc::fstat: case LibFunc::frexp: case LibFunc::frexpf: case LibFunc::frexpl: case LibFunc::fstatvfs: if (FTy->getNumParams() != 2 || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 2); break; case LibFunc::fgets: if (FTy->getNumParams() != 3 || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(2)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 3); break; case LibFunc::fread: if (FTy->getNumParams() != 4 || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(3)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setDoesNotCapture(F, 4); break; case LibFunc::fwrite: if (FTy->getNumParams() != 4 || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(3)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setDoesNotCapture(F, 4); break; case LibFunc::fputs: if (FTy->getNumParams() < 2 || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 1); break; case LibFunc::fscanf: case LibFunc::fprintf: if (FTy->getNumParams() < 2 || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 2); break; case LibFunc::fgetpos: if (FTy->getNumParams() < 2 || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setDoesNotCapture(F, 2); break; case LibFunc::getc: case LibFunc::getlogin_r: case LibFunc::getc_unlocked: if (FTy->getNumParams() == 0 || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); break; case LibFunc::getenv: if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setOnlyReadsMemory(F); setDoesNotCapture(F, 1); break; case LibFunc::gets: case LibFunc::getchar: setDoesNotThrow(F); break; case LibFunc::getitimer: if (FTy->getNumParams() != 2 || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 2); break; case LibFunc::getpwnam: if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setOnlyReadsMemory(F, 1); break; case LibFunc::ungetc: if (FTy->getNumParams() != 2 || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 2); break; case LibFunc::uname: if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); break; case LibFunc::unlink: if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setOnlyReadsMemory(F, 1); break; case LibFunc::unsetenv: if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) return false; 
setDoesNotThrow(F); setDoesNotCapture(F, 1); setOnlyReadsMemory(F, 1); break; case LibFunc::utime: case LibFunc::utimes: if (FTy->getNumParams() != 2 || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 1); setOnlyReadsMemory(F, 2); break; case LibFunc::putc: if (FTy->getNumParams() != 2 || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 2); break; case LibFunc::puts: case LibFunc::printf: case LibFunc::perror: if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setOnlyReadsMemory(F, 1); break; case LibFunc::pread: if (FTy->getNumParams() != 4 || !FTy->getParamType(1)->isPointerTy()) return false; // May throw; "pread" is a valid pthread cancellation point. setDoesNotCapture(F, 2); break; case LibFunc::pwrite: if (FTy->getNumParams() != 4 || !FTy->getParamType(1)->isPointerTy()) return false; // May throw; "pwrite" is a valid pthread cancellation point. setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 2); break; case LibFunc::putchar: setDoesNotThrow(F); break; case LibFunc::popen: if (FTy->getNumParams() != 2 || !FTy->getReturnType()->isPointerTy() || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotAlias(F, 0); setDoesNotCapture(F, 1); setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 1); setOnlyReadsMemory(F, 2); break; case LibFunc::pclose: if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); break; case LibFunc::vscanf: if (FTy->getNumParams() != 2 || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setOnlyReadsMemory(F, 1); break; case LibFunc::vsscanf: if (FTy->getNumParams() != 3 || !FTy->getParamType(1)->isPointerTy() || !FTy->getParamType(2)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 1); setOnlyReadsMemory(F, 2); break; case LibFunc::vfscanf: if (FTy->getNumParams() != 3 || !FTy->getParamType(1)->isPointerTy() || !FTy->getParamType(2)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 2); break; case LibFunc::valloc: if (!FTy->getReturnType()->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotAlias(F, 0); break; case LibFunc::vprintf: if (FTy->getNumParams() != 2 || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setOnlyReadsMemory(F, 1); break; case LibFunc::vfprintf: case LibFunc::vsprintf: if (FTy->getNumParams() != 3 || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 2); break; case LibFunc::vsnprintf: if (FTy->getNumParams() != 4 || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(2)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setDoesNotCapture(F, 3); setOnlyReadsMemory(F, 3); break; case LibFunc::open: if (FTy->getNumParams() < 2 || !FTy->getParamType(0)->isPointerTy()) return false; // May throw; "open" is a valid pthread cancellation point. 
setDoesNotCapture(F, 1); setOnlyReadsMemory(F, 1); break; case LibFunc::opendir: if (FTy->getNumParams() != 1 || !FTy->getReturnType()->isPointerTy() || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotAlias(F, 0); setDoesNotCapture(F, 1); setOnlyReadsMemory(F, 1); break; case LibFunc::tmpfile: if (!FTy->getReturnType()->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotAlias(F, 0); break; case LibFunc::times: if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); break; case LibFunc::htonl: case LibFunc::htons: case LibFunc::ntohl: case LibFunc::ntohs: setDoesNotThrow(F); setDoesNotAccessMemory(F); break; case LibFunc::lstat: if (FTy->getNumParams() != 2 || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 1); break; case LibFunc::lchown: if (FTy->getNumParams() != 3 || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setOnlyReadsMemory(F, 1); break; case LibFunc::qsort: if (FTy->getNumParams() != 4 || !FTy->getParamType(3)->isPointerTy()) return false; // May throw; places call through function pointer. setDoesNotCapture(F, 4); break; case LibFunc::dunder_strdup: case LibFunc::dunder_strndup: if (FTy->getNumParams() < 1 || !FTy->getReturnType()->isPointerTy() || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotAlias(F, 0); setDoesNotCapture(F, 1); setOnlyReadsMemory(F, 1); break; case LibFunc::dunder_strtok_r: if (FTy->getNumParams() != 3 || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 2); break; case LibFunc::under_IO_getc: if (FTy->getNumParams() != 1 || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); break; case LibFunc::under_IO_putc: if (FTy->getNumParams() != 2 || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 2); break; case LibFunc::dunder_isoc99_scanf: if (FTy->getNumParams() < 1 || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setOnlyReadsMemory(F, 1); break; case LibFunc::stat64: case LibFunc::lstat64: case LibFunc::statvfs64: if (FTy->getNumParams() < 1 || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 1); break; case LibFunc::dunder_isoc99_sscanf: if (FTy->getNumParams() < 1 || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 1); setOnlyReadsMemory(F, 2); break; #if 0 // HLSL Change Starts - Exclude potentially duplicate 64bit versions case LibFunc::fopen64: if (FTy->getNumParams() != 2 || !FTy->getReturnType()->isPointerTy() || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotAlias(F, 0); setDoesNotCapture(F, 1); setDoesNotCapture(F, 2); setOnlyReadsMemory(F, 1); setOnlyReadsMemory(F, 2); break; case LibFunc::fseeko64: case LibFunc::ftello64: if (FTy->getNumParams() == 0 || !FTy->getParamType(0)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 1); break; case 
LibFunc::tmpfile64: if (!FTy->getReturnType()->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotAlias(F, 0); break; #endif // HLSL Change Ends - Exclude potentially duplicate 64bit versions case LibFunc::fstat64: case LibFunc::fstatvfs64: if (FTy->getNumParams() != 2 || !FTy->getParamType(1)->isPointerTy()) return false; setDoesNotThrow(F); setDoesNotCapture(F, 2); break; case LibFunc::open64: if (FTy->getNumParams() < 2 || !FTy->getParamType(0)->isPointerTy()) return false; // May throw; "open" is a valid pthread cancellation point. setDoesNotCapture(F, 1); setOnlyReadsMemory(F, 1); break; case LibFunc::gettimeofday: if (FTy->getNumParams() != 2 || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(1)->isPointerTy()) return false; // Currently some platforms have the restrict keyword on the arguments to // gettimeofday. To be conservative, do not add noalias to gettimeofday's // arguments. setDoesNotThrow(F); setDoesNotCapture(F, 1); setDoesNotCapture(F, 2); break; default: // Didn't mark any attributes. return false; } return true; } /// annotateLibraryCalls - Adds attributes to well-known standard library /// call declarations. bool FunctionAttrs::annotateLibraryCalls(const CallGraphSCC &SCC) { bool MadeChange = false; // Check each function in turn annotating well-known library function // declarations with attributes. for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) { Function *F = (*I)->getFunction(); if (F && F->isDeclaration()) MadeChange |= inferPrototypeAttributes(*F); } return MadeChange; } bool FunctionAttrs::runOnSCC(CallGraphSCC &SCC) { AA = &getAnalysis<AliasAnalysis>(); TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); bool Changed = annotateLibraryCalls(SCC); Changed |= AddReadAttrs(SCC); Changed |= AddArgumentAttrs(SCC); Changed |= AddNoAliasAttrs(SCC); return Changed; }
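// --- Illustrative sketch, not part of FunctionAttrs.cpp -----------------------
// Plain C++ analogues of the attribute deductions implemented above. The
// comments state which attribute the pass would roughly be expected to infer
// for the equivalent LLVM IR definitions; all function names are hypothetical.

#include <cstddef>
#include <cstdio>

// Touches no memory at all: a candidate for `readnone`.
static int add(int a, int b) { return a + b; }

// Only reads through its pointer argument and never stores or returns it:
// a candidate for `readonly` on the function and `readonly` + `nocapture`
// on the argument.
static int sum(const int *p, std::size_t n) {
  int s = 0;
  for (std::size_t i = 0; i < n; ++i)
    s += p[i];
  return s;
}

// Writes through its argument, so the function itself cannot be readonly or
// readnone, but the pointer still does not escape: `nocapture` only.
static void clear(int *p, std::size_t n) {
  for (std::size_t i = 0; i < n; ++i)
    p[i] = 0;
}

// Returns either null or freshly allocated memory: the "malloc-like" shape
// that IsFunctionMallocLike() checks when AddNoAliasAttrs() considers marking
// the return value `noalias`.
static int *make_buffer(std::size_t n) {
  return n ? new int[n]() : nullptr;
}

int main() {
  int buf[4] = {1, 2, 3, 4};
  int total = sum(buf, 4);
  clear(buf, 4);
  int *heap = make_buffer(8);
  std::printf("%d\n", add(total, heap ? heap[0] : 0)); // prints 10
  delete[] heap;
  return 0;
}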
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/IPO/BarrierNoopPass.cpp
//===- BarrierNoopPass.cpp - A barrier pass for the pass manager ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// NOTE: DO NOT USE THIS IF AVOIDABLE
//
// This pass is a nonce pass intended to allow manipulation of the implicitly
// nesting pass manager. For example, it can be used to cause a CGSCC pass
// manager to be closed prior to running a new collection of function passes.
//
// FIXME: This is a huge HACK. This should be removed when the pass manager's
// nesting is made explicit instead of implicit.
//
//===----------------------------------------------------------------------===//

#include "llvm/Pass.h"
#include "llvm/Transforms/IPO.h"

using namespace llvm;

namespace {
/// \brief A nonce module pass used to place a barrier in a pass manager.
///
/// There is no mechanism for ending a CGSCC pass manager once one is started.
/// This prevents extension points from having clear deterministic ordering
/// when they are phrased as non-module passes.
class BarrierNoop : public ModulePass {
public:
  static char ID; // Pass identification.
  BarrierNoop() : ModulePass(ID) {
    initializeBarrierNoopPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override { return false; }
};
}

ModulePass *llvm::createBarrierNoopPass() { return new BarrierNoop(); }

char BarrierNoop::ID = 0;
INITIALIZE_PASS(BarrierNoop, "barrier", "A No-Op Barrier Pass", false, false)
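// --- Illustrative sketch, not part of BarrierNoopPass.cpp ---------------------
// A minimal legacy-pass-manager pipeline showing where the barrier is useful,
// assuming the IPO passes used here are linked in and their dependencies are
// registered. The function name buildExamplePipeline is hypothetical.

#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/IPO.h"

void buildExamplePipeline(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  PM.add(llvm::createFunctionInliningPass()); // implicitly opens a CGSCC pass manager
  PM.add(llvm::createBarrierNoopPass());      // forces that CGSCC manager to close here
  PM.add(llvm::createGlobalDCEPass());        // runs from a fresh module-level position
  PM.run(M);
}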
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/IPO/CMakeLists.txt
set(HLSL_IGNORE_SOURCES ArgumentPromotion.cpp)

add_llvm_library(LLVMipo
  BarrierNoopPass.cpp
  ConstantMerge.cpp
  DeadArgumentElimination.cpp
  ElimAvailExtern.cpp
  ExtractGV.cpp
  FunctionAttrs.cpp
  GlobalDCE.cpp
  GlobalOpt.cpp
  IPConstantPropagation.cpp
  IPO.cpp
  InlineAlways.cpp
  InlineSimple.cpp
  Inliner.cpp
  Internalize.cpp
  LoopExtractor.cpp
  LowerBitSets.cpp
  MergeFunctions.cpp
  PartialInlining.cpp
  PassManagerBuilder.cpp
  PruneEH.cpp
  StripDeadPrototypes.cpp
  StripSymbols.cpp

  ADDITIONAL_HEADER_DIRS
  ${LLVM_MAIN_INCLUDE_DIR}/llvm/Transforms
  ${LLVM_MAIN_INCLUDE_DIR}/llvm/Transforms/IPO
  )

add_dependencies(LLVMipo intrinsics_gen)

target_link_libraries(LLVMipo PUBLIC LLVMDXIL LLVMHLSL) # HLSL Change
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/IPO/StripDeadPrototypes.cpp
//===-- StripDeadPrototypes.cpp - Remove unused function declarations ----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass loops over all of the functions in the input module, looking for
// dead declarations and removes them. Dead declarations are declarations of
// functions for which no implementation is available (i.e., declarations for
// unused library functions).
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"

using namespace llvm;

#define DEBUG_TYPE "strip-dead-prototypes"

STATISTIC(NumDeadPrototypes, "Number of dead prototypes removed");

namespace {

/// @brief Pass to remove unused function declarations.
class StripDeadPrototypesPass : public ModulePass {
public:
  static char ID; // Pass identification, replacement for typeid
  StripDeadPrototypesPass() : ModulePass(ID) {
    initializeStripDeadPrototypesPassPass(*PassRegistry::getPassRegistry());
  }
  bool runOnModule(Module &M) override;
};

} // end anonymous namespace

char StripDeadPrototypesPass::ID = 0;
INITIALIZE_PASS(StripDeadPrototypesPass, "strip-dead-prototypes",
                "Strip Unused Function Prototypes", false, false)

bool StripDeadPrototypesPass::runOnModule(Module &M) {
  bool MadeChange = false;

  // Erase dead function prototypes.
  for (Module::iterator I = M.begin(), E = M.end(); I != E; ) {
    Function *F = I++;
    // Function must be a prototype and unused.
    if (F->isDeclaration() && F->use_empty()) {
      F->eraseFromParent();
      ++NumDeadPrototypes;
      MadeChange = true;
    }
  }

  // Erase dead global var prototypes.
  for (Module::global_iterator I = M.global_begin(), E = M.global_end();
       I != E; ) {
    GlobalVariable *GV = I++;
    // Global must be a prototype and unused.
    if (GV->isDeclaration() && GV->use_empty())
      GV->eraseFromParent();
  }

  // Return an indication of whether we changed anything or not.
  return MadeChange;
}

ModulePass *llvm::createStripDeadPrototypesPass() {
  return new StripDeadPrototypesPass();
}
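// --- Illustrative sketch, not part of StripDeadPrototypes.cpp -----------------
// Running the pass over a freshly built module through the legacy pass manager,
// assuming LLVM headers and libraries are available. The module name and the
// declaration name "never_called" are hypothetical.

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/IPO.h"

int main() {
  llvm::LLVMContext Ctx;
  llvm::Module M("example", Ctx);

  // An unused declaration: it has no body and no uses, so the pass erases it.
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx), /*isVarArg=*/false);
  M.getOrInsertFunction("never_called", FTy);

  llvm::legacy::PassManager PM;
  PM.add(llvm::createStripDeadPrototypesPass());
  PM.run(M);

  // The prototype is gone after the pass has run.
  return M.getFunction("never_called") == nullptr ? 0 : 1;
}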
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/IPO/Inliner.cpp
//===- Inliner.cpp - Code common to all inliners --------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the mechanics required to implement inlining without // missing any calls and updating the call graph. The decisions of which calls // are profitable to inline are implemented elsewhere. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/IPO/InlinerPass.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/CallGraph.h" #include "llvm/Analysis/InlineCost.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DiagnosticInfo.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/Cloning.h" #include "llvm/Transforms/Utils/Local.h" using namespace llvm; #define DEBUG_TYPE "inline" STATISTIC(NumInlined, "Number of functions inlined"); STATISTIC(NumCallsDeleted, "Number of call sites deleted, not inlined"); STATISTIC(NumDeleted, "Number of functions deleted because all callers found"); // STATISTIC(NumMergedAllocas, "Number of allocas merged together"); // HLSL Change - unused // This weirdly named statistic tracks the number of times that, when attempting // to inline a function A into B, we analyze the callers of B in order to see // if those would be more profitable and blocked inline steps. STATISTIC(NumCallerCallersAnalyzed, "Number of caller-callers analyzed"); #if 0 // HLSL Change Starts static cl::opt<int> InlineLimit("inline-threshold", cl::Hidden, cl::init(225), cl::ZeroOrMore, cl::desc("Control the amount of inlining to perform (default = 225)")); static cl::opt<int> HintThreshold("inlinehint-threshold", cl::Hidden, cl::init(325), cl::desc("Threshold for inlining functions with inline hint")); // We instroduce this threshold to help performance of instrumentation based // PGO before we actually hook up inliner with analysis passes such as BPI and // BFI. static cl::opt<int> ColdThreshold("inlinecold-threshold", cl::Hidden, cl::init(225), cl::desc("Threshold for inlining functions with cold attribute")); #else struct NullOpt { NullOpt(int val) : _val(val) {} int _val; int getNumOccurrences() const { return 0; } operator int() const { return _val; } }; static const NullOpt InlineLimit(225); static const NullOpt HintThreshold(325); static const NullOpt ColdThreshold(225); #endif // HLSL Change Ends // Threshold to use when optsize is specified (and there is no -inline-limit). const int OptSizeThreshold = 75; Inliner::Inliner(char &ID) : CallGraphSCCPass(ID), InlineThreshold(InlineLimit), InsertLifetime(true) {} Inliner::Inliner(char &ID, int Threshold, bool InsertLifetime) : CallGraphSCCPass(ID), InlineThreshold(InlineLimit.getNumOccurrences() > 0 ? unsigned(InlineLimit) : Threshold), InsertLifetime(InsertLifetime) {} /// For this class, we declare that we require and preserve the call graph. /// If the derived class implements this method, it should /// always explicitly call the implementation here. 
void Inliner::getAnalysisUsage(AnalysisUsage &AU) const { AU.addRequired<AliasAnalysis>(); AU.addRequired<AssumptionCacheTracker>(); CallGraphSCCPass::getAnalysisUsage(AU); } typedef DenseMap<ArrayType*, std::vector<AllocaInst*> > InlinedArrayAllocasTy; /// \brief If the inlined function had a higher stack protection level than the /// calling function, then bump up the caller's stack protection level. static void AdjustCallerSSPLevel(Function *Caller, Function *Callee) { // If upgrading the SSP attribute, clear out the old SSP Attributes first. // Having multiple SSP attributes doesn't actually hurt, but it adds useless // clutter to the IR. AttrBuilder B; B.addAttribute(Attribute::StackProtect) .addAttribute(Attribute::StackProtectStrong) .addAttribute(Attribute::StackProtectReq); AttributeSet OldSSPAttr = AttributeSet::get(Caller->getContext(), AttributeSet::FunctionIndex, B); if (Callee->hasFnAttribute(Attribute::SafeStack)) { Caller->removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr); Caller->addFnAttr(Attribute::SafeStack); } else if (Callee->hasFnAttribute(Attribute::StackProtectReq) && !Caller->hasFnAttribute(Attribute::SafeStack)) { Caller->removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr); Caller->addFnAttr(Attribute::StackProtectReq); } else if (Callee->hasFnAttribute(Attribute::StackProtectStrong) && !Caller->hasFnAttribute(Attribute::SafeStack) && !Caller->hasFnAttribute(Attribute::StackProtectReq)) { Caller->removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr); Caller->addFnAttr(Attribute::StackProtectStrong); } else if (Callee->hasFnAttribute(Attribute::StackProtect) && !Caller->hasFnAttribute(Attribute::SafeStack) && !Caller->hasFnAttribute(Attribute::StackProtectReq) && !Caller->hasFnAttribute(Attribute::StackProtectStrong)) Caller->addFnAttr(Attribute::StackProtect); } /// If it is possible to inline the specified call site, /// do so and update the CallGraph for this operation. /// /// This function also does some basic book-keeping to update the IR. The /// InlinedArrayAllocas map keeps track of any allocas that are already /// available from other functions inlined into the caller. If we are able to /// inline this call site we attempt to reuse already available allocas or add /// any new allocas to the set if not possible. static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI, InlinedArrayAllocasTy &InlinedArrayAllocas, int InlineHistory, bool InsertLifetime) { Function *Callee = CS.getCalledFunction(); Function *Caller = CS.getCaller(); // Try to inline the function. Get the list of static allocas that were // inlined. if (!InlineFunction(CS, IFI, InsertLifetime)) return false; AdjustCallerSSPLevel(Caller, Callee); // HLSL Change Begin- not merge allocas. // Merge alloca will make alloca which has one def become multi def. // SROA will fail to remove the merged allocas. return true; // HLSL Change End. #if 0 // HLSL Change - disable unused code. // Look at all of the allocas that we inlined through this call site. If we // have already inlined other allocas through other calls into this function, // then we know that they have disjoint lifetimes and that we can merge them. // // There are many heuristics possible for merging these allocas, and the // different options have different tradeoffs. One thing that we *really* // don't want to hurt is SRoA: once inlining happens, often allocas are no // longer address taken and so they can be promoted. 
// // Our "solution" for that is to only merge allocas whose outermost type is an // array type. These are usually not promoted because someone is using a // variable index into them. These are also often the most important ones to // merge. // // A better solution would be to have real memory lifetime markers in the IR // and not have the inliner do any merging of allocas at all. This would // allow the backend to do proper stack slot coloring of all allocas that // *actually make it to the backend*, which is really what we want. // // Because we don't have this information, we do this simple and useful hack. // SmallPtrSet<AllocaInst*, 16> UsedAllocas; // When processing our SCC, check to see if CS was inlined from some other // call site. For example, if we're processing "A" in this code: // A() { B() } // B() { x = alloca ... C() } // C() { y = alloca ... } // Assume that C was not inlined into B initially, and so we're processing A // and decide to inline B into A. Doing this makes an alloca available for // reuse and makes a callsite (C) available for inlining. When we process // the C call site we don't want to do any alloca merging between X and Y // because their scopes are not disjoint. We could make this smarter by // keeping track of the inline history for each alloca in the // InlinedArrayAllocas but this isn't likely to be a significant win. if (InlineHistory != -1) // Only do merging for top-level call sites in SCC. return true; // Loop over all the allocas we have so far and see if they can be merged with // a previously inlined alloca. If not, remember that we had it. for (unsigned AllocaNo = 0, e = IFI.StaticAllocas.size(); AllocaNo != e; ++AllocaNo) { AllocaInst *AI = IFI.StaticAllocas[AllocaNo]; // Don't bother trying to merge array allocations (they will usually be // canonicalized to be an allocation *of* an array), or allocations whose // type is not itself an array (because we're afraid of pessimizing SRoA). ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType()); if (!ATy || AI->isArrayAllocation()) continue; // Get the list of all available allocas for this array type. std::vector<AllocaInst*> &AllocasForType = InlinedArrayAllocas[ATy]; // Loop over the allocas in AllocasForType to see if we can reuse one. Note // that we have to be careful not to reuse the same "available" alloca for // multiple different allocas that we just inlined, we use the 'UsedAllocas' // set to keep track of which "available" allocas are being used by this // function. Also, AllocasForType can be empty of course! bool MergedAwayAlloca = false; for (AllocaInst *AvailableAlloca : AllocasForType) { unsigned Align1 = AI->getAlignment(), Align2 = AvailableAlloca->getAlignment(); // The available alloca has to be in the right function, not in some other // function in this SCC. if (AvailableAlloca->getParent() != AI->getParent()) continue; // If the inlined function already uses this alloca then we can't reuse // it. if (!UsedAllocas.insert(AvailableAlloca).second) continue; // Otherwise, we *can* reuse it, RAUW AI into AvailableAlloca and declare // success! DEBUG(dbgs() << " ***MERGED ALLOCA: " << *AI << "\n\t\tINTO: " << *AvailableAlloca << '\n'); AI->replaceAllUsesWith(AvailableAlloca); if (Align1 != Align2) { if (!Align1 || !Align2) { const DataLayout &DL = Caller->getParent()->getDataLayout(); unsigned TypeAlign = DL.getABITypeAlignment(AI->getAllocatedType()); Align1 = Align1 ? Align1 : TypeAlign; Align2 = Align2 ? 
Align2 : TypeAlign; } if (Align1 > Align2) AvailableAlloca->setAlignment(AI->getAlignment()); } AI->eraseFromParent(); MergedAwayAlloca = true; ++NumMergedAllocas; IFI.StaticAllocas[AllocaNo] = nullptr; break; } // If we already nuked the alloca, we're done with it. if (MergedAwayAlloca) continue; // If we were unable to merge away the alloca either because there are no // allocas of the right type available or because we reused them all // already, remember that this alloca came from an inlined function and mark // it used so we don't reuse it for other allocas from this inline // operation. AllocasForType.push_back(AI); UsedAllocas.insert(AI); } return true; #endif } unsigned Inliner::getInlineThreshold(CallSite CS) const { int thres = InlineThreshold; // -inline-threshold or else selected by // overall opt level // If -inline-threshold is not given, listen to the optsize attribute when it // would decrease the threshold. Function *Caller = CS.getCaller(); bool OptSize = Caller && !Caller->isDeclaration() && Caller->hasFnAttribute(Attribute::OptimizeForSize); if (!(InlineLimit.getNumOccurrences() > 0) && OptSize && OptSizeThreshold < thres) thres = OptSizeThreshold; // Listen to the inlinehint attribute when it would increase the threshold // and the caller does not need to minimize its size. Function *Callee = CS.getCalledFunction(); bool InlineHint = Callee && !Callee->isDeclaration() && Callee->hasFnAttribute(Attribute::InlineHint); if (InlineHint && HintThreshold > thres && !Caller->hasFnAttribute(Attribute::MinSize)) thres = HintThreshold; // Listen to the cold attribute when it would decrease the threshold. bool ColdCallee = Callee && !Callee->isDeclaration() && Callee->hasFnAttribute(Attribute::Cold); // Command line argument for InlineLimit will override the default // ColdThreshold. If we have -inline-threshold but no -inlinecold-threshold, // do not use the default cold threshold even if it is smaller. if ((InlineLimit.getNumOccurrences() == 0 || ColdThreshold.getNumOccurrences() > 0) && ColdCallee && ColdThreshold < thres) thres = ColdThreshold; return thres; } static void emitAnalysis(CallSite CS, const Twine &Msg) { Function *Caller = CS.getCaller(); LLVMContext &Ctx = Caller->getContext(); DebugLoc DLoc = CS.getInstruction()->getDebugLoc(); emitOptimizationRemarkAnalysis(Ctx, DEBUG_TYPE, *Caller, DLoc, Msg); } /// Return true if the inliner should attempt to inline at the given CallSite. 
bool Inliner::shouldInline(CallSite CS) { InlineCost IC = getInlineCost(CS); if (IC.isAlways()) { DEBUG(dbgs() << " Inlining: cost=always" << ", Call: " << *CS.getInstruction() << "\n"); emitAnalysis(CS, Twine(CS.getCalledFunction()->getName()) + " should always be inlined (cost=always)"); return true; } if (IC.isNever()) { DEBUG(dbgs() << " NOT Inlining: cost=never" << ", Call: " << *CS.getInstruction() << "\n"); emitAnalysis(CS, Twine(CS.getCalledFunction()->getName() + " should never be inlined (cost=never)")); return false; } Function *Caller = CS.getCaller(); if (!IC) { DEBUG(dbgs() << " NOT Inlining: cost=" << IC.getCost() << ", thres=" << (IC.getCostDelta() + IC.getCost()) << ", Call: " << *CS.getInstruction() << "\n"); emitAnalysis(CS, Twine(CS.getCalledFunction()->getName() + " too costly to inline (cost=") + Twine(IC.getCost()) + ", threshold=" + Twine(IC.getCostDelta() + IC.getCost()) + ")"); return false; } // Try to detect the case where the current inlining candidate caller (call // it B) is a static or linkonce-ODR function and is an inlining candidate // elsewhere, and the current candidate callee (call it C) is large enough // that inlining it into B would make B too big to inline later. In these // circumstances it may be best not to inline C into B, but to inline B into // its callers. // // This only applies to static and linkonce-ODR functions because those are // expected to be available for inlining in the translation units where they // are used. Thus we will always have the opportunity to make local inlining // decisions. Importantly the linkonce-ODR linkage covers inline functions // and templates in C++. // // FIXME: All of this logic should be sunk into getInlineCost. It relies on // the internal implementation of the inline cost metrics rather than // treating them as truly abstract units etc. if (Caller->hasLocalLinkage() || Caller->hasLinkOnceODRLinkage()) { int TotalSecondaryCost = 0; // The candidate cost to be imposed upon the current function. int CandidateCost = IC.getCost() - (InlineConstants::CallPenalty + 1); // This bool tracks what happens if we do NOT inline C into B. bool callerWillBeRemoved = Caller->hasLocalLinkage(); // This bool tracks what happens if we DO inline C into B. bool inliningPreventsSomeOuterInline = false; for (User *U : Caller->users()) { CallSite CS2(U); // If this isn't a call to Caller (it could be some other sort // of reference) skip it. Such references will prevent the caller // from being removed. if (!CS2 || CS2.getCalledFunction() != Caller) { callerWillBeRemoved = false; continue; } InlineCost IC2 = getInlineCost(CS2); ++NumCallerCallersAnalyzed; if (!IC2) { callerWillBeRemoved = false; continue; } if (IC2.isAlways()) continue; // See if inlining or original callsite would erase the cost delta of // this callsite. We subtract off the penalty for the call instruction, // which we would be deleting. if (IC2.getCostDelta() <= CandidateCost) { inliningPreventsSomeOuterInline = true; TotalSecondaryCost += IC2.getCost(); } } // If all outer calls to Caller would get inlined, the cost for the last // one is set very low by getInlineCost, in anticipation that Caller will // be removed entirely. We did not account for this above unless there // is only one caller of Caller. 
if (callerWillBeRemoved && !Caller->use_empty()) TotalSecondaryCost += InlineConstants::LastCallToStaticBonus; if (inliningPreventsSomeOuterInline && TotalSecondaryCost < IC.getCost()) { DEBUG(dbgs() << " NOT Inlining: " << *CS.getInstruction() << " Cost = " << IC.getCost() << ", outer Cost = " << TotalSecondaryCost << '\n'); emitAnalysis( CS, Twine("Not inlining. Cost of inlining " + CS.getCalledFunction()->getName() + " increases the cost of inlining " + CS.getCaller()->getName() + " in other contexts")); return false; } } DEBUG(dbgs() << " Inlining: cost=" << IC.getCost() << ", thres=" << (IC.getCostDelta() + IC.getCost()) << ", Call: " << *CS.getInstruction() << '\n'); emitAnalysis( CS, CS.getCalledFunction()->getName() + Twine(" can be inlined into ") + CS.getCaller()->getName() + " with cost=" + Twine(IC.getCost()) + " (threshold=" + Twine(IC.getCostDelta() + IC.getCost()) + ")"); return true; } /// Return true if the specified inline history ID /// indicates an inline history that includes the specified function. static bool InlineHistoryIncludes(Function *F, int InlineHistoryID, const SmallVectorImpl<std::pair<Function*, int> > &InlineHistory) { while (InlineHistoryID != -1) { assert(unsigned(InlineHistoryID) < InlineHistory.size() && "Invalid inline history ID"); if (InlineHistory[InlineHistoryID].first == F) return true; InlineHistoryID = InlineHistory[InlineHistoryID].second; } return false; } bool Inliner::runOnSCC(CallGraphSCC &SCC) { CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph(); AssumptionCacheTracker *ACT = &getAnalysis<AssumptionCacheTracker>(); auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); const TargetLibraryInfo *TLI = TLIP ? &TLIP->getTLI() : nullptr; AliasAnalysis *AA = &getAnalysis<AliasAnalysis>(); SmallPtrSet<Function*, 8> SCCFunctions; DEBUG(dbgs() << "Inliner visiting SCC:"); for (CallGraphNode *Node : SCC) { Function *F = Node->getFunction(); if (F) SCCFunctions.insert(F); DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE")); } // Scan through and identify all call sites ahead of time so that we only // inline call sites in the original functions, not call sites that result // from inlining other functions. SmallVector<std::pair<CallSite, int>, 16> CallSites; // When inlining a callee produces new call sites, we want to keep track of // the fact that they were inlined from the callee. This allows us to avoid // infinite inlining in some obscure cases. To represent this, we use an // index into the InlineHistory vector. SmallVector<std::pair<Function*, int>, 8> InlineHistory; for (CallGraphNode *Node : SCC) { Function *F = Node->getFunction(); if (!F) continue; for (BasicBlock &BB : *F) for (Instruction &I : BB) { CallSite CS(cast<Value>(&I)); // If this isn't a call, or it is a call to an intrinsic, it can // never be inlined. if (!CS || isa<IntrinsicInst>(I)) continue; // If this is a direct call to an external function, we can never inline // it. If it is an indirect call, inlining may resolve it to be a // direct call, so we keep it. if (CS.getCalledFunction() && CS.getCalledFunction()->isDeclaration()) continue; CallSites.push_back(std::make_pair(CS, -1)); } } DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n"); // If there are no calls in this function, exit early. if (CallSites.empty()) return false; // Now that we have all of the call sites, move the ones to functions in the // current SCC to the end of the list. 
unsigned FirstCallInSCC = CallSites.size(); for (unsigned i = 0; i < FirstCallInSCC; ++i) if (Function *F = CallSites[i].first.getCalledFunction()) if (SCCFunctions.count(F)) std::swap(CallSites[i--], CallSites[--FirstCallInSCC]); InlinedArrayAllocasTy InlinedArrayAllocas; InlineFunctionInfo InlineInfo(&CG, AA, ACT); // Now that we have all of the call sites, loop over them and inline them if // it looks profitable to do so. bool Changed = false; bool LocalChange; do { LocalChange = false; // Iterate over the outer loop because inlining functions can cause indirect // calls to become direct calls. // CallSites may be modified inside so ranged for loop can not be used. for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) { CallSite CS = CallSites[CSi].first; Function *Caller = CS.getCaller(); Function *Callee = CS.getCalledFunction(); // If this call site is dead and it is to a readonly function, we should // just delete the call instead of trying to inline it, regardless of // size. This happens because IPSCCP propagates the result out of the // call and then we're left with the dead call. if (isInstructionTriviallyDead(CS.getInstruction(), TLI)) { DEBUG(dbgs() << " -> Deleting dead call: " << *CS.getInstruction() << "\n"); // Update the call graph by deleting the edge from Callee to Caller. CG[Caller]->removeCallEdgeFor(CS); CS.getInstruction()->eraseFromParent(); ++NumCallsDeleted; } else { // We can only inline direct calls to non-declarations. if (!Callee || Callee->isDeclaration()) continue; // If this call site was obtained by inlining another function, verify // that the include path for the function did not include the callee // itself. If so, we'd be recursively inlining the same function, // which would provide the same callsites, which would cause us to // infinitely inline. int InlineHistoryID = CallSites[CSi].second; if (InlineHistoryID != -1 && InlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory)) continue; LLVMContext &CallerCtx = Caller->getContext(); // Get DebugLoc to report. CS will be invalid after Inliner. DebugLoc DLoc = CS.getInstruction()->getDebugLoc(); // If the policy determines that we should inline this function, // try to do so. if (!shouldInline(CS)) { emitOptimizationRemarkMissed(CallerCtx, DEBUG_TYPE, *Caller, DLoc, Twine(Callee->getName() + " will not be inlined into " + Caller->getName())); continue; } // Attempt to inline the function. if (!InlineCallIfPossible(CS, InlineInfo, InlinedArrayAllocas, InlineHistoryID, InsertLifetime)) { emitOptimizationRemarkMissed(CallerCtx, DEBUG_TYPE, *Caller, DLoc, Twine(Callee->getName() + " will not be inlined into " + Caller->getName())); continue; } ++NumInlined; // Report the inline decision. emitOptimizationRemark( CallerCtx, DEBUG_TYPE, *Caller, DLoc, Twine(Callee->getName() + " inlined into " + Caller->getName())); // If inlining this function gave us any new call sites, throw them // onto our worklist to process. They are useful inline candidates. if (!InlineInfo.InlinedCalls.empty()) { // Create a new inline history entry for this, so that we remember // that these new callsites came about due to inlining Callee. int NewHistoryID = InlineHistory.size(); InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID)); for (Value *Ptr : InlineInfo.InlinedCalls) CallSites.push_back(std::make_pair(CallSite(Ptr), NewHistoryID)); } } // If we inlined or deleted the last possible call site to the function, // delete the function body now. 
if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() && // TODO: Can remove if in SCC now. !SCCFunctions.count(Callee) && // The function may be apparently dead, but if there are indirect // callgraph references to the node, we cannot delete it yet, this // could invalidate the CGSCC iterator. CG[Callee]->getNumReferences() == 0) { DEBUG(dbgs() << " -> Deleting dead function: " << Callee->getName() << "\n"); CallGraphNode *CalleeNode = CG[Callee]; // Remove any call graph edges from the callee to its callees. CalleeNode->removeAllCalledFunctions(); // Removing the node for callee from the call graph and delete it. delete CG.removeFunctionFromModule(CalleeNode); ++NumDeleted; } // Remove this call site from the list. If possible, use // swap/pop_back for efficiency, but do not use it if doing so would // move a call site to a function in this SCC before the // 'FirstCallInSCC' barrier. if (SCC.isSingular()) { CallSites[CSi] = CallSites.back(); CallSites.pop_back(); } else { CallSites.erase(CallSites.begin()+CSi); } --CSi; Changed = true; LocalChange = true; } } while (LocalChange); return Changed; } /// Remove now-dead linkonce functions at the end of /// processing to avoid breaking the SCC traversal. bool Inliner::doFinalization(CallGraph &CG) { return removeDeadFunctions(CG); } /// Remove dead functions that are not included in DNR (Do Not Remove) list. bool Inliner::removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly) { SmallVector<CallGraphNode*, 16> FunctionsToRemove; SmallVector<CallGraphNode *, 16> DeadFunctionsInComdats; SmallDenseMap<const Comdat *, int, 16> ComdatEntriesAlive; auto RemoveCGN = [&](CallGraphNode *CGN) { // Remove any call graph edges from the function to its callees. CGN->removeAllCalledFunctions(); // Remove any edges from the external node to the function's call graph // node. These edges might have been made irrelegant due to // optimization of the program. CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN); // Removing the node for callee from the call graph and delete it. FunctionsToRemove.push_back(CGN); }; // Scan for all of the functions, looking for ones that should now be removed // from the program. Insert the dead ones in the FunctionsToRemove set. for (const auto &I : CG) { CallGraphNode *CGN = I.second.get(); Function *F = CGN->getFunction(); if (!F || F->isDeclaration()) continue; // Handle the case when this function is called and we only want to care // about always-inline functions. This is a bit of a hack to share code // between here and the InlineAlways pass. if (AlwaysInlineOnly && !F->hasFnAttribute(Attribute::AlwaysInline)) continue; // If the only remaining users of the function are dead constants, remove // them. F->removeDeadConstantUsers(); if (!F->isDefTriviallyDead()) continue; // It is unsafe to drop a function with discardable linkage from a COMDAT // without also dropping the other members of the COMDAT. // The inliner doesn't visit non-function entities which are in COMDAT // groups so it is unsafe to do so *unless* the linkage is local. 
if (!F->hasLocalLinkage()) { if (const Comdat *C = F->getComdat()) { --ComdatEntriesAlive[C]; DeadFunctionsInComdats.push_back(CGN); continue; } } RemoveCGN(CGN); } if (!DeadFunctionsInComdats.empty()) { // Count up all the entities in COMDAT groups auto ComdatGroupReferenced = [&](const Comdat *C) { auto I = ComdatEntriesAlive.find(C); if (I != ComdatEntriesAlive.end()) ++(I->getSecond()); }; for (const Function &F : CG.getModule()) if (const Comdat *C = F.getComdat()) ComdatGroupReferenced(C); for (const GlobalVariable &GV : CG.getModule().globals()) if (const Comdat *C = GV.getComdat()) ComdatGroupReferenced(C); for (const GlobalAlias &GA : CG.getModule().aliases()) if (const Comdat *C = GA.getComdat()) ComdatGroupReferenced(C); for (CallGraphNode *CGN : DeadFunctionsInComdats) { Function *F = CGN->getFunction(); const Comdat *C = F->getComdat(); int NumAlive = ComdatEntriesAlive[C]; // We can remove functions in a COMDAT group if the entire group is dead. assert(NumAlive >= 0); if (NumAlive > 0) continue; RemoveCGN(CGN); } } if (FunctionsToRemove.empty()) return false; // Now that we know which functions to delete, do so. We didn't want to do // this inline, because that would invalidate our CallGraph::iterator // objects. :( // // Note that it doesn't matter that we are iterating over a non-stable order // here to do this, it doesn't matter which order the functions are deleted // in. array_pod_sort(FunctionsToRemove.begin(), FunctionsToRemove.end()); FunctionsToRemove.erase(std::unique(FunctionsToRemove.begin(), FunctionsToRemove.end()), FunctionsToRemove.end()); for (CallGraphNode *CGN : FunctionsToRemove) { delete CG.removeFunctionFromModule(CGN); ++NumDeleted; } return true; }
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/IPO/ConstantMerge.cpp
//===- ConstantMerge.cpp - Merge duplicate global constants ---------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the interface to a pass that merges duplicate global // constants together into a single constant that is shared. This is useful // because some passes (ie TraceValues) insert a lot of string constants into // the program, regardless of whether or not an existing string is available. // // Algorithm: ConstantMerge is designed to build up a map of available constants // and eliminate duplicates when it is initialized. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/IPO.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/Statistic.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Module.h" #include "llvm/IR/Operator.h" #include "llvm/Pass.h" using namespace llvm; #define DEBUG_TYPE "constmerge" STATISTIC(NumMerged, "Number of global constants merged"); namespace { struct ConstantMerge : public ModulePass { static char ID; // Pass identification, replacement for typeid ConstantMerge() : ModulePass(ID) { initializeConstantMergePass(*PassRegistry::getPassRegistry()); } // For this pass, process all of the globals in the module, eliminating // duplicate constants. bool runOnModule(Module &M) override; // Return true iff we can determine the alignment of this global variable. bool hasKnownAlignment(GlobalVariable *GV) const; // Return the alignment of the global, including converting the default // alignment to a concrete value. unsigned getAlignment(GlobalVariable *GV) const; }; } char ConstantMerge::ID = 0; INITIALIZE_PASS(ConstantMerge, "constmerge", "Merge Duplicate Global Constants", false, false) ModulePass *llvm::createConstantMergePass() { return new ConstantMerge(); } /// Find values that are marked as llvm.used. static void FindUsedValues(GlobalVariable *LLVMUsed, SmallPtrSetImpl<const GlobalValue*> &UsedValues) { if (!LLVMUsed) return; ConstantArray *Inits = cast<ConstantArray>(LLVMUsed->getInitializer()); for (unsigned i = 0, e = Inits->getNumOperands(); i != e; ++i) { Value *Operand = Inits->getOperand(i)->stripPointerCastsNoFollowAliases(); GlobalValue *GV = cast<GlobalValue>(Operand); UsedValues.insert(GV); } } // True if A is better than B. static bool IsBetterCanonical(const GlobalVariable &A, const GlobalVariable &B) { if (!A.hasLocalLinkage() && B.hasLocalLinkage()) return true; if (A.hasLocalLinkage() && !B.hasLocalLinkage()) return false; return A.hasUnnamedAddr(); } unsigned ConstantMerge::getAlignment(GlobalVariable *GV) const { unsigned Align = GV->getAlignment(); if (Align) return Align; return GV->getParent()->getDataLayout().getPreferredAlignment(GV); } bool ConstantMerge::runOnModule(Module &M) { // Find all the globals that are marked "used". These cannot be merged. SmallPtrSet<const GlobalValue*, 8> UsedGlobals; FindUsedValues(M.getGlobalVariable("llvm.used"), UsedGlobals); FindUsedValues(M.getGlobalVariable("llvm.compiler.used"), UsedGlobals); // Map unique constants to globals. DenseMap<Constant *, GlobalVariable *> CMap; // Replacements - This vector contains a list of replacements to perform. 
SmallVector<std::pair<GlobalVariable*, GlobalVariable*>, 32> Replacements; bool MadeChange = false; // Iterate constant merging while we are still making progress. Merging two // constants together may allow us to merge other constants together if the // second level constants have initializers which point to the globals that // were just merged. while (1) { // First: Find the canonical constants others will be merged with. for (Module::global_iterator GVI = M.global_begin(), E = M.global_end(); GVI != E; ) { GlobalVariable *GV = GVI++; // If this GV is dead, remove it. GV->removeDeadConstantUsers(); if (GV->use_empty() && GV->hasLocalLinkage()) { GV->eraseFromParent(); continue; } // Only process constants with initializers in the default address space. if (!GV->isConstant() || !GV->hasDefinitiveInitializer() || GV->getType()->getAddressSpace() != 0 || GV->hasSection() || // Don't touch values marked with attribute(used). UsedGlobals.count(GV)) continue; // This transformation is legal for weak ODR globals in the sense it // doesn't change semantics, but we really don't want to perform it // anyway; it's likely to pessimize code generation, and some tools // (like the Darwin linker in cases involving CFString) don't expect it. if (GV->isWeakForLinker()) continue; Constant *Init = GV->getInitializer(); // Check to see if the initializer is already known. GlobalVariable *&Slot = CMap[Init]; // If this is the first constant we find or if the old one is local, // replace with the current one. If the current is externally visible // it cannot be replace, but can be the canonical constant we merge with. if (!Slot || IsBetterCanonical(*GV, *Slot)) Slot = GV; } // Second: identify all globals that can be merged together, filling in // the Replacements vector. We cannot do the replacement in this pass // because doing so may cause initializers of other globals to be rewritten, // invalidating the Constant* pointers in CMap. for (Module::global_iterator GVI = M.global_begin(), E = M.global_end(); GVI != E; ) { GlobalVariable *GV = GVI++; // Only process constants with initializers in the default address space. if (!GV->isConstant() || !GV->hasDefinitiveInitializer() || GV->getType()->getAddressSpace() != 0 || GV->hasSection() || // Don't touch values marked with attribute(used). UsedGlobals.count(GV)) continue; // We can only replace constant with local linkage. if (!GV->hasLocalLinkage()) continue; Constant *Init = GV->getInitializer(); // Check to see if the initializer is already known. GlobalVariable *Slot = CMap[Init]; if (!Slot || Slot == GV) continue; if (!Slot->hasUnnamedAddr() && !GV->hasUnnamedAddr()) continue; if (!GV->hasUnnamedAddr()) Slot->setUnnamedAddr(false); // Make all uses of the duplicate constant use the canonical version. Replacements.push_back(std::make_pair(GV, Slot)); } if (Replacements.empty()) return MadeChange; CMap.clear(); // Now that we have figured out which replacements must be made, do them all // now. This avoid invalidating the pointers in CMap, which are unneeded // now. for (unsigned i = 0, e = Replacements.size(); i != e; ++i) { // Bump the alignment if necessary. if (Replacements[i].first->getAlignment() || Replacements[i].second->getAlignment()) { Replacements[i].second->setAlignment( std::max(getAlignment(Replacements[i].first), getAlignment(Replacements[i].second))); } // Eliminate any uses of the dead global. Replacements[i].first->replaceAllUsesWith(Replacements[i].second); // Delete the global value from the module. 
assert(Replacements[i].first->hasLocalLinkage() && "Refusing to delete an externally visible global variable."); Replacements[i].first->eraseFromParent(); } NumMerged += Replacements.size(); Replacements.clear(); } }
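The merging loop above works in two phases per iteration: first pick a canonical global for each distinct initializer (preferring externally visible ones), then queue replacements for local-linkage duplicates and apply them afterwards so the Constant* keys in CMap are not invalidated mid-scan. A small standalone sketch of that idea, using an ordinary std::map over toy globals rather than the LLVM API (ToyGlobal is an assumption for illustration):

// --- illustrative sketch, not part of the original file ---
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Toy stand-in for a global constant: name, initializer bytes, and whether
// it has local (internal) linkage; only local duplicates may be replaced.
struct ToyGlobal {
  std::string Name;
  std::string Init;
  bool Local;
};

int main() {
  std::vector<ToyGlobal> Globals = {
      {"str.ext", "hello", false},
      {"str.1",   "hello", true},
      {"str.2",   "world", true},
      {"str.3",   "world", true},
  };

  // Phase 1: pick a canonical global per initializer, preferring the
  // non-local one so externally visible symbols are never removed.
  std::map<std::string, const ToyGlobal *> Canonical;
  for (const ToyGlobal &G : Globals) {
    const ToyGlobal *&Slot = Canonical[G.Init];
    if (!Slot || (Slot->Local && !G.Local))
      Slot = &G;
  }

  // Phase 2: record replacements for local duplicates; the real pass defers
  // the actual RAUW for the same reason it defers here.
  std::vector<std::pair<std::string, std::string>> Replacements;
  for (const ToyGlobal &G : Globals) {
    const ToyGlobal *Slot = Canonical[G.Init];
    if (G.Local && Slot != &G)
      Replacements.push_back({G.Name, Slot->Name});
  }

  for (auto &R : Replacements)
    std::cout << R.first << " -> " << R.second << "\n";
}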
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/IPO/ElimAvailExtern.cpp
//===-- ElimAvailExtern.cpp - DCE unreachable internal functions ----------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This transform is designed to eliminate available external global // definitions from the program, turning them into declarations. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/IPO.h" #include "llvm/ADT/Statistic.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/Transforms/Utils/CtorUtils.h" #include "llvm/Transforms/Utils/GlobalStatus.h" #include "llvm/Pass.h" using namespace llvm; #define DEBUG_TYPE "elim-avail-extern" STATISTIC(NumFunctions, "Number of functions removed"); STATISTIC(NumVariables, "Number of global variables removed"); namespace { struct EliminateAvailableExternally : public ModulePass { static char ID; // Pass identification, replacement for typeid EliminateAvailableExternally() : ModulePass(ID) { initializeEliminateAvailableExternallyPass( *PassRegistry::getPassRegistry()); } // run - Do the EliminateAvailableExternally pass on the specified module, // optionally updating the specified callgraph to reflect the changes. // bool runOnModule(Module &M) override; }; } char EliminateAvailableExternally::ID = 0; INITIALIZE_PASS(EliminateAvailableExternally, "elim-avail-extern", "Eliminate Available Externally Globals", false, false) ModulePass *llvm::createEliminateAvailableExternallyPass() { return new EliminateAvailableExternally(); } bool EliminateAvailableExternally::runOnModule(Module &M) { bool Changed = false; // Drop initializers of available externally global variables. for (Module::global_iterator I = M.global_begin(), E = M.global_end(); I != E; ++I) { if (!I->hasAvailableExternallyLinkage()) continue; if (I->hasInitializer()) { Constant *Init = I->getInitializer(); I->setInitializer(nullptr); if (isSafeToDestroyConstant(Init)) Init->destroyConstant(); } I->removeDeadConstantUsers(); I->setLinkage(GlobalValue::ExternalLinkage); NumVariables++; } // Drop the bodies of available externally functions. for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) { if (!I->hasAvailableExternallyLinkage()) continue; if (!I->isDeclaration()) // This will set the linkage to external I->deleteBody(); I->removeDeadConstantUsers(); NumFunctions++; } return Changed; }
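The effect of this pass is simple: a definition with available_externally linkage loses its initializer or body and becomes an ordinary external declaration. A hedged toy model of that transformation in plain C++ (ToyGlobal and its fields are assumptions for illustration, not GlobalValue):

// --- illustrative sketch, not part of the original file ---
#include <iostream>
#include <optional>
#include <string>
#include <vector>

// Toy model of a global: it either carries a body/initializer or is a bare
// declaration.
struct ToyGlobal {
  std::string Name;
  bool AvailableExternally = false;
  std::optional<std::string> Body; // initializer or function body
};

// Mirror of the pass's two loops: drop the body and demote the linkage,
// leaving only an external declaration behind.
void eliminateAvailableExternally(std::vector<ToyGlobal> &Module) {
  for (ToyGlobal &G : Module) {
    if (!G.AvailableExternally)
      continue;
    G.Body.reset();                // discard initializer / delete body
    G.AvailableExternally = false; // now ordinary external linkage
  }
}

int main() {
  std::vector<ToyGlobal> M = {
      {"inline_helper", true, std::string("...body...")},
      {"real_def", false, std::string("...body...")},
  };
  eliminateAvailableExternally(M);
  for (const ToyGlobal &G : M)
    std::cout << G.Name << (G.Body ? " defined\n" : " declared\n");
}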
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/IPO/IPConstantPropagation.cpp
//===-- IPConstantPropagation.cpp - Propagate constants through calls -----===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass implements an _extremely_ simple interprocedural constant // propagation pass. It could certainly be improved in many different ways, // like using a worklist. This pass makes arguments dead, but does not remove // them. The existing dead argument elimination pass should be run after this // to clean up the mess. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/IPO.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" using namespace llvm; #define DEBUG_TYPE "ipconstprop" STATISTIC(NumArgumentsProped, "Number of args turned into constants"); STATISTIC(NumReturnValProped, "Number of return values turned into constants"); namespace { /// IPCP - The interprocedural constant propagation pass /// struct IPCP : public ModulePass { static char ID; // Pass identification, replacement for typeid IPCP() : ModulePass(ID) { initializeIPCPPass(*PassRegistry::getPassRegistry()); } bool runOnModule(Module &M) override; private: bool PropagateConstantsIntoArguments(Function &F); bool PropagateConstantReturn(Function &F); }; } char IPCP::ID = 0; INITIALIZE_PASS(IPCP, "ipconstprop", "Interprocedural constant propagation", false, false) ModulePass *llvm::createIPConstantPropagationPass() { return new IPCP(); } bool IPCP::runOnModule(Module &M) { bool Changed = false; bool LocalChange = true; // FIXME: instead of using smart algorithms, we just iterate until we stop // making changes. while (LocalChange) { LocalChange = false; for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) if (!I->isDeclaration()) { // Delete any klingons. I->removeDeadConstantUsers(); if (I->hasLocalLinkage()) LocalChange |= PropagateConstantsIntoArguments(*I); Changed |= PropagateConstantReturn(*I); } Changed |= LocalChange; } return Changed; } /// PropagateConstantsIntoArguments - Look at all uses of the specified /// function. If all uses are direct call sites, and all pass a particular /// constant in for an argument, propagate that constant in as the argument. /// bool IPCP::PropagateConstantsIntoArguments(Function &F) { if (F.arg_empty() || F.use_empty()) return false; // No arguments? Early exit. // For each argument, keep track of its constant value and whether it is a // constant or not. The bool is driven to true when found to be non-constant. SmallVector<std::pair<Constant*, bool>, 16> ArgumentConstants; ArgumentConstants.resize(F.arg_size()); unsigned NumNonconstant = 0; for (Use &U : F.uses()) { User *UR = U.getUser(); // Ignore blockaddress uses. if (isa<BlockAddress>(UR)) continue; // Used by a non-instruction, or not the callee of a function, do not // transform. if (!isa<CallInst>(UR) && !isa<InvokeInst>(UR)) return false; CallSite CS(cast<Instruction>(UR)); if (!CS.isCallee(&U)) return false; // Check out all of the potentially constant arguments. Note that we don't // inspect varargs here. 
CallSite::arg_iterator AI = CS.arg_begin(); Function::arg_iterator Arg = F.arg_begin(); for (unsigned i = 0, e = ArgumentConstants.size(); i != e; ++i, ++AI, ++Arg) { // If this argument is known non-constant, ignore it. if (ArgumentConstants[i].second) continue; Constant *C = dyn_cast<Constant>(*AI); if (C && ArgumentConstants[i].first == nullptr) { ArgumentConstants[i].first = C; // First constant seen. } else if (C && ArgumentConstants[i].first == C) { // Still the constant value we think it is. } else if (*AI == &*Arg) { // Ignore recursive calls passing argument down. } else { // Argument became non-constant. If all arguments are non-constant now, // give up on this function. if (++NumNonconstant == ArgumentConstants.size()) return false; ArgumentConstants[i].second = true; } } } // If we got to this point, there is a constant argument! assert(NumNonconstant != ArgumentConstants.size()); bool MadeChange = false; Function::arg_iterator AI = F.arg_begin(); for (unsigned i = 0, e = ArgumentConstants.size(); i != e; ++i, ++AI) { // Do we have a constant argument? if (ArgumentConstants[i].second || AI->use_empty() || AI->hasInAllocaAttr() || (AI->hasByValAttr() && !F.onlyReadsMemory())) continue; Value *V = ArgumentConstants[i].first; if (!V) V = UndefValue::get(AI->getType()); AI->replaceAllUsesWith(V); ++NumArgumentsProped; MadeChange = true; } return MadeChange; } // Check to see if this function returns one or more constants. If so, replace // all callers that use those return values with the constant value. This will // leave in the actual return values and instructions, but deadargelim will // clean that up. // // Additionally if a function always returns one of its arguments directly, // callers will be updated to use the value they pass in directly instead of // using the return value. bool IPCP::PropagateConstantReturn(Function &F) { if (F.getReturnType()->isVoidTy()) return false; // No return value. // If this function could be overridden later in the link stage, we can't // propagate information about its results into callers. if (F.mayBeOverridden()) return false; // Check to see if this function returns a constant. SmallVector<Value *,4> RetVals; StructType *STy = dyn_cast<StructType>(F.getReturnType()); if (STy) for (unsigned i = 0, e = STy->getNumElements(); i < e; ++i) RetVals.push_back(UndefValue::get(STy->getElementType(i))); else RetVals.push_back(UndefValue::get(F.getReturnType())); unsigned NumNonConstant = 0; for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) { for (unsigned i = 0, e = RetVals.size(); i != e; ++i) { // Already found conflicting return values? Value *RV = RetVals[i]; if (!RV) continue; // Find the returned value Value *V; if (!STy) V = RI->getOperand(0); else V = FindInsertedValue(RI->getOperand(0), i); if (V) { // Ignore undefs, we can change them into anything if (isa<UndefValue>(V)) continue; // Try to see if all the rets return the same constant or argument. if (isa<Constant>(V) || isa<Argument>(V)) { if (isa<UndefValue>(RV)) { // No value found yet? Try the current one. RetVals[i] = V; continue; } // Returning the same value? Good. if (RV == V) continue; } } // Different or no known return value? Don't propagate this return // value. RetVals[i] = nullptr; // All values non-constant? Stop looking. if (++NumNonConstant == RetVals.size()) return false; } } // If we got here, the function returns at least one constant value. 
Loop // over all users, replacing any uses of the return value with the returned // constant. bool MadeChange = false; for (Use &U : F.uses()) { CallSite CS(U.getUser()); Instruction* Call = CS.getInstruction(); // Not a call instruction or a call instruction that's not calling F // directly? if (!Call || !CS.isCallee(&U)) continue; // Call result not used? if (Call->use_empty()) continue; MadeChange = true; if (!STy) { Value* New = RetVals[0]; if (Argument *A = dyn_cast<Argument>(New)) // Was an argument returned? Then find the corresponding argument in // the call instruction and use that. New = CS.getArgument(A->getArgNo()); Call->replaceAllUsesWith(New); continue; } for (auto I = Call->user_begin(), E = Call->user_end(); I != E;) { Instruction *Ins = cast<Instruction>(*I); // Increment now, so we can remove the use ++I; // Find the index of the retval to replace with int index = -1; if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Ins)) if (EV->hasIndices()) index = *EV->idx_begin(); // If this use uses a specific return value, and we have a replacement, // replace it. if (index != -1) { Value *New = RetVals[index]; if (New) { if (Argument *A = dyn_cast<Argument>(New)) // Was an argument returned? Then find the corresponding argument in // the call instruction and use that. New = CS.getArgument(A->getArgNo()); Ins->replaceAllUsesWith(New); Ins->eraseFromParent(); } } } } if (MadeChange) ++NumReturnValProped; return MadeChange; }
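PropagateConstantsIntoArguments above is effectively a small per-argument lattice: no constant seen yet, exactly one constant seen, or known non-constant. The sketch below reproduces that merge over toy call sites in plain C++; it is a simplified model (it omits the recursive-call and byval/inalloca special cases) and the ToyCallSite/ArgState names are illustrative, not LLVM types:

// --- illustrative sketch, not part of the original file ---
#include <iostream>
#include <optional>
#include <string>
#include <vector>

// A toy call site: the actual values passed for each formal argument.
// "C:<n>" marks a constant, anything else is treated as non-constant.
using ToyCallSite = std::vector<std::string>;

// Per-argument state: nullopt = no constant seen yet, a value = the single
// constant seen so far, plus a flag once it becomes non-constant.
struct ArgState {
  std::optional<std::string> Constant;
  bool NonConstant = false;
};

// An argument is replaceable only if every call site passes the same constant.
std::vector<ArgState> mergeCallSites(unsigned NumArgs,
                                     const std::vector<ToyCallSite> &Calls) {
  std::vector<ArgState> States(NumArgs);
  for (const ToyCallSite &CS : Calls)
    for (unsigned i = 0; i < NumArgs; ++i) {
      if (States[i].NonConstant)
        continue;
      const std::string &V = CS[i];
      bool IsConst = V.rfind("C:", 0) == 0;
      if (IsConst && (!States[i].Constant || *States[i].Constant == V))
        States[i].Constant = V;       // first constant, or same constant again
      else
        States[i].NonConstant = true; // conflicting or non-constant value
    }
  return States;
}

int main() {
  // f(a, b) called as f(C:42, x) and f(C:42, C:7).
  auto States = mergeCallSites(2, {{"C:42", "x"}, {"C:42", "C:7"}});
  for (unsigned i = 0; i < States.size(); ++i)
    std::cout << "arg" << i << ": "
              << (States[i].NonConstant
                      ? std::string("varies")
                      : States[i].Constant.value_or("unused"))
              << "\n";
}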
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/IPO/Internalize.cpp
//===-- Internalize.cpp - Mark functions internal -------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass loops over all of the functions and variables in the input module. // If the function or variable is not in the list of external names given to // the pass it is marked as internal. // // This transformation would not be legal in a regular compilation, but it gets // extra information from the linker about what is safe. // // For example: Internalizing a function with external linkage. Only if we are // told it is only used from within this module, it is safe to do it. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/IPO.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/CallGraph.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/GlobalStatus.h" #include "llvm/Transforms/Utils/ModuleUtils.h" #include <fstream> #include <set> using namespace llvm; #define DEBUG_TYPE "internalize" STATISTIC(NumAliases , "Number of aliases internalized"); STATISTIC(NumFunctions, "Number of functions internalized"); STATISTIC(NumGlobals , "Number of global vars internalized"); // APIFile - A file which contains a list of symbols that should not be marked // external. static cl::opt<std::string> APIFile("internalize-public-api-file", cl::value_desc("filename"), cl::desc("A file containing list of symbol names to preserve")); // APIList - A list of symbols that should not be marked internal. static cl::list<std::string> APIList("internalize-public-api-list", cl::value_desc("list"), cl::desc("A list of symbol names to preserve"), cl::CommaSeparated); namespace { class InternalizePass : public ModulePass { std::set<std::string> ExternalNames; public: static char ID; // Pass identification, replacement for typeid explicit InternalizePass(); explicit InternalizePass(ArrayRef<const char *> ExportList); void LoadFile(const char *Filename); bool runOnModule(Module &M) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesCFG(); AU.addPreserved<CallGraphWrapperPass>(); } }; } // end anonymous namespace char InternalizePass::ID = 0; INITIALIZE_PASS(InternalizePass, "internalize", "Internalize Global Symbols", false, false) InternalizePass::InternalizePass() : ModulePass(ID) { initializeInternalizePassPass(*PassRegistry::getPassRegistry()); if (!APIFile.empty()) // If a filename is specified, use it. LoadFile(APIFile.c_str()); ExternalNames.insert(APIList.begin(), APIList.end()); } InternalizePass::InternalizePass(ArrayRef<const char *> ExportList) : ModulePass(ID) { initializeInternalizePassPass(*PassRegistry::getPassRegistry()); for(ArrayRef<const char *>::const_iterator itr = ExportList.begin(); itr != ExportList.end(); itr++) { ExternalNames.insert(*itr); } } void InternalizePass::LoadFile(const char *Filename) { // Load the APIFile... std::ifstream In(Filename); if (!In.good()) { errs() << "WARNING: Internalize couldn't load file '" << Filename << "'! 
Continuing as if it's empty.\n"; return; // Just continue as if the file were empty } while (In) { std::string Symbol; In >> Symbol; if (!Symbol.empty()) ExternalNames.insert(Symbol); } } static bool shouldInternalize(const GlobalValue &GV, const std::set<std::string> &ExternalNames) { // Function must be defined here if (GV.isDeclaration()) return false; // Available externally is really just a "declaration with a body". if (GV.hasAvailableExternallyLinkage()) return false; // Assume that dllexported symbols are referenced elsewhere if (GV.hasDLLExportStorageClass()) return false; // Already has internal linkage if (GV.hasLocalLinkage()) return false; // Marked to keep external? if (ExternalNames.count(GV.getName())) return false; return true; } bool InternalizePass::runOnModule(Module &M) { CallGraphWrapperPass *CGPass = getAnalysisIfAvailable<CallGraphWrapperPass>(); CallGraph *CG = CGPass ? &CGPass->getCallGraph() : nullptr; CallGraphNode *ExternalNode = CG ? CG->getExternalCallingNode() : nullptr; bool Changed = false; SmallPtrSet<GlobalValue *, 8> Used; collectUsedGlobalVariables(M, Used, false); // We must assume that globals in llvm.used have a reference that not even // the linker can see, so we don't internalize them. // For llvm.compiler.used the situation is a bit fuzzy. The assembler and // linker can drop those symbols. If this pass is running as part of LTO, // one might think that it could just drop llvm.compiler.used. The problem // is that even in LTO llvm doesn't see every reference. For example, // we don't see references from function local inline assembly. To be // conservative, we internalize symbols in llvm.compiler.used, but we // keep llvm.compiler.used so that the symbol is not deleted by llvm. for (GlobalValue *V : Used) { ExternalNames.insert(V->getName()); } // Mark all functions not in the api as internal. for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) { if (!shouldInternalize(*I, ExternalNames)) continue; I->setVisibility(GlobalValue::DefaultVisibility); I->setLinkage(GlobalValue::InternalLinkage); if (ExternalNode) // Remove a callgraph edge from the external node to this function. ExternalNode->removeOneAbstractEdgeTo((*CG)[I]); Changed = true; ++NumFunctions; DEBUG(dbgs() << "Internalizing func " << I->getName() << "\n"); } // Never internalize the llvm.used symbol. It is used to implement // attribute((used)). // FIXME: Shouldn't this just filter on llvm.metadata section?? ExternalNames.insert("llvm.used"); ExternalNames.insert("llvm.compiler.used"); // Never internalize anchors used by the machine module info, else the info // won't find them. (see MachineModuleInfo.) ExternalNames.insert("llvm.global_ctors"); ExternalNames.insert("llvm.global_dtors"); ExternalNames.insert("llvm.global.annotations"); // Never internalize symbols code-gen inserts. // FIXME: We should probably add this (and the __stack_chk_guard) via some // type of call-back in CodeGen. ExternalNames.insert("__stack_chk_fail"); ExternalNames.insert("__stack_chk_guard"); // Mark all global variables with initializers that are not in the api as // internal as well. for (Module::global_iterator I = M.global_begin(), E = M.global_end(); I != E; ++I) { if (!shouldInternalize(*I, ExternalNames)) continue; I->setVisibility(GlobalValue::DefaultVisibility); I->setLinkage(GlobalValue::InternalLinkage); Changed = true; ++NumGlobals; DEBUG(dbgs() << "Internalized gvar " << I->getName() << "\n"); } // Mark all aliases that are not in the api as internal as well. 
for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end(); I != E; ++I) { if (!shouldInternalize(*I, ExternalNames)) continue; I->setVisibility(GlobalValue::DefaultVisibility); I->setLinkage(GlobalValue::InternalLinkage); Changed = true; ++NumAliases; DEBUG(dbgs() << "Internalized alias " << I->getName() << "\n"); } return Changed; } ModulePass *llvm::createInternalizePass() { return new InternalizePass(); } ModulePass *llvm::createInternalizePass(ArrayRef<const char *> ExportList) { return new InternalizePass(ExportList); }
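shouldInternalize above is a short chain of exclusions applied uniformly to functions, globals, and aliases. A standalone paraphrase of those checks, assuming a toy symbol record instead of GlobalValue:

// --- illustrative sketch, not part of the original file ---
#include <iostream>
#include <set>
#include <string>

// Toy stand-in for the properties shouldInternalize inspects; field names
// are illustrative, not the GlobalValue API.
struct ToySymbol {
  std::string Name;
  bool IsDeclaration = false;
  bool AvailableExternally = false;
  bool DLLExport = false;
  bool AlreadyLocal = false;
};

// Same exclusion order as the pass: must be a definition here, must not be
// available_externally, dllexport, already internal, or on the keep list.
bool shouldInternalizeToy(const ToySymbol &S,
                          const std::set<std::string> &Keep) {
  if (S.IsDeclaration) return false;
  if (S.AvailableExternally) return false;
  if (S.DLLExport) return false;
  if (S.AlreadyLocal) return false;
  if (Keep.count(S.Name)) return false;
  return true;
}

int main() {
  std::set<std::string> Keep = {"main", "llvm.used"};
  ToySymbol Helper{"helper"};
  ToySymbol Entry{"main"};
  std::cout << shouldInternalizeToy(Helper, Keep) << "\n"; // 1: internalized
  std::cout << shouldInternalizeToy(Entry, Keep) << "\n";  // 0: preserved
}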
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/IPO/LLVMBuild.txt
;===- ./lib/Transforms/IPO/LLVMBuild.txt -----------------------*- Conf -*--===; ; ; The LLVM Compiler Infrastructure ; ; This file is distributed under the University of Illinois Open Source ; License. See LICENSE.TXT for details. ; ;===------------------------------------------------------------------------===; ; ; This is an LLVMBuild description file for the components in this subdirectory. ; ; For more information on the LLVMBuild system, please see: ; ; http://llvm.org/docs/LLVMBuild.html ; ;===------------------------------------------------------------------------===; [component_0] type = Library name = IPO parent = Transforms library_name = ipo required_libraries = Analysis Core IPA InstCombine Scalar Support TransformUtils Vectorize
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/IPO/PruneEH.cpp
//===- PruneEH.cpp - Pass which deletes unused exception handlers ---------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements a simple interprocedural pass which walks the // call-graph, turning invoke instructions into calls, iff the callee cannot // throw an exception, and marking functions 'nounwind' if they cannot throw. // It implements this as a bottom-up traversal of the call-graph. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/IPO.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Analysis/CallGraph.h" #include "llvm/Analysis/CallGraphSCCPass.h" #include "llvm/Analysis/LibCallSemantics.h" #include "llvm/IR/CFG.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Function.h" #include "llvm/IR/InlineAsm.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/LLVMContext.h" #include <algorithm> using namespace llvm; #define DEBUG_TYPE "prune-eh" STATISTIC(NumRemoved, "Number of invokes removed"); STATISTIC(NumUnreach, "Number of noreturn calls optimized"); namespace { struct PruneEH : public CallGraphSCCPass { static char ID; // Pass identification, replacement for typeid PruneEH() : CallGraphSCCPass(ID) { initializePruneEHPass(*PassRegistry::getPassRegistry()); } // runOnSCC - Analyze the SCC, performing the transformation if possible. bool runOnSCC(CallGraphSCC &SCC) override; bool SimplifyFunction(Function *F); void DeleteBasicBlock(BasicBlock *BB); }; } char PruneEH::ID = 0; INITIALIZE_PASS_BEGIN(PruneEH, "prune-eh", "Remove unused exception handling info", false, false) INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass) INITIALIZE_PASS_END(PruneEH, "prune-eh", "Remove unused exception handling info", false, false) Pass *llvm::createPruneEHPass() { return new PruneEH(); } bool PruneEH::runOnSCC(CallGraphSCC &SCC) { SmallPtrSet<CallGraphNode *, 8> SCCNodes; CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph(); bool MadeChange = false; // Fill SCCNodes with the elements of the SCC. Used for quickly // looking up whether a given CallGraphNode is in this SCC. for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) SCCNodes.insert(*I); // First pass, scan all of the functions in the SCC, simplifying them // according to what we know. for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) if (Function *F = (*I)->getFunction()) MadeChange |= SimplifyFunction(F); // Next, check to see if any callees might throw or if there are any external // functions in this SCC: if so, we cannot prune any functions in this SCC. // Definitions that are weak and not declared non-throwing might be // overridden at linktime with something that throws, so assume that. // If this SCC includes the unwind instruction, we KNOW it throws, so // obviously the SCC might throw. 
// bool SCCMightUnwind = false, SCCMightReturn = false; for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); (!SCCMightUnwind || !SCCMightReturn) && I != E; ++I) { Function *F = (*I)->getFunction(); if (!F) { SCCMightUnwind = true; SCCMightReturn = true; } else if (F->isDeclaration() || F->mayBeOverridden()) { SCCMightUnwind |= !F->doesNotThrow(); SCCMightReturn |= !F->doesNotReturn(); } else { bool CheckUnwind = !SCCMightUnwind && !F->doesNotThrow(); bool CheckReturn = !SCCMightReturn && !F->doesNotReturn(); // Determine if we should scan for InlineAsm in a naked function as it // is the only way to return without a ReturnInst. Only do this for // no-inline functions as functions which may be inlined cannot // meaningfully return via assembly. bool CheckReturnViaAsm = CheckReturn && F->hasFnAttribute(Attribute::Naked) && F->hasFnAttribute(Attribute::NoInline); if (!CheckUnwind && !CheckReturn) continue; for (const BasicBlock &BB : *F) { const TerminatorInst *TI = BB.getTerminator(); if (CheckUnwind && TI->mayThrow()) { SCCMightUnwind = true; } else if (CheckReturn && isa<ReturnInst>(TI)) { SCCMightReturn = true; } for (const Instruction &I : BB) { if ((!CheckUnwind || SCCMightUnwind) && (!CheckReturnViaAsm || SCCMightReturn)) break; // Check to see if this function performs an unwind or calls an // unwinding function. if (CheckUnwind && !SCCMightUnwind && I.mayThrow()) { bool InstMightUnwind = true; if (const auto *CI = dyn_cast<CallInst>(&I)) { if (Function *Callee = CI->getCalledFunction()) { CallGraphNode *CalleeNode = CG[Callee]; // If the callee is outside our current SCC then we may throw // because it might. If it is inside, do nothing. if (SCCNodes.count(CalleeNode) > 0) InstMightUnwind = false; } } SCCMightUnwind |= InstMightUnwind; } if (CheckReturnViaAsm && !SCCMightReturn) if (auto ICS = ImmutableCallSite(&I)) if (const auto *IA = dyn_cast<InlineAsm>(ICS.getCalledValue())) if (IA->hasSideEffects()) SCCMightReturn = true; } if (SCCMightUnwind && SCCMightReturn) break; } } } // If the SCC doesn't unwind or doesn't throw, note this fact. if (!SCCMightUnwind || !SCCMightReturn) for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) { AttrBuilder NewAttributes; if (!SCCMightUnwind) NewAttributes.addAttribute(Attribute::NoUnwind); if (!SCCMightReturn) NewAttributes.addAttribute(Attribute::NoReturn); Function *F = (*I)->getFunction(); const AttributeSet &PAL = F->getAttributes().getFnAttributes(); const AttributeSet &NPAL = AttributeSet::get( F->getContext(), AttributeSet::FunctionIndex, NewAttributes); if (PAL != NPAL) { MadeChange = true; F->addAttributes(AttributeSet::FunctionIndex, NPAL); } } for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) { // Convert any invoke instructions to non-throwing functions in this node // into call instructions with a branch. This makes the exception blocks // dead. if (Function *F = (*I)->getFunction()) MadeChange |= SimplifyFunction(F); } return MadeChange; } // SimplifyFunction - Given information about callees, simplify the specified // function if we have invokes to non-unwinding functions or code after calls to // no-return functions. 
bool PruneEH::SimplifyFunction(Function *F) { bool MadeChange = false; for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) { if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(F)) { SmallVector<Value*, 8> Args(II->op_begin(), II->op_end() - 3); // Insert a call instruction before the invoke. CallInst *Call = CallInst::Create(II->getCalledValue(), Args, "", II); Call->takeName(II); Call->setCallingConv(II->getCallingConv()); Call->setAttributes(II->getAttributes()); Call->setDebugLoc(II->getDebugLoc()); // Anything that used the value produced by the invoke instruction // now uses the value produced by the call instruction. Note that we // do this even for void functions and calls with no uses so that the // callgraph edge is updated. II->replaceAllUsesWith(Call); BasicBlock *UnwindBlock = II->getUnwindDest(); UnwindBlock->removePredecessor(II->getParent()); // Insert a branch to the normal destination right before the // invoke. BranchInst::Create(II->getNormalDest(), II); // Finally, delete the invoke instruction! BB->getInstList().pop_back(); // If the unwind block is now dead, nuke it. if (pred_empty(UnwindBlock)) DeleteBasicBlock(UnwindBlock); // Delete the new BB. ++NumRemoved; MadeChange = true; } for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) if (CallInst *CI = dyn_cast<CallInst>(I++)) if (CI->doesNotReturn() && !isa<UnreachableInst>(I)) { // This call calls a function that cannot return. Insert an // unreachable instruction after it and simplify the code. Do this // by splitting the BB, adding the unreachable, then deleting the // new BB. BasicBlock *New = BB->splitBasicBlock(I); // Remove the uncond branch and add an unreachable. BB->getInstList().pop_back(); new UnreachableInst(BB->getContext(), BB); DeleteBasicBlock(New); // Delete the new BB. MadeChange = true; ++NumUnreach; break; } } return MadeChange; } /// DeleteBasicBlock - remove the specified basic block from the program, /// updating the callgraph to reflect any now-obsolete edges due to calls that /// exist in the BB. void PruneEH::DeleteBasicBlock(BasicBlock *BB) { assert(pred_empty(BB) && "BB is not dead!"); CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph(); CallGraphNode *CGN = CG[BB->getParent()]; for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; ) { --I; if (CallInst *CI = dyn_cast<CallInst>(I)) { if (!isa<IntrinsicInst>(I)) CGN->removeCallEdgeFor(CI); } else if (InvokeInst *II = dyn_cast<InvokeInst>(I)) CGN->removeCallEdgeFor(II); if (!I->use_empty()) I->replaceAllUsesWith(UndefValue::get(I->getType())); } // Get the list of successors of this block. std::vector<BasicBlock*> Succs(succ_begin(BB), succ_end(BB)); for (unsigned i = 0, e = Succs.size(); i != e; ++i) Succs[i]->removePredecessor(BB); BB->eraseFromParent(); }
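// --- Editor's note: illustrative sketch, not part of the original file. ---
// A minimal example of how the prune-eh pass is typically scheduled, assuming
// the legacy pass manager; runPruneEHOnModule and the module reference M are
// hypothetical names. The pass manager provides the CallGraph analysis that
// this CallGraphSCCPass requires.
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/IPO.h"

static void runPruneEHOnModule(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  PM.add(llvm::createPruneEHPass()); // converts invokes to calls, infers nounwind/noreturn
  PM.run(M);
}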
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/IPO/LoopExtractor.cpp
//===- LoopExtractor.cpp - Extract each loop into a new function ----------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // A pass wrapper around the ExtractLoop() scalar transformation to extract each // top-level loop into its own new function. If the loop is the ONLY loop in a // given function, it is not touched. This is a pass most useful for debugging // via bugpoint. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/IPO.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/LoopPass.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/CodeExtractor.h" #include <fstream> #include <set> using namespace llvm; #define DEBUG_TYPE "loop-extract" STATISTIC(NumExtracted, "Number of loops extracted"); namespace { struct LoopExtractor : public LoopPass { static char ID; // Pass identification, replacement for typeid unsigned NumLoops; explicit LoopExtractor(unsigned numLoops = ~0) : LoopPass(ID), NumLoops(numLoops) { initializeLoopExtractorPass(*PassRegistry::getPassRegistry()); } bool runOnLoop(Loop *L, LPPassManager &LPM) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequiredID(BreakCriticalEdgesID); AU.addRequiredID(LoopSimplifyID); AU.addRequired<DominatorTreeWrapperPass>(); } }; } char LoopExtractor::ID = 0; INITIALIZE_PASS_BEGIN(LoopExtractor, "loop-extract", "Extract loops into new functions", false, false) INITIALIZE_PASS_DEPENDENCY(BreakCriticalEdges) INITIALIZE_PASS_DEPENDENCY(LoopSimplify) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_END(LoopExtractor, "loop-extract", "Extract loops into new functions", false, false) namespace { /// SingleLoopExtractor - For bugpoint. struct SingleLoopExtractor : public LoopExtractor { static char ID; // Pass identification, replacement for typeid SingleLoopExtractor() : LoopExtractor(1) {} }; } // End anonymous namespace char SingleLoopExtractor::ID = 0; INITIALIZE_PASS(SingleLoopExtractor, "loop-extract-single", "Extract at most one loop into a new function", false, false) // createLoopExtractorPass - This pass extracts all natural loops from the // program into a function if it can. // Pass *llvm::createLoopExtractorPass() { return new LoopExtractor(); } bool LoopExtractor::runOnLoop(Loop *L, LPPassManager &LPM) { if (skipOptnoneFunction(L)) return false; // Only visit top-level loops. if (L->getParentLoop()) return false; // If LoopSimplify form is not available, stay out of trouble. if (!L->isLoopSimplifyForm()) return false; DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); bool Changed = false; // If there is more than one top-level loop in this function, extract all of // the loops. Otherwise there is exactly one top-level loop; in this case if // this function is more than a minimal wrapper around the loop, extract // the loop. bool ShouldExtractLoop = false; // Extract the loop if the entry block doesn't branch to the loop header. 
TerminatorInst *EntryTI = L->getHeader()->getParent()->getEntryBlock().getTerminator(); if (!isa<BranchInst>(EntryTI) || !cast<BranchInst>(EntryTI)->isUnconditional() || EntryTI->getSuccessor(0) != L->getHeader()) { ShouldExtractLoop = true; } else { // Check to see if any exits from the loop are more than just return // blocks. SmallVector<BasicBlock*, 8> ExitBlocks; L->getExitBlocks(ExitBlocks); for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) if (!isa<ReturnInst>(ExitBlocks[i]->getTerminator())) { ShouldExtractLoop = true; break; } } if (ShouldExtractLoop) { // We must omit landing pads. Landing pads must accompany the invoke // instruction. But this would result in a loop in the extracted // function. An infinite cycle occurs when it tries to extract that loop as // well. SmallVector<BasicBlock*, 8> ExitBlocks; L->getExitBlocks(ExitBlocks); for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) if (ExitBlocks[i]->isLandingPad()) { ShouldExtractLoop = false; break; } } if (ShouldExtractLoop) { if (NumLoops == 0) return Changed; --NumLoops; CodeExtractor Extractor(DT, *L); if (Extractor.extractCodeRegion() != nullptr) { Changed = true; // After extraction, the loop is replaced by a function call, so // we shouldn't try to run any more loop passes on it. LPM.deleteLoopFromQueue(L); } ++NumExtracted; } return Changed; } // createSingleLoopExtractorPass - This pass extracts one natural loop from the // program into a function if it can. This is used by bugpoint. // Pass *llvm::createSingleLoopExtractorPass() { return new SingleLoopExtractor(); } // BlockFile - A file which contains a list of blocks that should not be // extracted. static cl::opt<std::string> BlockFile("extract-blocks-file", cl::value_desc("filename"), cl::desc("A file containing list of basic blocks to not extract"), cl::Hidden); namespace { /// BlockExtractorPass - This pass is used by bugpoint to extract all blocks /// from the module into their own functions except for those specified by the /// BlocksToNotExtract list. class BlockExtractorPass : public ModulePass { void LoadFile(const char *Filename); void SplitLandingPadPreds(Function *F); std::vector<BasicBlock*> BlocksToNotExtract; std::vector<std::pair<std::string, std::string> > BlocksToNotExtractByName; public: static char ID; // Pass identification, replacement for typeid BlockExtractorPass() : ModulePass(ID) { if (!BlockFile.empty()) LoadFile(BlockFile.c_str()); } bool runOnModule(Module &M) override; }; } char BlockExtractorPass::ID = 0; INITIALIZE_PASS(BlockExtractorPass, "extract-blocks", "Extract Basic Blocks From Module (for bugpoint use)", false, false) // createBlockExtractorPass - This pass extracts all blocks (except those // specified in the argument list) from the functions in the module. // ModulePass *llvm::createBlockExtractorPass() { return new BlockExtractorPass(); } void BlockExtractorPass::LoadFile(const char *Filename) { // Load the BlockFile... std::ifstream In(Filename); if (!In.good()) { errs() << "WARNING: BlockExtractor couldn't load file '" << Filename << "'!\n"; return; } while (In) { std::string FunctionName, BlockName; In >> FunctionName; In >> BlockName; if (!BlockName.empty()) BlocksToNotExtractByName.push_back( std::make_pair(FunctionName, BlockName)); } } /// SplitLandingPadPreds - The landing pad needs to be extracted with the invoke /// instruction. The critical edge breaker will refuse to break critical edges /// to a landing pad. So do them here. 
After this method runs, all landing pads /// should have only one predecessor. void BlockExtractorPass::SplitLandingPadPreds(Function *F) { for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I) { InvokeInst *II = dyn_cast<InvokeInst>(I); if (!II) continue; BasicBlock *Parent = II->getParent(); BasicBlock *LPad = II->getUnwindDest(); // Look through the landing pad's predecessors. If one of them ends in an // 'invoke', then we want to split the landing pad. bool Split = false; for (pred_iterator PI = pred_begin(LPad), PE = pred_end(LPad); PI != PE; ++PI) { BasicBlock *BB = *PI; if (BB->isLandingPad() && BB != Parent && isa<InvokeInst>(Parent->getTerminator())) { Split = true; break; } } if (!Split) continue; SmallVector<BasicBlock*, 2> NewBBs; SplitLandingPadPredecessors(LPad, Parent, ".1", ".2", NewBBs); } } bool BlockExtractorPass::runOnModule(Module &M) { std::set<BasicBlock*> TranslatedBlocksToNotExtract; for (unsigned i = 0, e = BlocksToNotExtract.size(); i != e; ++i) { BasicBlock *BB = BlocksToNotExtract[i]; Function *F = BB->getParent(); // Map the corresponding function in this module. Function *MF = M.getFunction(F->getName()); assert(MF->getFunctionType() == F->getFunctionType() && "Wrong function?"); // Figure out which index the basic block is in its function. Function::iterator BBI = MF->begin(); std::advance(BBI, std::distance(F->begin(), Function::iterator(BB))); TranslatedBlocksToNotExtract.insert(BBI); } while (!BlocksToNotExtractByName.empty()) { // There's no way to find BBs by name without looking at every BB inside // every Function. Fortunately, this is always empty except when used by // bugpoint in which case correctness is more important than performance. std::string &FuncName = BlocksToNotExtractByName.back().first; std::string &BlockName = BlocksToNotExtractByName.back().second; for (Module::iterator FI = M.begin(), FE = M.end(); FI != FE; ++FI) { Function &F = *FI; if (F.getName() != FuncName) continue; for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) { BasicBlock &BB = *BI; if (BB.getName() != BlockName) continue; TranslatedBlocksToNotExtract.insert(BI); } } BlocksToNotExtractByName.pop_back(); } // Now that we know which blocks to not extract, figure out which ones we WANT // to extract. std::vector<BasicBlock*> BlocksToExtract; for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) { SplitLandingPadPreds(&*F); for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) if (!TranslatedBlocksToNotExtract.count(BB)) BlocksToExtract.push_back(BB); } for (unsigned i = 0, e = BlocksToExtract.size(); i != e; ++i) { SmallVector<BasicBlock*, 2> BlocksToExtractVec; BlocksToExtractVec.push_back(BlocksToExtract[i]); if (const InvokeInst *II = dyn_cast<InvokeInst>(BlocksToExtract[i]->getTerminator())) BlocksToExtractVec.push_back(II->getUnwindDest()); CodeExtractor(BlocksToExtractVec).extractCodeRegion(); } return !BlocksToExtract.empty(); }
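// --- Editor's note: illustrative sketch, not part of the original file. ---
// A hedged usage example for the loop extractor, assuming the legacy pass
// manager; extractTopLevelLoops and M are hypothetical names. The declared
// dependencies (BreakCriticalEdges, LoopSimplify, DominatorTree) are scheduled
// automatically by the pass manager. The command-line equivalent would be
// roughly: opt -loop-extract in.ll -S -o out.ll
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/IPO.h"

static void extractTopLevelLoops(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  PM.add(llvm::createLoopExtractorPass());          // extract every top-level loop
  // PM.add(llvm::createSingleLoopExtractorPass()); // or: at most one loop (bugpoint)
  PM.run(M);
}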
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/IPO/InlineSimple.cpp
//===- InlineSimple.cpp - Code to perform simple function inlining --------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements bottom-up inlining of functions into callees. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/IPO.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/CallGraph.h" #include "llvm/Analysis/InlineCost.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/CallingConv.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/IR/Type.h" #include "llvm/Transforms/IPO/InlinerPass.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; #define DEBUG_TYPE "inline" namespace { /// \brief Actual inliner pass implementation. /// /// The common implementation of the inlining logic is shared between this /// inliner pass and the always inliner pass. The two passes use different cost /// analyses to determine when to inline. class SimpleInliner : public Inliner { InlineCostAnalysis *ICA; public: SimpleInliner() : Inliner(ID), ICA(nullptr) { initializeSimpleInlinerPass(*PassRegistry::getPassRegistry()); } SimpleInliner(int Threshold) : Inliner(ID, Threshold, /*InsertLifetime*/ true), ICA(nullptr) { initializeSimpleInlinerPass(*PassRegistry::getPassRegistry()); } static char ID; // Pass identification, replacement for typeid InlineCost getInlineCost(CallSite CS) override { return ICA->getInlineCost(CS, getInlineThreshold(CS)); } bool runOnSCC(CallGraphSCC &SCC) override; void getAnalysisUsage(AnalysisUsage &AU) const override; }; static int computeThresholdFromOptLevels(unsigned OptLevel, unsigned SizeOptLevel) { if (OptLevel > 2) return 275; if (SizeOptLevel == 1) // -Os return 75; if (SizeOptLevel == 2) // -Oz return 25; return 225; } } // end anonymous namespace char SimpleInliner::ID = 0; INITIALIZE_PASS_BEGIN(SimpleInliner, "inline", "Function Integration/Inlining", false, false) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass) INITIALIZE_PASS_DEPENDENCY(InlineCostAnalysis) INITIALIZE_PASS_END(SimpleInliner, "inline", "Function Integration/Inlining", false, false) Pass *llvm::createFunctionInliningPass() { return new SimpleInliner(); } Pass *llvm::createFunctionInliningPass(int Threshold) { return new SimpleInliner(Threshold); } Pass *llvm::createFunctionInliningPass(unsigned OptLevel, unsigned SizeOptLevel) { return new SimpleInliner( computeThresholdFromOptLevels(OptLevel, SizeOptLevel)); } bool SimpleInliner::runOnSCC(CallGraphSCC &SCC) { ICA = &getAnalysis<InlineCostAnalysis>(); return Inliner::runOnSCC(SCC); } void SimpleInliner::getAnalysisUsage(AnalysisUsage &AU) const { AU.addRequired<InlineCostAnalysis>(); Inliner::getAnalysisUsage(AU); } // HLSL Change Starts void Inliner::applyOptions(PassOptions O) { GetPassOptionUnsigned(O, "InlineThreshold", &InlineThreshold, InlineThreshold); GetPassOptionBool(O, "InsertLifetime", &InsertLifetime, InsertLifetime); } void Inliner::dumpConfig(raw_ostream &OS) { CallGraphSCCPass::dumpConfig(OS); OS << ",InlineThreshold=" << InlineThreshold; OS << ",InsertLifetime=" << (InsertLifetime ? 
't' : 'f'); } // HLSL Change Ends
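// --- Editor's note: illustrative sketch, not part of the original file. ---
// Shows how the OptLevel/SizeOptLevel overload maps onto the fixed thresholds
// in computeThresholdFromOptLevels above (-O3 -> 275, -Os -> 75, -Oz -> 25,
// default -> 225). buildInlinerPipeline and M are hypothetical names; this is
// roughly what PassManagerBuilder does when populating an -O pipeline.
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/IPO.h"

static void buildInlinerPipeline(llvm::Module &M, unsigned OptLevel,
                                 unsigned SizeOptLevel) {
  llvm::legacy::PassManager PM;
  PM.add(llvm::createFunctionInliningPass(OptLevel, SizeOptLevel));
  PM.run(M);
}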
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/IPO/LowerBitSets.cpp
//===-- LowerBitSets.cpp - Bitset lowering pass ---------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass lowers bitset metadata and calls to the llvm.bitset.test intrinsic. // See http://llvm.org/docs/LangRef.html#bitsets for more information. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/IPO/LowerBitSets.h" #include "llvm/Transforms/IPO.h" #include "llvm/ADT/EquivalenceClasses.h" #include "llvm/ADT/Statistic.h" #include "llvm/ADT/Triple.h" #include "llvm/IR/Constant.h" #include "llvm/IR/Constants.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/Module.h" #include "llvm/IR/Operator.h" #include "llvm/Pass.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" using namespace llvm; #define DEBUG_TYPE "lowerbitsets" STATISTIC(ByteArraySizeBits, "Byte array size in bits"); STATISTIC(ByteArraySizeBytes, "Byte array size in bytes"); STATISTIC(NumByteArraysCreated, "Number of byte arrays created"); STATISTIC(NumBitSetCallsLowered, "Number of bitset calls lowered"); STATISTIC(NumBitSetDisjointSets, "Number of disjoint sets of bitsets"); #if 0 // HLSL Change static cl::opt<bool> AvoidReuse( "lowerbitsets-avoid-reuse", cl::desc("Try to avoid reuse of byte array addresses using aliases"), cl::Hidden, cl::init(true)); #else static bool AvoidReuse = true; #endif bool BitSetInfo::containsGlobalOffset(uint64_t Offset) const { if (Offset < ByteOffset) return false; if ((Offset - ByteOffset) % (uint64_t(1) << AlignLog2) != 0) return false; uint64_t BitOffset = (Offset - ByteOffset) >> AlignLog2; if (BitOffset >= BitSize) return false; return Bits.count(BitOffset); } bool BitSetInfo::containsValue( const DataLayout &DL, const DenseMap<GlobalVariable *, uint64_t> &GlobalLayout, Value *V, uint64_t COffset) const { if (auto GV = dyn_cast<GlobalVariable>(V)) { auto I = GlobalLayout.find(GV); if (I == GlobalLayout.end()) return false; return containsGlobalOffset(I->second + COffset); } if (auto GEP = dyn_cast<GEPOperator>(V)) { APInt APOffset(DL.getPointerSizeInBits(0), 0); bool Result = GEP->accumulateConstantOffset(DL, APOffset); if (!Result) return false; COffset += APOffset.getZExtValue(); return containsValue(DL, GlobalLayout, GEP->getPointerOperand(), COffset); } if (auto Op = dyn_cast<Operator>(V)) { if (Op->getOpcode() == Instruction::BitCast) return containsValue(DL, GlobalLayout, Op->getOperand(0), COffset); if (Op->getOpcode() == Instruction::Select) return containsValue(DL, GlobalLayout, Op->getOperand(1), COffset) && containsValue(DL, GlobalLayout, Op->getOperand(2), COffset); } return false; } BitSetInfo BitSetBuilder::build() { if (Min > Max) Min = 0; // Normalize each offset against the minimum observed offset, and compute // the bitwise OR of each of the offsets. The number of trailing zeros // in the mask gives us the log2 of the alignment of all offsets, which // allows us to compress the bitset by only storing one bit per aligned // address. 
uint64_t Mask = 0; for (uint64_t &Offset : Offsets) { Offset -= Min; Mask |= Offset; } BitSetInfo BSI; BSI.ByteOffset = Min; BSI.AlignLog2 = 0; if (Mask != 0) BSI.AlignLog2 = countTrailingZeros(Mask, ZB_Undefined); // Build the compressed bitset while normalizing the offsets against the // computed alignment. BSI.BitSize = ((Max - Min) >> BSI.AlignLog2) + 1; for (uint64_t Offset : Offsets) { Offset >>= BSI.AlignLog2; BSI.Bits.insert(Offset); } return BSI; } void GlobalLayoutBuilder::addFragment(const std::set<uint64_t> &F) { // Create a new fragment to hold the layout for F. Fragments.emplace_back(); std::vector<uint64_t> &Fragment = Fragments.back(); uint64_t FragmentIndex = Fragments.size() - 1; for (auto ObjIndex : F) { uint64_t OldFragmentIndex = FragmentMap[ObjIndex]; if (OldFragmentIndex == 0) { // We haven't seen this object index before, so just add it to the current // fragment. Fragment.push_back(ObjIndex); } else { // This index belongs to an existing fragment. Copy the elements of the // old fragment into this one and clear the old fragment. We don't update // the fragment map just yet, this ensures that any further references to // indices from the old fragment in this fragment do not insert any more // indices. std::vector<uint64_t> &OldFragment = Fragments[OldFragmentIndex]; Fragment.insert(Fragment.end(), OldFragment.begin(), OldFragment.end()); OldFragment.clear(); } } // Update the fragment map to point our object indices to this fragment. for (uint64_t ObjIndex : Fragment) FragmentMap[ObjIndex] = FragmentIndex; } void ByteArrayBuilder::allocate(const std::set<uint64_t> &Bits, uint64_t BitSize, uint64_t &AllocByteOffset, uint8_t &AllocMask) { // Find the smallest current allocation. unsigned Bit = 0; for (unsigned I = 1; I != BitsPerByte; ++I) if (BitAllocs[I] < BitAllocs[Bit]) Bit = I; AllocByteOffset = BitAllocs[Bit]; // Add our size to it. unsigned ReqSize = AllocByteOffset + BitSize; BitAllocs[Bit] = ReqSize; if (Bytes.size() < ReqSize) Bytes.resize(ReqSize); // Set our bits. AllocMask = 1 << Bit; for (uint64_t B : Bits) Bytes[AllocByteOffset + B] |= AllocMask; } namespace { struct ByteArrayInfo { std::set<uint64_t> Bits; uint64_t BitSize; GlobalVariable *ByteArray; Constant *Mask; }; struct LowerBitSets : public ModulePass { static char ID; LowerBitSets() : ModulePass(ID) { initializeLowerBitSetsPass(*PassRegistry::getPassRegistry()); } Module *M; bool LinkerSubsectionsViaSymbols; IntegerType *Int1Ty; IntegerType *Int8Ty; IntegerType *Int32Ty; Type *Int32PtrTy; IntegerType *Int64Ty; Type *IntPtrTy; // The llvm.bitsets named metadata. NamedMDNode *BitSetNM; // Mapping from bitset mdstrings to the call sites that test them. 
DenseMap<MDString *, std::vector<CallInst *>> BitSetTestCallSites; std::vector<ByteArrayInfo> ByteArrayInfos; BitSetInfo buildBitSet(MDString *BitSet, const DenseMap<GlobalVariable *, uint64_t> &GlobalLayout); ByteArrayInfo *createByteArray(BitSetInfo &BSI); void allocateByteArrays(); Value *createBitSetTest(IRBuilder<> &B, BitSetInfo &BSI, ByteArrayInfo *&BAI, Value *BitOffset); Value * lowerBitSetCall(CallInst *CI, BitSetInfo &BSI, ByteArrayInfo *&BAI, GlobalVariable *CombinedGlobal, const DenseMap<GlobalVariable *, uint64_t> &GlobalLayout); void buildBitSetsFromGlobals(const std::vector<MDString *> &BitSets, const std::vector<GlobalVariable *> &Globals); bool buildBitSets(); bool eraseBitSetMetadata(); bool doInitialization(Module &M) override; bool runOnModule(Module &M) override; }; } // namespace INITIALIZE_PASS_BEGIN(LowerBitSets, "lowerbitsets", "Lower bitset metadata", false, false) INITIALIZE_PASS_END(LowerBitSets, "lowerbitsets", "Lower bitset metadata", false, false) char LowerBitSets::ID = 0; ModulePass *llvm::createLowerBitSetsPass() { return new LowerBitSets; } bool LowerBitSets::doInitialization(Module &Mod) { M = &Mod; const DataLayout &DL = Mod.getDataLayout(); Triple TargetTriple(M->getTargetTriple()); LinkerSubsectionsViaSymbols = TargetTriple.isMacOSX(); Int1Ty = Type::getInt1Ty(M->getContext()); Int8Ty = Type::getInt8Ty(M->getContext()); Int32Ty = Type::getInt32Ty(M->getContext()); Int32PtrTy = PointerType::getUnqual(Int32Ty); Int64Ty = Type::getInt64Ty(M->getContext()); IntPtrTy = DL.getIntPtrType(M->getContext(), 0); BitSetNM = M->getNamedMetadata("llvm.bitsets"); BitSetTestCallSites.clear(); return false; } /// Build a bit set for BitSet using the object layouts in /// GlobalLayout. BitSetInfo LowerBitSets::buildBitSet( MDString *BitSet, const DenseMap<GlobalVariable *, uint64_t> &GlobalLayout) { BitSetBuilder BSB; // Compute the byte offset of each element of this bitset. if (BitSetNM) { for (MDNode *Op : BitSetNM->operands()) { if (Op->getOperand(0) != BitSet || !Op->getOperand(1)) continue; auto OpGlobal = dyn_cast<GlobalVariable>( cast<ConstantAsMetadata>(Op->getOperand(1))->getValue()); if (!OpGlobal) continue; uint64_t Offset = cast<ConstantInt>(cast<ConstantAsMetadata>(Op->getOperand(2)) ->getValue())->getZExtValue(); Offset += GlobalLayout.find(OpGlobal)->second; BSB.addOffset(Offset); } } return BSB.build(); } /// Build a test that bit BitOffset mod sizeof(Bits)*8 is set in /// Bits. This pattern matches to the bt instruction on x86. static Value *createMaskedBitTest(IRBuilder<> &B, Value *Bits, Value *BitOffset) { auto BitsType = cast<IntegerType>(Bits->getType()); unsigned BitWidth = BitsType->getBitWidth(); BitOffset = B.CreateZExtOrTrunc(BitOffset, BitsType); Value *BitIndex = B.CreateAnd(BitOffset, ConstantInt::get(BitsType, BitWidth - 1)); Value *BitMask = B.CreateShl(ConstantInt::get(BitsType, 1), BitIndex); Value *MaskedBits = B.CreateAnd(Bits, BitMask); return B.CreateICmpNE(MaskedBits, ConstantInt::get(BitsType, 0)); } ByteArrayInfo *LowerBitSets::createByteArray(BitSetInfo &BSI) { // Create globals to stand in for byte arrays and masks. These never actually // get initialized, we RAUW and erase them later in allocateByteArrays() once // we know the offset and mask to use. 
auto ByteArrayGlobal = new GlobalVariable( *M, Int8Ty, /*isConstant=*/true, GlobalValue::PrivateLinkage, nullptr); auto MaskGlobal = new GlobalVariable( *M, Int8Ty, /*isConstant=*/true, GlobalValue::PrivateLinkage, nullptr); ByteArrayInfos.emplace_back(); ByteArrayInfo *BAI = &ByteArrayInfos.back(); BAI->Bits = BSI.Bits; BAI->BitSize = BSI.BitSize; BAI->ByteArray = ByteArrayGlobal; BAI->Mask = ConstantExpr::getPtrToInt(MaskGlobal, Int8Ty); return BAI; } void LowerBitSets::allocateByteArrays() { std::stable_sort(ByteArrayInfos.begin(), ByteArrayInfos.end(), [](const ByteArrayInfo &BAI1, const ByteArrayInfo &BAI2) { return BAI1.BitSize > BAI2.BitSize; }); std::vector<uint64_t> ByteArrayOffsets(ByteArrayInfos.size()); ByteArrayBuilder BAB; for (unsigned I = 0; I != ByteArrayInfos.size(); ++I) { ByteArrayInfo *BAI = &ByteArrayInfos[I]; uint8_t Mask; BAB.allocate(BAI->Bits, BAI->BitSize, ByteArrayOffsets[I], Mask); BAI->Mask->replaceAllUsesWith(ConstantInt::get(Int8Ty, Mask)); cast<GlobalVariable>(BAI->Mask->getOperand(0))->eraseFromParent(); } Constant *ByteArrayConst = ConstantDataArray::get(M->getContext(), BAB.Bytes); auto ByteArray = new GlobalVariable(*M, ByteArrayConst->getType(), /*isConstant=*/true, GlobalValue::PrivateLinkage, ByteArrayConst); for (unsigned I = 0; I != ByteArrayInfos.size(); ++I) { ByteArrayInfo *BAI = &ByteArrayInfos[I]; Constant *Idxs[] = {ConstantInt::get(IntPtrTy, 0), ConstantInt::get(IntPtrTy, ByteArrayOffsets[I])}; Constant *GEP = ConstantExpr::getInBoundsGetElementPtr( ByteArrayConst->getType(), ByteArray, Idxs); // Create an alias instead of RAUW'ing the gep directly. On x86 this ensures // that the pc-relative displacement is folded into the lea instead of the // test instruction getting another displacement. if (LinkerSubsectionsViaSymbols) { BAI->ByteArray->replaceAllUsesWith(GEP); } else { GlobalAlias *Alias = GlobalAlias::create(PointerType::getUnqual(Int8Ty), GlobalValue::PrivateLinkage, "bits", GEP, M); BAI->ByteArray->replaceAllUsesWith(Alias); } BAI->ByteArray->eraseFromParent(); } ByteArraySizeBits = BAB.BitAllocs[0] + BAB.BitAllocs[1] + BAB.BitAllocs[2] + BAB.BitAllocs[3] + BAB.BitAllocs[4] + BAB.BitAllocs[5] + BAB.BitAllocs[6] + BAB.BitAllocs[7]; ByteArraySizeBytes = BAB.Bytes.size(); } /// Build a test that bit BitOffset is set in BSI, where /// BitSetGlobal is a global containing the bits in BSI. Value *LowerBitSets::createBitSetTest(IRBuilder<> &B, BitSetInfo &BSI, ByteArrayInfo *&BAI, Value *BitOffset) { if (BSI.BitSize <= 64) { // If the bit set is sufficiently small, we can avoid a load by bit testing // a constant. IntegerType *BitsTy; if (BSI.BitSize <= 32) BitsTy = Int32Ty; else BitsTy = Int64Ty; uint64_t Bits = 0; for (auto Bit : BSI.Bits) Bits |= uint64_t(1) << Bit; Constant *BitsConst = ConstantInt::get(BitsTy, Bits); return createMaskedBitTest(B, BitsConst, BitOffset); } else { if (!BAI) { ++NumByteArraysCreated; BAI = createByteArray(BSI); } Constant *ByteArray = BAI->ByteArray; Type *Ty = BAI->ByteArray->getValueType(); if (!LinkerSubsectionsViaSymbols && AvoidReuse) { // Each use of the byte array uses a different alias. This makes the // backend less likely to reuse previously computed byte array addresses, // improving the security of the CFI mechanism based on this pass. 
ByteArray = GlobalAlias::create(BAI->ByteArray->getType(), GlobalValue::PrivateLinkage, "bits_use", ByteArray, M); } Value *ByteAddr = B.CreateGEP(Ty, ByteArray, BitOffset); Value *Byte = B.CreateLoad(ByteAddr); Value *ByteAndMask = B.CreateAnd(Byte, BAI->Mask); return B.CreateICmpNE(ByteAndMask, ConstantInt::get(Int8Ty, 0)); } } /// Lower a llvm.bitset.test call to its implementation. Returns the value to /// replace the call with. Value *LowerBitSets::lowerBitSetCall( CallInst *CI, BitSetInfo &BSI, ByteArrayInfo *&BAI, GlobalVariable *CombinedGlobal, const DenseMap<GlobalVariable *, uint64_t> &GlobalLayout) { Value *Ptr = CI->getArgOperand(0); const DataLayout &DL = M->getDataLayout(); if (BSI.containsValue(DL, GlobalLayout, Ptr)) return ConstantInt::getTrue(CombinedGlobal->getParent()->getContext()); Constant *GlobalAsInt = ConstantExpr::getPtrToInt(CombinedGlobal, IntPtrTy); Constant *OffsetedGlobalAsInt = ConstantExpr::getAdd( GlobalAsInt, ConstantInt::get(IntPtrTy, BSI.ByteOffset)); BasicBlock *InitialBB = CI->getParent(); IRBuilder<> B(CI); Value *PtrAsInt = B.CreatePtrToInt(Ptr, IntPtrTy); if (BSI.isSingleOffset()) return B.CreateICmpEQ(PtrAsInt, OffsetedGlobalAsInt); Value *PtrOffset = B.CreateSub(PtrAsInt, OffsetedGlobalAsInt); Value *BitOffset; if (BSI.AlignLog2 == 0) { BitOffset = PtrOffset; } else { // We need to check that the offset both falls within our range and is // suitably aligned. We can check both properties at the same time by // performing a right rotate by log2(alignment) followed by an integer // comparison against the bitset size. The rotate will move the lower // order bits that need to be zero into the higher order bits of the // result, causing the comparison to fail if they are nonzero. The rotate // also conveniently gives us a bit offset to use during the load from // the bitset. Value *OffsetSHR = B.CreateLShr(PtrOffset, ConstantInt::get(IntPtrTy, BSI.AlignLog2)); Value *OffsetSHL = B.CreateShl( PtrOffset, ConstantInt::get(IntPtrTy, DL.getPointerSizeInBits(0) - BSI.AlignLog2)); BitOffset = B.CreateOr(OffsetSHR, OffsetSHL); } Constant *BitSizeConst = ConstantInt::get(IntPtrTy, BSI.BitSize); Value *OffsetInRange = B.CreateICmpULT(BitOffset, BitSizeConst); // If the bit set is all ones, testing against it is unnecessary. if (BSI.isAllOnes()) return OffsetInRange; TerminatorInst *Term = SplitBlockAndInsertIfThen(OffsetInRange, CI, false); IRBuilder<> ThenB(Term); // Now that we know that the offset is in range and aligned, load the // appropriate bit from the bitset. Value *Bit = createBitSetTest(ThenB, BSI, BAI, BitOffset); // The value we want is 0 if we came directly from the initial block // (having failed the range or alignment checks), or the loaded bit if // we came from the block in which we loaded it. B.SetInsertPoint(CI); PHINode *P = B.CreatePHI(Int1Ty, 2); P->addIncoming(ConstantInt::get(Int1Ty, 0), InitialBB); P->addIncoming(Bit, ThenB.GetInsertBlock()); return P; } /// Given a disjoint set of bitsets and globals, layout the globals, build the /// bit sets and lower the llvm.bitset.test calls. void LowerBitSets::buildBitSetsFromGlobals( const std::vector<MDString *> &BitSets, const std::vector<GlobalVariable *> &Globals) { // Build a new global with the combined contents of the referenced globals. 
std::vector<Constant *> GlobalInits; const DataLayout &DL = M->getDataLayout(); for (GlobalVariable *G : Globals) { GlobalInits.push_back(G->getInitializer()); uint64_t InitSize = DL.getTypeAllocSize(G->getInitializer()->getType()); // Compute the amount of padding required to align the next element to the // next power of 2. uint64_t Padding = NextPowerOf2(InitSize - 1) - InitSize; // Cap at 128 was found experimentally to have a good data/instruction // overhead tradeoff. if (Padding > 128) Padding = RoundUpToAlignment(InitSize, 128) - InitSize; GlobalInits.push_back( ConstantAggregateZero::get(ArrayType::get(Int8Ty, Padding))); } if (!GlobalInits.empty()) GlobalInits.pop_back(); Constant *NewInit = ConstantStruct::getAnon(M->getContext(), GlobalInits); auto CombinedGlobal = new GlobalVariable(*M, NewInit->getType(), /*isConstant=*/true, GlobalValue::PrivateLinkage, NewInit); const StructLayout *CombinedGlobalLayout = DL.getStructLayout(cast<StructType>(NewInit->getType())); // Compute the offsets of the original globals within the new global. DenseMap<GlobalVariable *, uint64_t> GlobalLayout; for (unsigned I = 0; I != Globals.size(); ++I) // Multiply by 2 to account for padding elements. GlobalLayout[Globals[I]] = CombinedGlobalLayout->getElementOffset(I * 2); // For each bitset in this disjoint set... for (MDString *BS : BitSets) { // Build the bitset. BitSetInfo BSI = buildBitSet(BS, GlobalLayout); ByteArrayInfo *BAI = 0; // Lower each call to llvm.bitset.test for this bitset. for (CallInst *CI : BitSetTestCallSites[BS]) { ++NumBitSetCallsLowered; Value *Lowered = lowerBitSetCall(CI, BSI, BAI, CombinedGlobal, GlobalLayout); CI->replaceAllUsesWith(Lowered); CI->eraseFromParent(); } } // Build aliases pointing to offsets into the combined global for each // global from which we built the combined global, and replace references // to the original globals with references to the aliases. for (unsigned I = 0; I != Globals.size(); ++I) { // Multiply by 2 to account for padding elements. Constant *CombinedGlobalIdxs[] = {ConstantInt::get(Int32Ty, 0), ConstantInt::get(Int32Ty, I * 2)}; Constant *CombinedGlobalElemPtr = ConstantExpr::getGetElementPtr( NewInit->getType(), CombinedGlobal, CombinedGlobalIdxs); if (LinkerSubsectionsViaSymbols) { Globals[I]->replaceAllUsesWith(CombinedGlobalElemPtr); } else { GlobalAlias *GAlias = GlobalAlias::create(Globals[I]->getType(), Globals[I]->getLinkage(), "", CombinedGlobalElemPtr, M); GAlias->takeName(Globals[I]); Globals[I]->replaceAllUsesWith(GAlias); } Globals[I]->eraseFromParent(); } } /// Lower all bit sets in this module. bool LowerBitSets::buildBitSets() { Function *BitSetTestFunc = M->getFunction(Intrinsic::getName(Intrinsic::bitset_test)); if (!BitSetTestFunc) return false; // Equivalence class set containing bitsets and the globals they reference. // This is used to partition the set of bitsets in the module into disjoint // sets. typedef EquivalenceClasses<PointerUnion<GlobalVariable *, MDString *>> GlobalClassesTy; GlobalClassesTy GlobalClasses; for (const Use &U : BitSetTestFunc->uses()) { auto CI = cast<CallInst>(U.getUser()); auto BitSetMDVal = dyn_cast<MetadataAsValue>(CI->getArgOperand(1)); if (!BitSetMDVal || !isa<MDString>(BitSetMDVal->getMetadata())) report_fatal_error( "Second argument of llvm.bitset.test must be metadata string"); auto BitSet = cast<MDString>(BitSetMDVal->getMetadata()); // Add the call site to the list of call sites for this bit set. 
We also use // BitSetTestCallSites to keep track of whether we have seen this bit set // before. If we have, we don't need to re-add the referenced globals to the // equivalence class. std::pair<DenseMap<MDString *, std::vector<CallInst *>>::iterator, bool> Ins = BitSetTestCallSites.insert( std::make_pair(BitSet, std::vector<CallInst *>())); Ins.first->second.push_back(CI); if (!Ins.second) continue; // Add the bitset to the equivalence class. GlobalClassesTy::iterator GCI = GlobalClasses.insert(BitSet); GlobalClassesTy::member_iterator CurSet = GlobalClasses.findLeader(GCI); if (!BitSetNM) continue; // Verify the bitset metadata and add the referenced globals to the bitset's // equivalence class. for (MDNode *Op : BitSetNM->operands()) { if (Op->getNumOperands() != 3) report_fatal_error( "All operands of llvm.bitsets metadata must have 3 elements"); if (Op->getOperand(0) != BitSet || !Op->getOperand(1)) continue; auto OpConstMD = dyn_cast<ConstantAsMetadata>(Op->getOperand(1)); if (!OpConstMD) report_fatal_error("Bit set element must be a constant"); auto OpGlobal = dyn_cast<GlobalVariable>(OpConstMD->getValue()); if (!OpGlobal) continue; auto OffsetConstMD = dyn_cast<ConstantAsMetadata>(Op->getOperand(2)); if (!OffsetConstMD) report_fatal_error("Bit set element offset must be a constant"); auto OffsetInt = dyn_cast<ConstantInt>(OffsetConstMD->getValue()); if (!OffsetInt) report_fatal_error( "Bit set element offset must be an integer constant"); CurSet = GlobalClasses.unionSets( CurSet, GlobalClasses.findLeader(GlobalClasses.insert(OpGlobal))); } } if (GlobalClasses.empty()) return false; // For each disjoint set we found... for (GlobalClassesTy::iterator I = GlobalClasses.begin(), E = GlobalClasses.end(); I != E; ++I) { if (!I->isLeader()) continue; ++NumBitSetDisjointSets; // Build the list of bitsets and referenced globals in this disjoint set. std::vector<MDString *> BitSets; std::vector<GlobalVariable *> Globals; llvm::DenseMap<MDString *, uint64_t> BitSetIndices; llvm::DenseMap<GlobalVariable *, uint64_t> GlobalIndices; for (GlobalClassesTy::member_iterator MI = GlobalClasses.member_begin(I); MI != GlobalClasses.member_end(); ++MI) { if ((*MI).is<MDString *>()) { BitSetIndices[MI->get<MDString *>()] = BitSets.size(); BitSets.push_back(MI->get<MDString *>()); } else { GlobalIndices[MI->get<GlobalVariable *>()] = Globals.size(); Globals.push_back(MI->get<GlobalVariable *>()); } } // For each bitset, build a set of indices that refer to globals referenced // by the bitset. std::vector<std::set<uint64_t>> BitSetMembers(BitSets.size()); if (BitSetNM) { for (MDNode *Op : BitSetNM->operands()) { // Op = { bitset name, global, offset } if (!Op->getOperand(1)) continue; auto I = BitSetIndices.find(cast<MDString>(Op->getOperand(0))); if (I == BitSetIndices.end()) continue; auto OpGlobal = dyn_cast<GlobalVariable>( cast<ConstantAsMetadata>(Op->getOperand(1))->getValue()); if (!OpGlobal) continue; BitSetMembers[I->second].insert(GlobalIndices[OpGlobal]); } } // Order the sets of indices by size. The GlobalLayoutBuilder works best // when given small index sets first. std::stable_sort( BitSetMembers.begin(), BitSetMembers.end(), [](const std::set<uint64_t> &O1, const std::set<uint64_t> &O2) { return O1.size() < O2.size(); }); // Create a GlobalLayoutBuilder and provide it with index sets as layout // fragments. The GlobalLayoutBuilder tries to lay out members of fragments // as close together as possible. 
GlobalLayoutBuilder GLB(Globals.size()); for (auto &&MemSet : BitSetMembers) GLB.addFragment(MemSet); // Build a vector of globals with the computed layout. std::vector<GlobalVariable *> OrderedGlobals(Globals.size()); auto OGI = OrderedGlobals.begin(); for (auto &&F : GLB.Fragments) for (auto &&Offset : F) *OGI++ = Globals[Offset]; // Order bitsets by name for determinism. std::sort(BitSets.begin(), BitSets.end(), [](MDString *S1, MDString *S2) { return S1->getString() < S2->getString(); }); // Build the bitsets from this disjoint set. buildBitSetsFromGlobals(BitSets, OrderedGlobals); } allocateByteArrays(); return true; } bool LowerBitSets::eraseBitSetMetadata() { if (!BitSetNM) return false; M->eraseNamedMetadata(BitSetNM); return true; } bool LowerBitSets::runOnModule(Module &M) { bool Changed = buildBitSets(); Changed |= eraseBitSetMetadata(); return Changed; }
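// --- Editor's note: standalone sketch, not part of the original file. ---
// Plain-C++ illustration of the combined range-and-alignment check that
// lowerBitSetCall above builds in IR (lshr/shl/or followed by an unsigned
// compare). offsetInBitSet and its parameters are illustrative names only,
// and a 64-bit pointer width is assumed.
#include <cstdint>

static bool offsetInBitSet(uint64_t PtrOffset, unsigned AlignLog2,
                           uint64_t BitSize) {
  if (AlignLog2 == 0)                 // the pass special-cases zero alignment
    return PtrOffset < BitSize;
  // Rotate right by AlignLog2: any misaligned low-order bits land in the
  // high-order bits, so a single unsigned comparison against BitSize rejects
  // both misaligned and out-of-range offsets, and the rotated value doubles
  // as the bit offset used for the subsequent bitset load.
  uint64_t Rotated = (PtrOffset >> AlignLog2) | (PtrOffset << (64 - AlignLog2));
  return Rotated < BitSize;
}
// Example: with AlignLog2 == 3 and BitSize == 4, offsets 0, 8, 16, 24 pass,
// while 4 (misaligned) and 32 (past the end) fail.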
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/IPO/GlobalOpt.cpp
//===- GlobalOpt.cpp - Optimize Global Variables --------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass transforms simple global variables that never have their address // taken. If obviously true, it marks read/write globals as constant, deletes // variables only stored to, etc. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/IPO.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/ConstantFolding.h" #include "llvm/Analysis/MemoryBuiltins.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/CallingConv.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/GetElementPtrTypeIterator.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/IR/Operator.h" #include "llvm/IR/ValueHandle.h" #include "llvm/Pass.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/CtorUtils.h" #include "llvm/Transforms/Utils/GlobalStatus.h" #include "llvm/Transforms/Utils/ModuleUtils.h" #include "dxc/DXIL/DxilModule.h" // HLSL Change - Entrypoint testing #include <algorithm> #include <deque> using namespace llvm; #define DEBUG_TYPE "globalopt" STATISTIC(NumMarked , "Number of globals marked constant"); STATISTIC(NumUnnamed , "Number of globals marked unnamed_addr"); STATISTIC(NumSRA , "Number of aggregate globals broken into scalars"); STATISTIC(NumHeapSRA , "Number of heap objects SRA'd"); STATISTIC(NumSubstitute,"Number of globals with initializers stored into them"); STATISTIC(NumDeleted , "Number of globals deleted"); STATISTIC(NumFnDeleted , "Number of functions deleted"); STATISTIC(NumGlobUses , "Number of global uses devirtualized"); STATISTIC(NumLocalized , "Number of globals localized"); STATISTIC(NumShrunkToBool , "Number of global vars shrunk to booleans"); STATISTIC(NumFastCallFns , "Number of functions converted to fastcc"); STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated"); STATISTIC(NumNestRemoved , "Number of nest attributes removed"); STATISTIC(NumAliasesResolved, "Number of global aliases resolved"); STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated"); STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed"); namespace { struct GlobalOpt : public ModulePass { void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<TargetLibraryInfoWrapperPass>(); } static char ID; // Pass identification, replacement for typeid GlobalOpt() : ModulePass(ID) { initializeGlobalOptPass(*PassRegistry::getPassRegistry()); } bool runOnModule(Module &M) override; private: bool OptimizeFunctions(Module &M); bool OptimizeGlobalVars(Module &M); bool OptimizeGlobalAliases(Module &M); bool ProcessGlobal(GlobalVariable *GV,Module::global_iterator &GVI); bool ProcessInternalGlobal(GlobalVariable *GV,Module::global_iterator &GVI, const GlobalStatus &GS); bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn); 
TargetLibraryInfo *TLI; SmallSet<const Comdat *, 8> NotDiscardableComdats; }; } char GlobalOpt::ID = 0; INITIALIZE_PASS_BEGIN(GlobalOpt, "globalopt", "Global Variable Optimizer", false, false) INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) INITIALIZE_PASS_END(GlobalOpt, "globalopt", "Global Variable Optimizer", false, false) ModulePass *llvm::createGlobalOptimizerPass() { return new GlobalOpt(); } /// isLeakCheckerRoot - Is this global variable possibly used by a leak checker /// as a root? If so, we might not really want to eliminate the stores to it. static bool isLeakCheckerRoot(GlobalVariable *GV) { // A global variable is a root if it is a pointer, or could plausibly contain // a pointer. There are two challenges; one is that we could have a struct // the has an inner member which is a pointer. We recurse through the type to // detect these (up to a point). The other is that we may actually be a union // of a pointer and another type, and so our LLVM type is an integer which // gets converted into a pointer, or our type is an [i8 x #] with a pointer // potentially contained here. if (GV->hasPrivateLinkage()) return false; SmallVector<Type *, 4> Types; Types.push_back(cast<PointerType>(GV->getType())->getElementType()); unsigned Limit = 20; do { Type *Ty = Types.pop_back_val(); switch (Ty->getTypeID()) { default: break; case Type::PointerTyID: return true; case Type::ArrayTyID: case Type::VectorTyID: { SequentialType *STy = cast<SequentialType>(Ty); Types.push_back(STy->getElementType()); break; } case Type::StructTyID: { StructType *STy = cast<StructType>(Ty); if (STy->isOpaque()) return true; for (StructType::element_iterator I = STy->element_begin(), E = STy->element_end(); I != E; ++I) { Type *InnerTy = *I; if (isa<PointerType>(InnerTy)) return true; if (isa<CompositeType>(InnerTy)) Types.push_back(InnerTy); } break; } } if (--Limit == 0) return true; } while (!Types.empty()); return false; } /// Given a value that is stored to a global but never read, determine whether /// it's safe to remove the store and the chain of computation that feeds the /// store. static bool IsSafeComputationToRemove(Value *V, const TargetLibraryInfo *TLI) { do { if (isa<Constant>(V)) return true; if (!V->hasOneUse()) return false; if (isa<LoadInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V) || isa<GlobalValue>(V)) return false; if (isAllocationFn(V, TLI)) return true; Instruction *I = cast<Instruction>(V); if (I->mayHaveSideEffects()) return false; if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) { if (!GEP->hasAllConstantIndices()) return false; } else if (I->getNumOperands() != 1) { return false; } V = I->getOperand(0); } while (1); } /// CleanupPointerRootUsers - This GV is a pointer root. Loop over all users /// of the global and clean up any that obviously don't assign the global a /// value that isn't dynamically allocated. /// static bool CleanupPointerRootUsers(GlobalVariable *GV, const TargetLibraryInfo *TLI) { // A brief explanation of leak checkers. The goal is to find bugs where // pointers are forgotten, causing an accumulating growth in memory // usage over time. The common strategy for leak checkers is to whitelist the // memory pointed to by globals at exit. This is popular because it also // solves another problem where the main thread of a C++ program may shut down // before other threads that are still expecting to use those globals. To // handle that case, we expect the program may create a singleton and never // destroy it. 
bool Changed = false; // If Dead[n].first is the only use of a malloc result, we can delete its // chain of computation and the store to the global in Dead[n].second. SmallVector<std::pair<Instruction *, Instruction *>, 32> Dead; // Constants can't be pointers to dynamically allocated memory. for (Value::user_iterator UI = GV->user_begin(), E = GV->user_end(); UI != E;) { User *U = *UI++; if (StoreInst *SI = dyn_cast<StoreInst>(U)) { Value *V = SI->getValueOperand(); if (isa<Constant>(V)) { Changed = true; SI->eraseFromParent(); } else if (Instruction *I = dyn_cast<Instruction>(V)) { if (I->hasOneUse()) Dead.push_back(std::make_pair(I, SI)); } } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(U)) { if (isa<Constant>(MSI->getValue())) { Changed = true; MSI->eraseFromParent(); } else if (Instruction *I = dyn_cast<Instruction>(MSI->getValue())) { if (I->hasOneUse()) Dead.push_back(std::make_pair(I, MSI)); } } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(U)) { GlobalVariable *MemSrc = dyn_cast<GlobalVariable>(MTI->getSource()); if (MemSrc && MemSrc->isConstant()) { Changed = true; MTI->eraseFromParent(); } else if (Instruction *I = dyn_cast<Instruction>(MemSrc)) { if (I->hasOneUse()) Dead.push_back(std::make_pair(I, MTI)); } } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) { if (CE->use_empty()) { CE->destroyConstant(); Changed = true; } } else if (Constant *C = dyn_cast<Constant>(U)) { if (isSafeToDestroyConstant(C)) { C->destroyConstant(); // This could have invalidated UI, start over from scratch. Dead.clear(); CleanupPointerRootUsers(GV, TLI); return true; } } } for (int i = 0, e = Dead.size(); i != e; ++i) { if (IsSafeComputationToRemove(Dead[i].first, TLI)) { Dead[i].second->eraseFromParent(); Instruction *I = Dead[i].first; do { if (isAllocationFn(I, TLI)) break; Instruction *J = dyn_cast<Instruction>(I->getOperand(0)); if (!J) break; I->eraseFromParent(); I = J; } while (1); I->eraseFromParent(); } } return Changed; } /// CleanupConstantGlobalUsers - We just marked GV constant. Loop over all /// users of the global, cleaning up the obvious ones. This is largely just a /// quick scan over the use list to clean up the easy and obvious cruft. This /// returns true if it made a change. static bool CleanupConstantGlobalUsers(Value *V, Constant *Init, const DataLayout &DL, TargetLibraryInfo *TLI) { bool Changed = false; // Note that we need to use a weak value handle for the worklist items. When // we delete a constant array, we may also be holding pointer to one of its // elements (or an element of one of its elements if we're dealing with an // array of arrays) in the worklist. SmallVector<WeakTrackingVH, 8> WorkList(V->user_begin(), V->user_end()); while (!WorkList.empty()) { Value *UV = WorkList.pop_back_val(); if (!UV) continue; User *U = cast<User>(UV); if (LoadInst *LI = dyn_cast<LoadInst>(U)) { if (Init) { // Replace the load with the initializer. LI->replaceAllUsesWith(Init); LI->eraseFromParent(); Changed = true; } } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) { // Store must be unreachable or storing Init into the global. 
SI->eraseFromParent(); Changed = true; } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) { if (CE->getOpcode() == Instruction::GetElementPtr) { Constant *SubInit = nullptr; if (Init) SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE); Changed |= CleanupConstantGlobalUsers(CE, SubInit, DL, TLI); } else if ((CE->getOpcode() == Instruction::BitCast && CE->getType()->isPointerTy()) || CE->getOpcode() == Instruction::AddrSpaceCast) { // Pointer cast, delete any stores and memsets to the global. Changed |= CleanupConstantGlobalUsers(CE, nullptr, DL, TLI); } if (CE->use_empty()) { CE->destroyConstant(); Changed = true; } } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) { // Do not transform "gepinst (gep constexpr (GV))" here, because forming // "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold // and will invalidate our notion of what Init is. Constant *SubInit = nullptr; if (!isa<ConstantExpr>(GEP->getOperand(0))) { ConstantExpr *CE = dyn_cast_or_null<ConstantExpr>( ConstantFoldInstruction(GEP, DL, TLI)); if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr) SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE); // If the initializer is an all-null value and we have an inbounds GEP, // we already know what the result of any load from that GEP is. // TODO: Handle splats. if (Init && isa<ConstantAggregateZero>(Init) && GEP->isInBounds()) SubInit = Constant::getNullValue(GEP->getType()->getElementType()); } Changed |= CleanupConstantGlobalUsers(GEP, SubInit, DL, TLI); if (GEP->use_empty()) { GEP->eraseFromParent(); Changed = true; } } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv if (MI->getRawDest() == V) { MI->eraseFromParent(); Changed = true; } } else if (Constant *C = dyn_cast<Constant>(U)) { // If we have a chain of dead constantexprs or other things dangling from // us, and if they are all dead, nuke them without remorse. if (isSafeToDestroyConstant(C)) { C->destroyConstant(); CleanupConstantGlobalUsers(V, Init, DL, TLI); return true; } } } return Changed; } /// isSafeSROAElementUse - Return true if the specified instruction is a safe /// user of a derived expression from a global that we want to SROA. static bool isSafeSROAElementUse(Value *V) { // We might have a dead and dangling constant hanging off of here. if (Constant *C = dyn_cast<Constant>(V)) return isSafeToDestroyConstant(C); Instruction *I = dyn_cast<Instruction>(V); if (!I) return false; // Loads are ok. if (isa<LoadInst>(I)) return true; // Stores *to* the pointer are ok. if (StoreInst *SI = dyn_cast<StoreInst>(I)) return SI->getOperand(0) != V; // Otherwise, it must be a GEP. GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I); if (!GEPI) return false; if (GEPI->getNumOperands() < 3 || !isa<Constant>(GEPI->getOperand(1)) || !cast<Constant>(GEPI->getOperand(1))->isNullValue()) return false; for (User *U : GEPI->users()) if (!isSafeSROAElementUse(U)) return false; return true; } /// IsUserOfGlobalSafeForSRA - U is a direct user of the specified global value. /// Look at it and its uses and decide whether it is safe to SROA this global. /// static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) { // The user of the global must be a GEP Inst or a ConstantExpr GEP. if (!isa<GetElementPtrInst>(U) && (!isa<ConstantExpr>(U) || cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr)) return false; // Check to see if this ConstantExpr GEP is SRA'able. 
In particular, we // don't like < 3 operand CE's, and we don't like non-constant integer // indices. This enforces that all uses are 'gep GV, 0, C, ...' for some // value of C. if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) || !cast<Constant>(U->getOperand(1))->isNullValue() || !isa<ConstantInt>(U->getOperand(2))) return false; gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U); ++GEPI; // Skip over the pointer index. // If this is a use of an array allocation, do a bit more checking for sanity. if (ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) { uint64_t NumElements = AT->getNumElements(); ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2)); // Check to make sure that index falls within the array. If not, // something funny is going on, so we won't do the optimization. // if (Idx->getZExtValue() >= NumElements) return false; // We cannot scalar repl this level of the array unless any array // sub-indices are in-range constants. In particular, consider: // A[0][i]. We cannot know that the user isn't doing invalid things like // allowing i to index an out-of-range subscript that accesses A[1]. // // Scalar replacing *just* the outer index of the array is probably not // going to be a win anyway, so just give up. for (++GEPI; // Skip array index. GEPI != E; ++GEPI) { uint64_t NumElements; if (ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI)) NumElements = SubArrayTy->getNumElements(); else if (VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI)) NumElements = SubVectorTy->getNumElements(); else { assert((*GEPI)->isStructTy() && "Indexed GEP type is not array, vector, or struct!"); continue; } ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand()); if (!IdxVal || IdxVal->getZExtValue() >= NumElements) return false; } } for (User *UU : U->users()) if (!isSafeSROAElementUse(UU)) return false; return true; } /// GlobalUsersSafeToSRA - Look at all uses of the global and decide whether it /// is safe for us to perform this transformation. /// static bool GlobalUsersSafeToSRA(GlobalValue *GV) { for (User *U : GV->users()) if (!IsUserOfGlobalSafeForSRA(U, GV)) return false; return true; } /// SRAGlobal - Perform scalar replacement of aggregates on the specified global /// variable. This opens the door for other optimizations by exposing the /// behavior of the program in a more fine-grained way. We have determined that /// this transformation is safe already. We return the first global variable we /// insert so that the caller can reprocess it. static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) { // Make sure this global only has simple uses that we can SRA. if (!GlobalUsersSafeToSRA(GV)) return nullptr; assert(GV->hasLocalLinkage() && !GV->isConstant()); Constant *Init = GV->getInitializer(); Type *Ty = Init->getType(); std::vector<GlobalVariable*> NewGlobals; Module::GlobalListType &Globals = GV->getParent()->getGlobalList(); // Get the alignment of the global, either explicit or target-specific. 
unsigned StartAlignment = GV->getAlignment(); if (StartAlignment == 0) StartAlignment = DL.getABITypeAlignment(GV->getType()); if (StructType *STy = dyn_cast<StructType>(Ty)) { NewGlobals.reserve(STy->getNumElements()); const StructLayout &Layout = *DL.getStructLayout(STy); for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { Constant *In = Init->getAggregateElement(i); assert(In && "Couldn't get element of initializer?"); GlobalVariable *NGV = new GlobalVariable(STy->getElementType(i), false, GlobalVariable::InternalLinkage, In, GV->getName()+"."+Twine(i), GV->getThreadLocalMode(), GV->getType()->getAddressSpace()); Globals.insert(GV, NGV); NewGlobals.push_back(NGV); // Calculate the known alignment of the field. If the original aggregate // had 256 byte alignment for example, something might depend on that: // propagate info to each field. uint64_t FieldOffset = Layout.getElementOffset(i); unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset); if (NewAlign > DL.getABITypeAlignment(STy->getElementType(i))) NGV->setAlignment(NewAlign); } } else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) { unsigned NumElements = 0; if (ArrayType *ATy = dyn_cast<ArrayType>(STy)) NumElements = ATy->getNumElements(); else NumElements = cast<VectorType>(STy)->getNumElements(); if (NumElements > 16 && GV->hasNUsesOrMore(16)) return nullptr; // It's not worth it. NewGlobals.reserve(NumElements); uint64_t EltSize = DL.getTypeAllocSize(STy->getElementType()); unsigned EltAlign = DL.getABITypeAlignment(STy->getElementType()); for (unsigned i = 0, e = NumElements; i != e; ++i) { Constant *In = Init->getAggregateElement(i); assert(In && "Couldn't get element of initializer?"); GlobalVariable *NGV = new GlobalVariable(STy->getElementType(), false, GlobalVariable::InternalLinkage, In, GV->getName()+"."+Twine(i), GV->getThreadLocalMode(), GV->getType()->getAddressSpace()); Globals.insert(GV, NGV); NewGlobals.push_back(NGV); // Calculate the known alignment of the field. If the original aggregate // had 256 byte alignment for example, something might depend on that: // propagate info to each field. unsigned NewAlign = (unsigned)MinAlign(StartAlignment, EltSize*i); if (NewAlign > EltAlign) NGV->setAlignment(NewAlign); } } if (NewGlobals.empty()) return nullptr; DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV); Constant *NullInt =Constant::getNullValue(Type::getInt32Ty(GV->getContext())); // Loop over all of the uses of the global, replacing the constantexpr geps, // with smaller constantexpr geps or direct references. while (!GV->use_empty()) { User *GEP = GV->user_back(); assert(((isa<ConstantExpr>(GEP) && cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)|| isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!"); // Ignore the 1th operand, which has to be zero or else the program is quite // broken (undefined). Get the 2nd operand, which is the structure or array // index. unsigned Val = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue(); if (Val >= NewGlobals.size()) Val = 0; // Out of bound array access. Value *NewPtr = NewGlobals[Val]; Type *NewTy = NewGlobals[Val]->getValueType(); // Form a shorter GEP if needed. 
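// Illustrative sketch of the rewrite performed below (made-up IR, not
// emitted verbatim):
//   %p = getelementptr { i32, [8 x float] }, { i32, [8 x float] }* @G,
//                       i32 0, i32 1, i32 7
// becomes, once @G has been split into @G.0 and @G.1:
//   %p = getelementptr [8 x float], [8 x float]* @G.1, i32 0, i32 7
// i.e. the field index (operand 2) is consumed by picking the new global,
// and any remaining indices are re-applied to that global.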
if (GEP->getNumOperands() > 3) { if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) { SmallVector<Constant*, 8> Idxs; Idxs.push_back(NullInt); for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i) Idxs.push_back(CE->getOperand(i)); NewPtr = ConstantExpr::getGetElementPtr(NewTy, cast<Constant>(NewPtr), Idxs); } else { GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP); SmallVector<Value*, 8> Idxs; Idxs.push_back(NullInt); for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i) Idxs.push_back(GEPI->getOperand(i)); NewPtr = GetElementPtrInst::Create( NewTy, NewPtr, Idxs, GEPI->getName() + "." + Twine(Val), GEPI); } } GEP->replaceAllUsesWith(NewPtr); if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP)) GEPI->eraseFromParent(); else cast<ConstantExpr>(GEP)->destroyConstant(); } // Delete the old global, now that it is dead. Globals.erase(GV); ++NumSRA; // Loop over the new globals array deleting any globals that are obviously // dead. This can arise due to scalarization of a structure or an array that // has elements that are dead. unsigned FirstGlobal = 0; for (unsigned i = 0, e = NewGlobals.size(); i != e; ++i) if (NewGlobals[i]->use_empty()) { Globals.erase(NewGlobals[i]); if (FirstGlobal == i) ++FirstGlobal; } return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : nullptr; } /// AllUsesOfValueWillTrapIfNull - Return true if all users of the specified /// value will trap if the value is dynamically null. PHIs keeps track of any /// phi nodes we've seen to avoid reprocessing them. static bool AllUsesOfValueWillTrapIfNull(const Value *V, SmallPtrSetImpl<const PHINode*> &PHIs) { for (const User *U : V->users()) if (isa<LoadInst>(U)) { // Will trap. } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) { if (SI->getOperand(0) == V) { //cerr << "NONTRAPPING USE: " << *U; return false; // Storing the value. } } else if (const CallInst *CI = dyn_cast<CallInst>(U)) { if (CI->getCalledValue() != V) { //cerr << "NONTRAPPING USE: " << *U; return false; // Not calling the ptr } } else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) { if (II->getCalledValue() != V) { //cerr << "NONTRAPPING USE: " << *U; return false; // Not calling the ptr } } else if (const BitCastInst *CI = dyn_cast<BitCastInst>(U)) { if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false; } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) { if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false; } else if (const PHINode *PN = dyn_cast<PHINode>(U)) { // If we've already seen this phi node, ignore it, it has already been // checked. if (PHIs.insert(PN).second && !AllUsesOfValueWillTrapIfNull(PN, PHIs)) return false; } else if (isa<ICmpInst>(U) && isa<ConstantPointerNull>(U->getOperand(1))) { // Ignore icmp X, null } else { //cerr << "NONTRAPPING USE: " << *U; return false; } return true; } /// AllUsesOfLoadedValueWillTrapIfNull - Return true if all uses of any loads /// from GV will trap if the loaded value is null. Note that this also permits /// comparisons of the loaded value against null, as a special case. static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) { for (const User *U : GV->users()) if (const LoadInst *LI = dyn_cast<LoadInst>(U)) { SmallPtrSet<const PHINode*, 8> PHIs; if (!AllUsesOfValueWillTrapIfNull(LI, PHIs)) return false; } else if (isa<StoreInst>(U)) { // Ignore stores to the global. } else { // We don't know or understand this user, bail out. 
//cerr << "UNKNOWN USER OF GLOBAL!: " << *U; return false; } return true; } static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) { bool Changed = false; for (auto UI = V->user_begin(), E = V->user_end(); UI != E; ) { Instruction *I = cast<Instruction>(*UI++); if (LoadInst *LI = dyn_cast<LoadInst>(I)) { LI->setOperand(0, NewV); Changed = true; } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) { if (SI->getOperand(1) == V) { SI->setOperand(1, NewV); Changed = true; } } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) { CallSite CS(I); if (CS.getCalledValue() == V) { // Calling through the pointer! Turn into a direct call, but be careful // that the pointer is not also being passed as an argument. CS.setCalledFunction(NewV); Changed = true; bool PassedAsArg = false; for (unsigned i = 0, e = CS.arg_size(); i != e; ++i) if (CS.getArgument(i) == V) { PassedAsArg = true; CS.setArgument(i, NewV); } if (PassedAsArg) { // Being passed as an argument also. Be careful to not invalidate UI! UI = V->user_begin(); } } } else if (CastInst *CI = dyn_cast<CastInst>(I)) { Changed |= OptimizeAwayTrappingUsesOfValue(CI, ConstantExpr::getCast(CI->getOpcode(), NewV, CI->getType())); if (CI->use_empty()) { Changed = true; CI->eraseFromParent(); } } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) { // Should handle GEP here. SmallVector<Constant*, 8> Idxs; Idxs.reserve(GEPI->getNumOperands()-1); for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end(); i != e; ++i) if (Constant *C = dyn_cast<Constant>(*i)) Idxs.push_back(C); else break; if (Idxs.size() == GEPI->getNumOperands()-1) Changed |= OptimizeAwayTrappingUsesOfValue( GEPI, ConstantExpr::getGetElementPtr(nullptr, NewV, Idxs)); if (GEPI->use_empty()) { Changed = true; GEPI->eraseFromParent(); } } } return Changed; } /// OptimizeAwayTrappingUsesOfLoads - The specified global has only one non-null /// value stored into it. If there are uses of the loaded value that would trap /// if the loaded value is dynamically null, then we know that they cannot be /// reachable with a null optimize away the load. static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV, const DataLayout &DL, TargetLibraryInfo *TLI) { bool Changed = false; // Keep track of whether we are able to remove all the uses of the global // other than the store that defines it. bool AllNonStoreUsesGone = true; // Replace all uses of loads with uses of uses of the stored value. for (Value::user_iterator GUI = GV->user_begin(), E = GV->user_end(); GUI != E;){ User *GlobalUser = *GUI++; if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) { Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV); // If we were able to delete all uses of the loads if (LI->use_empty()) { LI->eraseFromParent(); Changed = true; } else { AllNonStoreUsesGone = false; } } else if (isa<StoreInst>(GlobalUser)) { // Ignore the store that stores "LV" to the global. assert(GlobalUser->getOperand(1) == GV && "Must be storing *to* the global"); } else { AllNonStoreUsesGone = false; // If we get here we could have other crazy uses that are transitively // loaded. 
assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) || isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser) || isa<BitCastInst>(GlobalUser) || isa<GetElementPtrInst>(GlobalUser)) && "Only expect load and stores!"); } } if (Changed) { DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV); ++NumGlobUses; } // If we nuked all of the loads, then none of the stores are needed either, // nor is the global. if (AllNonStoreUsesGone) { if (isLeakCheckerRoot(GV)) { Changed |= CleanupPointerRootUsers(GV, TLI); } else { Changed = true; CleanupConstantGlobalUsers(GV, nullptr, DL, TLI); } if (GV->use_empty()) { DEBUG(dbgs() << " *** GLOBAL NOW DEAD!\n"); Changed = true; GV->eraseFromParent(); ++NumDeleted; } } return Changed; } /// ConstantPropUsersOf - Walk the use list of V, constant folding all of the /// instructions that are foldable. static void ConstantPropUsersOf(Value *V, const DataLayout &DL, TargetLibraryInfo *TLI) { for (Value::user_iterator UI = V->user_begin(), E = V->user_end(); UI != E; ) if (Instruction *I = dyn_cast<Instruction>(*UI++)) if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) { I->replaceAllUsesWith(NewC); // Advance UI to the next non-I use to avoid invalidating it! // Instructions could multiply use V. while (UI != E && *UI == I) ++UI; I->eraseFromParent(); } } /// OptimizeGlobalAddressOfMalloc - This function takes the specified global /// variable, and transforms the program as if it always contained the result of /// the specified malloc. Because it is always the result of the specified /// malloc, there is no reason to actually DO the malloc. Instead, turn the /// malloc into a global, and any loads of GV as uses of the new global. static GlobalVariable * OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, CallInst *CI, Type *AllocTy, ConstantInt *NElements, const DataLayout &DL, TargetLibraryInfo *TLI) { DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI << '\n'); Type *GlobalType; if (NElements->getZExtValue() == 1) GlobalType = AllocTy; else // If we have an array allocation, the global variable is of an array. GlobalType = ArrayType::get(AllocTy, NElements->getZExtValue()); // Create the new global variable. The contents of the malloc'd memory is // undefined, so initialize with an undef value. GlobalVariable *NewGV = new GlobalVariable(*GV->getParent(), GlobalType, false, GlobalValue::InternalLinkage, UndefValue::get(GlobalType), GV->getName()+".body", GV, GV->getThreadLocalMode()); // If there are bitcast users of the malloc (which is typical, usually we have // a malloc + bitcast) then replace them with uses of the new global. Update // other users to use the global as well. BitCastInst *TheBC = nullptr; while (!CI->use_empty()) { Instruction *User = cast<Instruction>(CI->user_back()); if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) { if (BCI->getType() == NewGV->getType()) { BCI->replaceAllUsesWith(NewGV); BCI->eraseFromParent(); } else { BCI->setOperand(0, NewGV); } } else { if (!TheBC) TheBC = new BitCastInst(NewGV, CI->getType(), "newgv", CI); User->replaceUsesOfWith(CI, TheBC); } } Constant *RepValue = NewGV; if (NewGV->getType() != GV->getType()->getElementType()) RepValue = ConstantExpr::getBitCast(RepValue, GV->getType()->getElementType()); // If there is a comparison against null, we will insert a global bool to // keep track of whether the global was initialized yet or not. 
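// Illustrative sketch of the bookkeeping (made-up IR; the name follows the
// ".init" suffix used below):
//   @G.init = internal global i1 false
//   store i8* %mem, i8** @G        -->  also: store i1 true, i1* @G.init
//   icmp ne i8* (load @G), null    -->  load @G.init
//   icmp eq i8* (load @G), null    -->  negation of (load @G.init)
// The remaining predicates fold to a constant, the boolean, or its
// negation, as handled in the switch below.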
GlobalVariable *InitBool = new GlobalVariable(Type::getInt1Ty(GV->getContext()), false, GlobalValue::InternalLinkage, ConstantInt::getFalse(GV->getContext()), GV->getName()+".init", GV->getThreadLocalMode()); bool InitBoolUsed = false; // Loop over all uses of GV, processing them in turn. while (!GV->use_empty()) { if (StoreInst *SI = dyn_cast<StoreInst>(GV->user_back())) { // The global is initialized when the store to it occurs. new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false, 0, SI->getOrdering(), SI->getSynchScope(), SI); SI->eraseFromParent(); continue; } LoadInst *LI = cast<LoadInst>(GV->user_back()); while (!LI->use_empty()) { Use &LoadUse = *LI->use_begin(); ICmpInst *ICI = dyn_cast<ICmpInst>(LoadUse.getUser()); if (!ICI) { LoadUse = RepValue; continue; } // Replace the cmp X, 0 with a use of the bool value. // Sink the load to where the compare was, if atomic rules allow us to. Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", false, 0, LI->getOrdering(), LI->getSynchScope(), LI->isUnordered() ? (Instruction*)ICI : LI); InitBoolUsed = true; switch (ICI->getPredicate()) { default: llvm_unreachable("Unknown ICmp Predicate!"); case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_SLT: // X < null -> always false LV = ConstantInt::getFalse(GV->getContext()); break; case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_EQ: LV = BinaryOperator::CreateNot(LV, "notinit", ICI); break; case ICmpInst::ICMP_NE: case ICmpInst::ICMP_UGE: case ICmpInst::ICMP_SGE: case ICmpInst::ICMP_UGT: case ICmpInst::ICMP_SGT: break; // no change. } ICI->replaceAllUsesWith(LV); ICI->eraseFromParent(); } LI->eraseFromParent(); } // If the initialization boolean was used, insert it, otherwise delete it. if (!InitBoolUsed) { while (!InitBool->use_empty()) // Delete initializations cast<StoreInst>(InitBool->user_back())->eraseFromParent(); delete InitBool; } else GV->getParent()->getGlobalList().insert(GV, InitBool); // Now the GV is dead, nuke it and the malloc.. GV->eraseFromParent(); CI->eraseFromParent(); // To further other optimizations, loop over all users of NewGV and try to // constant prop them. This will promote GEP instructions with constant // indices into GEP constant-exprs, which will allow global-opt to hack on it. ConstantPropUsersOf(NewGV, DL, TLI); if (RepValue != NewGV) ConstantPropUsersOf(RepValue, DL, TLI); return NewGV; } /// ValueIsOnlyUsedLocallyOrStoredToOneGlobal - Scan the use-list of V checking /// to make sure that there are no complex uses of V. We permit simple things /// like dereferencing the pointer, but not storing through the address, unless /// it is to the specified global. static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V, const GlobalVariable *GV, SmallPtrSetImpl<const PHINode*> &PHIs) { for (const User *U : V->users()) { const Instruction *Inst = cast<Instruction>(U); if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) { continue; // Fine, ignore. } if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) { if (SI->getOperand(0) == V && SI->getOperand(1) != GV) return false; // Storing the pointer itself... bad. continue; // Otherwise, storing through it, or storing into GV... fine. } // Must index into the array and into the struct. if (isa<GetElementPtrInst>(Inst) && Inst->getNumOperands() >= 3) { if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs)) return false; continue; } if (const PHINode *PN = dyn_cast<PHINode>(Inst)) { // PHIs are ok if all uses are ok. 
Don't infinitely recurse through PHI // cycles. if (PHIs.insert(PN).second) if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs)) return false; continue; } if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) { if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs)) return false; continue; } return false; } return true; } /// ReplaceUsesOfMallocWithGlobal - The Alloc pointer is stored into GV /// somewhere. Transform all uses of the allocation into loads from the /// global and uses of the resultant pointer. Further, delete the store into /// GV. This assumes that these value pass the /// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate. static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc, GlobalVariable *GV) { while (!Alloc->use_empty()) { Instruction *U = cast<Instruction>(*Alloc->user_begin()); Instruction *InsertPt = U; if (StoreInst *SI = dyn_cast<StoreInst>(U)) { // If this is the store of the allocation into the global, remove it. if (SI->getOperand(1) == GV) { SI->eraseFromParent(); continue; } } else if (PHINode *PN = dyn_cast<PHINode>(U)) { // Insert the load in the corresponding predecessor, not right before the // PHI. InsertPt = PN->getIncomingBlock(*Alloc->use_begin())->getTerminator(); } else if (isa<BitCastInst>(U)) { // Must be bitcast between the malloc and store to initialize the global. ReplaceUsesOfMallocWithGlobal(U, GV); U->eraseFromParent(); continue; } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) { // If this is a "GEP bitcast" and the user is a store to the global, then // just process it as a bitcast. if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse()) if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->user_back())) if (SI->getOperand(1) == GV) { // Must be bitcast GEP between the malloc and store to initialize // the global. ReplaceUsesOfMallocWithGlobal(GEPI, GV); GEPI->eraseFromParent(); continue; } } // Insert a load from the global, and use it instead of the malloc. Value *NL = new LoadInst(GV, GV->getName()+".val", InsertPt); U->replaceUsesOfWith(Alloc, NL); } } /// LoadUsesSimpleEnoughForHeapSRA - Verify that all uses of V (a load, or a phi /// of a load) are simple enough to perform heap SRA on. This permits GEP's /// that index through the array and struct field, icmps of null, and PHIs. static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V, SmallPtrSetImpl<const PHINode*> &LoadUsingPHIs, SmallPtrSetImpl<const PHINode*> &LoadUsingPHIsPerLoad) { // We permit two users of the load: setcc comparing against the null // pointer, and a getelementptr of a specific form. for (const User *U : V->users()) { const Instruction *UI = cast<Instruction>(U); // Comparison against null is ok. if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UI)) { if (!isa<ConstantPointerNull>(ICI->getOperand(1))) return false; continue; } // getelementptr is also ok, but only a simple form. if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(UI)) { // Must index into the array and into the struct. if (GEPI->getNumOperands() < 3) return false; // Otherwise the GEP is ok. continue; } if (const PHINode *PN = dyn_cast<PHINode>(UI)) { if (!LoadUsingPHIsPerLoad.insert(PN).second) // This means some phi nodes are dependent on each other. // Avoid infinite looping! return false; if (!LoadUsingPHIs.insert(PN).second) // If we have already analyzed this PHI, then it is safe. continue; // Make sure all uses of the PHI are simple enough to transform. 
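// Illustrative examples of uses that pass this recursive check
// (hypothetical IR):
//   %c = icmp eq %struct.S* %p, null
//   %f = getelementptr %struct.S, %struct.S* %p, i64 %i, i32 2
//   a phi whose own users are, recursively, of the same shapes
// Anything else makes us give up on heap SRoA for this global.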
if (!LoadUsesSimpleEnoughForHeapSRA(PN, LoadUsingPHIs, LoadUsingPHIsPerLoad)) return false; continue; } // Otherwise we don't know what this is, not ok. return false; } return true; } /// AllGlobalLoadUsesSimpleEnoughForHeapSRA - If all users of values loaded from /// GV are simple enough to perform HeapSRA, return true. static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV, Instruction *StoredVal) { SmallPtrSet<const PHINode*, 32> LoadUsingPHIs; SmallPtrSet<const PHINode*, 32> LoadUsingPHIsPerLoad; for (const User *U : GV->users()) if (const LoadInst *LI = dyn_cast<LoadInst>(U)) { if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs, LoadUsingPHIsPerLoad)) return false; LoadUsingPHIsPerLoad.clear(); } // If we reach here, we know that all uses of the loads and transitive uses // (through PHI nodes) are simple enough to transform. However, we don't know // that all inputs the to the PHI nodes are in the same equivalence sets. // Check to verify that all operands of the PHIs are either PHIS that can be // transformed, loads from GV, or MI itself. for (const PHINode *PN : LoadUsingPHIs) { for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) { Value *InVal = PN->getIncomingValue(op); // PHI of the stored value itself is ok. if (InVal == StoredVal) continue; if (const PHINode *InPN = dyn_cast<PHINode>(InVal)) { // One of the PHIs in our set is (optimistically) ok. if (LoadUsingPHIs.count(InPN)) continue; return false; } // Load from GV is ok. if (const LoadInst *LI = dyn_cast<LoadInst>(InVal)) if (LI->getOperand(0) == GV) continue; // UNDEF? NULL? // Anything else is rejected. return false; } } return true; } static Value *GetHeapSROAValue(Value *V, unsigned FieldNo, DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues, std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) { std::vector<Value*> &FieldVals = InsertedScalarizedValues[V]; if (FieldNo >= FieldVals.size()) FieldVals.resize(FieldNo+1); // If we already have this value, just reuse the previously scalarized // version. if (Value *FieldVal = FieldVals[FieldNo]) return FieldVal; // Depending on what instruction this is, we have several cases. Value *Result; if (LoadInst *LI = dyn_cast<LoadInst>(V)) { // This is a scalarized version of the load from the global. Just create // a new Load of the scalarized global. Result = new LoadInst(GetHeapSROAValue(LI->getOperand(0), FieldNo, InsertedScalarizedValues, PHIsToRewrite), LI->getName()+".f"+Twine(FieldNo), LI); } else { PHINode *PN = cast<PHINode>(V); // PN's type is pointer to struct. Make a new PHI of pointer to struct // field. PointerType *PTy = cast<PointerType>(PN->getType()); StructType *ST = cast<StructType>(PTy->getElementType()); unsigned AS = PTy->getAddressSpace(); PHINode *NewPN = PHINode::Create(PointerType::get(ST->getElementType(FieldNo), AS), PN->getNumIncomingValues(), PN->getName()+".f"+Twine(FieldNo), PN); Result = NewPN; PHIsToRewrite.push_back(std::make_pair(PN, FieldNo)); } return FieldVals[FieldNo] = Result; } /// RewriteHeapSROALoadUser - Given a load instruction and a value derived from /// the load, rewrite the derived value to use the HeapSRoA'd load. static void RewriteHeapSROALoadUser(Instruction *LoadUser, DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues, std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) { // If this is a comparison against null, handle it. 
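// Illustrative sketch (made-up IR): after the per-field split, a test such as
//   %c = icmp eq %struct.S* %ld, null
// can be answered from any single field, e.g.
//   %c = icmp eq i32* %ld.f0, null
// because the failure path emitted by PerformHeapAllocSRoA nulls out all of
// the field pointers together.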
if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) { assert(isa<ConstantPointerNull>(SCI->getOperand(1))); // If we have a setcc of the loaded pointer, we can use a setcc of any // field. Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0, InsertedScalarizedValues, PHIsToRewrite); Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr, Constant::getNullValue(NPtr->getType()), SCI->getName()); SCI->replaceAllUsesWith(New); SCI->eraseFromParent(); return; } // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...' if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) { assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2)) && "Unexpected GEPI!"); // Load the pointer for this field. unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue(); Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo, InsertedScalarizedValues, PHIsToRewrite); // Create the new GEP idx vector. SmallVector<Value*, 8> GEPIdx; GEPIdx.push_back(GEPI->getOperand(1)); GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end()); Value *NGEPI = GetElementPtrInst::Create(GEPI->getResultElementType(), NewPtr, GEPIdx, GEPI->getName(), GEPI); GEPI->replaceAllUsesWith(NGEPI); GEPI->eraseFromParent(); return; } // Recursively transform the users of PHI nodes. This will lazily create the // PHIs that are needed for individual elements. Keep track of what PHIs we // see in InsertedScalarizedValues so that we don't get infinite loops (very // antisocial). If the PHI is already in InsertedScalarizedValues, it has // already been seen first by another load, so its uses have already been // processed. PHINode *PN = cast<PHINode>(LoadUser); if (!InsertedScalarizedValues.insert(std::make_pair(PN, std::vector<Value*>())).second) return; // If this is the first time we've seen this PHI, recursively process all // users. for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) { Instruction *User = cast<Instruction>(*UI++); RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite); } } /// RewriteUsesOfLoadForHeapSRoA - We are performing Heap SRoA on a global. Ptr /// is a value loaded from the global. Eliminate all uses of Ptr, making them /// use FieldGlobals instead. All uses of loaded values satisfy /// AllGlobalLoadUsesSimpleEnoughForHeapSRA. static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load, DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues, std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) { for (auto UI = Load->user_begin(), E = Load->user_end(); UI != E;) { Instruction *User = cast<Instruction>(*UI++); RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite); } if (Load->use_empty()) { Load->eraseFromParent(); InsertedScalarizedValues.erase(Load); } } /// PerformHeapAllocSRoA - CI is an allocation of an array of structures. Break /// it up into multiple allocations of arrays of the fields. static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI, Value *NElems, const DataLayout &DL, const TargetLibraryInfo *TLI) { DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI << '\n'); Type *MAT = getMallocAllocatedType(CI, TLI); StructType *STy = cast<StructType>(MAT); // There is guaranteed to be at least one use of the malloc (storing // it into GV). If there are other uses, change them to be uses of // the global to simplify later code. This also deletes the store // into GV. ReplaceUsesOfMallocWithGlobal(CI, GV); // Okay, at this point, there are no users of the malloc. 
Insert N // new mallocs at the same place as CI, and N globals. std::vector<Value*> FieldGlobals; std::vector<Value*> FieldMallocs; unsigned AS = GV->getType()->getPointerAddressSpace(); for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){ Type *FieldTy = STy->getElementType(FieldNo); PointerType *PFieldTy = PointerType::get(FieldTy, AS); GlobalVariable *NGV = new GlobalVariable(*GV->getParent(), PFieldTy, false, GlobalValue::InternalLinkage, Constant::getNullValue(PFieldTy), GV->getName() + ".f" + Twine(FieldNo), GV, GV->getThreadLocalMode()); FieldGlobals.push_back(NGV); unsigned TypeSize = DL.getTypeAllocSize(FieldTy); if (StructType *ST = dyn_cast<StructType>(FieldTy)) TypeSize = DL.getStructLayout(ST)->getSizeInBytes(); Type *IntPtrTy = DL.getIntPtrType(CI->getType()); Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy, ConstantInt::get(IntPtrTy, TypeSize), NElems, nullptr, CI->getName() + ".f" + Twine(FieldNo)); FieldMallocs.push_back(NMI); new StoreInst(NMI, NGV, CI); } // The tricky aspect of this transformation is handling the case when malloc // fails. In the original code, malloc failing would set the result pointer // of malloc to null. In this case, some mallocs could succeed and others // could fail. As such, we emit code that looks like this: // F0 = malloc(field0) // F1 = malloc(field1) // F2 = malloc(field2) // if (F0 == 0 || F1 == 0 || F2 == 0) { // if (F0) { free(F0); F0 = 0; } // if (F1) { free(F1); F1 = 0; } // if (F2) { free(F2); F2 = 0; } // } // The malloc can also fail if its argument is too large. Constant *ConstantZero = ConstantInt::get(CI->getArgOperand(0)->getType(), 0); Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getArgOperand(0), ConstantZero, "isneg"); for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) { Value *Cond = new ICmpInst(CI, ICmpInst::ICMP_EQ, FieldMallocs[i], Constant::getNullValue(FieldMallocs[i]->getType()), "isnull"); RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", CI); } // Split the basic block at the old malloc. BasicBlock *OrigBB = CI->getParent(); BasicBlock *ContBB = OrigBB->splitBasicBlock(CI, "malloc_cont"); // Create the block to check the first condition. Put all these blocks at the // end of the function as they are unlikely to be executed. BasicBlock *NullPtrBlock = BasicBlock::Create(OrigBB->getContext(), "malloc_ret_null", OrigBB->getParent()); // Remove the uncond branch from OrigBB to ContBB, turning it into a cond // branch on RunningOr. OrigBB->getTerminator()->eraseFromParent(); BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB); // Within the NullPtrBlock, we need to emit a comparison and branch for each // pointer, because some may be null while others are not. for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) { Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock); Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal, Constant::getNullValue(GVVal->getType())); BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it", OrigBB->getParent()); BasicBlock *NextBlock = BasicBlock::Create(Cmp->getContext(), "next", OrigBB->getParent()); Instruction *BI = BranchInst::Create(FreeBlock, NextBlock, Cmp, NullPtrBlock); // Fill in FreeBlock. CallInst::CreateFree(GVVal, BI); new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i], FreeBlock); BranchInst::Create(NextBlock, FreeBlock); NullPtrBlock = NextBlock; } BranchInst::Create(ContBB, NullPtrBlock); // CI is no longer needed, remove it. 
CI->eraseFromParent(); /// InsertedScalarizedLoads - As we process loads, if we can't immediately /// update all uses of the load, keep track of what scalarized loads are /// inserted for a given load. DenseMap<Value*, std::vector<Value*> > InsertedScalarizedValues; InsertedScalarizedValues[GV] = FieldGlobals; std::vector<std::pair<PHINode*, unsigned> > PHIsToRewrite; // Okay, the malloc site is completely handled. All of the uses of GV are now // loads, and all uses of those loads are simple. Rewrite them to use loads // of the per-field globals instead. for (auto UI = GV->user_begin(), E = GV->user_end(); UI != E;) { Instruction *User = cast<Instruction>(*UI++); if (LoadInst *LI = dyn_cast<LoadInst>(User)) { RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite); continue; } // Must be a store of null. StoreInst *SI = cast<StoreInst>(User); assert(isa<ConstantPointerNull>(SI->getOperand(0)) && "Unexpected heap-sra user!"); // Insert a store of null into each global. for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) { PointerType *PT = cast<PointerType>(FieldGlobals[i]->getType()); Constant *Null = Constant::getNullValue(PT->getElementType()); new StoreInst(Null, FieldGlobals[i], SI); } // Erase the original store. SI->eraseFromParent(); } // While we have PHIs that are interesting to rewrite, do it. while (!PHIsToRewrite.empty()) { PHINode *PN = PHIsToRewrite.back().first; unsigned FieldNo = PHIsToRewrite.back().second; PHIsToRewrite.pop_back(); PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]); assert(FieldPN->getNumIncomingValues() == 0 &&"Already processed this phi"); // Add all the incoming values. This can materialize more phis. for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { Value *InVal = PN->getIncomingValue(i); InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues, PHIsToRewrite); FieldPN->addIncoming(InVal, PN->getIncomingBlock(i)); } } // Drop all inter-phi links and any loads that made it this far. for (DenseMap<Value*, std::vector<Value*> >::iterator I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end(); I != E; ++I) { if (PHINode *PN = dyn_cast<PHINode>(I->first)) PN->dropAllReferences(); else if (LoadInst *LI = dyn_cast<LoadInst>(I->first)) LI->dropAllReferences(); } // Delete all the phis and loads now that inter-references are dead. for (DenseMap<Value*, std::vector<Value*> >::iterator I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end(); I != E; ++I) { if (PHINode *PN = dyn_cast<PHINode>(I->first)) PN->eraseFromParent(); else if (LoadInst *LI = dyn_cast<LoadInst>(I->first)) LI->eraseFromParent(); } // The old global is now dead, remove it. GV->eraseFromParent(); ++NumHeapSRA; return cast<GlobalVariable>(FieldGlobals[0]); } /// TryToOptimizeStoreOfMallocToGlobal - This function is called when we see a /// pointer global variable with a single value stored it that is a malloc or /// cast of malloc. static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, CallInst *CI, Type *AllocTy, AtomicOrdering Ordering, Module::global_iterator &GVI, const DataLayout &DL, TargetLibraryInfo *TLI) { // If this is a malloc of an abstract type, don't touch it. if (!AllocTy->isSized()) return false; // We can't optimize this global unless all uses of it are *known* to be // of the malloc value, not of the null initializer value (consider a use // that compares the global's value against zero to see if the malloc has // been reached). 
To do this, we check to see if all uses of the global // would trap if the global were null: this proves that they must all // happen after the malloc. if (!AllUsesOfLoadedValueWillTrapIfNull(GV)) return false; // We can't optimize this if the malloc itself is used in a complex way, // for example, being stored into multiple globals. This allows the // malloc to be stored into the specified global, loaded icmp'd, and // GEP'd. These are all things we could transform to using the global // for. SmallPtrSet<const PHINode*, 8> PHIs; if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV, PHIs)) return false; // If we have a global that is only initialized with a fixed size malloc, // transform the program to use global memory instead of malloc'd memory. // This eliminates dynamic allocation, avoids an indirection accessing the // data, and exposes the resultant global to further GlobalOpt. // We cannot optimize the malloc if we cannot determine malloc array size. Value *NElems = getMallocArraySize(CI, DL, TLI, true); if (!NElems) return false; if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems)) // Restrict this transformation to only working on small allocations // (2048 bytes currently), as we don't want to introduce a 16M global or // something. if (NElements->getZExtValue() * DL.getTypeAllocSize(AllocTy) < 2048) { GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, DL, TLI); return true; } // If the allocation is an array of structures, consider transforming this // into multiple malloc'd arrays, one for each field. This is basically // SRoA for malloc'd memory. if (Ordering != NotAtomic) return false; // If this is an allocation of a fixed size array of structs, analyze as a // variable size array. malloc [100 x struct],1 -> malloc struct, 100 if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1)) if (ArrayType *AT = dyn_cast<ArrayType>(AllocTy)) AllocTy = AT->getElementType(); StructType *AllocSTy = dyn_cast<StructType>(AllocTy); if (!AllocSTy) return false; // This the structure has an unreasonable number of fields, leave it // alone. if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 && AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, CI)) { // If this is a fixed size array, transform the Malloc to be an alloc of // structs. malloc [100 x struct],1 -> malloc struct, 100 if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) { Type *IntPtrTy = DL.getIntPtrType(CI->getType()); unsigned TypeSize = DL.getStructLayout(AllocSTy)->getSizeInBytes(); Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize); Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements()); Instruction *Malloc = CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy, AllocSize, NumElements, nullptr, CI->getName()); Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI); CI->replaceAllUsesWith(Cast); CI->eraseFromParent(); if (BitCastInst *BCI = dyn_cast<BitCastInst>(Malloc)) CI = cast<CallInst>(BCI->getOperand(0)); else CI = cast<CallInst>(Malloc); } GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, DL, TLI, true), DL, TLI); return true; } return false; } // OptimizeOnceStoredGlobal - Try to optimize globals based on the knowledge // that only one value (besides its initializer) is ever stored to the global. static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal, AtomicOrdering Ordering, Module::global_iterator &GVI, const DataLayout &DL, TargetLibraryInfo *TLI) { // Ignore no-op GEPs and bitcasts. 
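// Illustrative example (hypothetical IR): a stored value written as
//   bitcast (i8* %m to i32*)   or   getelementptr i8, i8* %m, i64 0
// (an all-zero-index GEP) is treated as plain %m for the analysis below.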
StoredOnceVal = StoredOnceVal->stripPointerCasts(); // If we are dealing with a pointer global that is initialized to null and // only has one (non-null) value stored into it, then we can optimize any // users of the loaded value (often calls and loads) that would trap if the // value was null. if (GV->getInitializer()->getType()->isPointerTy() && GV->getInitializer()->isNullValue()) { if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) { if (GV->getInitializer()->getType() != SOVC->getType()) SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType()); // Optimize away any trapping uses of the loaded value. if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, DL, TLI)) return true; } else if (CallInst *CI = extractMallocCall(StoredOnceVal, TLI)) { Type *MallocType = getMallocAllocatedType(CI, TLI); if (MallocType && TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType, Ordering, GVI, DL, TLI)) return true; } } return false; } /// TryToShrinkGlobalToBoolean - At this point, we have learned that the only /// two values ever stored into GV are its initializer and OtherVal. See if we /// can shrink the global into a boolean and select between the two values /// whenever it is used. This exposes the values to other scalar optimizations. static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) { Type *GVElType = GV->getType()->getElementType(); // If GVElType is already i1, it is already shrunk. If the type of the GV is // an FP value, pointer or vector, don't do this optimization because a select // between them is very expensive and unlikely to lead to later // simplification. In these cases, we typically end up with "cond ? v1 : v2" // where v1 and v2 both require constant pool loads, a big loss. if (GVElType == Type::getInt1Ty(GV->getContext()) || GVElType->isFloatingPointTy() || GVElType->isPointerTy() || GVElType->isVectorTy()) return false; // Walk the use list of the global seeing if all the uses are load or store. // If there is anything else, bail out. for (User *U : GV->users()) if (!isa<LoadInst>(U) && !isa<StoreInst>(U)) return false; DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV); // Create the new global, initializing it to false. GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()), false, GlobalValue::InternalLinkage, ConstantInt::getFalse(GV->getContext()), GV->getName()+".b", GV->getThreadLocalMode(), GV->getType()->getAddressSpace()); GV->getParent()->getGlobalList().insert(GV, NewGV); Constant *InitVal = GV->getInitializer(); assert(InitVal->getType() != Type::getInt1Ty(GV->getContext()) && "No reason to shrink to bool!"); // If initialized to zero and storing one into the global, we can use a cast // instead of a select to synthesize the desired value. bool IsOneZero = false; if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) IsOneZero = InitVal->isNullValue() && CI->isOne(); while (!GV->use_empty()) { Instruction *UI = cast<Instruction>(GV->user_back()); if (StoreInst *SI = dyn_cast<StoreInst>(UI)) { // Change the store into a boolean store. bool StoringOther = SI->getOperand(0) == OtherVal; // Only do this if we weren't storing a loaded value. Value *StoreVal; if (StoringOther || SI->getOperand(0) == InitVal) { StoreVal = ConstantInt::get(Type::getInt1Ty(GV->getContext()), StoringOther); } else { // Otherwise, we are storing a previously loaded copy. To do this, // change the copy from copying the original value to just copying the // bool. 
Instruction *StoredVal = cast<Instruction>(SI->getOperand(0)); // If we've already replaced the input, StoredVal will be a cast or // select instruction. If not, it will be a load of the original // global. if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) { assert(LI->getOperand(0) == GV && "Not a copy!"); // Insert a new load, to preserve the saved value. StoreVal = new LoadInst(NewGV, LI->getName()+".b", false, 0, LI->getOrdering(), LI->getSynchScope(), LI); } else { assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) && "This is not a form that we understand!"); StoreVal = StoredVal->getOperand(0); assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!"); } } new StoreInst(StoreVal, NewGV, false, 0, SI->getOrdering(), SI->getSynchScope(), SI); } else { // Change the load into a load of bool then a select. LoadInst *LI = cast<LoadInst>(UI); LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", false, 0, LI->getOrdering(), LI->getSynchScope(), LI); Value *NSI; if (IsOneZero) NSI = new ZExtInst(NLI, LI->getType(), "", LI); else NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI); NSI->takeName(LI); LI->replaceAllUsesWith(NSI); } UI->eraseFromParent(); } // Retain the name of the old global variable. People who are debugging their // programs may expect these variables to be named the same. NewGV->takeName(GV); GV->eraseFromParent(); return true; } /// ProcessGlobal - Analyze the specified global variable and optimize it if /// possible. If we make a change, return true. bool GlobalOpt::ProcessGlobal(GlobalVariable *GV, Module::global_iterator &GVI) { // Do more involved optimizations if the global is internal. GV->removeDeadConstantUsers(); if (GV->use_empty()) { DEBUG(dbgs() << "GLOBAL DEAD: " << *GV); GV->eraseFromParent(); ++NumDeleted; return true; } if (!GV->hasLocalLinkage()) return false; GlobalStatus GS; if (GlobalStatus::analyzeGlobal(GV, GS)) return false; if (!GS.IsCompared && !GV->hasUnnamedAddr()) { GV->setUnnamedAddr(true); NumUnnamed++; } if (GV->isConstant() || !GV->hasInitializer()) return false; return ProcessInternalGlobal(GV, GVI, GS); } // HLSL Change Begin static bool isEntryPoint(const llvm::Function* Func) { const llvm::Module* Mod = Func->getParent(); return Mod->HasDxilModule() ? Mod->GetDxilModule().IsEntryOrPatchConstantFunction(Func) : Func->getName() == "main"; // Original logic for non-HLSL } // HLSL Change End /// ProcessInternalGlobal - Analyze the specified global variable and optimize /// it if possible. If we make a change, return true. bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV, Module::global_iterator &GVI, const GlobalStatus &GS) { auto &DL = GV->getParent()->getDataLayout(); // If this is a first class global and has only one accessing function // and this function is main (which we know is not recursive), we replace // the global with a local alloca in this function. // // NOTE: It doesn't make sense to promote non-single-value types since we // are just replacing static memory to stack memory. // // If the global is in different address space, don't bring it to stack. 
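// Illustrative sketch of the localization (made-up IR):
//   @g = internal global float 0.0   ; only touched from the entry function
// becomes, at the top of that function:
//   %g = alloca float
//   store float 0.0, float* %g
// after which every use of @g refers to %g instead and @g is deleted.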
if (!GS.HasMultipleAccessingFunctions && GS.AccessingFunction && !GS.HasNonInstructionUser && GV->getType()->getElementType()->isSingleValueType() && isEntryPoint(GS.AccessingFunction) && // HLSL Change - Generalize entrypoint testing GS.AccessingFunction->hasExternalLinkage() && GV->getType()->getAddressSpace() == 0) { DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV); Instruction &FirstI = const_cast<Instruction&>(*GS.AccessingFunction ->getEntryBlock().begin()); Type *ElemTy = GV->getType()->getElementType(); // FIXME: Pass Global's alignment when globals have alignment AllocaInst *Alloca = new AllocaInst(ElemTy, nullptr, GV->getName(), &FirstI); if (!isa<UndefValue>(GV->getInitializer())) new StoreInst(GV->getInitializer(), Alloca, &FirstI); GV->replaceAllUsesWith(Alloca); GV->eraseFromParent(); ++NumLocalized; return true; } // If the global is never loaded (but may be stored to), it is dead. // Delete it now. if (!GS.IsLoaded) { DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV); bool Changed; if (isLeakCheckerRoot(GV)) { // Delete any constant stores to the global. Changed = CleanupPointerRootUsers(GV, TLI); } else { // Delete any stores we can find to the global. We may not be able to // make it completely dead though. Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI); } // If the global is dead now, delete it. if (GV->use_empty()) { GV->eraseFromParent(); ++NumDeleted; Changed = true; } return Changed; } else if (GS.StoredType <= GlobalStatus::InitializerStored) { DEBUG(dbgs() << "MARKING CONSTANT: " << *GV << "\n"); GV->setConstant(true); // Clean up any obviously simplifiable users now. CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI); // If the global is dead now, just nuke it. if (GV->use_empty()) { DEBUG(dbgs() << " *** Marking constant allowed us to simplify " << "all users and delete global!\n"); GV->eraseFromParent(); ++NumDeleted; } ++NumMarked; return true; } else if (!GV->getInitializer()->getType()->isSingleValueType()) { const DataLayout &DL = GV->getParent()->getDataLayout(); if (GlobalVariable *FirstNewGV = SRAGlobal(GV, DL)) { GVI = FirstNewGV; // Don't skip the newly produced globals! return true; } } else if (GS.StoredType == GlobalStatus::StoredOnce) { // If the initial value for the global was an undef value, and if only // one other value was stored into it, we can just change the // initializer to be the stored value, then delete all stores to the // global. This allows us to mark it constant. if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) if (isa<UndefValue>(GV->getInitializer())) { // Change the initial value here. GV->setInitializer(SOVConstant); // Clean up any obviously simplifiable users now. CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI); if (GV->use_empty()) { DEBUG(dbgs() << " *** Substituting initializer allowed us to " << "simplify all users and delete global!\n"); GV->eraseFromParent(); ++NumDeleted; } else { GVI = GV; } ++NumSubstitute; return true; } // Try to optimize globals based on the knowledge that only one value // (besides its initializer) is ever stored to the global. if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GS.Ordering, GVI, DL, TLI)) return true; // Otherwise, if the global was not a boolean, we can shrink it to be a // boolean. 
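// Illustrative sketch (made-up IR) of what TryToShrinkGlobalToBoolean does:
//   @flag = internal global i32 0    ; only other value ever stored is 42
// becomes
//   @flag.b = internal global i1 false
//   store i32 42, i32* @flag    -->  store i1 true, i1* @flag.b
//   %v = load i32, i32* @flag   -->  %b = load i1, i1* @flag.b
//                                    %v = select i1 %b, i32 42, i32 0
// (a zext is used instead of the select when the two values are 0 and 1).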
if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) { if (GS.Ordering == NotAtomic) { if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) { ++NumShrunkToBool; return true; } } } } return false; } /// ChangeCalleesToFastCall - Walk all of the direct calls of the specified /// function, changing them to FastCC. static void ChangeCalleesToFastCall(Function *F) { for (User *U : F->users()) { if (isa<BlockAddress>(U)) continue; CallSite CS(cast<Instruction>(U)); CS.setCallingConv(CallingConv::Fast); } } static AttributeSet StripNest(LLVMContext &C, const AttributeSet &Attrs) { for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) { unsigned Index = Attrs.getSlotIndex(i); if (!Attrs.getSlotAttributes(i).hasAttribute(Index, Attribute::Nest)) continue; // There can be only one. return Attrs.removeAttribute(C, Index, Attribute::Nest); } return Attrs; } static void RemoveNestAttribute(Function *F) { F->setAttributes(StripNest(F->getContext(), F->getAttributes())); for (User *U : F->users()) { if (isa<BlockAddress>(U)) continue; CallSite CS(cast<Instruction>(U)); CS.setAttributes(StripNest(F->getContext(), CS.getAttributes())); } } /// Return true if this is a calling convention that we'd like to change. The /// idea here is that we don't want to mess with the convention if the user /// explicitly requested something with performance implications like coldcc, /// GHC, or anyregcc. static bool isProfitableToMakeFastCC(Function *F) { CallingConv::ID CC = F->getCallingConv(); // FIXME: Is it worth transforming x86_stdcallcc and x86_fastcallcc? return CC == CallingConv::C || CC == CallingConv::X86_ThisCall; } bool GlobalOpt::OptimizeFunctions(Module &M) { bool Changed = false; // Optimize functions. for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) { Function *F = FI++; // Functions without names cannot be referenced outside this module. if (!F->hasName() && !F->isDeclaration() && !F->hasLocalLinkage()) F->setLinkage(GlobalValue::InternalLinkage); const Comdat *C = F->getComdat(); bool inComdat = C && NotDiscardableComdats.count(C); F->removeDeadConstantUsers(); if ((!inComdat || F->hasLocalLinkage()) && F->isDefTriviallyDead()) { F->eraseFromParent(); Changed = true; ++NumFnDeleted; } else if (F->hasLocalLinkage()) { if (isProfitableToMakeFastCC(F) && !F->isVarArg() && !F->hasAddressTaken()) { // If this function has a calling convention worth changing, is not a // varargs function, and is only called directly, promote it to use the // Fast calling convention. F->setCallingConv(CallingConv::Fast); ChangeCalleesToFastCall(F); ++NumFastCallFns; Changed = true; } if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) && !F->hasAddressTaken()) { // The function is not used by a trampoline intrinsic, so it is safe // to remove the 'nest' attribute. RemoveNestAttribute(F); ++NumNestRemoved; Changed = true; } } } return Changed; } bool GlobalOpt::OptimizeGlobalVars(Module &M) { bool Changed = false; for (Module::global_iterator GVI = M.global_begin(), E = M.global_end(); GVI != E; ) { GlobalVariable *GV = GVI++; // Global variables without names cannot be referenced outside this module. if (!GV->hasName() && !GV->isDeclaration() && !GV->hasLocalLinkage()) GV->setLinkage(GlobalValue::InternalLinkage); // Simplify the initializer. 
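// Illustrative example (hypothetical IR) of an initializer this fold can
// simplify once the DataLayout is known:
//   @sz = global i32 ptrtoint (i32* getelementptr (i32, i32* null, i32 1)
//                              to i32)
// becomes
//   @sz = global i32 4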
if (GV->hasInitializer()) if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GV->getInitializer())) { auto &DL = M.getDataLayout(); Constant *New = ConstantFoldConstantExpression(CE, DL, TLI); if (New && New != CE) GV->setInitializer(New); } if (GV->isDiscardableIfUnused()) { if (const Comdat *C = GV->getComdat()) if (NotDiscardableComdats.count(C) && !GV->hasLocalLinkage()) continue; Changed |= ProcessGlobal(GV, GVI); } } return Changed; } static inline bool isSimpleEnoughValueToCommit(Constant *C, SmallPtrSetImpl<Constant *> &SimpleConstants, const DataLayout &DL); /// isSimpleEnoughValueToCommit - Return true if the specified constant can be /// handled by the code generator. We don't want to generate something like: /// void *X = &X/42; /// because the code generator doesn't have a relocation that can handle that. /// /// This function should be called if C was not found (but just got inserted) /// in SimpleConstants to avoid having to rescan the same constants all the /// time. static bool isSimpleEnoughValueToCommitHelper(Constant *C, SmallPtrSetImpl<Constant *> &SimpleConstants, const DataLayout &DL) { // Simple global addresses are supported, do not allow dllimport or // thread-local globals. if (auto *GV = dyn_cast<GlobalValue>(C)) return !GV->hasDLLImportStorageClass() && !GV->isThreadLocal(); // Simple integer, undef, constant aggregate zero, etc are all supported. if (C->getNumOperands() == 0 || isa<BlockAddress>(C)) return true; // Aggregate values are safe if all their elements are. if (isa<ConstantArray>(C) || isa<ConstantStruct>(C) || isa<ConstantVector>(C)) { for (Value *Op : C->operands()) if (!isSimpleEnoughValueToCommit(cast<Constant>(Op), SimpleConstants, DL)) return false; return true; } // We don't know exactly what relocations are allowed in constant expressions, // so we allow &global+constantoffset, which is safe and uniformly supported // across targets. ConstantExpr *CE = cast<ConstantExpr>(C); switch (CE->getOpcode()) { case Instruction::BitCast: // Bitcast is fine if the casted value is fine. return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL); case Instruction::IntToPtr: case Instruction::PtrToInt: // int <=> ptr is fine if the int type is the same size as the // pointer type. if (DL.getTypeSizeInBits(CE->getType()) != DL.getTypeSizeInBits(CE->getOperand(0)->getType())) return false; return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL); // GEP is fine if it is simple + constant offset. case Instruction::GetElementPtr: for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i) if (!isa<ConstantInt>(CE->getOperand(i))) return false; return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL); case Instruction::Add: // We allow simple+cst. if (!isa<ConstantInt>(CE->getOperand(1))) return false; return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL); } return false; } static inline bool isSimpleEnoughValueToCommit(Constant *C, SmallPtrSetImpl<Constant *> &SimpleConstants, const DataLayout &DL) { // If we already checked this constant, we win. if (!SimpleConstants.insert(C).second) return true; // Check the constant. return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, DL); } /// isSimpleEnoughPointerToCommit - Return true if this constant is simple /// enough for us to understand. In particular, if it is a cast to anything /// other than from one pointer type to another pointer type, we punt. /// We basically just support direct accesses to globals and GEP's of /// globals. 
This should be kept up to date with CommitValueTo. static bool isSimpleEnoughPointerToCommit(Constant *C) { // Conservatively, avoid aggregate types. This is because we don't // want to worry about them partially overlapping other stores. if (!cast<PointerType>(C->getType())->getElementType()->isSingleValueType()) return false; if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) // Do not allow weak/*_odr/linkonce linkage or external globals. return GV->hasUniqueInitializer(); if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) { // Handle a constantexpr gep. if (CE->getOpcode() == Instruction::GetElementPtr && isa<GlobalVariable>(CE->getOperand(0)) && cast<GEPOperator>(CE)->isInBounds()) { GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0)); // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or // external globals. if (!GV->hasUniqueInitializer()) return false; // The first index must be zero. ConstantInt *CI = dyn_cast<ConstantInt>(*std::next(CE->op_begin())); if (!CI || !CI->isZero()) return false; // The remaining indices must be compile-time known integers within the // notional bounds of the corresponding static array types. if (!CE->isGEPWithNoNotionalOverIndexing()) return false; return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE); // A constantexpr bitcast from a pointer to another pointer is a no-op, // and we know how to evaluate it by moving the bitcast from the pointer // operand to the value operand. } else if (CE->getOpcode() == Instruction::BitCast && isa<GlobalVariable>(CE->getOperand(0))) { // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or // external globals. return cast<GlobalVariable>(CE->getOperand(0))->hasUniqueInitializer(); } } return false; } /// EvaluateStoreInto - Evaluate a piece of a constantexpr store into a global /// initializer. This returns 'Init' modified to reflect 'Val' stored into it. /// At this point, the GEP operands of Addr [0, OpNo) have been stepped into. static Constant *EvaluateStoreInto(Constant *Init, Constant *Val, ConstantExpr *Addr, unsigned OpNo) { // Base case of the recursion. if (OpNo == Addr->getNumOperands()) { assert(Val->getType() == Init->getType() && "Type mismatch!"); return Val; } SmallVector<Constant*, 32> Elts; if (StructType *STy = dyn_cast<StructType>(Init->getType())) { // Break up the constant into its elements. for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) Elts.push_back(Init->getAggregateElement(i)); // Replace the element that we are supposed to. ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo)); unsigned Idx = CU->getZExtValue(); assert(Idx < STy->getNumElements() && "Struct index out of range!"); Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1); // Return the modified struct. return ConstantStruct::get(STy, Elts); } ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo)); SequentialType *InitTy = cast<SequentialType>(Init->getType()); uint64_t NumElts; if (ArrayType *ATy = dyn_cast<ArrayType>(InitTy)) NumElts = ATy->getNumElements(); else NumElts = InitTy->getVectorNumElements(); // Break up the array into elements. 
for (uint64_t i = 0, e = NumElts; i != e; ++i) Elts.push_back(Init->getAggregateElement(i)); assert(CI->getZExtValue() < NumElts); Elts[CI->getZExtValue()] = EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1); if (Init->getType()->isArrayTy()) return ConstantArray::get(cast<ArrayType>(InitTy), Elts); return ConstantVector::get(Elts); } /// CommitValueTo - We have decided that Addr (which satisfies the predicate /// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen. static void CommitValueTo(Constant *Val, Constant *Addr) { if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) { assert(GV->hasInitializer()); GV->setInitializer(Val); return; } ConstantExpr *CE = cast<ConstantExpr>(Addr); GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0)); GV->setInitializer(EvaluateStoreInto(GV->getInitializer(), Val, CE, 2)); } namespace { /// Evaluator - This class evaluates LLVM IR, producing the Constant /// representing each SSA instruction. Changes to global variables are stored /// in a mapping that can be iterated over after the evaluation is complete. /// Once an evaluation call fails, the evaluation object should not be reused. class Evaluator { public: Evaluator(const DataLayout &DL, const TargetLibraryInfo *TLI) : DL(DL), TLI(TLI) { ValueStack.emplace_back(); } ~Evaluator() { for (auto &Tmp : AllocaTmps) // If there are still users of the alloca, the program is doing something // silly, e.g. storing the address of the alloca somewhere and using it // later. Since this is undefined, we'll just make it be null. if (!Tmp->use_empty()) Tmp->replaceAllUsesWith(Constant::getNullValue(Tmp->getType())); } /// EvaluateFunction - Evaluate a call to function F, returning true if /// successful, false if we can't evaluate it. ActualArgs contains the formal /// arguments for the function. bool EvaluateFunction(Function *F, Constant *&RetVal, const SmallVectorImpl<Constant*> &ActualArgs); /// EvaluateBlock - Evaluate all instructions in block BB, returning true if /// successful, false if we can't evaluate it. NewBB returns the next BB that /// control flows into, or null upon return. bool EvaluateBlock(BasicBlock::iterator CurInst, BasicBlock *&NextBB); Constant *getVal(Value *V) { if (Constant *CV = dyn_cast<Constant>(V)) return CV; Constant *R = ValueStack.back().lookup(V); assert(R && "Reference to an uncomputed value!"); return R; } void setVal(Value *V, Constant *C) { ValueStack.back()[V] = C; } const DenseMap<Constant*, Constant*> &getMutatedMemory() const { return MutatedMemory; } const SmallPtrSetImpl<GlobalVariable*> &getInvariants() const { return Invariants; } private: Constant *ComputeLoadResult(Constant *P); /// ValueStack - As we compute SSA register values, we store their contents /// here. The back of the deque contains the current function and the stack /// contains the values in the calling frames. std::deque<DenseMap<Value*, Constant*>> ValueStack; /// CallStack - This is used to detect recursion. In pathological situations /// we could hit exponential behavior, but at least there is nothing /// unbounded. SmallVector<Function*, 4> CallStack; /// MutatedMemory - For each store we execute, we update this map. Loads /// check this to get the most up-to-date value. If evaluation is successful, /// this state is committed to the process. DenseMap<Constant*, Constant*> MutatedMemory; /// AllocaTmps - To 'execute' an alloca, we create a temporary global variable /// to represent its body. 
This vector is needed so we can delete the /// temporary globals when we are done. SmallVector<std::unique_ptr<GlobalVariable>, 32> AllocaTmps; /// Invariants - These global variables have been marked invariant by the /// static constructor. SmallPtrSet<GlobalVariable*, 8> Invariants; /// SimpleConstants - These are constants we have checked and know to be /// simple enough to live in a static initializer of a global. SmallPtrSet<Constant*, 8> SimpleConstants; const DataLayout &DL; const TargetLibraryInfo *TLI; }; } // anonymous namespace /// ComputeLoadResult - Return the value that would be computed by a load from /// P after the stores reflected by 'memory' have been performed. If we can't /// decide, return null. Constant *Evaluator::ComputeLoadResult(Constant *P) { // If this memory location has been recently stored, use the stored value: it // is the most up-to-date. DenseMap<Constant*, Constant*>::const_iterator I = MutatedMemory.find(P); if (I != MutatedMemory.end()) return I->second; // Access it. if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) { if (GV->hasDefinitiveInitializer()) return GV->getInitializer(); return nullptr; } // Handle a constantexpr getelementptr. if (ConstantExpr *CE = dyn_cast<ConstantExpr>(P)) if (CE->getOpcode() == Instruction::GetElementPtr && isa<GlobalVariable>(CE->getOperand(0))) { GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0)); if (GV->hasDefinitiveInitializer()) return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE); } return nullptr; // don't know how to evaluate. } /// EvaluateBlock - Evaluate all instructions in block BB, returning true if /// successful, false if we can't evaluate it. NewBB returns the next BB that /// control flows into, or null upon return. bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, BasicBlock *&NextBB) { // This is the main evaluation loop. while (1) { Constant *InstResult = nullptr; DEBUG(dbgs() << "Evaluating Instruction: " << *CurInst << "\n"); if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) { if (!SI->isSimple()) { DEBUG(dbgs() << "Store is not simple! Can not evaluate.\n"); return false; // no volatile/atomic accesses. } Constant *Ptr = getVal(SI->getOperand(1)); if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) { DEBUG(dbgs() << "Folding constant ptr expression: " << *Ptr); Ptr = ConstantFoldConstantExpression(CE, DL, TLI); DEBUG(dbgs() << "; To: " << *Ptr << "\n"); } if (!isSimpleEnoughPointerToCommit(Ptr)) { // If this is too complex for us to commit, reject it. DEBUG(dbgs() << "Pointer is too complex for us to evaluate store."); return false; } Constant *Val = getVal(SI->getOperand(0)); // If this might be too difficult for the backend to handle (e.g. the addr // of one global variable divided by another) then we can't commit it. if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, DL)) { DEBUG(dbgs() << "Store value is too complex to evaluate store. " << *Val << "\n"); return false; } if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) { if (CE->getOpcode() == Instruction::BitCast) { DEBUG(dbgs() << "Attempting to resolve bitcast on constant ptr.\n"); // If we're evaluating a store through a bitcast, then we need // to pull the bitcast off the pointer type and push it onto the // stored value. Ptr = CE->getOperand(0); Type *NewTy = cast<PointerType>(Ptr->getType())->getElementType(); // In order to push the bitcast onto the stored value, a bitcast // from NewTy to Val's type must be legal. 
If it's not, we can try // introspecting NewTy to find a legal conversion. while (!Val->getType()->canLosslesslyBitCastTo(NewTy)) { // If NewTy is a struct, we can convert the pointer to the struct // into a pointer to its first member. // FIXME: This could be extended to support arrays as well. if (StructType *STy = dyn_cast<StructType>(NewTy)) { NewTy = STy->getTypeAtIndex(0U); IntegerType *IdxTy = IntegerType::get(NewTy->getContext(), 32); Constant *IdxZero = ConstantInt::get(IdxTy, 0, false); Constant * const IdxList[] = {IdxZero, IdxZero}; Ptr = ConstantExpr::getGetElementPtr(nullptr, Ptr, IdxList); if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) Ptr = ConstantFoldConstantExpression(CE, DL, TLI); // If we can't improve the situation by introspecting NewTy, // we have to give up. } else { DEBUG(dbgs() << "Failed to bitcast constant ptr, can not " "evaluate.\n"); return false; } } // If we found compatible types, go ahead and push the bitcast // onto the stored value. Val = ConstantExpr::getBitCast(Val, NewTy); DEBUG(dbgs() << "Evaluated bitcast: " << *Val << "\n"); } } MutatedMemory[Ptr] = Val; } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CurInst)) { InstResult = ConstantExpr::get(BO->getOpcode(), getVal(BO->getOperand(0)), getVal(BO->getOperand(1))); DEBUG(dbgs() << "Found a BinaryOperator! Simplifying: " << *InstResult << "\n"); } else if (CmpInst *CI = dyn_cast<CmpInst>(CurInst)) { InstResult = ConstantExpr::getCompare(CI->getPredicate(), getVal(CI->getOperand(0)), getVal(CI->getOperand(1))); DEBUG(dbgs() << "Found a CmpInst! Simplifying: " << *InstResult << "\n"); } else if (CastInst *CI = dyn_cast<CastInst>(CurInst)) { InstResult = ConstantExpr::getCast(CI->getOpcode(), getVal(CI->getOperand(0)), CI->getType()); DEBUG(dbgs() << "Found a Cast! Simplifying: " << *InstResult << "\n"); } else if (SelectInst *SI = dyn_cast<SelectInst>(CurInst)) { InstResult = ConstantExpr::getSelect(getVal(SI->getOperand(0)), getVal(SI->getOperand(1)), getVal(SI->getOperand(2))); DEBUG(dbgs() << "Found a Select! Simplifying: " << *InstResult << "\n"); } else if (auto *EVI = dyn_cast<ExtractValueInst>(CurInst)) { InstResult = ConstantExpr::getExtractValue( getVal(EVI->getAggregateOperand()), EVI->getIndices()); DEBUG(dbgs() << "Found an ExtractValueInst! Simplifying: " << *InstResult << "\n"); } else if (auto *IVI = dyn_cast<InsertValueInst>(CurInst)) { InstResult = ConstantExpr::getInsertValue( getVal(IVI->getAggregateOperand()), getVal(IVI->getInsertedValueOperand()), IVI->getIndices()); DEBUG(dbgs() << "Found an InsertValueInst! Simplifying: " << *InstResult << "\n"); } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) { Constant *P = getVal(GEP->getOperand(0)); SmallVector<Constant*, 8> GEPOps; for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e; ++i) GEPOps.push_back(getVal(*i)); InstResult = ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), P, GEPOps, cast<GEPOperator>(GEP)->isInBounds()); DEBUG(dbgs() << "Found a GEP! Simplifying: " << *InstResult << "\n"); } else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) { if (!LI->isSimple()) { DEBUG(dbgs() << "Found a Load! Not a simple load, can not evaluate.\n"); return false; // no volatile/atomic accesses. 
} Constant *Ptr = getVal(LI->getOperand(0)); if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) { Ptr = ConstantFoldConstantExpression(CE, DL, TLI); DEBUG(dbgs() << "Found a constant pointer expression, constant " "folding: " << *Ptr << "\n"); } InstResult = ComputeLoadResult(Ptr); if (!InstResult) { DEBUG(dbgs() << "Failed to compute load result. Can not evaluate load." "\n"); return false; // Could not evaluate load. } DEBUG(dbgs() << "Evaluated load: " << *InstResult << "\n"); } else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) { if (AI->isArrayAllocation()) { DEBUG(dbgs() << "Found an array alloca. Can not evaluate.\n"); return false; // Cannot handle array allocs. } Type *Ty = AI->getType()->getElementType(); AllocaTmps.push_back( make_unique<GlobalVariable>(Ty, false, GlobalValue::InternalLinkage, UndefValue::get(Ty), AI->getName())); InstResult = AllocaTmps.back().get(); DEBUG(dbgs() << "Found an alloca. Result: " << *InstResult << "\n"); } else if (isa<CallInst>(CurInst) || isa<InvokeInst>(CurInst)) { CallSite CS(CurInst); // Debug info can safely be ignored here. if (isa<DbgInfoIntrinsic>(CS.getInstruction())) { DEBUG(dbgs() << "Ignoring debug info.\n"); ++CurInst; continue; } // Cannot handle inline asm. if (isa<InlineAsm>(CS.getCalledValue())) { DEBUG(dbgs() << "Found inline asm, can not evaluate.\n"); return false; } if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) { if (MemSetInst *MSI = dyn_cast<MemSetInst>(II)) { if (MSI->isVolatile()) { DEBUG(dbgs() << "Can not optimize a volatile memset " << "intrinsic.\n"); return false; } Constant *Ptr = getVal(MSI->getDest()); Constant *Val = getVal(MSI->getValue()); Constant *DestVal = ComputeLoadResult(getVal(Ptr)); if (Val->isNullValue() && DestVal && DestVal->isNullValue()) { // This memset is a no-op. DEBUG(dbgs() << "Ignoring no-op memset.\n"); ++CurInst; continue; } } if (II->getIntrinsicID() == Intrinsic::lifetime_start || II->getIntrinsicID() == Intrinsic::lifetime_end) { DEBUG(dbgs() << "Ignoring lifetime intrinsic.\n"); ++CurInst; continue; } if (II->getIntrinsicID() == Intrinsic::invariant_start) { // We don't insert an entry into Values, as it doesn't have a // meaningful return value. if (!II->use_empty()) { DEBUG(dbgs() << "Found unused invariant_start. Can't evaluate.\n"); return false; } ConstantInt *Size = cast<ConstantInt>(II->getArgOperand(0)); Value *PtrArg = getVal(II->getArgOperand(1)); Value *Ptr = PtrArg->stripPointerCasts(); if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) { Type *ElemTy = cast<PointerType>(GV->getType())->getElementType(); if (!Size->isAllOnesValue() && Size->getValue().getLimitedValue() >= DL.getTypeStoreSize(ElemTy)) { Invariants.insert(GV); DEBUG(dbgs() << "Found a global var that is an invariant: " << *GV << "\n"); } else { DEBUG(dbgs() << "Found a global var, but can not treat it as an " "invariant.\n"); } } // Continue even if we do nothing. ++CurInst; continue; } DEBUG(dbgs() << "Unknown intrinsic. Can not evaluate.\n"); return false; } // Resolve function pointers. Function *Callee = dyn_cast<Function>(getVal(CS.getCalledValue())); if (!Callee || Callee->mayBeOverridden()) { DEBUG(dbgs() << "Can not resolve function pointer.\n"); return false; // Cannot resolve. } SmallVector<Constant*, 8> Formals; for (User::op_iterator i = CS.arg_begin(), e = CS.arg_end(); i != e; ++i) Formals.push_back(getVal(*i)); if (Callee->isDeclaration()) { // If this is a function we can constant fold, do it. 
if (Constant *C = ConstantFoldCall(Callee, Formals, TLI)) { InstResult = C; DEBUG(dbgs() << "Constant folded function call. Result: " << *InstResult << "\n"); } else { DEBUG(dbgs() << "Can not constant fold function call.\n"); return false; } } else { if (Callee->getFunctionType()->isVarArg()) { DEBUG(dbgs() << "Can not constant fold vararg function call.\n"); return false; } Constant *RetVal = nullptr; // Execute the call, if successful, use the return value. ValueStack.emplace_back(); if (!EvaluateFunction(Callee, RetVal, Formals)) { DEBUG(dbgs() << "Failed to evaluate function.\n"); return false; } ValueStack.pop_back(); InstResult = RetVal; if (InstResult) { DEBUG(dbgs() << "Successfully evaluated function. Result: " << InstResult << "\n\n"); } else { DEBUG(dbgs() << "Successfully evaluated function. Result: 0\n\n"); } } } else if (isa<TerminatorInst>(CurInst)) { DEBUG(dbgs() << "Found a terminator instruction.\n"); if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) { if (BI->isUnconditional()) { NextBB = BI->getSuccessor(0); } else { ConstantInt *Cond = dyn_cast<ConstantInt>(getVal(BI->getCondition())); if (!Cond) return false; // Cannot determine. NextBB = BI->getSuccessor(!Cond->getZExtValue()); } } else if (SwitchInst *SI = dyn_cast<SwitchInst>(CurInst)) { ConstantInt *Val = dyn_cast<ConstantInt>(getVal(SI->getCondition())); if (!Val) return false; // Cannot determine. NextBB = SI->findCaseValue(Val).getCaseSuccessor(); } else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(CurInst)) { Value *Val = getVal(IBI->getAddress())->stripPointerCasts(); if (BlockAddress *BA = dyn_cast<BlockAddress>(Val)) NextBB = BA->getBasicBlock(); else return false; // Cannot determine. } else if (isa<ReturnInst>(CurInst)) { NextBB = nullptr; } else { // invoke, unwind, resume, unreachable. DEBUG(dbgs() << "Can not handle terminator."); return false; // Cannot handle this terminator. } // We succeeded at evaluating this block! DEBUG(dbgs() << "Successfully evaluated block.\n"); return true; } else { // Did not know how to evaluate this! DEBUG(dbgs() << "Failed to evaluate block due to unhandled instruction." "\n"); return false; } if (!CurInst->use_empty()) { if (ConstantExpr *CE = dyn_cast<ConstantExpr>(InstResult)) InstResult = ConstantFoldConstantExpression(CE, DL, TLI); setVal(CurInst, InstResult); } // If we just processed an invoke, we finished evaluating the block. if (InvokeInst *II = dyn_cast<InvokeInst>(CurInst)) { NextBB = II->getNormalDest(); DEBUG(dbgs() << "Found an invoke instruction. Finished Block.\n\n"); return true; } // Advance program counter. ++CurInst; } } /// EvaluateFunction - Evaluate a call to function F, returning true if /// successful, false if we can't evaluate it. ActualArgs contains the formal /// arguments for the function. bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal, const SmallVectorImpl<Constant*> &ActualArgs) { // Check to see if this function is already executing (recursion). If so, // bail out. TODO: we might want to accept limited recursion. if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end()) return false; CallStack.push_back(F); // Initialize arguments to the incoming values specified. unsigned ArgNo = 0; for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E; ++AI, ++ArgNo) setVal(AI, ActualArgs[ArgNo]); // ExecutedBlocks - We only handle non-looping, non-recursive code. As such, // we can only evaluate any one basic block at most once. 
This set keeps // track of what we have executed so we can detect recursive cases etc. SmallPtrSet<BasicBlock*, 32> ExecutedBlocks; // CurBB - The current basic block we're evaluating. BasicBlock *CurBB = F->begin(); BasicBlock::iterator CurInst = CurBB->begin(); while (1) { BasicBlock *NextBB = nullptr; // Initialized to avoid compiler warnings. DEBUG(dbgs() << "Trying to evaluate BB: " << *CurBB << "\n"); if (!EvaluateBlock(CurInst, NextBB)) return false; if (!NextBB) { // Successfully running until there's no next block means that we found // the return. Fill it the return value and pop the call stack. ReturnInst *RI = cast<ReturnInst>(CurBB->getTerminator()); if (RI->getNumOperands()) RetVal = getVal(RI->getOperand(0)); CallStack.pop_back(); return true; } // Okay, we succeeded in evaluating this control flow. See if we have // executed the new block before. If so, we have a looping function, // which we cannot evaluate in reasonable time. if (!ExecutedBlocks.insert(NextBB).second) return false; // looped! // Okay, we have never been in this block before. Check to see if there // are any PHI nodes. If so, evaluate them with information about where // we came from. PHINode *PN = nullptr; for (CurInst = NextBB->begin(); (PN = dyn_cast<PHINode>(CurInst)); ++CurInst) setVal(PN, getVal(PN->getIncomingValueForBlock(CurBB))); // Advance to the next block. CurBB = NextBB; } } /// EvaluateStaticConstructor - Evaluate static constructors in the function, if /// we can. Return true if we can, false otherwise. static bool EvaluateStaticConstructor(Function *F, const DataLayout &DL, const TargetLibraryInfo *TLI) { // Call the function. Evaluator Eval(DL, TLI); Constant *RetValDummy; bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy, SmallVector<Constant*, 0>()); if (EvalSuccess) { ++NumCtorsEvaluated; // We succeeded at evaluation: commit the result. DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '" << F->getName() << "' to " << Eval.getMutatedMemory().size() << " stores.\n"); for (DenseMap<Constant*, Constant*>::const_iterator I = Eval.getMutatedMemory().begin(), E = Eval.getMutatedMemory().end(); I != E; ++I) CommitValueTo(I->second, I->first); for (GlobalVariable *GV : Eval.getInvariants()) GV->setConstant(true); } return EvalSuccess; } // HLSL Change: changed calling convention to __cdecl static int __cdecl compareNames(Constant *const *A, Constant *const *B) { return (*A)->stripPointerCasts()->getName().compare( (*B)->stripPointerCasts()->getName()); } static void setUsedInitializer(GlobalVariable &V, const SmallPtrSet<GlobalValue *, 8> &Init) { if (Init.empty()) { V.eraseFromParent(); return; } // Type of pointer to the array of pointers. PointerType *Int8PtrTy = Type::getInt8PtrTy(V.getContext(), 0); SmallVector<llvm::Constant *, 8> UsedArray; for (GlobalValue *GV : Init) { Constant *Cast = ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV, Int8PtrTy); UsedArray.push_back(Cast); } // Sort to get deterministic order. array_pod_sort(UsedArray.begin(), UsedArray.end(), compareNames); ArrayType *ATy = ArrayType::get(Int8PtrTy, UsedArray.size()); Module *M = V.getParent(); V.removeFromParent(); GlobalVariable *NV = new GlobalVariable(*M, ATy, false, llvm::GlobalValue::AppendingLinkage, llvm::ConstantArray::get(ATy, UsedArray), ""); NV->takeName(&V); NV->setSection("llvm.metadata"); delete &V; } namespace { /// \brief An easy to access representation of llvm.used and llvm.compiler.used. 
class LLVMUsed { SmallPtrSet<GlobalValue *, 8> Used; SmallPtrSet<GlobalValue *, 8> CompilerUsed; GlobalVariable *UsedV; GlobalVariable *CompilerUsedV; public: LLVMUsed(Module &M) { UsedV = collectUsedGlobalVariables(M, Used, false); CompilerUsedV = collectUsedGlobalVariables(M, CompilerUsed, true); } typedef SmallPtrSet<GlobalValue *, 8>::iterator iterator; typedef iterator_range<iterator> used_iterator_range; iterator usedBegin() { return Used.begin(); } iterator usedEnd() { return Used.end(); } used_iterator_range used() { return used_iterator_range(usedBegin(), usedEnd()); } iterator compilerUsedBegin() { return CompilerUsed.begin(); } iterator compilerUsedEnd() { return CompilerUsed.end(); } used_iterator_range compilerUsed() { return used_iterator_range(compilerUsedBegin(), compilerUsedEnd()); } bool usedCount(GlobalValue *GV) const { return Used.count(GV); } bool compilerUsedCount(GlobalValue *GV) const { return CompilerUsed.count(GV); } bool usedErase(GlobalValue *GV) { return Used.erase(GV); } bool compilerUsedErase(GlobalValue *GV) { return CompilerUsed.erase(GV); } bool usedInsert(GlobalValue *GV) { return Used.insert(GV).second; } bool compilerUsedInsert(GlobalValue *GV) { return CompilerUsed.insert(GV).second; } void syncVariablesAndSets() { if (UsedV) setUsedInitializer(*UsedV, Used); if (CompilerUsedV) setUsedInitializer(*CompilerUsedV, CompilerUsed); } }; } static bool hasUseOtherThanLLVMUsed(GlobalAlias &GA, const LLVMUsed &U) { if (GA.use_empty()) // No use at all. return false; assert((!U.usedCount(&GA) || !U.compilerUsedCount(&GA)) && "We should have removed the duplicated " "element from llvm.compiler.used"); if (!GA.hasOneUse()) // Strictly more than one use. So at least one is not in llvm.used and // llvm.compiler.used. return true; // Exactly one use. Check if it is in llvm.used or llvm.compiler.used. return !U.usedCount(&GA) && !U.compilerUsedCount(&GA); } static bool hasMoreThanOneUseOtherThanLLVMUsed(GlobalValue &V, const LLVMUsed &U) { unsigned N = 2; assert((!U.usedCount(&V) || !U.compilerUsedCount(&V)) && "We should have removed the duplicated " "element from llvm.compiler.used"); if (U.usedCount(&V) || U.compilerUsedCount(&V)) ++N; return V.hasNUsesOrMore(N); } static bool mayHaveOtherReferences(GlobalAlias &GA, const LLVMUsed &U) { if (!GA.hasLocalLinkage()) return true; return U.usedCount(&GA) || U.compilerUsedCount(&GA); } static bool hasUsesToReplace(GlobalAlias &GA, const LLVMUsed &U, bool &RenameTarget) { RenameTarget = false; bool Ret = false; if (hasUseOtherThanLLVMUsed(GA, U)) Ret = true; // If the alias is externally visible, we may still be able to simplify it. if (!mayHaveOtherReferences(GA, U)) return Ret; // If the aliasee has internal linkage, give it the name and linkage // of the alias, and delete the alias. This turns: // define internal ... @f(...) // @a = alias ... @f // into: // define ... @a(...) Constant *Aliasee = GA.getAliasee(); GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts()); if (!Target->hasLocalLinkage()) return Ret; // Do not perform the transform if multiple aliases potentially target the // aliasee. This check also ensures that it is safe to replace the section // and other attributes of the aliasee with those of the alias. 
if (hasMoreThanOneUseOtherThanLLVMUsed(*Target, U)) return Ret; RenameTarget = true; return true; } bool GlobalOpt::OptimizeGlobalAliases(Module &M) { bool Changed = false; LLVMUsed Used(M); for (GlobalValue *GV : Used.used()) Used.compilerUsedErase(GV); for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end(); I != E;) { Module::alias_iterator J = I++; // Aliases without names cannot be referenced outside this module. if (!J->hasName() && !J->isDeclaration() && !J->hasLocalLinkage()) J->setLinkage(GlobalValue::InternalLinkage); // If the aliasee may change at link time, nothing can be done - bail out. if (J->mayBeOverridden()) continue; Constant *Aliasee = J->getAliasee(); GlobalValue *Target = dyn_cast<GlobalValue>(Aliasee->stripPointerCasts()); // We can't trivially replace the alias with the aliasee if the aliasee is // non-trivial in some way. // TODO: Try to handle non-zero GEPs of local aliasees. if (!Target) continue; Target->removeDeadConstantUsers(); // Make all users of the alias use the aliasee instead. bool RenameTarget; if (!hasUsesToReplace(*J, Used, RenameTarget)) continue; J->replaceAllUsesWith(ConstantExpr::getBitCast(Aliasee, J->getType())); ++NumAliasesResolved; Changed = true; if (RenameTarget) { // Give the aliasee the name, linkage and other attributes of the alias. Target->takeName(J); Target->setLinkage(J->getLinkage()); Target->setVisibility(J->getVisibility()); Target->setDLLStorageClass(J->getDLLStorageClass()); if (Used.usedErase(J)) Used.usedInsert(Target); if (Used.compilerUsedErase(J)) Used.compilerUsedInsert(Target); } else if (mayHaveOtherReferences(*J, Used)) continue; // Delete the alias. M.getAliasList().erase(J); ++NumAliasesRemoved; Changed = true; } Used.syncVariablesAndSets(); return Changed; } static Function *FindCXAAtExit(Module &M, TargetLibraryInfo *TLI) { if (!TLI->has(LibFunc::cxa_atexit)) return nullptr; Function *Fn = M.getFunction(TLI->getName(LibFunc::cxa_atexit)); if (!Fn) return nullptr; FunctionType *FTy = Fn->getFunctionType(); // Checking that the function has the right return type, the right number of // parameters and that they all have pointer types should be enough. if (!FTy->getReturnType()->isIntegerTy() || FTy->getNumParams() != 3 || !FTy->getParamType(0)->isPointerTy() || !FTy->getParamType(1)->isPointerTy() || !FTy->getParamType(2)->isPointerTy()) return nullptr; return Fn; } /// cxxDtorIsEmpty - Returns whether the given function is an empty C++ /// destructor and can therefore be eliminated. /// Note that we assume that other optimization passes have already simplified /// the code so we only look for a function with a single basic block, where /// the only allowed instructions are 'ret', 'call' to an empty C++ dtor and /// other side-effect free instructions. static bool cxxDtorIsEmpty(const Function &Fn, SmallPtrSet<const Function *, 8> &CalledFunctions) { // FIXME: We could eliminate C++ destructors if they're readonly/readnone and // nounwind, but that doesn't seem worth doing. if (Fn.isDeclaration()) return false; if (++Fn.begin() != Fn.end()) return false; const BasicBlock &EntryBlock = Fn.getEntryBlock(); for (BasicBlock::const_iterator I = EntryBlock.begin(), E = EntryBlock.end(); I != E; ++I) { if (const CallInst *CI = dyn_cast<CallInst>(I)) { // Ignore debug intrinsics. if (isa<DbgInfoIntrinsic>(CI)) continue; const Function *CalledFn = CI->getCalledFunction(); if (!CalledFn) return false; SmallPtrSet<const Function *, 8> NewCalledFunctions(CalledFunctions); // Don't treat recursive functions as empty. 
if (!NewCalledFunctions.insert(CalledFn).second) return false; if (!cxxDtorIsEmpty(*CalledFn, NewCalledFunctions)) return false; } else if (isa<ReturnInst>(*I)) return true; // We're done. else if (I->mayHaveSideEffects()) return false; // Destructor with side effects, bail. } return false; } bool GlobalOpt::OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) { /// Itanium C++ ABI p3.3.5: /// /// After constructing a global (or local static) object, that will require /// destruction on exit, a termination function is registered as follows: /// /// extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d ); /// /// This registration, e.g. __cxa_atexit(f,p,d), is intended to cause the /// call f(p) when DSO d is unloaded, before all such termination calls /// registered before this one. It returns zero if registration is /// successful, nonzero on failure. // This pass will look for calls to __cxa_atexit where the function is trivial // and remove them. bool Changed = false; for (auto I = CXAAtExitFn->user_begin(), E = CXAAtExitFn->user_end(); I != E;) { // We're only interested in calls. Theoretically, we could handle invoke // instructions as well, but neither llvm-gcc nor clang generate invokes // to __cxa_atexit. CallInst *CI = dyn_cast<CallInst>(*I++); if (!CI) continue; Function *DtorFn = dyn_cast<Function>(CI->getArgOperand(0)->stripPointerCasts()); if (!DtorFn) continue; SmallPtrSet<const Function *, 8> CalledFunctions; if (!cxxDtorIsEmpty(*DtorFn, CalledFunctions)) continue; // Just remove the call. CI->replaceAllUsesWith(Constant::getNullValue(CI->getType())); CI->eraseFromParent(); ++NumCXXDtorsRemoved; Changed |= true; } return Changed; } bool GlobalOpt::runOnModule(Module &M) { bool Changed = false; auto &DL = M.getDataLayout(); TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); bool LocalChange = true; while (LocalChange) { LocalChange = false; NotDiscardableComdats.clear(); for (const GlobalVariable &GV : M.globals()) if (const Comdat *C = GV.getComdat()) if (!GV.isDiscardableIfUnused() || !GV.use_empty()) NotDiscardableComdats.insert(C); for (Function &F : M) if (const Comdat *C = F.getComdat()) if (!F.isDefTriviallyDead()) NotDiscardableComdats.insert(C); for (GlobalAlias &GA : M.aliases()) if (const Comdat *C = GA.getComdat()) if (!GA.isDiscardableIfUnused() || !GA.use_empty()) NotDiscardableComdats.insert(C); // Delete functions that are trivially dead, ccc -> fastcc LocalChange |= OptimizeFunctions(M); // Optimize global_ctors list. LocalChange |= optimizeGlobalCtorsList(M, [&](Function *F) { return EvaluateStaticConstructor(F, DL, TLI); }); // Optimize non-address-taken globals. LocalChange |= OptimizeGlobalVars(M); // Resolve aliases, when possible. LocalChange |= OptimizeGlobalAliases(M); // Try to remove trivial global destructors if they are not removed // already. Function *CXAAtExitFn = FindCXAAtExit(M, TLI); if (CXAAtExitFn) LocalChange |= OptimizeEmptyGlobalCXXDtors(CXAAtExitFn); Changed |= LocalChange; } // TODO: Move all global ctors functions to the end of the module for code // layout. return Changed; }
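// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the upstream file): the evaluator
// and the global transformations above are normally driven through the
// GlobalOpt pass implemented in this file. The guarded snippet below shows
// one minimal way to run it over a module with the legacy pass manager; the
// helper name `runGlobalOptSketch` is hypothetical, while the LLVM APIs it
// calls are real.
#if 0 // sketch only, kept out of the build
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/IPO.h"

static void runGlobalOptSketch(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  // createGlobalOptimizerPass() constructs the GlobalOpt pass defined in this
  // file; running it evaluates static constructors (committing their stores
  // back into global initializers), optimizes non-address-taken globals, and
  // resolves aliases where legal.
  PM.add(llvm::createGlobalOptimizerPass());
  PM.run(M);
}
#endif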
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/IPO/ExtractGV.cpp
//===-- ExtractGV.cpp - Global Value extraction pass ----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass extracts global values.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include <algorithm>

using namespace llvm;

/// Make sure GV is visible from both modules. Delete is true if it is
/// being deleted from this module.
/// This also makes sure GV cannot be dropped so that references from
/// the split module remain valid.
static void makeVisible(GlobalValue &GV, bool Delete) {
  bool Local = GV.hasLocalLinkage();
  if (Local || Delete) {
    GV.setLinkage(GlobalValue::ExternalLinkage);
    if (Local)
      GV.setVisibility(GlobalValue::HiddenVisibility);
    return;
  }

  if (!GV.hasLinkOnceLinkage()) {
    assert(!GV.isDiscardableIfUnused());
    return;
  }

  // Map linkonce* to weak* so that llvm doesn't drop this GV.
  switch (GV.getLinkage()) {
  default:
    llvm_unreachable("Unexpected linkage");
  case GlobalValue::LinkOnceAnyLinkage:
    GV.setLinkage(GlobalValue::WeakAnyLinkage);
    return;
  case GlobalValue::LinkOnceODRLinkage:
    GV.setLinkage(GlobalValue::WeakODRLinkage);
    return;
  }
}

namespace {
  /// @brief A pass to extract specific functions and their dependencies.
  class GVExtractorPass : public ModulePass {
    SetVector<GlobalValue *> Named;
    bool deleteStuff;
  public:
    static char ID; // Pass identification, replacement for typeid

    /// FunctionExtractorPass - If deleteFn is true, this pass deletes the
    /// specified function. Otherwise, it deletes as much of the module as
    /// possible, except for the function specified.
    ///
    explicit GVExtractorPass(std::vector<GlobalValue*> &GVs,
                             bool deleteS = true)
      : ModulePass(ID), Named(GVs.begin(), GVs.end()), deleteStuff(deleteS) {}

    bool runOnModule(Module &M) override {
      // Visit the global inline asm.
      if (!deleteStuff)
        M.setModuleInlineAsm("");

      // For simplicity, just give all GlobalValues ExternalLinkage. A trickier
      // implementation could figure out which GlobalValues are actually
      // referenced by the Named set, and which GlobalValues in the rest of
      // the module are referenced by the NamedSet, and get away with leaving
      // more internal and private things internal and private. But for now,
      // be conservative and simple.

      // Visit the GlobalVariables.
      for (Module::global_iterator I = M.global_begin(), E = M.global_end();
           I != E; ++I) {
        bool Delete =
            deleteStuff == (bool)Named.count(I) && !I->isDeclaration();
        if (!Delete) {
          if (I->hasAvailableExternallyLinkage())
            continue;
          if (I->getName() == "llvm.global_ctors")
            continue;
        }

        makeVisible(*I, Delete);

        if (Delete) {
          // Make this a declaration and drop its comdat.
          I->setInitializer(nullptr);
          I->setComdat(nullptr);
        }
      }

      // Visit the Functions.
      for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
        bool Delete =
            deleteStuff == (bool)Named.count(I) && !I->isDeclaration();
        if (!Delete) {
          if (I->hasAvailableExternallyLinkage())
            continue;
        }

        makeVisible(*I, Delete);

        if (Delete) {
          // Make this a declaration and drop its comdat.
          I->deleteBody();
          I->setComdat(nullptr);
        }
      }

      // Visit the Aliases.
      for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end();
           I != E;) {
        Module::alias_iterator CurI = I;
        ++I;

        bool Delete = deleteStuff == (bool)Named.count(CurI);
        makeVisible(*CurI, Delete);

        if (Delete) {
          Type *Ty = CurI->getType()->getElementType();

          CurI->removeFromParent();
          llvm::Value *Declaration;
          if (FunctionType *FTy = dyn_cast<FunctionType>(Ty)) {
            Declaration = Function::Create(FTy, GlobalValue::ExternalLinkage,
                                           CurI->getName(), &M);
          } else {
            Declaration =
                new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage,
                                   nullptr, CurI->getName());
          }
          CurI->replaceAllUsesWith(Declaration);
          delete CurI;
        }
      }

      return true;
    }
  };

  char GVExtractorPass::ID = 0;
}

ModulePass *llvm::createGVExtractionPass(std::vector<GlobalValue *> &GVs,
                                         bool deleteFn) {
  return new GVExtractorPass(GVs, deleteFn);
}
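// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the upstream file): this is how an
// llvm-extract style tool can drive GVExtractorPass through the public
// createGVExtractionPass() factory above. The helper name `extractOnlySketch`
// is hypothetical; the LLVM APIs it calls are real.
#if 0 // sketch only, kept out of the build
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/IPO.h"
#include <vector>

static void extractOnlySketch(llvm::Module &M, llvm::StringRef Name) {
  std::vector<llvm::GlobalValue *> Keep;
  if (llvm::Function *F = M.getFunction(Name))
    Keep.push_back(F);

  llvm::legacy::PassManager PM;
  // With deleteFn == false the pass keeps the named values and strips the
  // rest of the module: runOnModule() above turns the remaining definitions
  // into external declarations via makeVisible(), so references from the
  // kept function stay valid.
  PM.add(llvm::createGVExtractionPass(Keep, /*deleteFn=*/false));
  PM.run(M);
}
#endif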
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/IPO/PassManagerBuilder.cpp
//===- PassManagerBuilder.cpp - Build Standard Pass -----------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the PassManagerBuilder class, which is used to set up a // "standard" optimization sequence suitable for languages like C and C++. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/IPO/PassManagerBuilder.h" #include "llvm-c/Transforms/PassManagerBuilder.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Analysis/Passes.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Verifier.h" #include "llvm/IR/LegacyPassManager.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/ManagedStatic.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Transforms/IPO.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Vectorize.h" #include "dxc/HLSL/DxilGenerationPass.h" // HLSL Change #include "dxc/HLSL/HLMatrixLowerPass.h" // HLSL Change #include "dxc/HLSL/ComputeViewIdState.h" // HLSL Change #include "llvm/Analysis/DxilValueCache.h" // HLSL Change using namespace llvm; #if HLSL_VECTORIZATION_ENABLED // HLSL Change - don't build vectorization passes static cl::opt<bool> RunLoopVectorization("vectorize-loops", cl::Hidden, cl::desc("Run the Loop vectorization passes")); static cl::opt<bool> RunSLPVectorization("vectorize-slp", cl::Hidden, cl::desc("Run the SLP vectorization passes")); static cl::opt<bool> RunBBVectorization("vectorize-slp-aggressive", cl::Hidden, cl::desc("Run the BB vectorization passes")); static cl::opt<bool> UseGVNAfterVectorization("use-gvn-after-vectorization", cl::init(false), cl::Hidden, cl::desc("Run GVN instead of Early CSE after vectorization passes")); static cl::opt<bool> ExtraVectorizerPasses( "extra-vectorizer-passes", cl::init(false), cl::Hidden, cl::desc("Run cleanup optimization passes after vectorization.")); static cl::opt<bool> UseNewSROA("use-new-sroa", cl::init(true), cl::Hidden, cl::desc("Enable the new, experimental SROA pass")); static cl::opt<bool> RunLoopRerolling("reroll-loops", cl::Hidden, cl::desc("Run the loop rerolling pass")); static cl::opt<bool> RunFloat2Int("float-to-int", cl::Hidden, cl::init(true), cl::desc("Run the float2int (float demotion) pass")); static cl::opt<bool> RunLoadCombine("combine-loads", cl::init(false), cl::Hidden, cl::desc("Run the load combining pass")); static cl::opt<bool> RunSLPAfterLoopVectorization("run-slp-after-loop-vectorization", cl::init(true), cl::Hidden, cl::desc("Run the SLP vectorizer (and BB vectorizer) after the Loop " "vectorizer instead of before")); static cl::opt<bool> UseCFLAA("use-cfl-aa", cl::init(false), cl::Hidden, cl::desc("Enable the new, experimental CFL alias analysis")); static cl::opt<bool> EnableMLSM("mlsm", cl::init(true), cl::Hidden, cl::desc("Enable motion of merged load and store")); static cl::opt<bool> EnableLoopInterchange( "enable-loopinterchange", cl::init(false), cl::Hidden, cl::desc("Enable the new, experimental LoopInterchange Pass")); static cl::opt<bool> EnableLoopDistribute( "enable-loop-distribute", cl::init(false), cl::Hidden, cl::desc("Enable the new, experimental LoopDistribution Pass")); #else // Don't declare the 'false' counterparts - simply avoid altogether. 
static const bool UseNewSROA = true; static const bool RunLoopRerolling = false; static const bool RunFloat2Int = true; static const bool RunLoadCombine = false; #if HLSL_VECTORIZATION_ENABLED // HLSL Change - don't build vectorization passes static const bool RunSLPAfterLoopVectorization = true; #endif // HLSL Change static const bool UseCFLAA = false; static const bool EnableMLSM = true; static const bool EnableLoopInterchange = false; static const bool EnableLoopDistribute = false; #endif // HLSL Change - don't build vectorization passes PassManagerBuilder::PassManagerBuilder() { OptLevel = 2; SizeLevel = 0; LibraryInfo = nullptr; Inliner = nullptr; DisableUnitAtATime = false; DisableUnrollLoops = false; #if HLSL_VECTORIZATION_ENABLED // HLSL Change - don't build vectorization passes BBVectorize = RunBBVectorization; SLPVectorize = RunSLPVectorization; LoopVectorize = RunLoopVectorization; #else BBVectorize = SLPVectorize = LoopVectorize = false; #endif RerollLoops = RunLoopRerolling; LoadCombine = RunLoadCombine; DisableGVNLoadPRE = false; VerifyInput = false; VerifyOutput = false; MergeFunctions = false; PrepareForLTO = false; } PassManagerBuilder::~PassManagerBuilder() { delete LibraryInfo; delete Inliner; } #if 0 // HLSL Change Starts - no global extensions /// Set of global extensions, automatically added as part of the standard set. static ManagedStatic<SmallVector<std::pair<PassManagerBuilder::ExtensionPointTy, PassManagerBuilder::ExtensionFn>, 8> > GlobalExtensions; #endif // HLSL Change Ends #if 0 // HLSL Change Starts - no global extensions void PassManagerBuilder::addGlobalExtension( PassManagerBuilder::ExtensionPointTy Ty, PassManagerBuilder::ExtensionFn Fn) { GlobalExtensions->push_back(std::make_pair(Ty, Fn)); } #endif // HLSL Change Ends void PassManagerBuilder::addExtension(ExtensionPointTy Ty, ExtensionFn Fn) { Extensions.push_back(std::make_pair(Ty, Fn)); } void PassManagerBuilder::addExtensionsToPM(ExtensionPointTy ETy, legacy::PassManagerBase &PM) const { #if 0 // HLSL Change Starts - no global extensions for (unsigned i = 0, e = GlobalExtensions->size(); i != e; ++i) if ((*GlobalExtensions)[i].first == ETy) (*GlobalExtensions)[i].second(*this, PM); for (unsigned i = 0, e = Extensions.size(); i != e; ++i) if (Extensions[i].first == ETy) Extensions[i].second(*this, PM); #endif // HLSL Change Ends } void PassManagerBuilder::addInitialAliasAnalysisPasses( legacy::PassManagerBase &PM) const { // Add TypeBasedAliasAnalysis before BasicAliasAnalysis so that // BasicAliasAnalysis wins if they disagree. This is intended to help // support "obvious" type-punning idioms. if (UseCFLAA) PM.add(createCFLAliasAnalysisPass()); PM.add(createTypeBasedAliasAnalysisPass()); PM.add(createScopedNoAliasAAPass()); PM.add(createBasicAliasAnalysisPass()); } void PassManagerBuilder::populateFunctionPassManager( legacy::FunctionPassManager &FPM) { addExtensionsToPM(EP_EarlyAsPossible, FPM); // Add LibraryInfo if we have some. if (LibraryInfo) FPM.add(new TargetLibraryInfoWrapperPass(*LibraryInfo)); if (OptLevel == 0) return; addInitialAliasAnalysisPasses(FPM); FPM.add(createCFGSimplificationPass()); // HLSL Change - don't run SROA. // HLSL uses special SROA added in addHLSLPasses. if (HLSLHighLevel) { // HLSL Change if (UseNewSROA) FPM.add(createSROAPass()); else FPM.add(createScalarReplAggregatesPass()); } // HLSL Change. 
FPM.add(createEarlyCSEPass()); FPM.add(createLowerExpectIntrinsicPass()); } // HLSL Change Starts void PassManagerBuilder::addHLSLPasses(legacy::PassManagerBase &MPM) { // Don't do any lowering if we're targeting high-level. if (HLSLHighLevel) { MPM.add(createHLEmitMetadataPass()); return; } MPM.add(createDxilCleanupAddrSpaceCastPass()); MPM.add(createHLPreprocessPass()); bool NoOpt = OptLevel == 0; if (!NoOpt) { MPM.add(createHLDeadFunctionEliminationPass()); } // Do this before scalarrepl-param-hlsl for opportunities to move things // like resource arrays to alloca, allowing more likely memcpy replacement. MPM.add(createLowerStaticGlobalIntoAlloca()); // Expand buffer store intrinsics before we SROA MPM.add(createHLExpandStoreIntrinsicsPass()); // Split struct and array of parameter. MPM.add(createSROA_Parameter_HLSL()); MPM.add(createHLMatrixLowerPass()); // DCE should after SROA to remove unused element. MPM.add(createDeadCodeEliminationPass()); MPM.add(createGlobalDCEPass()); if (NoOpt) { // If not run mem2reg, try to promote allocas used by EvalOperations. // Do this before change vector to array. MPM.add(createDxilLegalizeEvalOperationsPass()); } // This should go between matrix lower and dynamic indexing vector to array, // because matrix lower may create dynamically indexed global vectors, // which should become locals. If they are turned into arrays first, // this pass will ignore them as it only works on scalars and vectors. MPM.add(createLowerStaticGlobalIntoAlloca()); // Change dynamic indexing vector to array. MPM.add(createDynamicIndexingVectorToArrayPass(false /* ReplaceAllVector */)); // Rotate the loops before, mem2reg, since it messes up dbg.value's MPM.add(createLoopRotatePass()); // mem2reg // Special Mem2Reg pass that skips precise marker. MPM.add(createDxilConditionalMem2RegPass(NoOpt)); MPM.add(createDxilDeleteRedundantDebugValuesPass()); // Remove unneeded dxbreak conditionals MPM.add(createCleanupDxBreakPass()); if (!NoOpt) { MPM.add(createDxilConvergentMarkPass()); // Clean up inefficiencies that can cause unnecessary live values related to // lifetime marker cleanup blocks. This is the earliest possible location // without interfering with HLSL-specific lowering. // Partial lifetime markers don't have cleanup blocks, so these passes are // unnecessary. if (HLSLEnableLifetimeMarkers && !HLSLEnablePartialLifetimeMarkers) { MPM.add(createSROAPass()); MPM.add(createSimplifyInstPass()); MPM.add(createJumpThreadingPass()); } } if (!NoOpt) MPM.add(createSimplifyInstPass()); if (!NoOpt) MPM.add(createCFGSimplificationPass()); MPM.add(createDxilPromoteLocalResources()); MPM.add(createDxilPromoteStaticResources()); // Verify no undef resource again after promotion MPM.add(createInvalidateUndefResourcesPass()); MPM.add(createDxilGenerationPass(NoOpt, this->HLSLExtensionsCodeGen)); // Propagate precise attribute. MPM.add(createDxilPrecisePropagatePass()); if (!NoOpt) MPM.add(createSimplifyInstPass()); // scalarize vector to scalar MPM.add(createScalarizerPass(!NoOpt /* AllowFolding */)); // Remove vector instructions MPM.add(createDxilEliminateVectorPass()); // Passes to handle [unroll] // Needs to happen after SROA since loop count may depend on // struct members. // Needs to happen before resources are lowered and before HL // module is gone. MPM.add(createDxilLoopUnrollPass(1024, HLSLOnlyWarnOnUnrollFail, StructurizeLoopExitsForUnroll)); // Default unroll pass. This is purely for optimizing loops without // attributes. 
if (OptLevel > 2) { MPM.add(createLoopUnrollPass(-1, -1, -1, -1, StructurizeLoopExitsForUnroll)); } if (!NoOpt) MPM.add(createSimplifyInstPass()); if (!NoOpt) MPM.add(createCFGSimplificationPass()); MPM.add(createDeadCodeEliminationPass()); if (OptLevel > 0) { MPM.add(createDxilFixConstArrayInitializerPass()); } } // HLSL Change Ends void PassManagerBuilder::populateModulePassManager( legacy::PassManagerBase &MPM) { // If all optimizations are disabled, just run the always-inline pass and, // if enabled, the function merging pass. if (OptLevel == 0) { if (!HLSLHighLevel) { MPM.add(createHLEnsureMetadataPass()); // HLSL Change - rehydrate metadata from high-level codegen } MPM.add(createDxilRewriteOutputArgDebugInfoPass()); // Fix output argument types. if (!HLSLHighLevel) if (HLSLEnableDebugNops) MPM.add(createDxilInsertPreservesPass(HLSLAllowPreserveValues)); // HLSL Change - insert preserve instructions if (Inliner) { MPM.add(createHLLegalizeParameter()); // HLSL Change - legalize parameters // before inline. MPM.add(Inliner); Inliner = nullptr; } // FIXME: The BarrierNoopPass is a HACK! The inliner pass above implicitly // creates a CGSCC pass manager, but we don't want to add extensions into // that pass manager. To prevent this we insert a no-op module pass to reset // the pass manager to get the same behavior as EP_OptimizerLast in non-O0 // builds. The function merging pass is if (MergeFunctions) MPM.add(createMergeFunctionsPass()); else if (!Extensions.empty()) // HLSL Change - GlobalExtensions not considered MPM.add(createBarrierNoopPass()); if (!HLSLHighLevel) MPM.add(createDxilPreserveToSelectPass()); // HLSL Change - lower preserve instructions to selects addExtensionsToPM(EP_EnabledOnOptLevel0, MPM); // HLSL Change Begins. addHLSLPasses(MPM); if (!HLSLHighLevel) { MPM.add(createDxilConvergentClearPass()); MPM.add(createDxilSimpleGVNEliminateRegionPass()); MPM.add(createDeadCodeEliminationPass()); MPM.add(createDxilRemoveDeadBlocksPass()); MPM.add(createDxilEraseDeadRegionPass()); MPM.add(createDxilNoOptSimplifyInstructionsPass()); MPM.add(createGlobalOptimizerPass()); MPM.add(createMultiDimArrayToOneDimArrayPass()); MPM.add(createDeadCodeEliminationPass()); MPM.add(createGlobalDCEPass()); MPM.add(createDxilMutateResourceToHandlePass()); MPM.add(createDxilCleanupDynamicResourceHandlePass()); MPM.add(createDxilLowerCreateHandleForLibPass()); MPM.add(createDxilTranslateRawBuffer()); MPM.add(createDxilLegalizeSampleOffsetPass()); MPM.add(createDxilNoOptLegalizePass()); MPM.add(createDxilFinalizePreservesPass()); MPM.add(createDxilFinalizeModulePass()); MPM.add(createComputeViewIdStatePass()); MPM.add(createDxilDeadFunctionEliminationPass()); MPM.add(createDxilDeleteRedundantDebugValuesPass()); MPM.add(createNoPausePassesPass()); MPM.add(createDxilEmitMetadataPass()); } // HLSL Change Ends. return; } if (!HLSLHighLevel) { MPM.add(createHLEnsureMetadataPass()); // HLSL Change - rehydrate metadata from high-level codegen } // HLSL Change Begins MPM.add(createDxilRewriteOutputArgDebugInfoPass()); // Fix output argument types. MPM.add(createHLLegalizeParameter()); // legalize parameters before inline. if (HLSLEarlyInlining && Inliner) { MPM.add(Inliner); Inliner = nullptr; } addHLSLPasses(MPM); // HLSL Change Ends // Add LibraryInfo if we have some. 
if (LibraryInfo) MPM.add(new TargetLibraryInfoWrapperPass(*LibraryInfo)); addInitialAliasAnalysisPasses(MPM); if (!DisableUnitAtATime) { addExtensionsToPM(EP_ModuleOptimizerEarly, MPM); MPM.add(createIPSCCPPass()); // IP SCCP MPM.add(createGlobalOptimizerPass()); // Optimize out global vars MPM.add(createDeadArgEliminationPass()); // Dead argument elimination MPM.add(createInstructionCombiningPass(HLSLNoSink));// Clean up after IPCP & DAE addExtensionsToPM(EP_Peephole, MPM); MPM.add(createCFGSimplificationPass()); // Clean up after IPCP & DAE } // Start of CallGraph SCC passes. if (!DisableUnitAtATime) MPM.add(createPruneEHPass()); // Remove dead EH info if (Inliner) { MPM.add(Inliner); Inliner = nullptr; } if (!DisableUnitAtATime) MPM.add(createFunctionAttrsPass()); // Set readonly/readnone attrs #if 0 // HLSL Change Starts: Disable ArgumentPromotion if (OptLevel > 2) MPM.add(createArgumentPromotionPass()); // Scalarize uninlined fn args #endif // HLSL Change Ends // Start of function pass. // Break up aggregate allocas, using SSAUpdater. if (UseNewSROA) MPM.add(createSROAPass(/*RequiresDomTree*/ false)); else MPM.add(createScalarReplAggregatesPass(-1, false)); // HLSL Change. MPM.add(createEarlyCSEPass()); // Catch trivial redundancies // HLSL Change. MPM.add(createJumpThreadingPass()); // Thread jumps. MPM.add(createCorrelatedValuePropagationPass()); // Propagate conditionals MPM.add(createCFGSimplificationPass()); // Merge & remove BBs MPM.add(createInstructionCombiningPass(HLSLNoSink)); // Combine silly seq's addExtensionsToPM(EP_Peephole, MPM); // HLSL Change Begins. // HLSL does not allow recursize functions. //MPM.add(createTailCallEliminationPass()); // Eliminate tail calls // HLSL Change Ends. MPM.add(createCFGSimplificationPass()); // Merge & remove BBs MPM.add(createReassociatePass( HLSLEnableAggressiveReassociation)); // Reassociate expressions // Rotate Loop - disable header duplication at -Oz MPM.add(createLoopRotatePass(SizeLevel == 2 ? 0 : -1)); // HLSL Change - disable LICM in frontend for not consider register pressure. //MPM.add(createLICMPass()); // Hoist loop invariants //MPM.add(createLoopUnswitchPass(SizeLevel || OptLevel < 3)); // HLSL Change - may move barrier inside divergent if. MPM.add(createInstructionCombiningPass(HLSLNoSink)); MPM.add(createIndVarSimplifyPass()); // Canonicalize indvars // HLSL Change Begins // Don't allow loop idiom pass which may insert memset/memcpy thereby breaking the dxil //MPM.add(createLoopIdiomPass()); // Recognize idioms like memset. // HLSL Change Ends MPM.add(createLoopDeletionPass()); // Delete dead loops if (EnableLoopInterchange) { MPM.add(createLoopInterchangePass()); // Interchange loops MPM.add(createCFGSimplificationPass()); } if (!DisableUnrollLoops) MPM.add(createSimpleLoopUnrollPass()); // Unroll small loops addExtensionsToPM(EP_LoopOptimizerEnd, MPM); if (OptLevel > 1) { if (EnableMLSM) MPM.add(createMergedLoadStoreMotionPass()); // Merge ld/st in diamonds // HLSL Change Begins if (EnableGVN) { MPM.add(createGVNPass(DisableGVNLoadPRE)); // Remove redundancies if (!HLSLResMayAlias) MPM.add(createDxilSimpleGVNHoistPass()); } // HLSL Change Ends } // HLSL Change Begins. { // Run reassociate pass again after GVN since GVN will expose more // opportunities for reassociation. 
if (HLSLEnableAggressiveReassociation) { MPM.add(createReassociatePass(true)); // Reassociate expressions if (EnableGVN) MPM.add(createGVNPass(DisableGVNLoadPRE)); // Remove redundancies } } // Use value numbering to figure out if regions are equivalent, and branch to only one. MPM.add(createDxilSimpleGVNEliminateRegionPass()); // HLSL don't allow memcpy and memset. //MPM.add(createMemCpyOptPass()); // Remove memcpy / form memset // HLSL Change Ends. MPM.add(createSCCPPass()); // Constant prop with SCCP // Delete dead bit computations (instcombine runs after to fold away the dead // computations, and then ADCE will run later to exploit any new DCE // opportunities that creates). MPM.add(createBitTrackingDCEPass()); // Delete dead bit computations // Run instcombine after redundancy elimination to exploit opportunities // opened up by them. MPM.add(createInstructionCombiningPass(HLSLNoSink)); addExtensionsToPM(EP_Peephole, MPM); // HLSL Change. MPM.add(createJumpThreadingPass()); // Thread jumps MPM.add(createCorrelatedValuePropagationPass()); MPM.add(createDeadStoreEliminationPass(ScanLimit)); // Delete dead stores // HLSL Change - disable LICM in frontend for not consider register pressure. // MPM.add(createLICMPass()); addExtensionsToPM(EP_ScalarOptimizerLate, MPM); if (RerollLoops) MPM.add(createLoopRerollPass()); #if HLSL_VECTORIZATION_ENABLED // HLSL Change - don't build vectorization passes if (!RunSLPAfterLoopVectorization) { if (SLPVectorize) MPM.add(createSLPVectorizerPass()); // Vectorize parallel scalar chains. if (BBVectorize) { MPM.add(createBBVectorizePass()); MPM.add(createInstructionCombiningPass()); addExtensionsToPM(EP_Peephole, MPM); if (OptLevel > 1 && UseGVNAfterVectorization) MPM.add(createGVNPass(DisableGVNLoadPRE)); // Remove redundancies else MPM.add(createEarlyCSEPass()); // Catch trivial redundancies // BBVectorize may have significantly shortened a loop body; unroll again. if (!DisableUnrollLoops) MPM.add(createLoopUnrollPass()); } } #endif if (LoadCombine) MPM.add(createLoadCombinePass()); MPM.add(createHoistConstantArrayPass()); // HLSL change MPM.add(createAggressiveDCEPass()); // Delete dead instructions MPM.add(createCFGSimplificationPass()); // Merge & remove BBs MPM.add(createInstructionCombiningPass(HLSLNoSink)); // Clean up after everything. addExtensionsToPM(EP_Peephole, MPM); // FIXME: This is a HACK! The inliner pass above implicitly creates a CGSCC // pass manager that we are specifically trying to avoid. To prevent this // we must insert a no-op module pass to reset the pass manager. MPM.add(createBarrierNoopPass()); if (RunFloat2Int) MPM.add(createFloat2IntPass()); // Re-rotate loops in all our loop nests. These may have fallout out of // rotated form due to GVN or other transformations, and the vectorizer relies // on the rotated form. Disable header duplication at -Oz. MPM.add(createLoopRotatePass(SizeLevel == 2 ? 0 : -1)); // Distribute loops to allow partial vectorization. I.e. isolate dependences // into separate loop that would otherwise inhibit vectorization. if (EnableLoopDistribute) MPM.add(createLoopDistributePass()); #if HLSL_VECTORIZATION_ENABLED // HLSL Change - don't build vectorization passes MPM.add(createLoopVectorizePass(DisableUnrollLoops, LoopVectorize)); #endif // FIXME: Because of #pragma vectorize enable, the passes below are always // inserted in the pipeline, even when the vectorizer doesn't run (ex. when // on -O1 and no #pragma is found). 
Would be good to have these two passes // as function calls, so that we can only pass them when the vectorizer // changed the code. MPM.add(createInstructionCombiningPass(HLSLNoSink)); #if HLSL_VECTORIZATION_ENABLED // HLSL Change - don't build vectorization passes if (OptLevel > 1 && ExtraVectorizerPasses) { // At higher optimization levels, try to clean up any runtime overlap and // alignment checks inserted by the vectorizer. We want to track correllated // runtime checks for two inner loops in the same outer loop, fold any // common computations, hoist loop-invariant aspects out of any outer loop, // and unswitch the runtime checks if possible. Once hoisted, we may have // dead (or speculatable) control flows or more combining opportunities. MPM.add(createEarlyCSEPass()); MPM.add(createCorrelatedValuePropagationPass()); MPM.add(createInstructionCombiningPass()); MPM.add(createLICMPass()); MPM.add(createLoopUnswitchPass(SizeLevel || OptLevel < 3)); MPM.add(createCFGSimplificationPass()); MPM.add(createInstructionCombiningPass()); } if (RunSLPAfterLoopVectorization) { if (SLPVectorize) { MPM.add(createSLPVectorizerPass()); // Vectorize parallel scalar chains. if (OptLevel > 1 && ExtraVectorizerPasses) { MPM.add(createEarlyCSEPass()); } } if (BBVectorize) { MPM.add(createBBVectorizePass()); MPM.add(createInstructionCombiningPass()); addExtensionsToPM(EP_Peephole, MPM); if (OptLevel > 1 && UseGVNAfterVectorization) MPM.add(createGVNPass(DisableGVNLoadPRE)); // Remove redundancies else MPM.add(createEarlyCSEPass()); // Catch trivial redundancies // BBVectorize may have significantly shortened a loop body; unroll again. if (!DisableUnrollLoops) MPM.add(createLoopUnrollPass()); } } #endif // HLSL Change - don't build vectorization passes addExtensionsToPM(EP_Peephole, MPM); MPM.add(createCFGSimplificationPass()); MPM.add(createDxilLoopDeletionPass(HLSLNoSink)); // HLSL Change - try to delete loop again. //MPM.add(createInstructionCombiningPass()); // HLSL Change - pass is included in above if (!DisableUnrollLoops) { MPM.add(createLoopUnrollPass(/* HLSL Change begin */-1, -1, -1, -1, this->StructurizeLoopExitsForUnroll /* HLSL Change end */)); // Unroll small loops // LoopUnroll may generate some redundency to cleanup. MPM.add(createInstructionCombiningPass(HLSLNoSink)); // Runtime unrolling will introduce runtime check in loop prologue. If the // unrolled loop is a inner loop, then the prologue will be inside the // outer loop. LICM pass can help to promote the runtime check out if the // checked value is loop invariant. // MPM.add(createLICMPass());// HLSL Change - disable LICM in frontend for // not consider register pressure. } // After vectorization and unrolling, assume intrinsics may tell us more // about pointer alignments. MPM.add(createAlignmentFromAssumptionsPass()); if (!DisableUnitAtATime) { // FIXME: We shouldn't bother with this anymore. MPM.add(createStripDeadPrototypesPass()); // Get rid of dead prototypes // GlobalOpt already deletes dead functions and globals, at -O2 try a // late pass of GlobalDCE. It is capable of deleting dead cycles. if (OptLevel > 1) { if (!PrepareForLTO) { // Remove avail extern fns and globals definitions if we aren't // compiling an object file for later LTO. For LTO we want to preserve // these so they are eligible for inlining at link-time. Note if they // are unreferenced they will be removed by GlobalDCE below, so // this only impacts referenced available externally globals. 
// Eventually they will be suppressed during codegen, but eliminating // here enables more opportunity for GlobalDCE as it may make // globals referenced by available external functions dead. MPM.add(createEliminateAvailableExternallyPass()); } MPM.add(createGlobalDCEPass()); // Remove dead fns and globals. MPM.add(createConstantMergePass()); // Merge dup global constants } } if (MergeFunctions) MPM.add(createMergeFunctionsPass()); // HLSL Change Begins. if (!HLSLHighLevel) { MPM.add(createDxilEraseDeadRegionPass()); MPM.add(createDxilConvergentClearPass()); MPM.add(createDeadCodeEliminationPass()); // DCE needed after clearing convergence // annotations before CreateHandleForLib // so no unused resources get re-added to // DxilModule. MPM.add(createMultiDimArrayToOneDimArrayPass()); MPM.add(createDxilRemoveDeadBlocksPass()); MPM.add(createDeadCodeEliminationPass()); MPM.add(createGlobalDCEPass()); MPM.add(createDxilMutateResourceToHandlePass()); MPM.add(createDxilCleanupDynamicResourceHandlePass()); MPM.add(createDxilLowerCreateHandleForLibPass()); MPM.add(createDxilTranslateRawBuffer()); // Always try to legalize sample offsets as loop unrolling // is not guaranteed for higher opt levels. MPM.add(createDxilLegalizeSampleOffsetPass()); MPM.add(createDxilFinalizeModulePass()); MPM.add(createComputeViewIdStatePass()); MPM.add(createDxilDeadFunctionEliminationPass()); MPM.add(createDxilDeleteRedundantDebugValuesPass()); MPM.add(createNoPausePassesPass()); MPM.add(createDxilValidateWaveSensitivityPass()); MPM.add(createDxilEmitMetadataPass()); } // HLSL Change Ends. addExtensionsToPM(EP_OptimizerLast, MPM); } #if 0 // HLSL Change: No LTO void PassManagerBuilder::addLTOOptimizationPasses(legacy::PassManagerBase &PM) { // Provide AliasAnalysis services for optimizations. addInitialAliasAnalysisPasses(PM); // Propagate constants at call sites into the functions they call. This // opens opportunities for globalopt (and inlining) by substituting function // pointers passed as arguments to direct uses of functions. PM.add(createIPSCCPPass()); // Now that we internalized some globals, see if we can hack on them! PM.add(createGlobalOptimizerPass()); // Linking modules together can lead to duplicated global constants, only // keep one copy of each constant. PM.add(createConstantMergePass()); // Remove unused arguments from functions. PM.add(createDeadArgEliminationPass()); // Reduce the code after globalopt and ipsccp. Both can open up significant // simplification opportunities, and both can propagate functions through // function pointers. When this happens, we often have to resolve varargs // calls, etc, so let instcombine do this. PM.add(createInstructionCombiningPass()); addExtensionsToPM(EP_Peephole, PM); // Inline small functions bool RunInliner = Inliner; if (RunInliner) { PM.add(Inliner); Inliner = nullptr; } PM.add(createPruneEHPass()); // Remove dead EH info. // Optimize globals again if we ran the inliner. if (RunInliner) PM.add(createGlobalOptimizerPass()); PM.add(createGlobalDCEPass()); // Remove dead functions. // If we didn't decide to inline a function, check to see if we can // transform it to pass arguments by value instead of by reference. PM.add(createArgumentPromotionPass()); // The IPO passes may leave cruft around. Clean up after them. PM.add(createInstructionCombiningPass()); addExtensionsToPM(EP_Peephole, PM); // HLSL Change. 
PM.add(createJumpThreadingPass()); // Break up allocas if (UseNewSROA) PM.add(createSROAPass()); else PM.add(createScalarReplAggregatesPass()); // Run a few AA driven optimizations here and now, to cleanup the code. PM.add(createFunctionAttrsPass()); // Add nocapture. PM.add(createGlobalsModRefPass()); // IP alias analysis. // HLSL Change - disable LICM in frontend for not consider register pressure. // PM.add(createLICMPass()); // Hoist loop invariants. if (EnableMLSM) PM.add(createMergedLoadStoreMotionPass()); // Merge ld/st in diamonds. if (EnableGVN) // HLSL Change PM.add(createGVNPass(DisableGVNLoadPRE)); // Remove redundancies. PM.add(createMemCpyOptPass()); // Remove dead memcpys. // Nuke dead stores. PM.add(createDeadStoreEliminationPass(ScanLimit)); // HLSL Change - add ScanLimit // More loops are countable; try to optimize them. PM.add(createIndVarSimplifyPass()); PM.add(createLoopDeletionPass()); if (EnableLoopInterchange) PM.add(createLoopInterchangePass()); #if HLSL_VECTORIZATION_ENABLED // HLSL Change - don't build vectorization passes PM.add(createLoopVectorizePass(true, LoopVectorize)); // More scalar chains could be vectorized due to more alias information if (RunSLPAfterLoopVectorization) if (SLPVectorize) PM.add(createSLPVectorizerPass()); // Vectorize parallel scalar chains. // After vectorization, assume intrinsics may tell us more about pointer // alignments. PM.add(createAlignmentFromAssumptionsPass()); #endif if (LoadCombine) PM.add(createLoadCombinePass()); // Cleanup and simplify the code after the scalar optimizations. PM.add(createInstructionCombiningPass()); addExtensionsToPM(EP_Peephole, PM); // HLSL Change. PM.add(createJumpThreadingPass()); } void PassManagerBuilder::addLateLTOOptimizationPasses( legacy::PassManagerBase &PM) { // Delete basic blocks, which optimization passes may have killed. PM.add(createCFGSimplificationPass()); // Now that we have optimized the program, discard unreachable functions. PM.add(createGlobalDCEPass()); // FIXME: this is profitable (for compiler time) to do at -O0 too, but // currently it damages debug info. if (MergeFunctions) PM.add(createMergeFunctionsPass()); } void PassManagerBuilder::populateLTOPassManager(legacy::PassManagerBase &PM) { if (LibraryInfo) PM.add(new TargetLibraryInfoWrapperPass(*LibraryInfo)); if (VerifyInput) PM.add(createVerifierPass()); if (OptLevel > 1) addLTOOptimizationPasses(PM); // Lower bit sets to globals. This pass supports Clang's control flow // integrity mechanisms (-fsanitize=cfi*) and needs to run at link time if CFI // is enabled. The pass does nothing if CFI is disabled. 
PM.add(createLowerBitSetsPass()); if (OptLevel != 0) addLateLTOOptimizationPasses(PM); if (VerifyOutput) PM.add(createVerifierPass()); } #endif inline PassManagerBuilder *unwrap(LLVMPassManagerBuilderRef P) { return reinterpret_cast<PassManagerBuilder*>(P); } inline LLVMPassManagerBuilderRef wrap(PassManagerBuilder *P) { return reinterpret_cast<LLVMPassManagerBuilderRef>(P); } LLVMPassManagerBuilderRef LLVMPassManagerBuilderCreate() { PassManagerBuilder *PMB = new PassManagerBuilder(); return wrap(PMB); } void LLVMPassManagerBuilderDispose(LLVMPassManagerBuilderRef PMB) { PassManagerBuilder *Builder = unwrap(PMB); delete Builder; } void LLVMPassManagerBuilderSetOptLevel(LLVMPassManagerBuilderRef PMB, unsigned OptLevel) { PassManagerBuilder *Builder = unwrap(PMB); Builder->OptLevel = OptLevel; } void LLVMPassManagerBuilderSetSizeLevel(LLVMPassManagerBuilderRef PMB, unsigned SizeLevel) { PassManagerBuilder *Builder = unwrap(PMB); Builder->SizeLevel = SizeLevel; } void LLVMPassManagerBuilderSetDisableUnitAtATime(LLVMPassManagerBuilderRef PMB, LLVMBool Value) { PassManagerBuilder *Builder = unwrap(PMB); Builder->DisableUnitAtATime = Value; } void LLVMPassManagerBuilderSetDisableUnrollLoops(LLVMPassManagerBuilderRef PMB, LLVMBool Value) { PassManagerBuilder *Builder = unwrap(PMB); Builder->DisableUnrollLoops = Value; } void LLVMPassManagerBuilderSetDisableSimplifyLibCalls(LLVMPassManagerBuilderRef PMB, LLVMBool Value) { // NOTE: The simplify-libcalls pass has been removed. } void LLVMPassManagerBuilderUseInlinerWithThreshold(LLVMPassManagerBuilderRef PMB, unsigned Threshold) { PassManagerBuilder *Builder = unwrap(PMB); Builder->Inliner = createFunctionInliningPass(Threshold); } void LLVMPassManagerBuilderPopulateFunctionPassManager(LLVMPassManagerBuilderRef PMB, LLVMPassManagerRef PM) { PassManagerBuilder *Builder = unwrap(PMB); legacy::FunctionPassManager *FPM = unwrap<legacy::FunctionPassManager>(PM); Builder->populateFunctionPassManager(*FPM); } void LLVMPassManagerBuilderPopulateModulePassManager(LLVMPassManagerBuilderRef PMB, LLVMPassManagerRef PM) { PassManagerBuilder *Builder = unwrap(PMB); legacy::PassManagerBase *MPM = unwrap(PM); Builder->populateModulePassManager(*MPM); } #if 0 // HLSL Change: No LTO void LLVMPassManagerBuilderPopulateLTOPassManager(LLVMPassManagerBuilderRef PMB, LLVMPassManagerRef PM, LLVMBool Internalize, LLVMBool RunInliner) { PassManagerBuilder *Builder = unwrap(PMB); legacy::PassManagerBase *LPM = unwrap(PM); // A small backwards compatibility hack. populateLTOPassManager used to take // an RunInliner option. if (RunInliner && !Builder->Inliner) Builder->Inliner = createFunctionInliningPass(); Builder->populateLTOPassManager(*LPM); } #endif
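// Illustrative only (not part of the original file): a minimal sketch of how a
// client might drive the C bindings defined above. The module `M` is assumed to
// exist already, and the inline threshold of 225 is just the conventional -O2
// value, not something this file mandates.
#include "llvm-c/Core.h"
#include "llvm-c/Transforms/PassManagerBuilder.h"

static void optimizeModuleAtO2(LLVMModuleRef M) {
  LLVMPassManagerBuilderRef PMB = LLVMPassManagerBuilderCreate();
  LLVMPassManagerBuilderSetOptLevel(PMB, 2);                // roughly -O2
  LLVMPassManagerBuilderUseInlinerWithThreshold(PMB, 225);  // enable the inliner
  LLVMPassManagerRef MPM = LLVMCreatePassManager();
  LLVMPassManagerBuilderPopulateModulePassManager(PMB, MPM);
  LLVMRunPassManager(MPM, M);                               // run the populated pipeline
  LLVMDisposePassManager(MPM);
  LLVMPassManagerBuilderDispose(PMB);
}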
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/IPO/InlineAlways.cpp
//===- InlineAlways.cpp - Code to inline always_inline functions ----------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements a custom inliner that handles only functions that // are marked as "always inline". // //===----------------------------------------------------------------------===// #include "llvm/Transforms/IPO.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/CallGraph.h" #include "llvm/Analysis/InlineCost.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/CallingConv.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/IR/Type.h" #include "llvm/Transforms/IPO/InlinerPass.h" using namespace llvm; #define DEBUG_TYPE "inline" namespace { /// \brief Inliner pass which only handles "always inline" functions. class AlwaysInliner : public Inliner { InlineCostAnalysis *ICA; public: // Use extremely low threshold. AlwaysInliner() : Inliner(ID, -2000000000, /*InsertLifetime*/ true), ICA(nullptr) { initializeAlwaysInlinerPass(*PassRegistry::getPassRegistry()); } AlwaysInliner(bool InsertLifetime) : Inliner(ID, -2000000000, InsertLifetime), ICA(nullptr) { initializeAlwaysInlinerPass(*PassRegistry::getPassRegistry()); } static char ID; // Pass identification, replacement for typeid InlineCost getInlineCost(CallSite CS) override; void getAnalysisUsage(AnalysisUsage &AU) const override; bool runOnSCC(CallGraphSCC &SCC) override; using llvm::Pass::doFinalization; bool doFinalization(CallGraph &CG) override { return removeDeadFunctions(CG, /*AlwaysInlineOnly=*/ true); } }; } char AlwaysInliner::ID = 0; INITIALIZE_PASS_BEGIN(AlwaysInliner, "always-inline", "Inliner for always_inline functions", false, false) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass) INITIALIZE_PASS_DEPENDENCY(InlineCostAnalysis) INITIALIZE_PASS_END(AlwaysInliner, "always-inline", "Inliner for always_inline functions", false, false) Pass *llvm::createAlwaysInlinerPass() { return new AlwaysInliner(); } Pass *llvm::createAlwaysInlinerPass(bool InsertLifetime) { return new AlwaysInliner(InsertLifetime); } /// \brief Get the inline cost for the always-inliner. /// /// The always inliner *only* handles functions which are marked with the /// attribute to force inlining. As such, it is dramatically simpler and avoids /// using the powerful (but expensive) inline cost analysis. Instead it uses /// a very simple and boring direct walk of the instructions looking for /// impossible-to-inline constructs. /// /// Note, it would be possible to go to some lengths to cache the information /// computed here, but as we only expect to do this for relatively few and /// small functions which have the explicit attribute to force inlining, it is /// likely not worth it in practice. InlineCost AlwaysInliner::getInlineCost(CallSite CS) { Function *Callee = CS.getCalledFunction(); // Only inline direct calls to functions with always-inline attributes // that are viable for inlining. FIXME: We shouldn't even get here for // declarations. 
  if (Callee && !Callee->isDeclaration() &&
      CS.hasFnAttr(Attribute::AlwaysInline) && ICA->isInlineViable(*Callee))
    return InlineCost::getAlways();

  return InlineCost::getNever();
}

bool AlwaysInliner::runOnSCC(CallGraphSCC &SCC) {
  ICA = &getAnalysis<InlineCostAnalysis>();
  return Inliner::runOnSCC(SCC);
}

void AlwaysInliner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<InlineCostAnalysis>();
  Inliner::getAnalysisUsage(AU);
}
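// Illustrative only (not part of the original file): a minimal sketch of the two
// pieces a client needs for this inliner to fire: mark the callee with the
// always-inline attribute (the attribute getInlineCost() looks for), then run
// the pass. Assumes a Module built against this LLVM fork; the helper name is
// hypothetical.
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Transforms/IPO.h"

static void forceInlineCallsTo(llvm::Module &M, llvm::Function &Callee) {
  Callee.addFnAttr(llvm::Attribute::AlwaysInline); // makes getInlineCost() return getAlways()
  llvm::legacy::PassManager PM;
  PM.add(llvm::createAlwaysInlinerPass(/*InsertLifetime=*/true));
  PM.run(M);
}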
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/IPO/StripSymbols.cpp
//===- StripSymbols.cpp - Strip symbols and debug info from a module ------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // The StripSymbols transformation implements code stripping. Specifically, it // can delete: // // * names for virtual registers // * symbols for internal globals and functions // * debug information // // Note that this transformation makes code much less readable, so it should // only be used in situations where the 'strip' utility would be used, such as // reducing code size or making it harder to reverse engineer code. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/IPO.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Module.h" #include "llvm/IR/TypeFinder.h" #include "llvm/IR/ValueSymbolTable.h" #include "llvm/Pass.h" #include "llvm/Transforms/Utils/Local.h" using namespace llvm; namespace { class StripSymbols : public ModulePass { bool OnlyDebugInfo; public: static char ID; // Pass identification, replacement for typeid explicit StripSymbols(bool ODI = false) : ModulePass(ID), OnlyDebugInfo(ODI) { initializeStripSymbolsPass(*PassRegistry::getPassRegistry()); } bool runOnModule(Module &M) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesAll(); } }; class StripNonDebugSymbols : public ModulePass { public: static char ID; // Pass identification, replacement for typeid explicit StripNonDebugSymbols() : ModulePass(ID) { initializeStripNonDebugSymbolsPass(*PassRegistry::getPassRegistry()); } bool runOnModule(Module &M) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesAll(); } }; class StripDebugDeclare : public ModulePass { public: static char ID; // Pass identification, replacement for typeid explicit StripDebugDeclare() : ModulePass(ID) { initializeStripDebugDeclarePass(*PassRegistry::getPassRegistry()); } bool runOnModule(Module &M) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesAll(); } }; class StripDeadDebugInfo : public ModulePass { public: static char ID; // Pass identification, replacement for typeid explicit StripDeadDebugInfo() : ModulePass(ID) { initializeStripDeadDebugInfoPass(*PassRegistry::getPassRegistry()); } bool runOnModule(Module &M) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesAll(); } }; } char StripSymbols::ID = 0; INITIALIZE_PASS(StripSymbols, "strip", "Strip all symbols from a module", false, false) ModulePass *llvm::createStripSymbolsPass(bool OnlyDebugInfo) { return new StripSymbols(OnlyDebugInfo); } char StripNonDebugSymbols::ID = 0; INITIALIZE_PASS(StripNonDebugSymbols, "strip-nondebug", "Strip all symbols, except dbg symbols, from a module", false, false) ModulePass *llvm::createStripNonDebugSymbolsPass() { return new StripNonDebugSymbols(); } char StripDebugDeclare::ID = 0; INITIALIZE_PASS(StripDebugDeclare, "strip-debug-declare", "Strip all llvm.dbg.declare intrinsics", false, false) ModulePass *llvm::createStripDebugDeclarePass() { return new StripDebugDeclare(); } char StripDeadDebugInfo::ID = 0; INITIALIZE_PASS(StripDeadDebugInfo, "strip-dead-debug-info", "Strip debug info 
for unused symbols", false, false) ModulePass *llvm::createStripDeadDebugInfoPass() { return new StripDeadDebugInfo(); } /// OnlyUsedBy - Return true if V is only used by Usr. static bool OnlyUsedBy(Value *V, Value *Usr) { for (User *U : V->users()) if (U != Usr) return false; return true; } static void RemoveDeadConstant(Constant *C) { assert(C->use_empty() && "Constant is not dead!"); SmallPtrSet<Constant*, 4> Operands; for (Value *Op : C->operands()) if (OnlyUsedBy(Op, C)) Operands.insert(cast<Constant>(Op)); if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) { if (!GV->hasLocalLinkage()) return; // Don't delete non-static globals. GV->eraseFromParent(); } else if (!isa<Function>(C)) if (isa<CompositeType>(C->getType())) C->destroyConstant(); // If the constant referenced anything, see if we can delete it as well. for (Constant *O : Operands) RemoveDeadConstant(O); } // Strip the symbol table of its names. // static void StripSymtab(ValueSymbolTable &ST, bool PreserveDbgInfo) { for (ValueSymbolTable::iterator VI = ST.begin(), VE = ST.end(); VI != VE; ) { Value *V = VI->getValue(); ++VI; if (!isa<GlobalValue>(V) || cast<GlobalValue>(V)->hasLocalLinkage()) { if (!PreserveDbgInfo || !V->getName().startswith("llvm.dbg")) // Set name to "", removing from symbol table! V->setName(""); } } } // Strip any named types of their names. static void StripTypeNames(Module &M, bool PreserveDbgInfo) { TypeFinder StructTypes; StructTypes.run(M, false); for (unsigned i = 0, e = StructTypes.size(); i != e; ++i) { StructType *STy = StructTypes[i]; if (STy->isLiteral() || STy->getName().empty()) continue; if (PreserveDbgInfo && STy->getName().startswith("llvm.dbg")) continue; STy->setName(""); } } /// Find values that are marked as llvm.used. static void findUsedValues(GlobalVariable *LLVMUsed, SmallPtrSetImpl<const GlobalValue*> &UsedValues) { if (!LLVMUsed) return; UsedValues.insert(LLVMUsed); ConstantArray *Inits = cast<ConstantArray>(LLVMUsed->getInitializer()); for (unsigned i = 0, e = Inits->getNumOperands(); i != e; ++i) if (GlobalValue *GV = dyn_cast<GlobalValue>(Inits->getOperand(i)->stripPointerCasts())) UsedValues.insert(GV); } /// StripSymbolNames - Strip symbol names. static bool StripSymbolNames(Module &M, bool PreserveDbgInfo) { SmallPtrSet<const GlobalValue*, 8> llvmUsedValues; findUsedValues(M.getGlobalVariable("llvm.used"), llvmUsedValues); findUsedValues(M.getGlobalVariable("llvm.compiler.used"), llvmUsedValues); for (Module::global_iterator I = M.global_begin(), E = M.global_end(); I != E; ++I) { if (I->hasLocalLinkage() && llvmUsedValues.count(I) == 0) if (!PreserveDbgInfo || !I->getName().startswith("llvm.dbg")) I->setName(""); // Internal symbols can't participate in linkage } for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) { if (I->hasLocalLinkage() && llvmUsedValues.count(I) == 0) if (!PreserveDbgInfo || !I->getName().startswith("llvm.dbg")) I->setName(""); // Internal symbols can't participate in linkage StripSymtab(I->getValueSymbolTable(), PreserveDbgInfo); } // Remove all names from types. 
StripTypeNames(M, PreserveDbgInfo); return true; } bool StripSymbols::runOnModule(Module &M) { bool Changed = false; Changed |= StripDebugInfo(M); if (!OnlyDebugInfo) Changed |= StripSymbolNames(M, false); return Changed; } bool StripNonDebugSymbols::runOnModule(Module &M) { return StripSymbolNames(M, true); } bool StripDebugDeclare::runOnModule(Module &M) { Function *Declare = M.getFunction("llvm.dbg.declare"); std::vector<Constant*> DeadConstants; if (Declare) { while (!Declare->use_empty()) { CallInst *CI = cast<CallInst>(Declare->user_back()); Value *Arg1 = CI->getArgOperand(0); Value *Arg2 = CI->getArgOperand(1); assert(CI->use_empty() && "llvm.dbg intrinsic should have void result"); CI->eraseFromParent(); if (Arg1->use_empty()) { if (Constant *C = dyn_cast<Constant>(Arg1)) DeadConstants.push_back(C); else RecursivelyDeleteTriviallyDeadInstructions(Arg1); } if (Arg2->use_empty()) if (Constant *C = dyn_cast<Constant>(Arg2)) DeadConstants.push_back(C); } Declare->eraseFromParent(); } while (!DeadConstants.empty()) { Constant *C = DeadConstants.back(); DeadConstants.pop_back(); if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) { if (GV->hasLocalLinkage()) RemoveDeadConstant(GV); } else RemoveDeadConstant(C); } return true; } /// Remove any debug info for global variables/functions in the given module for /// which said global variable/function no longer exists (i.e. is null). /// /// Debugging information is encoded in llvm IR using metadata. This is designed /// such a way that debug info for symbols preserved even if symbols are /// optimized away by the optimizer. This special pass removes debug info for /// such symbols. bool StripDeadDebugInfo::runOnModule(Module &M) { bool Changed = false; LLVMContext &C = M.getContext(); // Find all debug info in F. This is actually overkill in terms of what we // want to do, but we want to try and be as resilient as possible in the face // of potential debug info changes by using the formal interfaces given to us // as much as possible. DebugInfoFinder F; F.processModule(M); // For each compile unit, find the live set of global variables/functions and // replace the current list of potentially dead global variables/functions // with the live list. SmallVector<Metadata *, 64> LiveGlobalVariables; SmallVector<Metadata *, 64> LiveSubprograms; DenseSet<const MDNode *> VisitedSet; for (DICompileUnit *DIC : F.compile_units()) { // Create our live subprogram list. bool SubprogramChange = false; for (DISubprogram *DISP : DIC->getSubprograms()) { // Make sure we visit each subprogram only once. if (!VisitedSet.insert(DISP).second) continue; // If the function referenced by DISP is not null, the function is live. if (DISP->getFunction()) LiveSubprograms.push_back(DISP); else SubprogramChange = true; } // Create our live global variable list. bool GlobalVariableChange = false; for (DIGlobalVariable *DIG : DIC->getGlobalVariables()) { // Make sure we only visit each global variable only once. if (!VisitedSet.insert(DIG).second) continue; // If the global variable referenced by DIG is not null, the global // variable is live. if (DIG->getVariable()) LiveGlobalVariables.push_back(DIG); else GlobalVariableChange = true; } // If we found dead subprograms or global variables, replace the current // subprogram list/global variable list with our new live subprogram/global // variable list. 
    if (SubprogramChange) {
      DIC->replaceSubprograms(MDTuple::get(C, LiveSubprograms));
      Changed = true;
    }

    if (GlobalVariableChange) {
      DIC->replaceGlobalVariables(MDTuple::get(C, LiveGlobalVariables));
      Changed = true;
    }

    // Reset lists for the next iteration.
    LiveSubprograms.clear();
    LiveGlobalVariables.clear();
  }

  return Changed;
}
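// Illustrative only (not part of the original file): a minimal sketch of how the
// strip passes defined above might be scheduled together, assuming a Module
// built against this LLVM fork. The particular pass selection and ordering here
// is an arbitrary example, not a recommendation from this file.
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Transforms/IPO.h"

static void stripModuleForRelease(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  PM.add(llvm::createStripDeadDebugInfoPass());                  // drop debug info for dead symbols
  PM.add(llvm::createStripSymbolsPass(/*OnlyDebugInfo=*/false)); // then strip names as well
  PM.run(M);
}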
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
//===-- ThreadSanitizer.cpp - race detector -------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of ThreadSanitizer, a race detector. // // The tool is under development, for the details about previous versions see // http://code.google.com/p/data-race-test // // The instrumentation phase is quite simple: // - Insert calls to run-time library before every memory access. // - Optimizations may apply to avoid instrumenting some of the accesses. // - Insert calls at function entry/exit. // The rest is handled by the run-time library. //===----------------------------------------------------------------------===// #include "llvm/Transforms/Instrumentation.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/ADT/StringExtras.h" #include "llvm/Analysis/CaptureTracking.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Metadata.h" #include "llvm/IR/Module.h" #include "llvm/IR/Type.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/ModuleUtils.h" using namespace llvm; #define DEBUG_TYPE "tsan" static cl::opt<bool> ClInstrumentMemoryAccesses( "tsan-instrument-memory-accesses", cl::init(true), cl::desc("Instrument memory accesses"), cl::Hidden); static cl::opt<bool> ClInstrumentFuncEntryExit( "tsan-instrument-func-entry-exit", cl::init(true), cl::desc("Instrument function entry and exit"), cl::Hidden); static cl::opt<bool> ClInstrumentAtomics( "tsan-instrument-atomics", cl::init(true), cl::desc("Instrument atomics"), cl::Hidden); static cl::opt<bool> ClInstrumentMemIntrinsics( "tsan-instrument-memintrinsics", cl::init(true), cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden); STATISTIC(NumInstrumentedReads, "Number of instrumented reads"); STATISTIC(NumInstrumentedWrites, "Number of instrumented writes"); STATISTIC(NumOmittedReadsBeforeWrite, "Number of reads ignored due to following writes"); STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size"); STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes"); STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads"); STATISTIC(NumOmittedReadsFromConstantGlobals, "Number of reads from constant globals"); STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads"); STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing"); static const char *const kTsanModuleCtorName = "tsan.module_ctor"; static const char *const kTsanInitName = "__tsan_init"; namespace { /// ThreadSanitizer: instrument the code in module to find races. struct ThreadSanitizer : public FunctionPass { ThreadSanitizer() : FunctionPass(ID) {} StringRef getPassName() const override; bool runOnFunction(Function &F) override; bool doInitialization(Module &M) override; static char ID; // Pass identification, replacement for typeid. 
private: void initializeCallbacks(Module &M); bool instrumentLoadOrStore(Instruction *I, const DataLayout &DL); bool instrumentAtomic(Instruction *I, const DataLayout &DL); bool instrumentMemIntrinsic(Instruction *I); void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local, SmallVectorImpl<Instruction *> &All, const DataLayout &DL); bool addrPointsToConstantData(Value *Addr); int getMemoryAccessFuncIndex(Value *Addr, const DataLayout &DL); Type *IntptrTy; IntegerType *OrdTy; // Callbacks to run-time library are computed in doInitialization. Function *TsanFuncEntry; Function *TsanFuncExit; // Accesses sizes are powers of two: 1, 2, 4, 8, 16. static const size_t kNumberOfAccessSizes = 5; Function *TsanRead[kNumberOfAccessSizes]; Function *TsanWrite[kNumberOfAccessSizes]; Function *TsanUnalignedRead[kNumberOfAccessSizes]; Function *TsanUnalignedWrite[kNumberOfAccessSizes]; Function *TsanAtomicLoad[kNumberOfAccessSizes]; Function *TsanAtomicStore[kNumberOfAccessSizes]; Function *TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1][kNumberOfAccessSizes]; Function *TsanAtomicCAS[kNumberOfAccessSizes]; Function *TsanAtomicThreadFence; Function *TsanAtomicSignalFence; Function *TsanVptrUpdate; Function *TsanVptrLoad; Function *MemmoveFn, *MemcpyFn, *MemsetFn; Function *TsanCtorFunction; }; } // namespace char ThreadSanitizer::ID = 0; INITIALIZE_PASS(ThreadSanitizer, "tsan", "ThreadSanitizer: detects data races.", false, false) const char *ThreadSanitizer::getPassName() const { return "ThreadSanitizer"; } FunctionPass *llvm::createThreadSanitizerPass() { return new ThreadSanitizer(); } void ThreadSanitizer::initializeCallbacks(Module &M) { IRBuilder<> IRB(M.getContext()); // Initialize the callbacks. TsanFuncEntry = checkSanitizerInterfaceFunction(M.getOrInsertFunction( "__tsan_func_entry", IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr)); TsanFuncExit = checkSanitizerInterfaceFunction( M.getOrInsertFunction("__tsan_func_exit", IRB.getVoidTy(), nullptr)); OrdTy = IRB.getInt32Ty(); for (size_t i = 0; i < kNumberOfAccessSizes; ++i) { const size_t ByteSize = 1 << i; const size_t BitSize = ByteSize * 8; SmallString<32> ReadName("__tsan_read" + itostr(ByteSize)); TsanRead[i] = checkSanitizerInterfaceFunction(M.getOrInsertFunction( ReadName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr)); SmallString<32> WriteName("__tsan_write" + itostr(ByteSize)); TsanWrite[i] = checkSanitizerInterfaceFunction(M.getOrInsertFunction( WriteName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr)); SmallString<64> UnalignedReadName("__tsan_unaligned_read" + itostr(ByteSize)); TsanUnalignedRead[i] = checkSanitizerInterfaceFunction(M.getOrInsertFunction( UnalignedReadName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr)); SmallString<64> UnalignedWriteName("__tsan_unaligned_write" + itostr(ByteSize)); TsanUnalignedWrite[i] = checkSanitizerInterfaceFunction(M.getOrInsertFunction( UnalignedWriteName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr)); Type *Ty = Type::getIntNTy(M.getContext(), BitSize); Type *PtrTy = Ty->getPointerTo(); SmallString<32> AtomicLoadName("__tsan_atomic" + itostr(BitSize) + "_load"); TsanAtomicLoad[i] = checkSanitizerInterfaceFunction( M.getOrInsertFunction(AtomicLoadName, Ty, PtrTy, OrdTy, nullptr)); SmallString<32> AtomicStoreName("__tsan_atomic" + itostr(BitSize) + "_store"); TsanAtomicStore[i] = checkSanitizerInterfaceFunction(M.getOrInsertFunction( AtomicStoreName, IRB.getVoidTy(), PtrTy, Ty, OrdTy, nullptr)); for (int op = AtomicRMWInst::FIRST_BINOP; op <= AtomicRMWInst::LAST_BINOP; ++op) { 
TsanAtomicRMW[op][i] = nullptr; const char *NamePart = nullptr; if (op == AtomicRMWInst::Xchg) NamePart = "_exchange"; else if (op == AtomicRMWInst::Add) NamePart = "_fetch_add"; else if (op == AtomicRMWInst::Sub) NamePart = "_fetch_sub"; else if (op == AtomicRMWInst::And) NamePart = "_fetch_and"; else if (op == AtomicRMWInst::Or) NamePart = "_fetch_or"; else if (op == AtomicRMWInst::Xor) NamePart = "_fetch_xor"; else if (op == AtomicRMWInst::Nand) NamePart = "_fetch_nand"; else continue; SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart); TsanAtomicRMW[op][i] = checkSanitizerInterfaceFunction( M.getOrInsertFunction(RMWName, Ty, PtrTy, Ty, OrdTy, nullptr)); } SmallString<32> AtomicCASName("__tsan_atomic" + itostr(BitSize) + "_compare_exchange_val"); TsanAtomicCAS[i] = checkSanitizerInterfaceFunction(M.getOrInsertFunction( AtomicCASName, Ty, PtrTy, Ty, Ty, OrdTy, OrdTy, nullptr)); } TsanVptrUpdate = checkSanitizerInterfaceFunction( M.getOrInsertFunction("__tsan_vptr_update", IRB.getVoidTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), nullptr)); TsanVptrLoad = checkSanitizerInterfaceFunction(M.getOrInsertFunction( "__tsan_vptr_read", IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr)); TsanAtomicThreadFence = checkSanitizerInterfaceFunction(M.getOrInsertFunction( "__tsan_atomic_thread_fence", IRB.getVoidTy(), OrdTy, nullptr)); TsanAtomicSignalFence = checkSanitizerInterfaceFunction(M.getOrInsertFunction( "__tsan_atomic_signal_fence", IRB.getVoidTy(), OrdTy, nullptr)); MemmoveFn = checkSanitizerInterfaceFunction( M.getOrInsertFunction("memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr)); MemcpyFn = checkSanitizerInterfaceFunction( M.getOrInsertFunction("memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr)); MemsetFn = checkSanitizerInterfaceFunction( M.getOrInsertFunction("memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy, nullptr)); } bool ThreadSanitizer::doInitialization(Module &M) { const DataLayout &DL = M.getDataLayout(); IntptrTy = DL.getIntPtrType(M.getContext()); std::tie(TsanCtorFunction, std::ignore) = createSanitizerCtorAndInitFunctions( M, kTsanModuleCtorName, kTsanInitName, /*InitArgTypes=*/{}, /*InitArgs=*/{}); appendToGlobalCtors(M, TsanCtorFunction, 0); return true; } static bool isVtableAccess(Instruction *I) { if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa)) return Tag->isTBAAVtableAccess(); return false; } bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) { // If this is a GEP, just analyze its pointer operand. if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr)) Addr = GEP->getPointerOperand(); if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) { if (GV->isConstant()) { // Reads from constant globals can not race with any writes. NumOmittedReadsFromConstantGlobals++; return true; } } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) { if (isVtableAccess(L)) { // Reads from a vtable pointer can not race with any writes. NumOmittedReadsFromVtable++; return true; } } return false; } // Instrumenting some of the accesses may be proven redundant. // Currently handled: // - read-before-write (within same BB, no calls between) // - not captured variables // // We do not handle some of the patterns that should not survive // after the classic compiler optimizations. // E.g. two reads from the same temp should be eliminated by CSE, // two writes should be eliminated by DSE, etc. 
// // 'Local' is a vector of insns within the same BB (no calls between). // 'All' is a vector of insns that will be instrumented. void ThreadSanitizer::chooseInstructionsToInstrument( SmallVectorImpl<Instruction *> &Local, SmallVectorImpl<Instruction *> &All, const DataLayout &DL) { SmallSet<Value*, 8> WriteTargets; // Iterate from the end. for (SmallVectorImpl<Instruction*>::reverse_iterator It = Local.rbegin(), E = Local.rend(); It != E; ++It) { Instruction *I = *It; if (StoreInst *Store = dyn_cast<StoreInst>(I)) { WriteTargets.insert(Store->getPointerOperand()); } else { LoadInst *Load = cast<LoadInst>(I); Value *Addr = Load->getPointerOperand(); if (WriteTargets.count(Addr)) { // We will write to this temp, so no reason to analyze the read. NumOmittedReadsBeforeWrite++; continue; } if (addrPointsToConstantData(Addr)) { // Addr points to some constant data -- it can not race with any writes. continue; } } Value *Addr = isa<StoreInst>(*I) ? cast<StoreInst>(I)->getPointerOperand() : cast<LoadInst>(I)->getPointerOperand(); if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) && !PointerMayBeCaptured(Addr, true, true)) { // The variable is addressable but not captured, so it cannot be // referenced from a different thread and participate in a data race // (see llvm/Analysis/CaptureTracking.h for details). NumOmittedNonCaptured++; continue; } All.push_back(I); } Local.clear(); } static bool isAtomic(Instruction *I) { if (LoadInst *LI = dyn_cast<LoadInst>(I)) return LI->isAtomic() && LI->getSynchScope() == CrossThread; if (StoreInst *SI = dyn_cast<StoreInst>(I)) return SI->isAtomic() && SI->getSynchScope() == CrossThread; if (isa<AtomicRMWInst>(I)) return true; if (isa<AtomicCmpXchgInst>(I)) return true; if (isa<FenceInst>(I)) return true; return false; } bool ThreadSanitizer::runOnFunction(Function &F) { // This is required to prevent instrumenting call to __tsan_init from within // the module constructor. if (&F == TsanCtorFunction) return false; initializeCallbacks(*F.getParent()); SmallVector<Instruction*, 8> RetVec; SmallVector<Instruction*, 8> AllLoadsAndStores; SmallVector<Instruction*, 8> LocalLoadsAndStores; SmallVector<Instruction*, 8> AtomicAccesses; SmallVector<Instruction*, 8> MemIntrinCalls; bool Res = false; bool HasCalls = false; bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeThread); const DataLayout &DL = F.getParent()->getDataLayout(); // Traverse all instructions, collect loads/stores/returns, check for calls. for (auto &BB : F) { for (auto &Inst : BB) { if (isAtomic(&Inst)) AtomicAccesses.push_back(&Inst); else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst)) LocalLoadsAndStores.push_back(&Inst); else if (isa<ReturnInst>(Inst)) RetVec.push_back(&Inst); else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) { if (isa<MemIntrinsic>(Inst)) MemIntrinCalls.push_back(&Inst); HasCalls = true; chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL); } } chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL); } // We have collected all loads and stores. // FIXME: many of these accesses do not need to be checked for races // (e.g. variables that do not escape, etc). // Instrument memory accesses only if we want to report bugs in the function. if (ClInstrumentMemoryAccesses && SanitizeFunction) for (auto Inst : AllLoadsAndStores) { Res |= instrumentLoadOrStore(Inst, DL); } // Instrument atomic memory accesses in any case (they can be used to // implement synchronization). 
if (ClInstrumentAtomics) for (auto Inst : AtomicAccesses) { Res |= instrumentAtomic(Inst, DL); } if (ClInstrumentMemIntrinsics && SanitizeFunction) for (auto Inst : MemIntrinCalls) { Res |= instrumentMemIntrinsic(Inst); } // Instrument function entry/exit points if there were instrumented accesses. if ((Res || HasCalls) && ClInstrumentFuncEntryExit) { IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI()); Value *ReturnAddress = IRB.CreateCall( Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress), IRB.getInt32(0)); IRB.CreateCall(TsanFuncEntry, ReturnAddress); for (auto RetInst : RetVec) { IRBuilder<> IRBRet(RetInst); IRBRet.CreateCall(TsanFuncExit, {}); } Res = true; } return Res; } bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I, const DataLayout &DL) { IRBuilder<> IRB(I); bool IsWrite = isa<StoreInst>(*I); Value *Addr = IsWrite ? cast<StoreInst>(I)->getPointerOperand() : cast<LoadInst>(I)->getPointerOperand(); int Idx = getMemoryAccessFuncIndex(Addr, DL); if (Idx < 0) return false; if (IsWrite && isVtableAccess(I)) { DEBUG(dbgs() << " VPTR : " << *I << "\n"); Value *StoredValue = cast<StoreInst>(I)->getValueOperand(); // StoredValue may be a vector type if we are storing several vptrs at once. // In this case, just take the first element of the vector since this is // enough to find vptr races. if (isa<VectorType>(StoredValue->getType())) StoredValue = IRB.CreateExtractElement( StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0)); if (StoredValue->getType()->isIntegerTy()) StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy()); // Call TsanVptrUpdate. IRB.CreateCall(TsanVptrUpdate, {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()), IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy())}); NumInstrumentedVtableWrites++; return true; } if (!IsWrite && isVtableAccess(I)) { IRB.CreateCall(TsanVptrLoad, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy())); NumInstrumentedVtableReads++; return true; } const unsigned Alignment = IsWrite ? cast<StoreInst>(I)->getAlignment() : cast<LoadInst>(I)->getAlignment(); Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType(); const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy); Value *OnAccessFunc = nullptr; if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0) OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx]; else OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx]; IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy())); if (IsWrite) NumInstrumentedWrites++; else NumInstrumentedReads++; return true; } static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) { uint32_t v = 0; switch (ord) { case NotAtomic: llvm_unreachable("unexpected atomic ordering!"); case Unordered: // Fall-through. case Monotonic: v = 0; break; // case Consume: v = 1; break; // Not specified yet. case Acquire: v = 2; break; case Release: v = 3; break; case AcquireRelease: v = 4; break; case SequentiallyConsistent: v = 5; break; } return IRB->getInt32(v); } // If a memset intrinsic gets inlined by the code gen, we will miss races on it. // So, we either need to ensure the intrinsic is not inlined, or instrument it. // We do not instrument memset/memmove/memcpy intrinsics (too complicated), // instead we simply replace them with regular function calls, which are then // intercepted by the run-time. // Since tsan is running after everyone else, the calls should not be // replaced back with intrinsics. 
If that becomes wrong at some point, // we will need to call e.g. __tsan_memset to avoid the intrinsics. bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) { IRBuilder<> IRB(I); if (MemSetInst *M = dyn_cast<MemSetInst>(I)) { IRB.CreateCall( MemsetFn, {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()), IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false), IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)}); I->eraseFromParent(); } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) { IRB.CreateCall( isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn, {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()), IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()), IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)}); I->eraseFromParent(); } return false; } // Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x // standards. For background see C++11 standard. A slightly older, publicly // available draft of the standard (not entirely up-to-date, but close enough // for casual browsing) is available here: // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf // The following page contains more background information: // http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/ bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) { IRBuilder<> IRB(I); if (LoadInst *LI = dyn_cast<LoadInst>(I)) { Value *Addr = LI->getPointerOperand(); int Idx = getMemoryAccessFuncIndex(Addr, DL); if (Idx < 0) return false; const size_t ByteSize = 1 << Idx; const size_t BitSize = ByteSize * 8; Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize); Type *PtrTy = Ty->getPointerTo(); Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy), createOrdering(&IRB, LI->getOrdering())}; CallInst *C = CallInst::Create(TsanAtomicLoad[Idx], Args); ReplaceInstWithInst(I, C); } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) { Value *Addr = SI->getPointerOperand(); int Idx = getMemoryAccessFuncIndex(Addr, DL); if (Idx < 0) return false; const size_t ByteSize = 1 << Idx; const size_t BitSize = ByteSize * 8; Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize); Type *PtrTy = Ty->getPointerTo(); Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy), IRB.CreateIntCast(SI->getValueOperand(), Ty, false), createOrdering(&IRB, SI->getOrdering())}; CallInst *C = CallInst::Create(TsanAtomicStore[Idx], Args); ReplaceInstWithInst(I, C); } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) { Value *Addr = RMWI->getPointerOperand(); int Idx = getMemoryAccessFuncIndex(Addr, DL); if (Idx < 0) return false; Function *F = TsanAtomicRMW[RMWI->getOperation()][Idx]; if (!F) return false; const size_t ByteSize = 1 << Idx; const size_t BitSize = ByteSize * 8; Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize); Type *PtrTy = Ty->getPointerTo(); Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy), IRB.CreateIntCast(RMWI->getValOperand(), Ty, false), createOrdering(&IRB, RMWI->getOrdering())}; CallInst *C = CallInst::Create(F, Args); ReplaceInstWithInst(I, C); } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) { Value *Addr = CASI->getPointerOperand(); int Idx = getMemoryAccessFuncIndex(Addr, DL); if (Idx < 0) return false; const size_t ByteSize = 1 << Idx; const size_t BitSize = ByteSize * 8; Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize); Type *PtrTy = Ty->getPointerTo(); Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy), IRB.CreateIntCast(CASI->getCompareOperand(), Ty, false), 
IRB.CreateIntCast(CASI->getNewValOperand(), Ty, false), createOrdering(&IRB, CASI->getSuccessOrdering()), createOrdering(&IRB, CASI->getFailureOrdering())}; CallInst *C = IRB.CreateCall(TsanAtomicCAS[Idx], Args); Value *Success = IRB.CreateICmpEQ(C, CASI->getCompareOperand()); Value *Res = IRB.CreateInsertValue(UndefValue::get(CASI->getType()), C, 0); Res = IRB.CreateInsertValue(Res, Success, 1); I->replaceAllUsesWith(Res); I->eraseFromParent(); } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) { Value *Args[] = {createOrdering(&IRB, FI->getOrdering())}; Function *F = FI->getSynchScope() == SingleThread ? TsanAtomicSignalFence : TsanAtomicThreadFence; CallInst *C = CallInst::Create(F, Args); ReplaceInstWithInst(I, C); } return true; } int ThreadSanitizer::getMemoryAccessFuncIndex(Value *Addr, const DataLayout &DL) { Type *OrigPtrTy = Addr->getType(); Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType(); assert(OrigTy->isSized()); uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy); if (TypeSize != 8 && TypeSize != 16 && TypeSize != 32 && TypeSize != 64 && TypeSize != 128) { NumAccessesWithBadSize++; // Ignore all unusual sizes. return -1; } size_t Idx = countTrailingZeros(TypeSize / 8); assert(Idx < kNumberOfAccessSizes); return Idx; }
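// Illustrative only (not part of the original file): a standalone sketch of the
// size-to-callback-index mapping that getMemoryAccessFuncIndex() implements. An
// access of 1, 2, 4, 8 or 16 bytes selects __tsan_read<N> / __tsan_write<N> via
// log2(ByteSize); any other size is skipped (index -1). The function name is
// hypothetical.
#include <cstdint>

static int accessSizeIndex(uint32_t TypeSizeInBits) {
  if (TypeSizeInBits != 8 && TypeSizeInBits != 16 && TypeSizeInBits != 32 &&
      TypeSizeInBits != 64 && TypeSizeInBits != 128)
    return -1;                        // unusual sizes are not instrumented
  int Idx = 0;
  for (uint32_t Bytes = TypeSizeInBits / 8; Bytes > 1; Bytes >>= 1)
    ++Idx;                            // Idx = log2(ByteSize): 1->0, 2->1, ..., 16->4
  return Idx;
}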
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Instrumentation/Instrumentation.cpp
//===-- Instrumentation.cpp - TransformUtils Infrastructure ---------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the common initialization infrastructure for the // Instrumentation library. // //===----------------------------------------------------------------------===// #include "llvm/InitializePasses.h" #include "llvm-c/Initialization.h" #include "llvm/PassRegistry.h" using namespace llvm; /// initializeInstrumentation - Initialize all passes in the TransformUtils /// library. void llvm::initializeInstrumentation(PassRegistry &Registry) { initializeAddressSanitizerPass(Registry); initializeAddressSanitizerModulePass(Registry); initializeBoundsCheckingPass(Registry); initializeGCOVProfilerPass(Registry); initializeInstrProfilingPass(Registry); initializeMemorySanitizerPass(Registry); initializeThreadSanitizerPass(Registry); initializeSanitizerCoverageModulePass(Registry); initializeDataFlowSanitizerPass(Registry); initializeSafeStackPass(Registry); } /// LLVMInitializeInstrumentation - C binding for /// initializeInstrumentation. void LLVMInitializeInstrumentation(LLVMPassRegistryRef R) { initializeInstrumentation(*unwrap(R)); }
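// Illustrative only (not part of the original file): a minimal sketch of how a
// standalone tool might register these passes once, before constructing any of
// the instrumentation passes by hand. The wrapper name is hypothetical.
#include "llvm/InitializePasses.h"
#include "llvm/PassRegistry.h"

static void registerInstrumentationPasses() {
  llvm::initializeInstrumentation(*llvm::PassRegistry::getPassRegistry());
}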
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Instrumentation/AddressSanitizer.cpp
//===-- AddressSanitizer.cpp - memory error detector ------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a part of AddressSanitizer, an address sanity checker. // Details of the algorithm: // http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Instrumentation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/DepthFirstIterator.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/Triple.h" #include "llvm/Analysis/MemoryBuiltins.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/DIBuilder.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/InlineAsm.h" #include "llvm/IR/InstVisitor.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/MDBuilder.h" #include "llvm/IR/Module.h" #include "llvm/IR/Type.h" #include "llvm/MC/MCSectionMachO.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/DataTypes.h" #include "llvm/Support/Debug.h" #include "llvm/Support/Endian.h" #include "llvm/Support/SwapByteOrder.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/ASanStackFrameLayout.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Cloning.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/ModuleUtils.h" #include "llvm/Transforms/Utils/PromoteMemToReg.h" #include <algorithm> #include <string> #include <system_error> using namespace llvm; #define DEBUG_TYPE "asan" static const uint64_t kDefaultShadowScale = 3; static const uint64_t kDefaultShadowOffset32 = 1ULL << 29; static const uint64_t kIOSShadowOffset32 = 1ULL << 30; static const uint64_t kDefaultShadowOffset64 = 1ULL << 44; static const uint64_t kSmallX86_64ShadowOffset = 0x7FFF8000; // < 2G. 
static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000; static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 41; static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000; static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37; static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36; static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30; static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46; static const uint64_t kWindowsShadowOffset32 = 3ULL << 28; static const size_t kMinStackMallocSize = 1 << 6; // 64B static const size_t kMaxStackMallocSize = 1 << 16; // 64K static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3; static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E; static const char *const kAsanModuleCtorName = "asan.module_ctor"; static const char *const kAsanModuleDtorName = "asan.module_dtor"; static const uint64_t kAsanCtorAndDtorPriority = 1; static const char *const kAsanReportErrorTemplate = "__asan_report_"; static const char *const kAsanRegisterGlobalsName = "__asan_register_globals"; static const char *const kAsanUnregisterGlobalsName = "__asan_unregister_globals"; static const char *const kAsanPoisonGlobalsName = "__asan_before_dynamic_init"; static const char *const kAsanUnpoisonGlobalsName = "__asan_after_dynamic_init"; static const char *const kAsanInitName = "__asan_init_v5"; static const char *const kAsanPtrCmp = "__sanitizer_ptr_cmp"; static const char *const kAsanPtrSub = "__sanitizer_ptr_sub"; static const char *const kAsanHandleNoReturnName = "__asan_handle_no_return"; static const int kMaxAsanStackMallocSizeClass = 10; static const char *const kAsanStackMallocNameTemplate = "__asan_stack_malloc_"; static const char *const kAsanStackFreeNameTemplate = "__asan_stack_free_"; static const char *const kAsanGenPrefix = "__asan_gen_"; static const char *const kSanCovGenPrefix = "__sancov_gen_"; static const char *const kAsanPoisonStackMemoryName = "__asan_poison_stack_memory"; static const char *const kAsanUnpoisonStackMemoryName = "__asan_unpoison_stack_memory"; static const char *const kAsanOptionDetectUAR = "__asan_option_detect_stack_use_after_return"; static const char *const kAsanAllocaPoison = "__asan_alloca_poison"; static const char *const kAsanAllocasUnpoison = "__asan_allocas_unpoison"; // Accesses sizes are powers of two: 1, 2, 4, 8, 16. static const size_t kNumberOfAccessSizes = 5; static const unsigned kAllocaRzSize = 32; // Command-line flags. static cl::opt<bool> ClEnableKasan( "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"), cl::Hidden, cl::init(false)); // This flag may need to be replaced with -f[no-]asan-reads. static cl::opt<bool> ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true)); static cl::opt<bool> ClInstrumentWrites( "asan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true)); static cl::opt<bool> ClInstrumentAtomics( "asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true)); static cl::opt<bool> ClAlwaysSlowPath( "asan-always-slow-path", cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden, cl::init(false)); // This flag limits the number of instructions to be instrumented // in any given BB. Normally, this should be set to unlimited (INT_MAX), // but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporary // set it to 10000. 
static cl::opt<int> ClMaxInsnsToInstrumentPerBB( "asan-max-ins-per-bb", cl::init(10000), cl::desc("maximal number of instructions to instrument in any given BB"), cl::Hidden); // This flag may need to be replaced with -f[no]asan-stack. static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"), cl::Hidden, cl::init(true)); static cl::opt<bool> ClUseAfterReturn("asan-use-after-return", cl::desc("Check return-after-free"), cl::Hidden, cl::init(true)); // This flag may need to be replaced with -f[no]asan-globals. static cl::opt<bool> ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true)); static cl::opt<bool> ClInitializers("asan-initialization-order", cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true)); static cl::opt<bool> ClInvalidPointerPairs( "asan-detect-invalid-pointer-pair", cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden, cl::init(false)); static cl::opt<unsigned> ClRealignStack( "asan-realign-stack", cl::desc("Realign stack to the value of this flag (power of two)"), cl::Hidden, cl::init(32)); static cl::opt<int> ClInstrumentationWithCallsThreshold( "asan-instrumentation-with-call-threshold", cl::desc( "If the function being instrumented contains more than " "this number of memory accesses, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(7000)); static cl::opt<std::string> ClMemoryAccessCallbackPrefix( "asan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__asan_")); static cl::opt<bool> ClInstrumentAllocas("asan-instrument-allocas", cl::desc("instrument dynamic allocas"), cl::Hidden, cl::init(false)); static cl::opt<bool> ClSkipPromotableAllocas( "asan-skip-promotable-allocas", cl::desc("Do not instrument promotable allocas"), cl::Hidden, cl::init(true)); // These flags allow to change the shadow mapping. // The shadow mapping looks like // Shadow = (Mem >> scale) + (1 << offset_log) static cl::opt<int> ClMappingScale("asan-mapping-scale", cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0)); // Optimization flags. Not user visible, used mostly for testing // and benchmarking the tool. static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true)); static cl::opt<bool> ClOptSameTemp( "asan-opt-same-temp", cl::desc("Instrument the same temp just once"), cl::Hidden, cl::init(true)); static cl::opt<bool> ClOptGlobals("asan-opt-globals", cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true)); static cl::opt<bool> ClOptStack( "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"), cl::Hidden, cl::init(false)); static cl::opt<bool> ClCheckLifetime( "asan-check-lifetime", cl::desc("Use llvm.lifetime intrinsics to insert extra checks"), cl::Hidden, cl::init(false)); static cl::opt<bool> ClDynamicAllocaStack( "asan-stack-dynamic-alloca", cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden, cl::init(true)); static cl::opt<uint32_t> ClForceExperiment( "asan-force-experiment", cl::desc("Force optimization experiment (for testing)"), cl::Hidden, cl::init(0)); // Debug flags. 
static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden, cl::init(0)); static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"), cl::Hidden, cl::init(0)); static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden, cl::desc("Debug func")); static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"), cl::Hidden, cl::init(-1)); static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug man inst"), cl::Hidden, cl::init(-1)); STATISTIC(NumInstrumentedReads, "Number of instrumented reads"); STATISTIC(NumInstrumentedWrites, "Number of instrumented writes"); STATISTIC(NumOptimizedAccessesToGlobalVar, "Number of optimized accesses to global vars"); STATISTIC(NumOptimizedAccessesToStackVar, "Number of optimized accesses to stack vars"); namespace { /// Frontend-provided metadata for source location. struct LocationMetadata { StringRef Filename; int LineNo; int ColumnNo; LocationMetadata() : Filename(), LineNo(0), ColumnNo(0) {} bool empty() const { return Filename.empty(); } void parse(MDNode *MDN) { assert(MDN->getNumOperands() == 3); MDString *DIFilename = cast<MDString>(MDN->getOperand(0)); Filename = DIFilename->getString(); LineNo = mdconst::extract<ConstantInt>(MDN->getOperand(1))->getLimitedValue(); ColumnNo = mdconst::extract<ConstantInt>(MDN->getOperand(2))->getLimitedValue(); } }; /// Frontend-provided metadata for global variables. class GlobalsMetadata { public: struct Entry { Entry() : SourceLoc(), Name(), IsDynInit(false), IsBlacklisted(false) {} LocationMetadata SourceLoc; StringRef Name; bool IsDynInit; bool IsBlacklisted; }; GlobalsMetadata() : inited_(false) {} void init(Module &M) { assert(!inited_); inited_ = true; NamedMDNode *Globals = M.getNamedMetadata("llvm.asan.globals"); if (!Globals) return; for (auto MDN : Globals->operands()) { // Metadata node contains the global and the fields of "Entry". assert(MDN->getNumOperands() == 5); auto *GV = mdconst::extract_or_null<GlobalVariable>(MDN->getOperand(0)); // The optimizer may optimize away a global entirely. if (!GV) continue; // We can already have an entry for GV if it was merged with another // global. Entry &E = Entries[GV]; if (auto *Loc = cast_or_null<MDNode>(MDN->getOperand(1))) E.SourceLoc.parse(Loc); if (auto *Name = cast_or_null<MDString>(MDN->getOperand(2))) E.Name = Name->getString(); ConstantInt *IsDynInit = mdconst::extract<ConstantInt>(MDN->getOperand(3)); E.IsDynInit |= IsDynInit->isOne(); ConstantInt *IsBlacklisted = mdconst::extract<ConstantInt>(MDN->getOperand(4)); E.IsBlacklisted |= IsBlacklisted->isOne(); } } /// Returns metadata entry for a given global. Entry get(GlobalVariable *G) const { auto Pos = Entries.find(G); return (Pos != Entries.end()) ? Pos->second : Entry(); } private: bool inited_; DenseMap<GlobalVariable *, Entry> Entries; }; /// This struct defines the shadow mapping using the rule: /// shadow = (mem >> Scale) ADD-or-OR Offset. 
struct ShadowMapping { int Scale; uint64_t Offset; bool OrShadowOffset; }; static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize, bool IsKasan) { bool IsAndroid = TargetTriple.getEnvironment() == llvm::Triple::Android; bool IsIOS = TargetTriple.isiOS(); bool IsFreeBSD = TargetTriple.isOSFreeBSD(); bool IsLinux = TargetTriple.isOSLinux(); bool IsPPC64 = TargetTriple.getArch() == llvm::Triple::ppc64 || TargetTriple.getArch() == llvm::Triple::ppc64le; bool IsX86_64 = TargetTriple.getArch() == llvm::Triple::x86_64; bool IsMIPS32 = TargetTriple.getArch() == llvm::Triple::mips || TargetTriple.getArch() == llvm::Triple::mipsel; bool IsMIPS64 = TargetTriple.getArch() == llvm::Triple::mips64 || TargetTriple.getArch() == llvm::Triple::mips64el; bool IsAArch64 = TargetTriple.getArch() == llvm::Triple::aarch64; bool IsWindows = TargetTriple.isOSWindows(); ShadowMapping Mapping; if (LongSize == 32) { if (IsAndroid) Mapping.Offset = 0; else if (IsMIPS32) Mapping.Offset = kMIPS32_ShadowOffset32; else if (IsFreeBSD) Mapping.Offset = kFreeBSD_ShadowOffset32; else if (IsIOS) Mapping.Offset = kIOSShadowOffset32; else if (IsWindows) Mapping.Offset = kWindowsShadowOffset32; else Mapping.Offset = kDefaultShadowOffset32; } else { // LongSize == 64 if (IsPPC64) Mapping.Offset = kPPC64_ShadowOffset64; else if (IsFreeBSD) Mapping.Offset = kFreeBSD_ShadowOffset64; else if (IsLinux && IsX86_64) { if (IsKasan) Mapping.Offset = kLinuxKasan_ShadowOffset64; else Mapping.Offset = kSmallX86_64ShadowOffset; } else if (IsMIPS64) Mapping.Offset = kMIPS64_ShadowOffset64; else if (IsAArch64) Mapping.Offset = kAArch64_ShadowOffset64; else Mapping.Offset = kDefaultShadowOffset64; } Mapping.Scale = kDefaultShadowScale; if (ClMappingScale) { Mapping.Scale = ClMappingScale; } // OR-ing shadow offset if more efficient (at least on x86) if the offset // is a power of two, but on ppc64 we have to use add since the shadow // offset is not necessary 1/8-th of the address space. Mapping.OrShadowOffset = !IsPPC64 && !(Mapping.Offset & (Mapping.Offset - 1)); return Mapping; } static size_t RedzoneSizeForScale(int MappingScale) { // Redzone used for stack and globals is at least 32 bytes. // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively. return std::max(32U, 1U << MappingScale); } /// AddressSanitizer: instrument the code in module to find memory bugs. struct AddressSanitizer : public FunctionPass { explicit AddressSanitizer(bool CompileKernel = false) : FunctionPass(ID), CompileKernel(CompileKernel || ClEnableKasan) { initializeAddressSanitizerPass(*PassRegistry::getPassRegistry()); } StringRef getPassName() const override { return "AddressSanitizerFunctionPass"; } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired<DominatorTreeWrapperPass>(); AU.addRequired<TargetLibraryInfoWrapperPass>(); } uint64_t getAllocaSizeInBytes(AllocaInst *AI) const { Type *Ty = AI->getAllocatedType(); uint64_t SizeInBytes = AI->getModule()->getDataLayout().getTypeAllocSize(Ty); return SizeInBytes; } /// Check if we want (and can) handle this alloca. bool isInterestingAlloca(AllocaInst &AI); // Check if we have dynamic alloca. bool isDynamicAlloca(AllocaInst &AI) const { return AI.isArrayAllocation() || !AI.isStaticAlloca(); } /// If it is an interesting memory access, return the PointerOperand /// and set IsWrite/Alignment. Otherwise return nullptr. 
Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite, uint64_t *TypeSize, unsigned *Alignment); void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, Instruction *I, bool UseCalls, const DataLayout &DL); void instrumentPointerComparisonOrSubtraction(Instruction *I); void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, uint32_t TypeSize, bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp); void instrumentUnusualSizeOrAlignment(Instruction *I, Value *Addr, uint32_t TypeSize, bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp); Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong, Value *ShadowValue, uint32_t TypeSize); Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr, bool IsWrite, size_t AccessSizeIndex, Value *SizeArgument, uint32_t Exp); void instrumentMemIntrinsic(MemIntrinsic *MI); Value *memToShadow(Value *Shadow, IRBuilder<> &IRB); bool runOnFunction(Function &F) override; bool maybeInsertAsanInitAtFunctionEntry(Function &F); bool doInitialization(Module &M) override; static char ID; // Pass identification, replacement for typeid DominatorTree &getDominatorTree() const { return *DT; } private: void initializeCallbacks(Module &M); bool LooksLikeCodeInBug11395(Instruction *I); bool GlobalIsLinkerInitialized(GlobalVariable *G); bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr, uint64_t TypeSize) const; LLVMContext *C; Triple TargetTriple; int LongSize; bool CompileKernel; Type *IntptrTy; ShadowMapping Mapping; DominatorTree *DT; Function *AsanCtorFunction = nullptr; Function *AsanInitFunction = nullptr; Function *AsanHandleNoReturnFunc; Function *AsanPtrCmpFunction, *AsanPtrSubFunction; // This array is indexed by AccessIsWrite, Experiment and log2(AccessSize). Function *AsanErrorCallback[2][2][kNumberOfAccessSizes]; Function *AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes]; // This array is indexed by AccessIsWrite and Experiment. Function *AsanErrorCallbackSized[2][2]; Function *AsanMemoryAccessCallbackSized[2][2]; Function *AsanMemmove, *AsanMemcpy, *AsanMemset; InlineAsm *EmptyAsm; GlobalsMetadata GlobalsMD; DenseMap<AllocaInst *, bool> ProcessedAllocas; friend struct FunctionStackPoisoner; }; class AddressSanitizerModule : public ModulePass { public: explicit AddressSanitizerModule(bool CompileKernel = false) : ModulePass(ID), CompileKernel(CompileKernel || ClEnableKasan) {} bool runOnModule(Module &M) override; static char ID; // Pass identification, replacement for typeid StringRef getPassName() const override { return "AddressSanitizerModule"; } private: void initializeCallbacks(Module &M); bool InstrumentGlobals(IRBuilder<> &IRB, Module &M); bool ShouldInstrumentGlobal(GlobalVariable *G); void poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName); void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName); size_t MinRedzoneSizeForGlobal() const { return RedzoneSizeForScale(Mapping.Scale); } GlobalsMetadata GlobalsMD; bool CompileKernel; Type *IntptrTy; LLVMContext *C; Triple TargetTriple; ShadowMapping Mapping; Function *AsanPoisonGlobals; Function *AsanUnpoisonGlobals; Function *AsanRegisterGlobals; Function *AsanUnregisterGlobals; }; // Stack poisoning does not play well with exception handling. // When an exception is thrown, we essentially bypass the code // that unpoisones the stack. This is why the run-time library has // to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire // stack in the interceptor. 
This however does not work inside the // actual function which catches the exception. Most likely because the // compiler hoists the load of the shadow value somewhere too high. // This causes asan to report a non-existing bug on 453.povray. // It sounds like an LLVM bug. struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> { Function &F; AddressSanitizer &ASan; DIBuilder DIB; LLVMContext *C; Type *IntptrTy; Type *IntptrPtrTy; ShadowMapping Mapping; SmallVector<AllocaInst *, 16> AllocaVec; SmallVector<Instruction *, 8> RetVec; unsigned StackAlignment; Function *AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1], *AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1]; Function *AsanPoisonStackMemoryFunc, *AsanUnpoisonStackMemoryFunc; Function *AsanAllocaPoisonFunc, *AsanAllocasUnpoisonFunc; // Stores a place and arguments of poisoning/unpoisoning call for alloca. struct AllocaPoisonCall { IntrinsicInst *InsBefore; AllocaInst *AI; uint64_t Size; bool DoPoison; }; SmallVector<AllocaPoisonCall, 8> AllocaPoisonCallVec; SmallVector<AllocaInst *, 1> DynamicAllocaVec; SmallVector<IntrinsicInst *, 1> StackRestoreVec; AllocaInst *DynamicAllocaLayout = nullptr; // Maps Value to an AllocaInst from which the Value is originated. typedef DenseMap<Value *, AllocaInst *> AllocaForValueMapTy; AllocaForValueMapTy AllocaForValue; bool HasNonEmptyInlineAsm; std::unique_ptr<CallInst> EmptyInlineAsm; FunctionStackPoisoner(Function &F, AddressSanitizer &ASan) : F(F), ASan(ASan), DIB(*F.getParent(), /*AllowUnresolved*/ false), C(ASan.C), IntptrTy(ASan.IntptrTy), IntptrPtrTy(PointerType::get(IntptrTy, 0)), Mapping(ASan.Mapping), StackAlignment(1 << Mapping.Scale), HasNonEmptyInlineAsm(false), EmptyInlineAsm(CallInst::Create(ASan.EmptyAsm)) {} bool runOnFunction() { if (!ClStack) return false; // Collect alloca, ret, lifetime instructions etc. for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB); if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false; initializeCallbacks(*F.getParent()); poisonStack(); if (ClDebugStack) { DEBUG(dbgs() << F); } return true; } // Finds all Alloca instructions and puts // poisoned red zones around all of them. // Then unpoison everything back before the function returns. void poisonStack(); void createDynamicAllocasInitStorage(); // ----------------------- Visitors. /// \brief Collect all Ret instructions. void visitReturnInst(ReturnInst &RI) { RetVec.push_back(&RI); } void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore, Value *SavedStack) { IRBuilder<> IRB(InstBefore); IRB.CreateCall(AsanAllocasUnpoisonFunc, {IRB.CreateLoad(DynamicAllocaLayout), IRB.CreatePtrToInt(SavedStack, IntptrTy)}); } // Unpoison dynamic allocas redzones. void unpoisonDynamicAllocas() { for (auto &Ret : RetVec) unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout); for (auto &StackRestoreInst : StackRestoreVec) unpoisonDynamicAllocasBeforeInst(StackRestoreInst, StackRestoreInst->getOperand(0)); } // Deploy and poison redzones around dynamic alloca call. To do this, we // should replace this call with another one with changed parameters and // replace all its uses with new address, so // addr = alloca type, old_size, align // is replaced by // new_size = (old_size + additional_size) * sizeof(type) // tmp = alloca i8, new_size, max(align, 32) // addr = tmp + 32 (first 32 bytes are for the left redzone). // Additional_size is added to make new memory allocation contain not only // requested memory, but also left, partial and right redzones. 
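// Worked example: for "alloca i8, 40" with 16-byte alignment and the 32-byte
// redzone unit used here (kAllocaRzSize), Align = max(32, 16) = 32, the
// partial padding is 32 - (40 % 32) = 24, so the replacement allocates
// 40 + 32 + 24 + 32 = 128 bytes of i8 aligned to 32 and returns the raw
// address plus 32 (skipping the left redzone).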
void handleDynamicAllocaCall(AllocaInst *AI); /// \brief Collect Alloca instructions we want (and can) handle. void visitAllocaInst(AllocaInst &AI) { if (!ASan.isInterestingAlloca(AI)) return; StackAlignment = std::max(StackAlignment, AI.getAlignment()); if (ASan.isDynamicAlloca(AI)) DynamicAllocaVec.push_back(&AI); else AllocaVec.push_back(&AI); } /// \brief Collect lifetime intrinsic calls to check for use-after-scope /// errors. void visitIntrinsicInst(IntrinsicInst &II) { Intrinsic::ID ID = II.getIntrinsicID(); if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II); if (!ClCheckLifetime) return; if (ID != Intrinsic::lifetime_start && ID != Intrinsic::lifetime_end) return; // Found lifetime intrinsic, add ASan instrumentation if necessary. ConstantInt *Size = dyn_cast<ConstantInt>(II.getArgOperand(0)); // If size argument is undefined, don't do anything. if (Size->isMinusOne()) return; // Check that size doesn't saturate uint64_t and can // be stored in IntptrTy. const uint64_t SizeValue = Size->getValue().getLimitedValue(); if (SizeValue == ~0ULL || !ConstantInt::isValueValidForType(IntptrTy, SizeValue)) return; // Find alloca instruction that corresponds to llvm.lifetime argument. AllocaInst *AI = findAllocaForValue(II.getArgOperand(1)); if (!AI) return; bool DoPoison = (ID == Intrinsic::lifetime_end); AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison}; AllocaPoisonCallVec.push_back(APC); } void visitCallInst(CallInst &CI) { HasNonEmptyInlineAsm |= CI.isInlineAsm() && !CI.isIdenticalTo(EmptyInlineAsm.get()); } // ---------------------- Helpers. void initializeCallbacks(Module &M); bool doesDominateAllExits(const Instruction *I) const { for (auto Ret : RetVec) { if (!ASan.getDominatorTree().dominates(I, Ret)) return false; } return true; } /// Finds alloca where the value comes from. AllocaInst *findAllocaForValue(Value *V); void poisonRedZones(ArrayRef<uint8_t> ShadowBytes, IRBuilder<> &IRB, Value *ShadowBase, bool DoPoison); void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison); void SetShadowToStackAfterReturnInlined(IRBuilder<> &IRB, Value *ShadowBase, int Size); Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic); PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue, Instruction *ThenTerm, Value *ValueIfFalse); }; } // namespace char AddressSanitizer::ID = 0; INITIALIZE_PASS_BEGIN( AddressSanitizer, "asan", "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false, false) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_END( AddressSanitizer, "asan", "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false, false) FunctionPass *llvm::createAddressSanitizerFunctionPass(bool CompileKernel) { return new AddressSanitizer(CompileKernel); } char AddressSanitizerModule::ID = 0; INITIALIZE_PASS( AddressSanitizerModule, "asan-module", "AddressSanitizer: detects use-after-free and out-of-bounds bugs." "ModulePass", false, false) ModulePass *llvm::createAddressSanitizerModulePass(bool CompileKernel) { return new AddressSanitizerModule(CompileKernel); } static size_t TypeSizeToSizeIndex(uint32_t TypeSize) { size_t Res = countTrailingZeros(TypeSize / 8); assert(Res < kNumberOfAccessSizes); return Res; } // \brief Create a constant for Str so that we can pass it to the run-time lib. 
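// The string is emitted under kAsanGenPrefix so that GlobalWasGeneratedByAsan()
// below recognizes globals created by this pass itself and
// ShouldInstrumentGlobal() never adds redzones to them.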
static GlobalVariable *createPrivateGlobalForString(Module &M, StringRef Str, bool AllowMerging) { Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str); // We use private linkage for module-local strings. If they can be merged // with another one, we set the unnamed_addr attribute. GlobalVariable *GV = new GlobalVariable(M, StrConst->getType(), true, GlobalValue::PrivateLinkage, StrConst, kAsanGenPrefix); if (AllowMerging) GV->setUnnamedAddr(true); GV->setAlignment(1); // Strings may not be merged w/o setting align 1. return GV; } /// \brief Create a global describing a source location. static GlobalVariable *createPrivateGlobalForSourceLoc(Module &M, LocationMetadata MD) { Constant *LocData[] = { createPrivateGlobalForString(M, MD.Filename, true), ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.LineNo), ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.ColumnNo), }; auto LocStruct = ConstantStruct::getAnon(LocData); auto GV = new GlobalVariable(M, LocStruct->getType(), true, GlobalValue::PrivateLinkage, LocStruct, kAsanGenPrefix); GV->setUnnamedAddr(true); return GV; } static bool GlobalWasGeneratedByAsan(GlobalVariable *G) { return G->getName().find(kAsanGenPrefix) == 0 || G->getName().find(kSanCovGenPrefix) == 0; } Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) { // Shadow >> scale Shadow = IRB.CreateLShr(Shadow, Mapping.Scale); if (Mapping.Offset == 0) return Shadow; // (Shadow >> scale) | offset if (Mapping.OrShadowOffset) return IRB.CreateOr(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset)); else return IRB.CreateAdd(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset)); } // Instrument memset/memmove/memcpy void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) { IRBuilder<> IRB(MI); if (isa<MemTransferInst>(MI)) { IRB.CreateCall( isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy, {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()), IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()), IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)}); } else if (isa<MemSetInst>(MI)) { IRB.CreateCall( AsanMemset, {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()), IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false), IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)}); } MI->eraseFromParent(); } /// Check if we want (and can) handle this alloca. bool AddressSanitizer::isInterestingAlloca(AllocaInst &AI) { auto PreviouslySeenAllocaInfo = ProcessedAllocas.find(&AI); if (PreviouslySeenAllocaInfo != ProcessedAllocas.end()) return PreviouslySeenAllocaInfo->getSecond(); bool IsInteresting = (AI.getAllocatedType()->isSized() && // alloca() may be called with 0 size, ignore it. getAllocaSizeInBytes(&AI) > 0 && // We are only interested in allocas not promotable to registers. // Promotable allocas are common under -O0. (!ClSkipPromotableAllocas || !isAllocaPromotable(&AI) || isDynamicAlloca(AI))); ProcessedAllocas[&AI] = IsInteresting; return IsInteresting; } /// If I is an interesting memory access, return the PointerOperand /// and set IsWrite/Alignment. Otherwise return nullptr. Value *AddressSanitizer::isInterestingMemoryAccess(Instruction *I, bool *IsWrite, uint64_t *TypeSize, unsigned *Alignment) { // Skip memory accesses inserted by another instrumentation. 
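// Such accesses are marked with !nosanitize metadata (e.g. by other
// sanitizers or by ASan itself) and must not be instrumented a second time.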
if (I->getMetadata("nosanitize")) return nullptr; Value *PtrOperand = nullptr; const DataLayout &DL = I->getModule()->getDataLayout(); if (LoadInst *LI = dyn_cast<LoadInst>(I)) { if (!ClInstrumentReads) return nullptr; *IsWrite = false; *TypeSize = DL.getTypeStoreSizeInBits(LI->getType()); *Alignment = LI->getAlignment(); PtrOperand = LI->getPointerOperand(); } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) { if (!ClInstrumentWrites) return nullptr; *IsWrite = true; *TypeSize = DL.getTypeStoreSizeInBits(SI->getValueOperand()->getType()); *Alignment = SI->getAlignment(); PtrOperand = SI->getPointerOperand(); } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) { if (!ClInstrumentAtomics) return nullptr; *IsWrite = true; *TypeSize = DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType()); *Alignment = 0; PtrOperand = RMW->getPointerOperand(); } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) { if (!ClInstrumentAtomics) return nullptr; *IsWrite = true; *TypeSize = DL.getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType()); *Alignment = 0; PtrOperand = XCHG->getPointerOperand(); } // Treat memory accesses to promotable allocas as non-interesting since they // will not cause memory violations. This greatly speeds up the instrumented // executable at -O0. if (ClSkipPromotableAllocas) if (auto AI = dyn_cast_or_null<AllocaInst>(PtrOperand)) return isInterestingAlloca(*AI) ? AI : nullptr; return PtrOperand; } static bool isPointerOperand(Value *V) { return V->getType()->isPointerTy() || isa<PtrToIntInst>(V); } // This is a rough heuristic; it may cause both false positives and // false negatives. The proper implementation requires cooperation with // the frontend. static bool isInterestingPointerComparisonOrSubtraction(Instruction *I) { if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) { if (!Cmp->isRelational()) return false; } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) { if (BO->getOpcode() != Instruction::Sub) return false; } else { return false; } if (!isPointerOperand(I->getOperand(0)) || !isPointerOperand(I->getOperand(1))) return false; return true; } bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) { // If a global variable does not have dynamic initialization we don't // have to instrument it. However, if a global does not have initializer // at all, we assume it has dynamic initializer (in other TU). return G->hasInitializer() && !GlobalsMD.get(G).IsDynInit; } void AddressSanitizer::instrumentPointerComparisonOrSubtraction( Instruction *I) { IRBuilder<> IRB(I); Function *F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction; Value *Param[2] = {I->getOperand(0), I->getOperand(1)}; for (int i = 0; i < 2; i++) { if (Param[i]->getType()->isPointerTy()) Param[i] = IRB.CreatePointerCast(Param[i], IntptrTy); } IRB.CreateCall(F, Param); } void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, Instruction *I, bool UseCalls, const DataLayout &DL) { bool IsWrite = false; unsigned Alignment = 0; uint64_t TypeSize = 0; Value *Addr = isInterestingMemoryAccess(I, &IsWrite, &TypeSize, &Alignment); assert(Addr); // Optimization experiments. // The experiments can be used to evaluate potential optimizations that remove // instrumentation (assess false negatives). Instead of completely removing // some instrumentation, you set Exp to a non-zero value (mask of optimization // experiments that want to remove instrumentation of this instruction). 
// If Exp is non-zero, this pass will emit special calls into runtime // (e.g. __asan_report_exp_load1 instead of __asan_report_load1). These calls // make runtime terminate the program in a special way (with a different // exit status). Then you run the new compiler on a buggy corpus, collect // the special terminations (ideally, you don't see them at all -- no false // negatives) and make the decision on the optimization. uint32_t Exp = ClForceExperiment; if (ClOpt && ClOptGlobals) { // If initialization order checking is disabled, a simple access to a // dynamically initialized global is always valid. GlobalVariable *G = dyn_cast<GlobalVariable>(GetUnderlyingObject(Addr, DL)); if (G != NULL && (!ClInitializers || GlobalIsLinkerInitialized(G)) && isSafeAccess(ObjSizeVis, Addr, TypeSize)) { NumOptimizedAccessesToGlobalVar++; return; } } if (ClOpt && ClOptStack) { // A direct inbounds access to a stack variable is always valid. if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) && isSafeAccess(ObjSizeVis, Addr, TypeSize)) { NumOptimizedAccessesToStackVar++; return; } } if (IsWrite) NumInstrumentedWrites++; else NumInstrumentedReads++; unsigned Granularity = 1 << Mapping.Scale; // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check // if the data is properly aligned. if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 || TypeSize == 128) && (Alignment >= Granularity || Alignment == 0 || Alignment >= TypeSize / 8)) return instrumentAddress(I, I, Addr, TypeSize, IsWrite, nullptr, UseCalls, Exp); instrumentUnusualSizeOrAlignment(I, Addr, TypeSize, IsWrite, nullptr, UseCalls, Exp); } Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore, Value *Addr, bool IsWrite, size_t AccessSizeIndex, Value *SizeArgument, uint32_t Exp) { IRBuilder<> IRB(InsertBefore); Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp); CallInst *Call = nullptr; if (SizeArgument) { if (Exp == 0) Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][0], {Addr, SizeArgument}); else Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][1], {Addr, SizeArgument, ExpVal}); } else { if (Exp == 0) Call = IRB.CreateCall(AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr); else Call = IRB.CreateCall(AsanErrorCallback[IsWrite][1][AccessSizeIndex], {Addr, ExpVal}); } // We don't do Call->setDoesNotReturn() because the BB already has // UnreachableInst at the end. // This EmptyAsm is required to avoid callback merge. 
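// (Otherwise the backend may merge identical __asan_report_* calls emitted
// for different accesses, collapsing distinct report sites into one.)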
IRB.CreateCall(EmptyAsm, {}); return Call; } Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong, Value *ShadowValue, uint32_t TypeSize) { size_t Granularity = 1 << Mapping.Scale; // Addr & (Granularity - 1) Value *LastAccessedByte = IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1)); // (Addr & (Granularity - 1)) + size - 1 if (TypeSize / 8 > 1) LastAccessedByte = IRB.CreateAdd( LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)); // (uint8_t) ((Addr & (Granularity-1)) + size - 1) LastAccessedByte = IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false); // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue); } void AddressSanitizer::instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, uint32_t TypeSize, bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp) { IRBuilder<> IRB(InsertBefore); Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy); size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize); if (UseCalls) { if (Exp == 0) IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], AddrLong); else IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex], {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)}); return; } Type *ShadowTy = IntegerType::get(*C, std::max(8U, TypeSize >> Mapping.Scale)); Type *ShadowPtrTy = PointerType::get(ShadowTy, 0); Value *ShadowPtr = memToShadow(AddrLong, IRB); Value *CmpVal = Constant::getNullValue(ShadowTy); Value *ShadowValue = IRB.CreateLoad(IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy)); Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal); size_t Granularity = 1 << Mapping.Scale; TerminatorInst *CrashTerm = nullptr; if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) { // We use branch weights for the slow path check, to indicate that the slow // path is rarely taken. This seems to be the case for SPEC benchmarks. TerminatorInst *CheckTerm = SplitBlockAndInsertIfThen( Cmp, InsertBefore, false, MDBuilder(*C).createBranchWeights(1, 100000)); assert(cast<BranchInst>(CheckTerm)->isUnconditional()); BasicBlock *NextBB = CheckTerm->getSuccessor(0); IRB.SetInsertPoint(CheckTerm); Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize); BasicBlock *CrashBlock = BasicBlock::Create(*C, "", NextBB->getParent(), NextBB); CrashTerm = new UnreachableInst(*C, CrashBlock); BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2); ReplaceInstWithInst(CheckTerm, NewTerm); } else { CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, true); } Instruction *Crash = generateCrashCode(CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument, Exp); Crash->setDebugLoc(OrigIns->getDebugLoc()); } // Instrument unusual size or unusual alignment. // We can not do it with a single check, so we do 1-byte check for the first // and the last bytes. We call __asan_report_*_n(addr, real_size) to be able // to report the actual access size. 
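// For example, a 10-byte access at Addr is handled with two 1-byte checks,
// at Addr and at Addr + 9, and the real size (10) is passed as SizeArgument
// so a failure is reported through __asan_report_*_n(addr, 10).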
void AddressSanitizer::instrumentUnusualSizeOrAlignment( Instruction *I, Value *Addr, uint32_t TypeSize, bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp) { IRBuilder<> IRB(I); Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8); Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy); if (UseCalls) { if (Exp == 0) IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][0], {AddrLong, Size}); else IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][1], {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)}); } else { Value *LastByte = IRB.CreateIntToPtr( IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)), Addr->getType()); instrumentAddress(I, I, Addr, 8, IsWrite, Size, false, Exp); instrumentAddress(I, I, LastByte, 8, IsWrite, Size, false, Exp); } } void AddressSanitizerModule::poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName) { // Set up the arguments to our poison/unpoison functions. IRBuilder<> IRB(GlobalInit.begin()->getFirstInsertionPt()); // Add a call to poison all external globals before the given function starts. Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy); IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr); // Add calls to unpoison all globals before each return instruction. for (auto &BB : GlobalInit.getBasicBlockList()) if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator())) CallInst::Create(AsanUnpoisonGlobals, "", RI); } void AddressSanitizerModule::createInitializerPoisonCalls( Module &M, GlobalValue *ModuleName) { GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors"); ConstantArray *CA = cast<ConstantArray>(GV->getInitializer()); for (Use &OP : CA->operands()) { if (isa<ConstantAggregateZero>(OP)) continue; ConstantStruct *CS = cast<ConstantStruct>(OP); // Must have a function or null ptr. if (Function *F = dyn_cast<Function>(CS->getOperand(1))) { if (F->getName() == kAsanModuleCtorName) continue; ConstantInt *Priority = dyn_cast<ConstantInt>(CS->getOperand(0)); // Don't instrument CTORs that will run before asan.module_ctor. if (Priority->getLimitedValue() <= kAsanCtorAndDtorPriority) continue; poisonOneInitializer(*F, ModuleName); } } } bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) { Type *Ty = cast<PointerType>(G->getType())->getElementType(); DEBUG(dbgs() << "GLOBAL: " << *G << "\n"); if (GlobalsMD.get(G).IsBlacklisted) return false; if (!Ty->isSized()) return false; if (!G->hasInitializer()) return false; if (GlobalWasGeneratedByAsan(G)) return false; // Our own global. // Touch only those globals that will not be defined in other modules. // Don't handle ODR linkage types and COMDATs since other modules may be built // without ASan. if (G->getLinkage() != GlobalVariable::ExternalLinkage && G->getLinkage() != GlobalVariable::PrivateLinkage && G->getLinkage() != GlobalVariable::InternalLinkage) return false; if (G->hasComdat()) return false; // Two problems with thread-locals: // - The address of the main thread's copy can't be computed at link-time. // - Need to poison all copies, not just the main thread's one. if (G->isThreadLocal()) return false; // For now, just ignore this Global if the alignment is large. if (G->getAlignment() > MinRedzoneSizeForGlobal()) return false; if (G->hasSection()) { StringRef Section(G->getSection()); // Globals from llvm.metadata aren't emitted, do not instrument them. if (Section == "llvm.metadata") return false; // Do not instrument globals from special LLVM sections. 
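// (Matched by name below: any section containing "__llvm", e.g. profile
// counter sections, whose layout the toolchain itself controls.)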
if (Section.find("__llvm") != StringRef::npos) return false; // Callbacks put into the CRT initializer/terminator sections // should not be instrumented. // See https://code.google.com/p/address-sanitizer/issues/detail?id=305 // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx if (Section.startswith(".CRT")) { DEBUG(dbgs() << "Ignoring a global initializer callback: " << *G << "\n"); return false; } if (TargetTriple.isOSBinFormatMachO()) { StringRef ParsedSegment, ParsedSection; unsigned TAA = 0, StubSize = 0; bool TAAParsed; std::string ErrorCode = MCSectionMachO::ParseSectionSpecifier( Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize); if (!ErrorCode.empty()) { assert(false && "Invalid section specifier."); return false; } // Ignore the globals from the __OBJC section. The ObjC runtime assumes // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to // them. if (ParsedSegment == "__OBJC" || (ParsedSegment == "__DATA" && ParsedSection.startswith("__objc_"))) { DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n"); return false; } // See http://code.google.com/p/address-sanitizer/issues/detail?id=32 // Constant CFString instances are compiled in the following way: // -- the string buffer is emitted into // __TEXT,__cstring,cstring_literals // -- the constant NSConstantString structure referencing that buffer // is placed into __DATA,__cfstring // Therefore there's no point in placing redzones into __DATA,__cfstring. // Moreover, it causes the linker to crash on OS X 10.7 if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") { DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n"); return false; } // The linker merges the contents of cstring_literals and removes the // trailing zeroes. if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) { DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n"); return false; } } } return true; } void AddressSanitizerModule::initializeCallbacks(Module &M) { IRBuilder<> IRB(*C); // Declare our poisoning and unpoisoning functions. AsanPoisonGlobals = checkSanitizerInterfaceFunction(M.getOrInsertFunction( kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy, nullptr)); AsanPoisonGlobals->setLinkage(Function::ExternalLinkage); AsanUnpoisonGlobals = checkSanitizerInterfaceFunction(M.getOrInsertFunction( kAsanUnpoisonGlobalsName, IRB.getVoidTy(), nullptr)); AsanUnpoisonGlobals->setLinkage(Function::ExternalLinkage); // Declare functions that register/unregister globals. AsanRegisterGlobals = checkSanitizerInterfaceFunction(M.getOrInsertFunction( kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr)); AsanRegisterGlobals->setLinkage(Function::ExternalLinkage); AsanUnregisterGlobals = checkSanitizerInterfaceFunction( M.getOrInsertFunction(kAsanUnregisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr)); AsanUnregisterGlobals->setLinkage(Function::ExternalLinkage); } // This function replaces all global variables with new variables that have // trailing redzones. It also creates a function that poisons // redzones and inserts this function into llvm.global_ctors. 
bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) { GlobalsMD.init(M); SmallVector<GlobalVariable *, 16> GlobalsToChange; for (auto &G : M.globals()) { if (ShouldInstrumentGlobal(&G)) GlobalsToChange.push_back(&G); } size_t n = GlobalsToChange.size(); if (n == 0) return false; // A global is described by a structure // size_t beg; // size_t size; // size_t size_with_redzone; // const char *name; // const char *module_name; // size_t has_dynamic_init; // void *source_location; // We initialize an array of such structures and pass it to a run-time call. StructType *GlobalStructTy = StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy, nullptr); SmallVector<Constant *, 16> Initializers(n); bool HasDynamicallyInitializedGlobals = false; // We shouldn't merge same module names, as this string serves as unique // module ID in runtime. GlobalVariable *ModuleName = createPrivateGlobalForString( M, M.getModuleIdentifier(), /*AllowMerging*/ false); auto &DL = M.getDataLayout(); for (size_t i = 0; i < n; i++) { static const uint64_t kMaxGlobalRedzone = 1 << 18; GlobalVariable *G = GlobalsToChange[i]; auto MD = GlobalsMD.get(G); // Create string holding the global name (use global name from metadata // if it's available, otherwise just write the name of global variable). GlobalVariable *Name = createPrivateGlobalForString( M, MD.Name.empty() ? G->getName() : MD.Name, /*AllowMerging*/ true); PointerType *PtrTy = cast<PointerType>(G->getType()); Type *Ty = PtrTy->getElementType(); uint64_t SizeInBytes = DL.getTypeAllocSize(Ty); uint64_t MinRZ = MinRedzoneSizeForGlobal(); // MinRZ <= RZ <= kMaxGlobalRedzone // and trying to make RZ to be ~ 1/4 of SizeInBytes. uint64_t RZ = std::max( MinRZ, std::min(kMaxGlobalRedzone, (SizeInBytes / MinRZ / 4) * MinRZ)); uint64_t RightRedzoneSize = RZ; // Round up to MinRZ if (SizeInBytes % MinRZ) RightRedzoneSize += MinRZ - (SizeInBytes % MinRZ); assert(((RightRedzoneSize + SizeInBytes) % MinRZ) == 0); Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize); StructType *NewTy = StructType::get(Ty, RightRedZoneTy, nullptr); Constant *NewInitializer = ConstantStruct::get(NewTy, G->getInitializer(), Constant::getNullValue(RightRedZoneTy), nullptr); // Create a new global variable with enough space for a redzone. 
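// The original global G becomes field 0 of a { T, [RightRedzoneSize x i8] }
// struct: every use of G is rewritten to a getelementptr into the new
// global, the new global takes over G's name, and G itself is erased.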
GlobalValue::LinkageTypes Linkage = G->getLinkage(); if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage) Linkage = GlobalValue::InternalLinkage; GlobalVariable *NewGlobal = new GlobalVariable(M, NewTy, G->isConstant(), Linkage, NewInitializer, "", G, G->getThreadLocalMode()); NewGlobal->copyAttributesFrom(G); NewGlobal->setAlignment(MinRZ); Value *Indices2[2]; Indices2[0] = IRB.getInt32(0); Indices2[1] = IRB.getInt32(0); G->replaceAllUsesWith( ConstantExpr::getGetElementPtr(NewTy, NewGlobal, Indices2, true)); NewGlobal->takeName(G); G->eraseFromParent(); Constant *SourceLoc; if (!MD.SourceLoc.empty()) { auto SourceLocGlobal = createPrivateGlobalForSourceLoc(M, MD.SourceLoc); SourceLoc = ConstantExpr::getPointerCast(SourceLocGlobal, IntptrTy); } else { SourceLoc = ConstantInt::get(IntptrTy, 0); } Initializers[i] = ConstantStruct::get( GlobalStructTy, ConstantExpr::getPointerCast(NewGlobal, IntptrTy), ConstantInt::get(IntptrTy, SizeInBytes), ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize), ConstantExpr::getPointerCast(Name, IntptrTy), ConstantExpr::getPointerCast(ModuleName, IntptrTy), ConstantInt::get(IntptrTy, MD.IsDynInit), SourceLoc, nullptr); if (ClInitializers && MD.IsDynInit) HasDynamicallyInitializedGlobals = true; DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n"); } ArrayType *ArrayOfGlobalStructTy = ArrayType::get(GlobalStructTy, n); GlobalVariable *AllGlobals = new GlobalVariable( M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage, ConstantArray::get(ArrayOfGlobalStructTy, Initializers), ""); // Create calls for poisoning before initializers run and unpoisoning after. if (HasDynamicallyInitializedGlobals) createInitializerPoisonCalls(M, ModuleName); IRB.CreateCall(AsanRegisterGlobals, {IRB.CreatePointerCast(AllGlobals, IntptrTy), ConstantInt::get(IntptrTy, n)}); // We also need to unregister globals at the end, e.g. when a shared library // gets closed. Function *AsanDtorFunction = Function::Create(FunctionType::get(Type::getVoidTy(*C), false), GlobalValue::InternalLinkage, kAsanModuleDtorName, &M); BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction); IRBuilder<> IRB_Dtor(ReturnInst::Create(*C, AsanDtorBB)); IRB_Dtor.CreateCall(AsanUnregisterGlobals, {IRB.CreatePointerCast(AllGlobals, IntptrTy), ConstantInt::get(IntptrTy, n)}); appendToGlobalDtors(M, AsanDtorFunction, kAsanCtorAndDtorPriority); DEBUG(dbgs() << M); return true; } bool AddressSanitizerModule::runOnModule(Module &M) { C = &(M.getContext()); int LongSize = M.getDataLayout().getPointerSizeInBits(); IntptrTy = Type::getIntNTy(*C, LongSize); TargetTriple = Triple(M.getTargetTriple()); Mapping = getShadowMapping(TargetTriple, LongSize, CompileKernel); initializeCallbacks(M); bool Changed = false; // TODO(glider): temporarily disabled globals instrumentation for KASan. if (ClGlobals && !CompileKernel) { Function *CtorFunc = M.getFunction(kAsanModuleCtorName); assert(CtorFunc); IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator()); Changed |= InstrumentGlobals(IRB, M); } return Changed; } void AddressSanitizer::initializeCallbacks(Module &M) { IRBuilder<> IRB(*C); // Create __asan_report* callbacks. // IsWrite, TypeSize and Exp are encoded in the function name. for (int Exp = 0; Exp < 2; Exp++) { for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) { const std::string TypeStr = AccessIsWrite ? "store" : "load"; const std::string ExpStr = Exp ? "exp_" : ""; const std::string SuffixStr = CompileKernel ? 
"N" : "_n"; const std::string EndingStr = CompileKernel ? "_noabort" : ""; const Type *ExpType = Exp ? Type::getInt32Ty(*C) : nullptr; // TODO(glider): for KASan builds add _noabort to error reporting // functions and make them actually noabort (remove the UnreachableInst). AsanErrorCallbackSized[AccessIsWrite][Exp] = checkSanitizerInterfaceFunction(M.getOrInsertFunction( kAsanReportErrorTemplate + ExpStr + TypeStr + SuffixStr, IRB.getVoidTy(), IntptrTy, IntptrTy, ExpType, nullptr)); AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] = checkSanitizerInterfaceFunction(M.getOrInsertFunction( ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr, IRB.getVoidTy(), IntptrTy, IntptrTy, ExpType, nullptr)); for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes; AccessSizeIndex++) { const std::string Suffix = TypeStr + itostr((int64_t)(1) << AccessSizeIndex); // HLSL Change - keep as 64-bit operation AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] = checkSanitizerInterfaceFunction(M.getOrInsertFunction( kAsanReportErrorTemplate + ExpStr + Suffix, IRB.getVoidTy(), IntptrTy, ExpType, nullptr)); AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] = checkSanitizerInterfaceFunction(M.getOrInsertFunction( ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr, IRB.getVoidTy(), IntptrTy, ExpType, nullptr)); } } } const std::string MemIntrinCallbackPrefix = CompileKernel ? std::string("") : ClMemoryAccessCallbackPrefix; AsanMemmove = checkSanitizerInterfaceFunction(M.getOrInsertFunction( MemIntrinCallbackPrefix + "memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr)); AsanMemcpy = checkSanitizerInterfaceFunction(M.getOrInsertFunction( MemIntrinCallbackPrefix + "memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr)); AsanMemset = checkSanitizerInterfaceFunction(M.getOrInsertFunction( MemIntrinCallbackPrefix + "memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy, nullptr)); AsanHandleNoReturnFunc = checkSanitizerInterfaceFunction( M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy(), nullptr)); AsanPtrCmpFunction = checkSanitizerInterfaceFunction(M.getOrInsertFunction( kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr)); AsanPtrSubFunction = checkSanitizerInterfaceFunction(M.getOrInsertFunction( kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr)); // We insert an empty inline asm after __asan_report* to avoid callback merge. EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false), StringRef(""), StringRef(""), /*hasSideEffects=*/true); } // virtual bool AddressSanitizer::doInitialization(Module &M) { // Initialize the private fields. No one has accessed them before. GlobalsMD.init(M); C = &(M.getContext()); LongSize = M.getDataLayout().getPointerSizeInBits(); IntptrTy = Type::getIntNTy(*C, LongSize); TargetTriple = Triple(M.getTargetTriple()); if (!CompileKernel) { std::tie(AsanCtorFunction, AsanInitFunction) = createSanitizerCtorAndInitFunctions(M, kAsanModuleCtorName, kAsanInitName, /*InitArgTypes=*/{}, /*InitArgs=*/{}); appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndDtorPriority); } Mapping = getShadowMapping(TargetTriple, LongSize, CompileKernel); return true; } bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) { // For each NSObject descendant having a +load method, this method is invoked // by the ObjC runtime before any of the static constructors is called. 
// Therefore we need to instrument such methods with a call to __asan_init // at the beginning in order to initialize our runtime before any access to // the shadow memory. // We cannot just ignore these methods, because they may call other // instrumented functions. if (F.getName().find(" load]") != std::string::npos) { IRBuilder<> IRB(F.begin()->begin()); IRB.CreateCall(AsanInitFunction, {}); return true; } return false; } bool AddressSanitizer::runOnFunction(Function &F) { if (&F == AsanCtorFunction) return false; if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false; DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n"); initializeCallbacks(*F.getParent()); DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); // If needed, insert __asan_init before checking for SanitizeAddress attr. maybeInsertAsanInitAtFunctionEntry(F); if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return false; if (!ClDebugFunc.empty() && ClDebugFunc != F.getName()) return false; // We want to instrument every address only once per basic block (unless there // are calls between uses). SmallSet<Value *, 16> TempsToInstrument; SmallVector<Instruction *, 16> ToInstrument; SmallVector<Instruction *, 8> NoReturnCalls; SmallVector<BasicBlock *, 16> AllBlocks; SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts; int NumAllocas = 0; bool IsWrite; unsigned Alignment; uint64_t TypeSize; // Fill the set of memory operations to instrument. for (auto &BB : F) { AllBlocks.push_back(&BB); TempsToInstrument.clear(); int NumInsnsPerBB = 0; for (auto &Inst : BB) { if (LooksLikeCodeInBug11395(&Inst)) return false; if (Value *Addr = isInterestingMemoryAccess(&Inst, &IsWrite, &TypeSize, &Alignment)) { if (ClOpt && ClOptSameTemp) { if (!TempsToInstrument.insert(Addr).second) continue; // We've seen this temp in the current BB. } } else if (ClInvalidPointerPairs && isInterestingPointerComparisonOrSubtraction(&Inst)) { PointerComparisonsOrSubtracts.push_back(&Inst); continue; } else if (isa<MemIntrinsic>(Inst)) { // ok, take it. } else { if (isa<AllocaInst>(Inst)) NumAllocas++; CallSite CS(&Inst); if (CS) { // A call inside BB. TempsToInstrument.clear(); if (CS.doesNotReturn()) NoReturnCalls.push_back(CS.getInstruction()); } continue; } ToInstrument.push_back(&Inst); NumInsnsPerBB++; if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break; } } bool UseCalls = CompileKernel || (ClInstrumentationWithCallsThreshold >= 0 && ToInstrument.size() > (unsigned)ClInstrumentationWithCallsThreshold); const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); const DataLayout &DL = F.getParent()->getDataLayout(); ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext(), /*RoundToAlign=*/true); // Instrument. int NumInstrumented = 0; for (auto Inst : ToInstrument) { if (ClDebugMin < 0 || ClDebugMax < 0 || (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) { if (isInterestingMemoryAccess(Inst, &IsWrite, &TypeSize, &Alignment)) instrumentMop(ObjSizeVis, Inst, UseCalls, F.getParent()->getDataLayout()); else instrumentMemIntrinsic(cast<MemIntrinsic>(Inst)); } NumInstrumented++; } FunctionStackPoisoner FSP(F, *this); bool ChangedStack = FSP.runOnFunction(); // We must unpoison the stack before every NoReturn call (throw, _exit, etc). // See e.g. 
http://code.google.com/p/address-sanitizer/issues/detail?id=37 for (auto CI : NoReturnCalls) { IRBuilder<> IRB(CI); IRB.CreateCall(AsanHandleNoReturnFunc, {}); } for (auto Inst : PointerComparisonsOrSubtracts) { instrumentPointerComparisonOrSubtraction(Inst); NumInstrumented++; } bool res = NumInstrumented > 0 || ChangedStack || !NoReturnCalls.empty(); DEBUG(dbgs() << "ASAN done instrumenting: " << res << " " << F << "\n"); return res; } // Workaround for bug 11395: we don't want to instrument stack in functions // with large assembly blobs (32-bit only), otherwise reg alloc may crash. // FIXME: remove once the bug 11395 is fixed. bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) { if (LongSize != 32) return false; CallInst *CI = dyn_cast<CallInst>(I); if (!CI || !CI->isInlineAsm()) return false; if (CI->getNumArgOperands() <= 5) return false; // We have inline assembly with quite a few arguments. return true; } void FunctionStackPoisoner::initializeCallbacks(Module &M) { IRBuilder<> IRB(*C); for (int i = 0; i <= kMaxAsanStackMallocSizeClass; i++) { std::string Suffix = itostr(i); AsanStackMallocFunc[i] = checkSanitizerInterfaceFunction( M.getOrInsertFunction(kAsanStackMallocNameTemplate + Suffix, IntptrTy, IntptrTy, nullptr)); AsanStackFreeFunc[i] = checkSanitizerInterfaceFunction( M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr)); } AsanPoisonStackMemoryFunc = checkSanitizerInterfaceFunction( M.getOrInsertFunction(kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr)); AsanUnpoisonStackMemoryFunc = checkSanitizerInterfaceFunction( M.getOrInsertFunction(kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr)); AsanAllocaPoisonFunc = checkSanitizerInterfaceFunction(M.getOrInsertFunction( kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr)); AsanAllocasUnpoisonFunc = checkSanitizerInterfaceFunction(M.getOrInsertFunction( kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr)); } void FunctionStackPoisoner::poisonRedZones(ArrayRef<uint8_t> ShadowBytes, IRBuilder<> &IRB, Value *ShadowBase, bool DoPoison) { size_t n = ShadowBytes.size(); size_t i = 0; // We need to (un)poison n bytes of stack shadow. Poison as many as we can // using 64-bit stores (if we are on 64-bit arch), then poison the rest // with 32-bit stores, then with 16-byte stores, then with 8-byte stores. for (size_t LargeStoreSizeInBytes = ASan.LongSize / 8; LargeStoreSizeInBytes != 0; LargeStoreSizeInBytes /= 2) { for (; i + LargeStoreSizeInBytes - 1 < n; i += LargeStoreSizeInBytes) { uint64_t Val = 0; for (size_t j = 0; j < LargeStoreSizeInBytes; j++) { if (F.getParent()->getDataLayout().isLittleEndian()) Val |= (uint64_t)ShadowBytes[i + j] << (8 * j); else Val = (Val << 8) | ShadowBytes[i + j]; } if (!Val) continue; Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)); Type *StoreTy = Type::getIntNTy(*C, LargeStoreSizeInBytes * 8); Value *Poison = ConstantInt::get(StoreTy, DoPoison ? 
Val : 0); IRB.CreateStore(Poison, IRB.CreateIntToPtr(Ptr, StoreTy->getPointerTo())); } } } // Fake stack allocator (asan_fake_stack.h) has 11 size classes // for every power of 2 from kMinStackMallocSize to kMaxAsanStackMallocSizeClass static int StackMallocSizeClass(uint64_t LocalStackSize) { assert(LocalStackSize <= kMaxStackMallocSize); uint64_t MaxSize = kMinStackMallocSize; for (int i = 0;; i++, MaxSize *= 2) if (LocalStackSize <= MaxSize) return i; llvm_unreachable("impossible LocalStackSize"); } // Set Size bytes starting from ShadowBase to kAsanStackAfterReturnMagic. // We can not use MemSet intrinsic because it may end up calling the actual // memset. Size is a multiple of 8. // Currently this generates 8-byte stores on x86_64; it may be better to // generate wider stores. void FunctionStackPoisoner::SetShadowToStackAfterReturnInlined( IRBuilder<> &IRB, Value *ShadowBase, int Size) { assert(!(Size % 8)); // kAsanStackAfterReturnMagic is 0xf5. const uint64_t kAsanStackAfterReturnMagic64 = 0xf5f5f5f5f5f5f5f5ULL; for (int i = 0; i < Size; i += 8) { Value *p = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)); IRB.CreateStore( ConstantInt::get(IRB.getInt64Ty(), kAsanStackAfterReturnMagic64), IRB.CreateIntToPtr(p, IRB.getInt64Ty()->getPointerTo())); } } PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue, Instruction *ThenTerm, Value *ValueIfFalse) { PHINode *PHI = IRB.CreatePHI(IntptrTy, 2); BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent(); PHI->addIncoming(ValueIfFalse, CondBlock); BasicBlock *ThenBlock = ThenTerm->getParent(); PHI->addIncoming(ValueIfTrue, ThenBlock); return PHI; } Value *FunctionStackPoisoner::createAllocaForLayout( IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) { AllocaInst *Alloca; if (Dynamic) { Alloca = IRB.CreateAlloca(IRB.getInt8Ty(), ConstantInt::get(IRB.getInt64Ty(), L.FrameSize), "MyAlloca"); } else { Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize), nullptr, "MyAlloca"); assert(Alloca->isStaticAlloca()); } assert((ClRealignStack & (ClRealignStack - 1)) == 0); size_t FrameAlignment = std::max(L.FrameAlignment, (size_t)ClRealignStack); Alloca->setAlignment(FrameAlignment); return IRB.CreatePointerCast(Alloca, IntptrTy); } void FunctionStackPoisoner::createDynamicAllocasInitStorage() { BasicBlock &FirstBB = *F.begin(); IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin())); DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr); IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout); DynamicAllocaLayout->setAlignment(32); } void FunctionStackPoisoner::poisonStack() { assert(AllocaVec.size() > 0 || DynamicAllocaVec.size() > 0); if (ClInstrumentAllocas && DynamicAllocaVec.size() > 0) { // Handle dynamic allocas. createDynamicAllocasInitStorage(); for (auto &AI : DynamicAllocaVec) handleDynamicAllocaCall(AI); unpoisonDynamicAllocas(); } if (AllocaVec.size() == 0) return; int StackMallocIdx = -1; DebugLoc EntryDebugLocation; if (auto SP = getDISubprogram(&F)) EntryDebugLocation = DebugLoc::get(SP->getScopeLine(), 0, SP); Instruction *InsBefore = AllocaVec[0]; IRBuilder<> IRB(InsBefore); IRB.SetCurrentDebugLocation(EntryDebugLocation); SmallVector<ASanStackVariableDescription, 16> SVD; SVD.reserve(AllocaVec.size()); for (AllocaInst *AI : AllocaVec) { ASanStackVariableDescription D = {AI->getName().data(), ASan.getAllocaSizeInBytes(AI), AI->getAlignment(), AI, 0}; SVD.push_back(D); } // Minimal header size (left redzone) is 4 pointers, // i.e. 
32 bytes on 64-bit platforms and 16 bytes in 32-bit platforms. size_t MinHeaderSize = ASan.LongSize / 2; ASanStackFrameLayout L; ComputeASanStackFrameLayout(SVD, 1UL << Mapping.Scale, MinHeaderSize, &L); DEBUG(dbgs() << L.DescriptionString << " --- " << L.FrameSize << "\n"); uint64_t LocalStackSize = L.FrameSize; bool DoStackMalloc = ClUseAfterReturn && !ASan.CompileKernel && LocalStackSize <= kMaxStackMallocSize; // Don't do dynamic alloca or stack malloc in presence of inline asm: // too often it makes assumptions on which registers are available. bool DoDynamicAlloca = ClDynamicAllocaStack && !HasNonEmptyInlineAsm; DoStackMalloc &= !HasNonEmptyInlineAsm; Value *StaticAlloca = DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false); Value *FakeStack; Value *LocalStackBase; if (DoStackMalloc) { // void *FakeStack = __asan_option_detect_stack_use_after_return // ? __asan_stack_malloc_N(LocalStackSize) // : nullptr; // void *LocalStackBase = (FakeStack) ? FakeStack : alloca(LocalStackSize); Constant *OptionDetectUAR = F.getParent()->getOrInsertGlobal( kAsanOptionDetectUAR, IRB.getInt32Ty()); Value *UARIsEnabled = IRB.CreateICmpNE(IRB.CreateLoad(OptionDetectUAR), Constant::getNullValue(IRB.getInt32Ty())); Instruction *Term = SplitBlockAndInsertIfThen(UARIsEnabled, InsBefore, false); IRBuilder<> IRBIf(Term); IRBIf.SetCurrentDebugLocation(EntryDebugLocation); StackMallocIdx = StackMallocSizeClass(LocalStackSize); assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass); Value *FakeStackValue = IRBIf.CreateCall(AsanStackMallocFunc[StackMallocIdx], ConstantInt::get(IntptrTy, LocalStackSize)); IRB.SetInsertPoint(InsBefore); IRB.SetCurrentDebugLocation(EntryDebugLocation); FakeStack = createPHI(IRB, UARIsEnabled, FakeStackValue, Term, ConstantInt::get(IntptrTy, 0)); Value *NoFakeStack = IRB.CreateICmpEQ(FakeStack, Constant::getNullValue(IntptrTy)); Term = SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false); IRBIf.SetInsertPoint(Term); IRBIf.SetCurrentDebugLocation(EntryDebugLocation); Value *AllocaValue = DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca; IRB.SetInsertPoint(InsBefore); IRB.SetCurrentDebugLocation(EntryDebugLocation); LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack); } else { // void *FakeStack = nullptr; // void *LocalStackBase = alloca(LocalStackSize); FakeStack = ConstantInt::get(IntptrTy, 0); LocalStackBase = DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca; } // Insert poison calls for lifetime intrinsics for alloca. bool HavePoisonedAllocas = false; for (const auto &APC : AllocaPoisonCallVec) { assert(APC.InsBefore); assert(APC.AI); IRBuilder<> IRB(APC.InsBefore); poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison); HavePoisonedAllocas |= APC.DoPoison; } // Replace Alloca instructions with base+offset. for (const auto &Desc : SVD) { AllocaInst *AI = Desc.AI; Value *NewAllocaPtr = IRB.CreateIntToPtr( IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)), AI->getType()); replaceDbgDeclareForAlloca(AI, NewAllocaPtr, DIB, /*Deref=*/true); AI->replaceAllUsesWith(NewAllocaPtr); } // The left-most redzone has enough space for at least 4 pointers. // Write the Magic value to redzone[0]. Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy); IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic), BasePlus0); // Write the frame description constant to redzone[1]. 
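// (Header recap: redzone[0] holds kCurrentStackFrameMagic, written above and
// overwritten with kRetiredStackFrameMagic before each return; redzone[1]
// holds the frame description string, written here; redzone[2] holds the
// function's PC, written next.)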
Value *BasePlus1 = IRB.CreateIntToPtr( IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, ASan.LongSize / 8)), IntptrPtrTy); GlobalVariable *StackDescriptionGlobal = createPrivateGlobalForString(*F.getParent(), L.DescriptionString, /*AllowMerging*/ true); Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy); IRB.CreateStore(Description, BasePlus1); // Write the PC to redzone[2]. Value *BasePlus2 = IRB.CreateIntToPtr( IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)), IntptrPtrTy); IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2); // Poison the stack redzones at the entry. Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB); poisonRedZones(L.ShadowBytes, IRB, ShadowBase, true); // (Un)poison the stack before all ret instructions. for (auto Ret : RetVec) { IRBuilder<> IRBRet(Ret); // Mark the current frame as retired. IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic), BasePlus0); if (DoStackMalloc) { assert(StackMallocIdx >= 0); // if FakeStack != 0 // LocalStackBase == FakeStack // // In use-after-return mode, poison the whole stack frame. // if StackMallocIdx <= 4 // // For small sizes inline the whole thing: // memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize); // **SavedFlagPtr(FakeStack) = 0 // else // __asan_stack_free_N(FakeStack, LocalStackSize) // else // <This is not a fake stack; unpoison the redzones> Value *Cmp = IRBRet.CreateICmpNE(FakeStack, Constant::getNullValue(IntptrTy)); TerminatorInst *ThenTerm, *ElseTerm; SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm); IRBuilder<> IRBPoison(ThenTerm); if (StackMallocIdx <= 4) { int ClassSize = kMinStackMallocSize << StackMallocIdx; SetShadowToStackAfterReturnInlined(IRBPoison, ShadowBase, ClassSize >> Mapping.Scale); Value *SavedFlagPtrPtr = IRBPoison.CreateAdd( FakeStack, ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8)); Value *SavedFlagPtr = IRBPoison.CreateLoad( IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy)); IRBPoison.CreateStore( Constant::getNullValue(IRBPoison.getInt8Ty()), IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy())); } else { // For larger frames call __asan_stack_free_*. IRBPoison.CreateCall( AsanStackFreeFunc[StackMallocIdx], {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)}); } IRBuilder<> IRBElse(ElseTerm); poisonRedZones(L.ShadowBytes, IRBElse, ShadowBase, false); } else if (HavePoisonedAllocas) { // If we poisoned some allocas in llvm.lifetime analysis, // unpoison whole stack frame now. poisonAlloca(LocalStackBase, LocalStackSize, IRBRet, false); } else { poisonRedZones(L.ShadowBytes, IRBRet, ShadowBase, false); } } // We are done. Remove the old unused alloca instructions. for (auto AI : AllocaVec) AI->eraseFromParent(); } void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison) { // For now just insert the call to ASan runtime. Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy); Value *SizeArg = ConstantInt::get(IntptrTy, Size); IRB.CreateCall( DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc, {AddrArg, SizeArg}); } // Handling llvm.lifetime intrinsics for a given %alloca: // (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca. 
// (2) if %size is constant, poison memory for llvm.lifetime.end (to detect // invalid accesses) and unpoison it for llvm.lifetime.start (the memory // could be poisoned by previous llvm.lifetime.end instruction, as the // variable may go in and out of scope several times, e.g. in loops). // (3) if we poisoned at least one %alloca in a function, // unpoison the whole stack frame at function exit. AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) { if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) // We're intested only in allocas we can handle. return ASan.isInterestingAlloca(*AI) ? AI : nullptr; // See if we've already calculated (or started to calculate) alloca for a // given value. AllocaForValueMapTy::iterator I = AllocaForValue.find(V); if (I != AllocaForValue.end()) return I->second; // Store 0 while we're calculating alloca for value V to avoid // infinite recursion if the value references itself. AllocaForValue[V] = nullptr; AllocaInst *Res = nullptr; if (CastInst *CI = dyn_cast<CastInst>(V)) Res = findAllocaForValue(CI->getOperand(0)); else if (PHINode *PN = dyn_cast<PHINode>(V)) { for (Value *IncValue : PN->incoming_values()) { // Allow self-referencing phi-nodes. if (IncValue == PN) continue; AllocaInst *IncValueAI = findAllocaForValue(IncValue); // AI for incoming values should exist and should all be equal. if (IncValueAI == nullptr || (Res != nullptr && IncValueAI != Res)) return nullptr; Res = IncValueAI; } } if (Res) AllocaForValue[V] = Res; return Res; } void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) { IRBuilder<> IRB(AI); const unsigned Align = std::max(kAllocaRzSize, AI->getAlignment()); const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1; Value *Zero = Constant::getNullValue(IntptrTy); Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize); Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask); // Since we need to extend alloca with additional memory to locate // redzones, and OldSize is number of allocated blocks with // ElementSize size, get allocated memory size in bytes by // OldSize * ElementSize. const unsigned ElementSize = F.getParent()->getDataLayout().getTypeAllocSize(AI->getAllocatedType()); Value *OldSize = IRB.CreateMul(IRB.CreateIntCast(AI->getArraySize(), IntptrTy, false), ConstantInt::get(IntptrTy, ElementSize)); // PartialSize = OldSize % 32 Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask); // Misalign = kAllocaRzSize - PartialSize; Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize); // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0; Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize); Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero); // AdditionalChunkSize = Align + PartialPadding + kAllocaRzSize // Align is added to locate left redzone, PartialPadding for possible // partial redzone and kAllocaRzSize for right redzone respectively. Value *AdditionalChunkSize = IRB.CreateAdd( ConstantInt::get(IntptrTy, Align + kAllocaRzSize), PartialPadding); Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize); // Insert new alloca with new NewSize and Align params. AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize); NewAlloca->setAlignment(Align); // NewAddress = Address + Align Value *NewAddress = IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy), ConstantInt::get(IntptrTy, Align)); // Insert __asan_alloca_poison call for new created alloca. 
IRB.CreateCall(AsanAllocaPoisonFunc, {NewAddress, OldSize}); // Store the last alloca's address to DynamicAllocaLayout. We'll need this // for unpoisoning stuff. IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout); Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType()); // Replace all uses of AddressReturnedByAlloca with NewAddressPtr. AI->replaceAllUsesWith(NewAddressPtr); // We are done. Erase old alloca from parent. AI->eraseFromParent(); } // isSafeAccess returns true if Addr is always inbounds with respect to its // base object. For example, it is a field access or an array access with // constant inbounds index. bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr, uint64_t TypeSize) const { SizeOffsetType SizeOffset = ObjSizeVis.compute(Addr); if (!ObjSizeVis.bothKnown(SizeOffset)) return false; uint64_t Size = SizeOffset.first.getZExtValue(); int64_t Offset = SizeOffset.second.getSExtValue(); // Three checks are required to ensure safety: // . Offset >= 0 (since the offset is given from the base ptr) // . Size >= Offset (unsigned) // . Size - Offset >= NeededSize (unsigned) return Offset >= 0 && Size >= uint64_t(Offset) && Size - uint64_t(Offset) >= TypeSize / 8; }
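The stack-poisoning code above emits IR for the pseudo-code spelled out in its own comments: prefer a fake-stack frame when use-after-return detection is enabled, fall back to the real alloca otherwise, and on return either poison the whole fake frame (inline for small size classes, via __asan_stack_free_N for larger ones) or simply unpoison the redzones. As a reading aid, here is a minimal, self-contained C++ sketch of that control flow at the runtime level. The helpers fakeStackMalloc, fakeStackFree and poisonShadow, and the Frame struct, are hypothetical stand-ins, not the actual __asan_* entry points, and the 0xf5 after-return magic is the conventional ASan value rather than something defined in this section.

#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Hypothetical stand-ins for the runtime pieces referenced above.
static bool optionDetectUseAfterReturn = true; // models __asan_option_detect_stack_use_after_return
static uintptr_t fakeStackMalloc(int /*ClassId*/, std::size_t Size) {
  return reinterpret_cast<uintptr_t>(std::malloc(Size)); // may return 0, like the real runtime
}
static void fakeStackFree(int /*ClassId*/, uintptr_t P, std::size_t /*Size*/) {
  std::free(reinterpret_cast<void *>(P));
}
static void poisonShadow(uintptr_t /*Base*/, std::size_t /*Size*/, uint8_t /*Magic*/) {
  // The pass writes Magic into the shadow of [Base, Base + Size); omitted here.
}

struct Frame {
  uintptr_t FakeStack;      // 0 when the real stack is used
  uintptr_t LocalStackBase; // base of the (possibly fake) frame
};

// Mirrors the instrumented prologue: FakeStack is non-null only when
// use-after-return detection is on and the size-class allocation succeeds.
static Frame enterFrame(uintptr_t RealAlloca, std::size_t LocalStackSize, int StackMallocIdx) {
  Frame F{0, RealAlloca};
  if (optionDetectUseAfterReturn)
    F.FakeStack = fakeStackMalloc(StackMallocIdx, LocalStackSize);
  if (F.FakeStack)
    F.LocalStackBase = F.FakeStack;
  return F;
}

// Mirrors the instrumented epilogue. The real code additionally clears a
// saved-flag word at the end of small fake frames; that detail is omitted.
static void leaveFrame(const Frame &F, std::size_t LocalStackSize, int StackMallocIdx) {
  if (F.FakeStack) {
    if (StackMallocIdx <= 4)
      poisonShadow(F.FakeStack, LocalStackSize, /*assumed after-return magic*/ 0xf5);
    else
      fakeStackFree(StackMallocIdx, F.FakeStack, LocalStackSize);
  } else {
    poisonShadow(F.LocalStackBase, LocalStackSize, /*clean*/ 0);
  }
}

int main() {
  alignas(32) char Real[256];
  Frame F = enterFrame(reinterpret_cast<uintptr_t>(Real), sizeof(Real), /*StackMallocIdx=*/2);
  std::printf("fake=%d base=%p\n", F.FakeStack != 0, reinterpret_cast<void *>(F.LocalStackBase));
  leaveFrame(F, sizeof(Real), /*StackMallocIdx=*/2);
  return 0;
}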
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Instrumentation/SafeStack.cpp
//===-- SafeStack.cpp - Safe Stack Insertion ------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass splits the stack into the safe stack (kept as-is for LLVM backend) // and the unsafe stack (explicitly allocated and managed through the runtime // support library). // // http://clang.llvm.org/docs/SafeStack.html // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Instrumentation.h" #include "llvm/ADT/Statistic.h" #include "llvm/ADT/Triple.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/DIBuilder.h" #include "llvm/IR/Function.h" #include "llvm/IR/InstIterator.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Module.h" #include "llvm/Pass.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/Format.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_os_ostream.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/ModuleUtils.h" using namespace llvm; #define DEBUG_TYPE "safestack" namespace llvm { STATISTIC(NumFunctions, "Total number of functions"); STATISTIC(NumUnsafeStackFunctions, "Number of functions with unsafe stack"); STATISTIC(NumUnsafeStackRestorePointsFunctions, "Number of functions that use setjmp or exceptions"); STATISTIC(NumAllocas, "Total number of allocas"); STATISTIC(NumUnsafeStaticAllocas, "Number of unsafe static allocas"); STATISTIC(NumUnsafeDynamicAllocas, "Number of unsafe dynamic allocas"); STATISTIC(NumUnsafeStackRestorePoints, "Number of setjmps and landingpads"); } // namespace llvm namespace { /// Check whether a given alloca instruction (AI) should be put on the safe /// stack or not. The function analyzes all uses of AI and checks whether it is /// only accessed in a memory safe way (as decided statically). bool IsSafeStackAlloca(const AllocaInst *AI) { // Go through all uses of this alloca and check whether all accesses to the // allocated object are statically known to be memory safe and, hence, the // object can be placed on the safe stack. SmallPtrSet<const Value *, 16> Visited; SmallVector<const Instruction *, 8> WorkList; WorkList.push_back(AI); // A DFS search through all uses of the alloca in bitcasts/PHI/GEPs/etc. while (!WorkList.empty()) { const Instruction *V = WorkList.pop_back_val(); for (const Use &UI : V->uses()) { auto I = cast<const Instruction>(UI.getUser()); assert(V == UI.get()); switch (I->getOpcode()) { case Instruction::Load: // Loading from a pointer is safe. break; case Instruction::VAArg: // "va-arg" from a pointer is safe. break; case Instruction::Store: if (V == I->getOperand(0)) // Stored the pointer - conservatively assume it may be unsafe. return false; // Storing to the pointee is safe. break; case Instruction::GetElementPtr: if (!cast<const GetElementPtrInst>(I)->hasAllConstantIndices()) // GEP with non-constant indices can lead to memory errors. // This also applies to inbounds GEPs, as the inbounds attribute // represents an assumption that the address is in bounds, rather than // an assertion that it is. 
return false; // We assume that GEP on static alloca with constant indices is safe, // otherwise a compiler would detect it and warn during compilation. if (!isa<const ConstantInt>(AI->getArraySize())) // However, if the array size itself is not constant, the access // might still be unsafe at runtime. return false; /* fallthrough */ case Instruction::BitCast: case Instruction::IntToPtr: case Instruction::PHI: case Instruction::PtrToInt: case Instruction::Select: // The object can be safe or not, depending on how the result of the // instruction is used. if (Visited.insert(I).second) WorkList.push_back(cast<const Instruction>(I)); break; case Instruction::Call: case Instruction::Invoke: { // FIXME: add support for memset and memcpy intrinsics. ImmutableCallSite CS(I); // LLVM 'nocapture' attribute is only set for arguments whose address // is not stored, passed around, or used in any other non-trivial way. // We assume that passing a pointer to an object as a 'nocapture' // argument is safe. // FIXME: a more precise solution would require an interprocedural // analysis here, which would look at all uses of an argument inside // the function being called. ImmutableCallSite::arg_iterator B = CS.arg_begin(), E = CS.arg_end(); for (ImmutableCallSite::arg_iterator A = B; A != E; ++A) if (A->get() == V && !CS.doesNotCapture(A - B)) // The parameter is not marked 'nocapture' - unsafe. return false; continue; } default: // The object is unsafe if it is used in any other way. return false; } } } // All uses of the alloca are safe, we can place it on the safe stack. return true; } /// The SafeStack pass splits the stack of each function into the /// safe stack, which is only accessed through memory safe dereferences /// (as determined statically), and the unsafe stack, which contains all /// local variables that are accessed in unsafe ways. class SafeStack : public FunctionPass { const DataLayout *DL; Type *StackPtrTy; Type *IntPtrTy; Type *Int32Ty; Type *Int8Ty; Constant *UnsafeStackPtr = nullptr; /// Unsafe stack alignment. Each stack frame must ensure that the stack is /// aligned to this value. We need to re-align the unsafe stack if the /// alignment of any object on the stack exceeds this value. /// /// 16 seems like a reasonable upper bound on the alignment of objects that we /// might expect to appear on the stack on most common targets. enum { StackAlignment = 16 }; /// \brief Build a constant representing a pointer to the unsafe stack /// pointer. Constant *getOrCreateUnsafeStackPtr(Module &M); /// \brief Find all static allocas, dynamic allocas, return instructions and /// stack restore points (exception unwind blocks and setjmp calls) in the /// given function and append them to the respective vectors. void findInsts(Function &F, SmallVectorImpl<AllocaInst *> &StaticAllocas, SmallVectorImpl<AllocaInst *> &DynamicAllocas, SmallVectorImpl<ReturnInst *> &Returns, SmallVectorImpl<Instruction *> &StackRestorePoints); /// \brief Allocate space for all static allocas in \p StaticAllocas, /// replace allocas with pointers into the unsafe stack and generate code to /// restore the stack pointer before all return instructions in \p Returns. /// /// \returns A pointer to the top of the unsafe stack after all unsafe static /// allocas are allocated. Value *moveStaticAllocasToUnsafeStack(Function &F, ArrayRef<AllocaInst *> StaticAllocas, ArrayRef<ReturnInst *> Returns); /// \brief Generate code to restore the stack after all stack restore points /// in \p StackRestorePoints. 
/// /// \returns A local variable in which to maintain the dynamic top of the /// unsafe stack if needed. AllocaInst * createStackRestorePoints(Function &F, ArrayRef<Instruction *> StackRestorePoints, Value *StaticTop, bool NeedDynamicTop); /// \brief Replace all allocas in \p DynamicAllocas with code to allocate /// space dynamically on the unsafe stack and store the dynamic unsafe stack /// top to \p DynamicTop if non-null. void moveDynamicAllocasToUnsafeStack(Function &F, Value *UnsafeStackPtr, AllocaInst *DynamicTop, ArrayRef<AllocaInst *> DynamicAllocas); public: static char ID; // Pass identification, replacement for typeid. SafeStack() : FunctionPass(ID), DL(nullptr) { initializeSafeStackPass(*PassRegistry::getPassRegistry()); } virtual void getAnalysisUsage(AnalysisUsage &AU) const { AU.addRequired<AliasAnalysis>(); } virtual bool doInitialization(Module &M) { DL = &M.getDataLayout(); StackPtrTy = Type::getInt8PtrTy(M.getContext()); IntPtrTy = DL->getIntPtrType(M.getContext()); Int32Ty = Type::getInt32Ty(M.getContext()); Int8Ty = Type::getInt8Ty(M.getContext()); return false; } bool runOnFunction(Function &F); }; // class SafeStack Constant *SafeStack::getOrCreateUnsafeStackPtr(Module &M) { // The unsafe stack pointer is stored in a global variable with a magic name. const char *kUnsafeStackPtrVar = "__safestack_unsafe_stack_ptr"; auto UnsafeStackPtr = dyn_cast_or_null<GlobalVariable>(M.getNamedValue(kUnsafeStackPtrVar)); if (!UnsafeStackPtr) { // The global variable is not defined yet, define it ourselves. // We use the initial-exec TLS model because we do not support the variable // living anywhere other than in the main executable. UnsafeStackPtr = new GlobalVariable( /*Module=*/M, /*Type=*/StackPtrTy, /*isConstant=*/false, /*Linkage=*/GlobalValue::ExternalLinkage, /*Initializer=*/0, /*Name=*/kUnsafeStackPtrVar, /*InsertBefore=*/nullptr, /*ThreadLocalMode=*/GlobalValue::InitialExecTLSModel); } else { // The variable exists, check its type and attributes. if (UnsafeStackPtr->getValueType() != StackPtrTy) { report_fatal_error(Twine(kUnsafeStackPtrVar) + " must have void* type"); } if (!UnsafeStackPtr->isThreadLocal()) { report_fatal_error(Twine(kUnsafeStackPtrVar) + " must be thread-local"); } } return UnsafeStackPtr; } void SafeStack::findInsts(Function &F, SmallVectorImpl<AllocaInst *> &StaticAllocas, SmallVectorImpl<AllocaInst *> &DynamicAllocas, SmallVectorImpl<ReturnInst *> &Returns, SmallVectorImpl<Instruction *> &StackRestorePoints) { for (Instruction &I : inst_range(&F)) { if (auto AI = dyn_cast<AllocaInst>(&I)) { ++NumAllocas; if (IsSafeStackAlloca(AI)) continue; if (AI->isStaticAlloca()) { ++NumUnsafeStaticAllocas; StaticAllocas.push_back(AI); } else { ++NumUnsafeDynamicAllocas; DynamicAllocas.push_back(AI); } } else if (auto RI = dyn_cast<ReturnInst>(&I)) { Returns.push_back(RI); } else if (auto CI = dyn_cast<CallInst>(&I)) { // setjmps require stack restore. if (CI->getCalledFunction() && CI->canReturnTwice()) StackRestorePoints.push_back(CI); } else if (auto LP = dyn_cast<LandingPadInst>(&I)) { // Exception landing pads require stack restore. 
StackRestorePoints.push_back(LP); } else if (auto II = dyn_cast<IntrinsicInst>(&I)) { if (II->getIntrinsicID() == Intrinsic::gcroot) llvm::report_fatal_error( "gcroot intrinsic not compatible with safestack attribute"); } } } AllocaInst * SafeStack::createStackRestorePoints(Function &F, ArrayRef<Instruction *> StackRestorePoints, Value *StaticTop, bool NeedDynamicTop) { if (StackRestorePoints.empty()) return nullptr; IRBuilder<> IRB(StaticTop ? cast<Instruction>(StaticTop)->getNextNode() : (Instruction *)F.getEntryBlock().getFirstInsertionPt()); // We need the current value of the shadow stack pointer to restore // after longjmp or exception catching. // FIXME: On some platforms this could be handled by the longjmp/exception // runtime itself. AllocaInst *DynamicTop = nullptr; if (NeedDynamicTop) // If we also have dynamic alloca's, the stack pointer value changes // throughout the function. For now we store it in an alloca. DynamicTop = IRB.CreateAlloca(StackPtrTy, /*ArraySize=*/nullptr, "unsafe_stack_dynamic_ptr"); if (!StaticTop) // We need the original unsafe stack pointer value, even if there are // no unsafe static allocas. StaticTop = IRB.CreateLoad(UnsafeStackPtr, false, "unsafe_stack_ptr"); if (NeedDynamicTop) IRB.CreateStore(StaticTop, DynamicTop); // Restore current stack pointer after longjmp/exception catch. for (Instruction *I : StackRestorePoints) { ++NumUnsafeStackRestorePoints; IRB.SetInsertPoint(cast<Instruction>(I->getNextNode())); Value *CurrentTop = DynamicTop ? IRB.CreateLoad(DynamicTop) : StaticTop; IRB.CreateStore(CurrentTop, UnsafeStackPtr); } return DynamicTop; } Value * SafeStack::moveStaticAllocasToUnsafeStack(Function &F, ArrayRef<AllocaInst *> StaticAllocas, ArrayRef<ReturnInst *> Returns) { if (StaticAllocas.empty()) return nullptr; IRBuilder<> IRB(F.getEntryBlock().getFirstInsertionPt()); DIBuilder DIB(*F.getParent()); // We explicitly compute and set the unsafe stack layout for all unsafe // static alloca instructions. We save the unsafe "base pointer" in the // prologue into a local variable and restore it in the epilogue. // Load the current stack pointer (we'll also use it as a base pointer). // FIXME: use a dedicated register for it ? Instruction *BasePointer = IRB.CreateLoad(UnsafeStackPtr, false, "unsafe_stack_ptr"); assert(BasePointer->getType() == StackPtrTy); for (ReturnInst *RI : Returns) { IRB.SetInsertPoint(RI); IRB.CreateStore(BasePointer, UnsafeStackPtr); } // Compute maximum alignment among static objects on the unsafe stack. unsigned MaxAlignment = 0; for (AllocaInst *AI : StaticAllocas) { Type *Ty = AI->getAllocatedType(); unsigned Align = std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI->getAlignment()); if (Align > MaxAlignment) MaxAlignment = Align; } if (MaxAlignment > StackAlignment) { // Re-align the base pointer according to the max requested alignment. assert(isPowerOf2_32(MaxAlignment)); IRB.SetInsertPoint(cast<Instruction>(BasePointer->getNextNode())); BasePointer = cast<Instruction>(IRB.CreateIntToPtr( IRB.CreateAnd(IRB.CreatePtrToInt(BasePointer, IntPtrTy), ConstantInt::get(IntPtrTy, ~uint64_t(MaxAlignment - 1))), StackPtrTy)); } // Allocate space for every unsafe static AllocaInst on the unsafe stack. int64_t StaticOffset = 0; // Current stack top. 
for (AllocaInst *AI : StaticAllocas) { IRB.SetInsertPoint(AI); auto CArraySize = cast<ConstantInt>(AI->getArraySize()); Type *Ty = AI->getAllocatedType(); uint64_t Size = DL->getTypeAllocSize(Ty) * CArraySize->getZExtValue(); if (Size == 0) Size = 1; // Don't create zero-sized stack objects. // Ensure the object is properly aligned. unsigned Align = std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI->getAlignment()); // Add alignment. // NOTE: we ensure that BasePointer itself is aligned to >= Align. StaticOffset += Size; StaticOffset = RoundUpToAlignment(StaticOffset, Align); Value *Off = IRB.CreateGEP(BasePointer, // BasePointer is i8* ConstantInt::get(Int32Ty, -StaticOffset)); Value *NewAI = IRB.CreateBitCast(Off, AI->getType(), AI->getName()); if (AI->hasName() && isa<Instruction>(NewAI)) cast<Instruction>(NewAI)->takeName(AI); // Replace alloc with the new location. replaceDbgDeclareForAlloca(AI, NewAI, DIB, /*Deref=*/true); AI->replaceAllUsesWith(NewAI); AI->eraseFromParent(); } // Re-align BasePointer so that our callees would see it aligned as // expected. // FIXME: no need to update BasePointer in leaf functions. StaticOffset = RoundUpToAlignment(StaticOffset, StackAlignment); // Update shadow stack pointer in the function epilogue. IRB.SetInsertPoint(cast<Instruction>(BasePointer->getNextNode())); Value *StaticTop = IRB.CreateGEP(BasePointer, ConstantInt::get(Int32Ty, -StaticOffset), "unsafe_stack_static_top"); IRB.CreateStore(StaticTop, UnsafeStackPtr); return StaticTop; } void SafeStack::moveDynamicAllocasToUnsafeStack( Function &F, Value *UnsafeStackPtr, AllocaInst *DynamicTop, ArrayRef<AllocaInst *> DynamicAllocas) { DIBuilder DIB(*F.getParent()); for (AllocaInst *AI : DynamicAllocas) { IRBuilder<> IRB(AI); // Compute the new SP value (after AI). Value *ArraySize = AI->getArraySize(); if (ArraySize->getType() != IntPtrTy) ArraySize = IRB.CreateIntCast(ArraySize, IntPtrTy, false); Type *Ty = AI->getAllocatedType(); uint64_t TySize = DL->getTypeAllocSize(Ty); Value *Size = IRB.CreateMul(ArraySize, ConstantInt::get(IntPtrTy, TySize)); Value *SP = IRB.CreatePtrToInt(IRB.CreateLoad(UnsafeStackPtr), IntPtrTy); SP = IRB.CreateSub(SP, Size); // Align the SP value to satisfy the AllocaInst, type and stack alignments. unsigned Align = std::max( std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI->getAlignment()), (unsigned)StackAlignment); assert(isPowerOf2_32(Align)); Value *NewTop = IRB.CreateIntToPtr( IRB.CreateAnd(SP, ConstantInt::get(IntPtrTy, ~uint64_t(Align - 1))), StackPtrTy); // Save the stack pointer. IRB.CreateStore(NewTop, UnsafeStackPtr); if (DynamicTop) IRB.CreateStore(NewTop, DynamicTop); Value *NewAI = IRB.CreateIntToPtr(SP, AI->getType()); if (AI->hasName() && isa<Instruction>(NewAI)) NewAI->takeName(AI); replaceDbgDeclareForAlloca(AI, NewAI, DIB, /*Deref=*/true); AI->replaceAllUsesWith(NewAI); AI->eraseFromParent(); } if (!DynamicAllocas.empty()) { // Now go through the instructions again, replacing stacksave/stackrestore. 
for (inst_iterator It = inst_begin(&F), Ie = inst_end(&F); It != Ie;) { Instruction *I = &*(It++); auto II = dyn_cast<IntrinsicInst>(I); if (!II) continue; if (II->getIntrinsicID() == Intrinsic::stacksave) { IRBuilder<> IRB(II); Instruction *LI = IRB.CreateLoad(UnsafeStackPtr); LI->takeName(II); II->replaceAllUsesWith(LI); II->eraseFromParent(); } else if (II->getIntrinsicID() == Intrinsic::stackrestore) { IRBuilder<> IRB(II); Instruction *SI = IRB.CreateStore(II->getArgOperand(0), UnsafeStackPtr); SI->takeName(II); assert(II->use_empty()); II->eraseFromParent(); } } } } bool SafeStack::runOnFunction(Function &F) { auto AA = &getAnalysis<AliasAnalysis>(); DEBUG(dbgs() << "[SafeStack] Function: " << F.getName() << "\n"); if (!F.hasFnAttribute(Attribute::SafeStack)) { DEBUG(dbgs() << "[SafeStack] safestack is not requested" " for this function\n"); return false; } if (F.isDeclaration()) { DEBUG(dbgs() << "[SafeStack] function definition" " is not available\n"); return false; } { // Make sure the regular stack protector won't run on this function // (safestack attribute takes precedence). AttrBuilder B; B.addAttribute(Attribute::StackProtect) .addAttribute(Attribute::StackProtectReq) .addAttribute(Attribute::StackProtectStrong); F.removeAttributes( AttributeSet::FunctionIndex, AttributeSet::get(F.getContext(), AttributeSet::FunctionIndex, B)); } if (AA->onlyReadsMemory(&F)) { // XXX: we don't protect against information leak attacks for now. DEBUG(dbgs() << "[SafeStack] function only reads memory\n"); return false; } ++NumFunctions; SmallVector<AllocaInst *, 16> StaticAllocas; SmallVector<AllocaInst *, 4> DynamicAllocas; SmallVector<ReturnInst *, 4> Returns; // Collect all points where stack gets unwound and needs to be restored // This is only necessary because the runtime (setjmp and unwind code) is // not aware of the unsafe stack and won't unwind/restore it prorerly. // To work around this problem without changing the runtime, we insert // instrumentation to restore the unsafe stack pointer when necessary. SmallVector<Instruction *, 4> StackRestorePoints; // Find all static and dynamic alloca instructions that must be moved to the // unsafe stack, all return instructions and stack restore points. findInsts(F, StaticAllocas, DynamicAllocas, Returns, StackRestorePoints); if (StaticAllocas.empty() && DynamicAllocas.empty() && StackRestorePoints.empty()) return false; // Nothing to do in this function. if (!StaticAllocas.empty() || !DynamicAllocas.empty()) ++NumUnsafeStackFunctions; // This function has the unsafe stack. if (!StackRestorePoints.empty()) ++NumUnsafeStackRestorePointsFunctions; if (!UnsafeStackPtr) UnsafeStackPtr = getOrCreateUnsafeStackPtr(*F.getParent()); // The top of the unsafe stack after all unsafe static allocas are allocated. Value *StaticTop = moveStaticAllocasToUnsafeStack(F, StaticAllocas, Returns); // Safe stack object that stores the current unsafe stack top. It is updated // as unsafe dynamic (non-constant-sized) allocas are allocated and freed. // This is only needed if we need to restore stack pointer after longjmp // or exceptions, and we have dynamic allocations. // FIXME: a better alternative might be to store the unsafe stack pointer // before setjmp / invoke instructions. AllocaInst *DynamicTop = createStackRestorePoints( F, StackRestorePoints, StaticTop, !DynamicAllocas.empty()); // Handle dynamic allocas. 
moveDynamicAllocasToUnsafeStack(F, UnsafeStackPtr, DynamicTop, DynamicAllocas); DEBUG(dbgs() << "[SafeStack] safestack applied\n"); return true; } } // end anonymous namespace char SafeStack::ID = 0; INITIALIZE_PASS_BEGIN(SafeStack, "safe-stack", "Safe Stack instrumentation pass", false, false) INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) INITIALIZE_PASS_END(SafeStack, "safe-stack", "Safe Stack instrumentation pass", false, false) FunctionPass *llvm::createSafeStackPass() { return new SafeStack(); }
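The static-frame layout in moveStaticAllocasToUnsafeStack reduces to simple offset arithmetic: each unsafe object is placed below the saved base pointer, the running offset is rounded up to the object's alignment after its size is added, and the final top is rounded up to the 16-byte StackAlignment before being stored back through the unsafe stack pointer. The following is a minimal sketch of just that arithmetic, assuming the base pointer is already aligned to the maximum object alignment; the names roundUpToAlignment, Object and layoutUnsafeFrame are illustrative and not part of the pass.

#include <cstdint>
#include <cstdio>
#include <vector>

// Round Value up to the next multiple of Align (a power of two), mirroring the
// RoundUpToAlignment() calls used above.
static uint64_t roundUpToAlignment(uint64_t Value, uint64_t Align) {
  return (Value + Align - 1) & ~(Align - 1);
}

// Hypothetical stand-in for one unsafe static alloca: its size and alignment.
struct Object { uint64_t Size; uint64_t Align; };

// Lay objects out below the base pointer exactly as the pass does: add the
// size, round the offset up to the object's alignment, and address the object
// at Base - Offset. Returns the final offset rounded up to StackAlignment.
static uint64_t layoutUnsafeFrame(uint64_t Base, const std::vector<Object> &Objs) {
  uint64_t StaticOffset = 0;
  for (const Object &O : Objs) {
    uint64_t Size = O.Size ? O.Size : 1; // no zero-sized stack objects
    StaticOffset += Size;
    StaticOffset = roundUpToAlignment(StaticOffset, O.Align);
    std::printf("object at base-%llu (addr %#llx)\n",
                (unsigned long long)StaticOffset,
                (unsigned long long)(Base - StaticOffset));
  }
  return roundUpToAlignment(StaticOffset, /*StackAlignment=*/16);
}

int main() {
  uint64_t Base = 0x7fff0000; // pretend unsafe stack pointer, suitably aligned
  std::vector<Object> Objs = {{13, 8}, {32, 16}, {1, 4}};
  uint64_t Top = layoutUnsafeFrame(Base, Objs);
  std::printf("new unsafe stack top: base-%llu\n", (unsigned long long)Top);
  return 0;
}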
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Instrumentation/MemorySanitizer.cpp
//===-- MemorySanitizer.cpp - detector of uninitialized reads -------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// \file /// This file is a part of MemorySanitizer, a detector of uninitialized /// reads. /// /// The algorithm of the tool is similar to Memcheck /// (http://goo.gl/QKbem). We associate a few shadow bits with every /// byte of the application memory, poison the shadow of the malloc-ed /// or alloca-ed memory, load the shadow bits on every memory read, /// propagate the shadow bits through some of the arithmetic /// instruction (including MOV), store the shadow bits on every memory /// write, report a bug on some other instructions (e.g. JMP) if the /// associated shadow is poisoned. /// /// But there are differences too. The first and the major one: /// compiler instrumentation instead of binary instrumentation. This /// gives us much better register allocation, possible compiler /// optimizations and a fast start-up. But this brings the major issue /// as well: msan needs to see all program events, including system /// calls and reads/writes in system libraries, so we either need to /// compile *everything* with msan or use a binary translation /// component (e.g. DynamoRIO) to instrument pre-built libraries. /// Another difference from Memcheck is that we use 8 shadow bits per /// byte of application memory and use a direct shadow mapping. This /// greatly simplifies the instrumentation code and avoids races on /// shadow updates (Memcheck is single-threaded so races are not a /// concern there. Memcheck uses 2 shadow bits per byte with a slow /// path storage that uses 8 bits per byte). /// /// The default value of shadow is 0, which means "clean" (not poisoned). /// /// Every module initializer should call __msan_init to ensure that the /// shadow memory is ready. On error, __msan_warning is called. Since /// parameters and return values may be passed via registers, we have a /// specialized thread-local shadow for return values /// (__msan_retval_tls) and parameters (__msan_param_tls). /// /// Origin tracking. /// /// MemorySanitizer can track origins (allocation points) of all uninitialized /// values. This behavior is controlled with a flag (msan-track-origins) and is /// disabled by default. /// /// Origins are 4-byte values created and interpreted by the runtime library. /// They are stored in a second shadow mapping, one 4-byte value for 4 bytes /// of application memory. Propagation of origins is basically a bunch of /// "select" instructions that pick the origin of a dirty argument, if an /// instruction has one. /// /// Every 4 aligned, consecutive bytes of application memory have one origin /// value associated with them. If these bytes contain uninitialized data /// coming from 2 different allocations, the last store wins. Because of this, /// MemorySanitizer reports can show unrelated origins, but this is unlikely in /// practice. /// /// Origins are meaningless for fully initialized values, so MemorySanitizer /// avoids storing origin to memory when a fully initialized value is stored. /// This way it avoids needless overwritting origin of the 4-byte region on /// a short (i.e. 1 byte) clean store, and it is also good for performance. /// /// Atomic handling. 
/// /// Ideally, every atomic store of application value should update the /// corresponding shadow location in an atomic way. Unfortunately, atomic store /// of two disjoint locations can not be done without severe slowdown. /// /// Therefore, we implement an approximation that may err on the safe side. /// In this implementation, every atomically accessed location in the program /// may only change from (partially) uninitialized to fully initialized, but /// not the other way around. We load the shadow _after_ the application load, /// and we store the shadow _before_ the app store. Also, we always store clean /// shadow (if the application store is atomic). This way, if the store-load /// pair constitutes a happens-before arc, shadow store and load are correctly /// ordered such that the load will get either the value that was stored, or /// some later value (which is always clean). /// /// This does not work very well with Compare-And-Swap (CAS) and /// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW /// must store the new shadow before the app operation, and load the shadow /// after the app operation. Computers don't work this way. Current /// implementation ignores the load aspect of CAS/RMW, always returning a clean /// value. It implements the store part as a simple atomic store by storing a /// clean shadow. #include "llvm/ADT/DepthFirstIterator.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/Triple.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/InlineAsm.h" #include "llvm/IR/InstVisitor.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/MDBuilder.h" #include "llvm/IR/Module.h" #include "llvm/IR/Type.h" #include "llvm/IR/ValueMap.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Instrumentation.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/ModuleUtils.h" using namespace llvm; #define DEBUG_TYPE "msan" static const unsigned kOriginSize = 4; static const unsigned kMinOriginAlignment = 4; static const unsigned kShadowTLSAlignment = 8; // These constants must be kept in sync with the ones in msan.h. static const unsigned kParamTLSSize = 800; static const unsigned kRetvalTLSSize = 800; // Accesses sizes are powers of two: 1, 2, 4, 8. static const size_t kNumberOfAccessSizes = 4; /// \brief Track origins of uninitialized values. /// /// Adds a section to MemorySanitizer report that points to the allocation /// (stack or heap) the uninitialized bits came from originally. 
static cl::opt<int> ClTrackOrigins( "msan-track-origins", cl::desc("Track origins (allocation sites) of poisoned memory"), cl::Hidden, cl::init(0)); static cl::opt<bool> ClKeepGoing("msan-keep-going", cl::desc("keep going after reporting a UMR"), cl::Hidden, cl::init(false)); static cl::opt<bool> ClPoisonStack("msan-poison-stack", cl::desc("poison uninitialized stack variables"), cl::Hidden, cl::init(true)); static cl::opt<bool> ClPoisonStackWithCall( "msan-poison-stack-with-call", cl::desc("poison uninitialized stack variables with a call"), cl::Hidden, cl::init(false)); static cl::opt<int> ClPoisonStackPattern( "msan-poison-stack-pattern", cl::desc("poison uninitialized stack variables with the given patter"), cl::Hidden, cl::init(0xff)); static cl::opt<bool> ClPoisonUndef("msan-poison-undef", cl::desc("poison undef temps"), cl::Hidden, cl::init(true)); static cl::opt<bool> ClHandleICmp("msan-handle-icmp", cl::desc("propagate shadow through ICmpEQ and ICmpNE"), cl::Hidden, cl::init(true)); static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact", cl::desc("exact handling of relational integer ICmp"), cl::Hidden, cl::init(false)); // This flag controls whether we check the shadow of the address // operand of load or store. Such bugs are very rare, since load from // a garbage address typically results in SEGV, but still happen // (e.g. only lower bits of address are garbage, or the access happens // early at program startup where malloc-ed memory is more likely to // be zeroed. As of 2012-08-28 this flag adds 20% slowdown. static cl::opt<bool> ClCheckAccessAddress( "msan-check-access-address", cl::desc("report accesses through a pointer which has poisoned shadow"), cl::Hidden, cl::init(true)); static cl::opt<bool> ClDumpStrictInstructions( "msan-dump-strict-instructions", cl::desc("print out instructions with default strict semantics"), cl::Hidden, cl::init(false)); static cl::opt<int> ClInstrumentationWithCallThreshold( "msan-instrumentation-with-call-threshold", cl::desc( "If the function being instrumented requires more than " "this number of checks and origin stores, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(3500)); // This is an experiment to enable handling of cases where shadow is a non-zero // compile-time constant. For some unexplainable reason they were silently // ignored in the instrumentation. static cl::opt<bool> ClCheckConstantShadow("msan-check-constant-shadow", cl::desc("Insert checks for constant shadow values"), cl::Hidden, cl::init(false)); static const char *const kMsanModuleCtorName = "msan.module_ctor"; static const char *const kMsanInitName = "__msan_init"; namespace { // Memory map parameters used in application-to-shadow address calculation. 
// Offset = (Addr & ~AndMask) ^ XorMask // Shadow = ShadowBase + Offset // Origin = OriginBase + Offset struct MemoryMapParams { uint64_t AndMask; uint64_t XorMask; uint64_t ShadowBase; uint64_t OriginBase; }; struct PlatformMemoryMapParams { const MemoryMapParams *bits32; const MemoryMapParams *bits64; }; // i386 Linux static const MemoryMapParams Linux_I386_MemoryMapParams = { 0x000080000000, // AndMask 0, // XorMask (not used) 0, // ShadowBase (not used) 0x000040000000, // OriginBase }; // x86_64 Linux static const MemoryMapParams Linux_X86_64_MemoryMapParams = { 0x400000000000, // AndMask 0, // XorMask (not used) 0, // ShadowBase (not used) 0x200000000000, // OriginBase }; // mips64 Linux static const MemoryMapParams Linux_MIPS64_MemoryMapParams = { 0x004000000000, // AndMask 0, // XorMask (not used) 0, // ShadowBase (not used) 0x002000000000, // OriginBase }; // ppc64 Linux static const MemoryMapParams Linux_PowerPC64_MemoryMapParams = { 0x200000000000, // AndMask 0x100000000000, // XorMask 0x080000000000, // ShadowBase 0x1C0000000000, // OriginBase }; // i386 FreeBSD static const MemoryMapParams FreeBSD_I386_MemoryMapParams = { 0x000180000000, // AndMask 0x000040000000, // XorMask 0x000020000000, // ShadowBase 0x000700000000, // OriginBase }; // x86_64 FreeBSD static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams = { 0xc00000000000, // AndMask 0x200000000000, // XorMask 0x100000000000, // ShadowBase 0x380000000000, // OriginBase }; static const PlatformMemoryMapParams Linux_X86_MemoryMapParams = { &Linux_I386_MemoryMapParams, &Linux_X86_64_MemoryMapParams, }; static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams = { NULL, &Linux_MIPS64_MemoryMapParams, }; static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams = { NULL, &Linux_PowerPC64_MemoryMapParams, }; static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams = { &FreeBSD_I386_MemoryMapParams, &FreeBSD_X86_64_MemoryMapParams, }; /// \brief An instrumentation pass implementing detection of uninitialized /// reads. /// /// MemorySanitizer: instrument the code in module to find /// uninitialized reads. class MemorySanitizer : public FunctionPass { public: MemorySanitizer(int TrackOrigins = 0) : FunctionPass(ID), TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)), WarningFn(nullptr) {} StringRef getPassName() const override { return "MemorySanitizer"; } bool runOnFunction(Function &F) override; bool doInitialization(Module &M) override; static char ID; // Pass identification, replacement for typeid. private: void initializeCallbacks(Module &M); /// \brief Track origins (allocation points) of uninitialized values. int TrackOrigins; LLVMContext *C; Type *IntptrTy; Type *OriginTy; /// \brief Thread-local shadow storage for function parameters. GlobalVariable *ParamTLS; /// \brief Thread-local origin storage for function parameters. GlobalVariable *ParamOriginTLS; /// \brief Thread-local shadow storage for function return value. GlobalVariable *RetvalTLS; /// \brief Thread-local origin storage for function return value. GlobalVariable *RetvalOriginTLS; /// \brief Thread-local shadow storage for in-register va_arg function /// parameters (x86_64-specific). GlobalVariable *VAArgTLS; /// \brief Thread-local shadow storage for va_arg overflow area /// (x86_64-specific). GlobalVariable *VAArgOverflowSizeTLS; /// \brief Thread-local space used to pass origin value to the UMR reporting /// function. GlobalVariable *OriginTLS; /// \brief The run-time callback to print a warning. 
Value *WarningFn; // These arrays are indexed by log2(AccessSize). Value *MaybeWarningFn[kNumberOfAccessSizes]; Value *MaybeStoreOriginFn[kNumberOfAccessSizes]; /// \brief Run-time helper that generates a new origin value for a stack /// allocation. Value *MsanSetAllocaOrigin4Fn; /// \brief Run-time helper that poisons stack on function entry. Value *MsanPoisonStackFn; /// \brief Run-time helper that records a store (or any event) of an /// uninitialized value and returns an updated origin id encoding this info. Value *MsanChainOriginFn; /// \brief MSan runtime replacements for memmove, memcpy and memset. Value *MemmoveFn, *MemcpyFn, *MemsetFn; /// \brief Memory map parameters used in application-to-shadow calculation. const MemoryMapParams *MapParams; MDNode *ColdCallWeights; /// \brief Branch weights for origin store. MDNode *OriginStoreWeights; /// \brief An empty volatile inline asm that prevents callback merge. InlineAsm *EmptyAsm; Function *MsanCtorFunction; friend struct MemorySanitizerVisitor; friend struct VarArgAMD64Helper; friend struct VarArgMIPS64Helper; }; } // namespace char MemorySanitizer::ID = 0; INITIALIZE_PASS(MemorySanitizer, "msan", "MemorySanitizer: detects uninitialized reads.", false, false) FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins) { return new MemorySanitizer(TrackOrigins); } /// \brief Create a non-const global initialized with the given string. /// /// Creates a writable global for Str so that we can pass it to the /// run-time lib. Runtime uses first 4 bytes of the string to store the /// frame ID, so the string needs to be mutable. static GlobalVariable *createPrivateNonConstGlobalForString(Module &M, StringRef Str) { Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str); return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false, GlobalValue::PrivateLinkage, StrConst, ""); } /// \brief Insert extern declaration of runtime-provided functions and globals. void MemorySanitizer::initializeCallbacks(Module &M) { // Only do this once. if (WarningFn) return; IRBuilder<> IRB(*C); // Create the callback. // FIXME: this function should have "Cold" calling conv, // which is not yet implemented. StringRef WarningFnName = ClKeepGoing ? 
"__msan_warning" : "__msan_warning_noreturn"; WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), nullptr); for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes; AccessSizeIndex++) { unsigned AccessSize = 1 << AccessSizeIndex; std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize); MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction( FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt32Ty(), nullptr); FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize); MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction( FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt8PtrTy(), IRB.getInt32Ty(), nullptr); } MsanSetAllocaOrigin4Fn = M.getOrInsertFunction( "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy, IRB.getInt8PtrTy(), IntptrTy, nullptr); MsanPoisonStackFn = M.getOrInsertFunction("__msan_poison_stack", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr); MsanChainOriginFn = M.getOrInsertFunction( "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty(), nullptr); MemmoveFn = M.getOrInsertFunction("__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr); MemcpyFn = M.getOrInsertFunction("__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr); MemsetFn = M.getOrInsertFunction("__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy, nullptr); // Create globals. RetvalTLS = new GlobalVariable( M, ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8), false, GlobalVariable::ExternalLinkage, nullptr, "__msan_retval_tls", nullptr, GlobalVariable::InitialExecTLSModel); RetvalOriginTLS = new GlobalVariable( M, OriginTy, false, GlobalVariable::ExternalLinkage, nullptr, "__msan_retval_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel); ParamTLS = new GlobalVariable( M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false, GlobalVariable::ExternalLinkage, nullptr, "__msan_param_tls", nullptr, GlobalVariable::InitialExecTLSModel); ParamOriginTLS = new GlobalVariable( M, ArrayType::get(OriginTy, kParamTLSSize / 4), false, GlobalVariable::ExternalLinkage, nullptr, "__msan_param_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel); VAArgTLS = new GlobalVariable( M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false, GlobalVariable::ExternalLinkage, nullptr, "__msan_va_arg_tls", nullptr, GlobalVariable::InitialExecTLSModel); VAArgOverflowSizeTLS = new GlobalVariable( M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, nullptr, "__msan_va_arg_overflow_size_tls", nullptr, GlobalVariable::InitialExecTLSModel); OriginTLS = new GlobalVariable( M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, nullptr, "__msan_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel); // We insert an empty inline asm after __msan_report* to avoid callback merge. EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false), StringRef(""), StringRef(""), /*hasSideEffects=*/true); } /// \brief Module-level initialization. /// /// inserts a call to __msan_init to the module's constructor list. 
bool MemorySanitizer::doInitialization(Module &M) { auto &DL = M.getDataLayout(); Triple TargetTriple(M.getTargetTriple()); switch (TargetTriple.getOS()) { case Triple::FreeBSD: switch (TargetTriple.getArch()) { case Triple::x86_64: MapParams = FreeBSD_X86_MemoryMapParams.bits64; break; case Triple::x86: MapParams = FreeBSD_X86_MemoryMapParams.bits32; break; default: report_fatal_error("unsupported architecture"); } break; case Triple::Linux: switch (TargetTriple.getArch()) { case Triple::x86_64: MapParams = Linux_X86_MemoryMapParams.bits64; break; case Triple::x86: MapParams = Linux_X86_MemoryMapParams.bits32; break; case Triple::mips64: case Triple::mips64el: MapParams = Linux_MIPS_MemoryMapParams.bits64; break; case Triple::ppc64: case Triple::ppc64le: MapParams = Linux_PowerPC_MemoryMapParams.bits64; break; default: report_fatal_error("unsupported architecture"); } break; default: report_fatal_error("unsupported operating system"); } C = &(M.getContext()); IRBuilder<> IRB(*C); IntptrTy = IRB.getIntPtrTy(DL); OriginTy = IRB.getInt32Ty(); ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000); OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000); std::tie(MsanCtorFunction, std::ignore) = createSanitizerCtorAndInitFunctions(M, kMsanModuleCtorName, kMsanInitName, /*InitArgTypes=*/{}, /*InitArgs=*/{}); appendToGlobalCtors(M, MsanCtorFunction, 0); if (TrackOrigins) new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage, IRB.getInt32(TrackOrigins), "__msan_track_origins"); if (ClKeepGoing) new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage, IRB.getInt32(ClKeepGoing), "__msan_keep_going"); return true; } namespace { /// \brief A helper class that handles instrumentation of VarArg /// functions on a particular platform. /// /// Implementations are expected to insert the instrumentation /// necessary to propagate argument shadow through VarArg function /// calls. Visit* methods are called during an InstVisitor pass over /// the function, and should avoid creating new basic blocks. A new /// instance of this class is created for each instrumented function. struct VarArgHelper { /// \brief Visit a CallSite. virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0; /// \brief Visit a va_start call. virtual void visitVAStartInst(VAStartInst &I) = 0; /// \brief Visit a va_copy call. virtual void visitVACopyInst(VACopyInst &I) = 0; /// \brief Finalize function instrumentation. /// /// This method is called after visiting all interesting (see above) /// instructions in a function. virtual void finalizeInstrumentation() = 0; virtual ~VarArgHelper() {} }; struct MemorySanitizerVisitor; VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan, MemorySanitizerVisitor &Visitor); unsigned TypeSizeToSizeIndex(unsigned TypeSize) { if (TypeSize <= 8) return 0; return Log2_32_Ceil(TypeSize / 8); } /// This class does all the work for a given function. Store and Load /// instructions store and load corresponding shadow and origin /// values. Most instructions propagate shadow from arguments to their /// return values. Certain instructions (most importantly, BranchInst) /// test their argument shadow and print reports (with a runtime call) if it's /// non-zero. 
struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { Function &F; MemorySanitizer &MS; SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes; ValueMap<Value *, Value *> ShadowMap, OriginMap; std::unique_ptr<VarArgHelper> VAHelper; // The following flags disable parts of MSan instrumentation based on // blacklist contents and command-line options. bool InsertChecks; bool PropagateShadow; bool PoisonStack; bool PoisonUndef; bool CheckReturnValue; struct ShadowOriginAndInsertPoint { Value *Shadow; Value *Origin; Instruction *OrigIns; ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I) : Shadow(S), Origin(O), OrigIns(I) {} }; SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList; SmallVector<Instruction *, 16> StoreList; MemorySanitizerVisitor(Function &F, MemorySanitizer &MS) : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) { bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeMemory); InsertChecks = SanitizeFunction; PropagateShadow = SanitizeFunction; PoisonStack = SanitizeFunction && ClPoisonStack; PoisonUndef = SanitizeFunction && ClPoisonUndef; // FIXME: Consider using SpecialCaseList to specify a list of functions that // must always return fully initialized values. For now, we hardcode "main". CheckReturnValue = SanitizeFunction && (F.getName() == "main"); DEBUG(if (!InsertChecks) dbgs() << "MemorySanitizer is not inserting checks into '" << F.getName() << "'\n"); } Value *updateOrigin(Value *V, IRBuilder<> &IRB) { if (MS.TrackOrigins <= 1) return V; return IRB.CreateCall(MS.MsanChainOriginFn, V); } Value *originToIntptr(IRBuilder<> &IRB, Value *Origin) { const DataLayout &DL = F.getParent()->getDataLayout(); unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy); if (IntptrSize == kOriginSize) return Origin; assert(IntptrSize == kOriginSize * 2); Origin = IRB.CreateIntCast(Origin, MS.IntptrTy, /* isSigned */ false); return IRB.CreateOr(Origin, IRB.CreateShl(Origin, kOriginSize * 8)); } /// \brief Fill memory range with the given origin value. void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr, unsigned Size, unsigned Alignment) { const DataLayout &DL = F.getParent()->getDataLayout(); unsigned IntptrAlignment = DL.getABITypeAlignment(MS.IntptrTy); unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy); assert(IntptrAlignment >= kMinOriginAlignment); assert(IntptrSize >= kOriginSize); unsigned Ofs = 0; unsigned CurrentAlignment = Alignment; if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) { Value *IntptrOrigin = originToIntptr(IRB, Origin); Value *IntptrOriginPtr = IRB.CreatePointerCast(OriginPtr, PointerType::get(MS.IntptrTy, 0)); for (unsigned i = 0; i < Size / IntptrSize; ++i) { Value *Ptr = i ? IRB.CreateConstGEP1_32(MS.IntptrTy, IntptrOriginPtr, i) : IntptrOriginPtr; IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment); Ofs += IntptrSize / kOriginSize; CurrentAlignment = IntptrAlignment; } } for (unsigned i = Ofs; i < (Size + kOriginSize - 1) / kOriginSize; ++i) { Value *GEP = i ? 
IRB.CreateConstGEP1_32(nullptr, OriginPtr, i) : OriginPtr; IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment); CurrentAlignment = kMinOriginAlignment; } } void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin, unsigned Alignment, bool AsCall) { const DataLayout &DL = F.getParent()->getDataLayout(); unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment); unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType()); if (isa<StructType>(Shadow->getType())) { paintOrigin(IRB, updateOrigin(Origin, IRB), getOriginPtr(Addr, IRB, Alignment), StoreSize, OriginAlignment); } else { Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB); Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow); if (ConstantShadow) { if (ClCheckConstantShadow && !ConstantShadow->isZeroValue()) paintOrigin(IRB, updateOrigin(Origin, IRB), getOriginPtr(Addr, IRB, Alignment), StoreSize, OriginAlignment); return; } unsigned TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType()); unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits); if (AsCall && SizeIndex < kNumberOfAccessSizes) { Value *Fn = MS.MaybeStoreOriginFn[SizeIndex]; Value *ConvertedShadow2 = IRB.CreateZExt( ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex))); IRB.CreateCall(Fn, {ConvertedShadow2, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()), Origin}); } else { Value *Cmp = IRB.CreateICmpNE( ConvertedShadow, getCleanShadow(ConvertedShadow), "_mscmp"); Instruction *CheckTerm = SplitBlockAndInsertIfThen( Cmp, IRB.GetInsertPoint(), false, MS.OriginStoreWeights); IRBuilder<> IRBNew(CheckTerm); paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), getOriginPtr(Addr, IRBNew, Alignment), StoreSize, OriginAlignment); } } } void materializeStores(bool InstrumentWithCalls) { for (auto Inst : StoreList) { StoreInst &SI = *dyn_cast<StoreInst>(Inst); IRBuilder<> IRB(&SI); Value *Val = SI.getValueOperand(); Value *Addr = SI.getPointerOperand(); Value *Shadow = SI.isAtomic() ? getCleanShadow(Val) : getShadow(Val); Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB); StoreInst *NewSI = IRB.CreateAlignedStore(Shadow, ShadowPtr, SI.getAlignment()); DEBUG(dbgs() << " STORE: " << *NewSI << "\n"); (void)NewSI; if (ClCheckAccessAddress) insertShadowCheck(Addr, &SI); if (SI.isAtomic()) SI.setOrdering(addReleaseOrdering(SI.getOrdering())); if (MS.TrackOrigins && !SI.isAtomic()) storeOrigin(IRB, Addr, Shadow, getOrigin(Val), SI.getAlignment(), InstrumentWithCalls); } } void materializeOneCheck(Instruction *OrigIns, Value *Shadow, Value *Origin, bool AsCall) { IRBuilder<> IRB(OrigIns); DEBUG(dbgs() << " SHAD0 : " << *Shadow << "\n"); Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB); DEBUG(dbgs() << " SHAD1 : " << *ConvertedShadow << "\n"); Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow); if (ConstantShadow) { if (ClCheckConstantShadow && !ConstantShadow->isZeroValue()) { if (MS.TrackOrigins) { IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0), MS.OriginTLS); } IRB.CreateCall(MS.WarningFn, {}); IRB.CreateCall(MS.EmptyAsm, {}); // FIXME: Insert UnreachableInst if !ClKeepGoing? // This may invalidate some of the following checks and needs to be done // at the very end. 
} return; } const DataLayout &DL = OrigIns->getModule()->getDataLayout(); unsigned TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType()); unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits); if (AsCall && SizeIndex < kNumberOfAccessSizes) { Value *Fn = MS.MaybeWarningFn[SizeIndex]; Value *ConvertedShadow2 = IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex))); IRB.CreateCall(Fn, {ConvertedShadow2, MS.TrackOrigins && Origin ? Origin : (Value *)IRB.getInt32(0)}); } else { Value *Cmp = IRB.CreateICmpNE(ConvertedShadow, getCleanShadow(ConvertedShadow), "_mscmp"); Instruction *CheckTerm = SplitBlockAndInsertIfThen( Cmp, OrigIns, /* Unreachable */ !ClKeepGoing, MS.ColdCallWeights); IRB.SetInsertPoint(CheckTerm); if (MS.TrackOrigins) { IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0), MS.OriginTLS); } IRB.CreateCall(MS.WarningFn, {}); IRB.CreateCall(MS.EmptyAsm, {}); DEBUG(dbgs() << " CHECK: " << *Cmp << "\n"); } } void materializeChecks(bool InstrumentWithCalls) { for (const auto &ShadowData : InstrumentationList) { Instruction *OrigIns = ShadowData.OrigIns; Value *Shadow = ShadowData.Shadow; Value *Origin = ShadowData.Origin; materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls); } DEBUG(dbgs() << "DONE:\n" << F); } /// \brief Add MemorySanitizer instrumentation to a function. bool runOnFunction() { MS.initializeCallbacks(*F.getParent()); // In the presence of unreachable blocks, we may see Phi nodes with // incoming nodes from such blocks. Since InstVisitor skips unreachable // blocks, such nodes will not have any shadow value associated with them. // It's easier to remove unreachable blocks than deal with missing shadow. removeUnreachableBlocks(F); // Iterate all BBs in depth-first order and create shadow instructions // for all instructions (where applicable). // For PHI nodes we create dummy shadow PHIs which will be finalized later. for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB); // Finalize PHI nodes. for (PHINode *PN : ShadowPHINodes) { PHINode *PNS = cast<PHINode>(getShadow(PN)); PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr; size_t NumValues = PN->getNumIncomingValues(); for (size_t v = 0; v < NumValues; v++) { PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v)); if (PNO) PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v)); } } VAHelper->finalizeInstrumentation(); bool InstrumentWithCalls = ClInstrumentationWithCallThreshold >= 0 && InstrumentationList.size() + StoreList.size() > (unsigned)ClInstrumentationWithCallThreshold; // Delayed instrumentation of StoreInst. // This may add new checks to be inserted later. materializeStores(InstrumentWithCalls); // Insert shadow value checks. materializeChecks(InstrumentWithCalls); return true; } /// \brief Compute the shadow type that corresponds to a given Value. Type *getShadowTy(Value *V) { return getShadowTy(V->getType()); } /// \brief Compute the shadow type that corresponds to a given Type. Type *getShadowTy(Type *OrigTy) { if (!OrigTy->isSized()) { return nullptr; } // For integer type, shadow is the same as the original type. // This may return weird-sized types like i1. 
if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy)) return IT; const DataLayout &DL = F.getParent()->getDataLayout(); if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) { uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType()); return VectorType::get(IntegerType::get(*MS.C, EltSize), VT->getNumElements()); } if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) { return ArrayType::get(getShadowTy(AT->getElementType()), AT->getNumElements()); } if (StructType *ST = dyn_cast<StructType>(OrigTy)) { SmallVector<Type *, 4> Elements; for (unsigned i = 0, n = ST->getNumElements(); i < n; i++) Elements.push_back(getShadowTy(ST->getElementType(i))); StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked()); DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n"); return Res; } uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy); return IntegerType::get(*MS.C, TypeSize); } /// \brief Flatten a vector type. Type *getShadowTyNoVec(Type *ty) { if (VectorType *vt = dyn_cast<VectorType>(ty)) return IntegerType::get(*MS.C, vt->getBitWidth()); return ty; } /// \brief Convert a shadow value to it's flattened variant. Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) { Type *Ty = V->getType(); Type *NoVecTy = getShadowTyNoVec(Ty); if (Ty == NoVecTy) return V; return IRB.CreateBitCast(V, NoVecTy); } /// \brief Compute the integer shadow offset that corresponds to a given /// application address. /// /// Offset = (Addr & ~AndMask) ^ XorMask Value *getShadowPtrOffset(Value *Addr, IRBuilder<> &IRB) { uint64_t AndMask = MS.MapParams->AndMask; assert(AndMask != 0 && "AndMask shall be specified"); Value *OffsetLong = IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy), ConstantInt::get(MS.IntptrTy, ~AndMask)); uint64_t XorMask = MS.MapParams->XorMask; if (XorMask != 0) OffsetLong = IRB.CreateXor(OffsetLong, ConstantInt::get(MS.IntptrTy, XorMask)); return OffsetLong; } /// \brief Compute the shadow address that corresponds to a given application /// address. /// /// Shadow = ShadowBase + Offset Value *getShadowPtr(Value *Addr, Type *ShadowTy, IRBuilder<> &IRB) { Value *ShadowLong = getShadowPtrOffset(Addr, IRB); uint64_t ShadowBase = MS.MapParams->ShadowBase; if (ShadowBase != 0) ShadowLong = IRB.CreateAdd(ShadowLong, ConstantInt::get(MS.IntptrTy, ShadowBase)); return IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0)); } /// \brief Compute the origin address that corresponds to a given application /// address. /// /// OriginAddr = (OriginBase + Offset) & ~3ULL Value *getOriginPtr(Value *Addr, IRBuilder<> &IRB, unsigned Alignment) { Value *OriginLong = getShadowPtrOffset(Addr, IRB); uint64_t OriginBase = MS.MapParams->OriginBase; if (OriginBase != 0) OriginLong = IRB.CreateAdd(OriginLong, ConstantInt::get(MS.IntptrTy, OriginBase)); if (Alignment < kMinOriginAlignment) { uint64_t Mask = kMinOriginAlignment - 1; OriginLong = IRB.CreateAnd(OriginLong, ConstantInt::get(MS.IntptrTy, ~Mask)); } return IRB.CreateIntToPtr(OriginLong, PointerType::get(IRB.getInt32Ty(), 0)); } /// \brief Compute the shadow address for a given function argument. /// /// Shadow = ParamTLS+ArgOffset. Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB, int ArgOffset) { Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy); Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset)); return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0), "_msarg"); } /// \brief Compute the origin address for a given function argument. 
Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB, int ArgOffset) { if (!MS.TrackOrigins) return nullptr; Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy); Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset)); return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0), "_msarg_o"); } /// \brief Compute the shadow address for a retval. Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) { Value *Base = IRB.CreatePointerCast(MS.RetvalTLS, MS.IntptrTy); return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0), "_msret"); } /// \brief Compute the origin address for a retval. Value *getOriginPtrForRetval(IRBuilder<> &IRB) { // We keep a single origin for the entire retval. Might be too optimistic. return MS.RetvalOriginTLS; } /// \brief Set SV to be the shadow value for V. void setShadow(Value *V, Value *SV) { assert(!ShadowMap.count(V) && "Values may only have one shadow"); ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V); } /// \brief Set Origin to be the origin value for V. void setOrigin(Value *V, Value *Origin) { if (!MS.TrackOrigins) return; assert(!OriginMap.count(V) && "Values may only have one origin"); DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n"); OriginMap[V] = Origin; } /// \brief Create a clean shadow value for a given value. /// /// Clean shadow (all zeroes) means all bits of the value are defined /// (initialized). Constant *getCleanShadow(Value *V) { Type *ShadowTy = getShadowTy(V); if (!ShadowTy) return nullptr; return Constant::getNullValue(ShadowTy); } /// \brief Create a dirty shadow of a given shadow type. Constant *getPoisonedShadow(Type *ShadowTy) { assert(ShadowTy); if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) return Constant::getAllOnesValue(ShadowTy); if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) { SmallVector<Constant *, 4> Vals(AT->getNumElements(), getPoisonedShadow(AT->getElementType())); return ConstantArray::get(AT, Vals); } if (StructType *ST = dyn_cast<StructType>(ShadowTy)) { SmallVector<Constant *, 4> Vals; for (unsigned i = 0, n = ST->getNumElements(); i < n; i++) Vals.push_back(getPoisonedShadow(ST->getElementType(i))); return ConstantStruct::get(ST, Vals); } llvm_unreachable("Unexpected shadow type"); } /// \brief Create a dirty shadow for a given value. Constant *getPoisonedShadow(Value *V) { Type *ShadowTy = getShadowTy(V); if (!ShadowTy) return nullptr; return getPoisonedShadow(ShadowTy); } /// \brief Create a clean (zero) origin. Value *getCleanOrigin() { return Constant::getNullValue(MS.OriginTy); } /// \brief Get the shadow value for a given Value. /// /// This function either returns the value set earlier with setShadow, /// or extracts if from ParamTLS (for function arguments). Value *getShadow(Value *V) { if (!PropagateShadow) return getCleanShadow(V); if (Instruction *I = dyn_cast<Instruction>(V)) { // For instructions the shadow is already stored in the map. Value *Shadow = ShadowMap[V]; if (!Shadow) { DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent())); (void)I; assert(Shadow && "No shadow for a value"); } return Shadow; } if (UndefValue *U = dyn_cast<UndefValue>(V)) { Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V); DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n"); (void)U; return AllOnes; } if (Argument *A = dyn_cast<Argument>(V)) { // For arguments we compute the shadow on demand and store it in the map. 
Value **ShadowPtr = &ShadowMap[V]; if (*ShadowPtr) return *ShadowPtr; Function *F = A->getParent(); IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI()); unsigned ArgOffset = 0; const DataLayout &DL = F->getParent()->getDataLayout(); for (auto &FArg : F->args()) { if (!FArg.getType()->isSized()) { DEBUG(dbgs() << "Arg is not sized\n"); continue; } unsigned Size = FArg.hasByValAttr() ? DL.getTypeAllocSize(FArg.getType()->getPointerElementType()) : DL.getTypeAllocSize(FArg.getType()); if (A == &FArg) { bool Overflow = ArgOffset + Size > kParamTLSSize; Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset); if (FArg.hasByValAttr()) { // ByVal pointer itself has clean shadow. We copy the actual // argument shadow to the underlying memory. // Figure out maximal valid memcpy alignment. unsigned ArgAlign = FArg.getParamAlignment(); if (ArgAlign == 0) { Type *EltType = A->getType()->getPointerElementType(); ArgAlign = DL.getABITypeAlignment(EltType); } if (Overflow) { // ParamTLS overflow. EntryIRB.CreateMemSet( getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB), Constant::getNullValue(EntryIRB.getInt8Ty()), Size, ArgAlign); } else { unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment); Value *Cpy = EntryIRB.CreateMemCpy( getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB), Base, Size, CopyAlign); DEBUG(dbgs() << " ByValCpy: " << *Cpy << "\n"); (void)Cpy; } *ShadowPtr = getCleanShadow(V); } else { if (Overflow) { // ParamTLS overflow. *ShadowPtr = getCleanShadow(V); } else { *ShadowPtr = EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment); } } DEBUG(dbgs() << " ARG: " << FArg << " ==> " << **ShadowPtr << "\n"); if (MS.TrackOrigins && !Overflow) { Value *OriginPtr = getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset); setOrigin(A, EntryIRB.CreateLoad(OriginPtr)); } else { setOrigin(A, getCleanOrigin()); } } ArgOffset += RoundUpToAlignment(Size, kShadowTLSAlignment); } assert(*ShadowPtr && "Could not find shadow for an argument"); return *ShadowPtr; } // For everything else the shadow is zero. return getCleanShadow(V); } /// \brief Get the shadow for i-th argument of the instruction I. Value *getShadow(Instruction *I, int i) { return getShadow(I->getOperand(i)); } /// \brief Get the origin for a value. Value *getOrigin(Value *V) { if (!MS.TrackOrigins) return nullptr; if (!PropagateShadow) return getCleanOrigin(); if (isa<Constant>(V)) return getCleanOrigin(); assert((isa<Instruction>(V) || isa<Argument>(V)) && "Unexpected value type in getOrigin()"); Value *Origin = OriginMap[V]; assert(Origin && "Missing origin"); return Origin; } /// \brief Get the origin for i-th argument of the instruction I. Value *getOrigin(Instruction *I, int i) { return getOrigin(I->getOperand(i)); } /// \brief Remember the place where a shadow check should be inserted. /// /// This location will be later instrumented with a check that will print a /// UMR warning in runtime if the shadow value is not 0. void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) { assert(Shadow); if (!InsertChecks) return; #ifndef NDEBUG Type *ShadowTy = Shadow->getType(); assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) && "Can only insert checks for integer and vector shadow types"); #endif InstrumentationList.push_back( ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns)); } /// \brief Remember the place where a shadow check should be inserted. /// /// This location will be later instrumented with a check that will print a /// UMR warning in runtime if the value is not fully defined. 
void insertShadowCheck(Value *Val, Instruction *OrigIns) { assert(Val); Value *Shadow, *Origin; if (ClCheckConstantShadow) { Shadow = getShadow(Val); if (!Shadow) return; Origin = getOrigin(Val); } else { Shadow = dyn_cast_or_null<Instruction>(getShadow(Val)); if (!Shadow) return; Origin = dyn_cast_or_null<Instruction>(getOrigin(Val)); } insertShadowCheck(Shadow, Origin, OrigIns); } AtomicOrdering addReleaseOrdering(AtomicOrdering a) { switch (a) { case NotAtomic: return NotAtomic; case Unordered: case Monotonic: case Release: return Release; case Acquire: case AcquireRelease: return AcquireRelease; case SequentiallyConsistent: return SequentiallyConsistent; } llvm_unreachable("Unknown ordering"); } AtomicOrdering addAcquireOrdering(AtomicOrdering a) { switch (a) { case NotAtomic: return NotAtomic; case Unordered: case Monotonic: case Acquire: return Acquire; case Release: case AcquireRelease: return AcquireRelease; case SequentiallyConsistent: return SequentiallyConsistent; } llvm_unreachable("Unknown ordering"); } // ------------------- Visitors. /// \brief Instrument LoadInst /// /// Loads the corresponding shadow and (optionally) origin. /// Optionally, checks that the load address is fully defined. void visitLoadInst(LoadInst &I) { assert(I.getType()->isSized() && "Load type must have size"); IRBuilder<> IRB(I.getNextNode()); Type *ShadowTy = getShadowTy(&I); Value *Addr = I.getPointerOperand(); if (PropagateShadow && !I.getMetadata("nosanitize")) { Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB); setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld")); } else { setShadow(&I, getCleanShadow(&I)); } if (ClCheckAccessAddress) insertShadowCheck(I.getPointerOperand(), &I); if (I.isAtomic()) I.setOrdering(addAcquireOrdering(I.getOrdering())); if (MS.TrackOrigins) { if (PropagateShadow) { unsigned Alignment = I.getAlignment(); unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment); setOrigin(&I, IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB, Alignment), OriginAlignment)); } else { setOrigin(&I, getCleanOrigin()); } } } /// \brief Instrument StoreInst /// /// Stores the corresponding shadow and (optionally) origin. /// Optionally, checks that the store address is fully defined. void visitStoreInst(StoreInst &I) { StoreList.push_back(&I); } void handleCASOrRMW(Instruction &I) { assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I)); IRBuilder<> IRB(&I); Value *Addr = I.getOperand(0); Value *ShadowPtr = getShadowPtr(Addr, I.getType(), IRB); if (ClCheckAccessAddress) insertShadowCheck(Addr, &I); // Only test the conditional argument of cmpxchg instruction. // The other argument can potentially be uninitialized, but we can not // detect this situation reliably without possible false positives. if (isa<AtomicCmpXchgInst>(I)) insertShadowCheck(I.getOperand(1), &I); IRB.CreateStore(getCleanShadow(&I), ShadowPtr); setShadow(&I, getCleanShadow(&I)); setOrigin(&I, getCleanOrigin()); } void visitAtomicRMWInst(AtomicRMWInst &I) { handleCASOrRMW(I); I.setOrdering(addReleaseOrdering(I.getOrdering())); } void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) { handleCASOrRMW(I); I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering())); } // Vector manipulation. 
  void visitExtractElementInst(ExtractElementInst &I) {
    insertShadowCheck(I.getOperand(1), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
                                           "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitInsertElementInst(InsertElementInst &I) {
    insertShadowCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
                                          I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  void visitShuffleVectorInst(ShuffleVectorInst &I) {
    insertShadowCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
                                          I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  // Casts.
  void visitSExtInst(SExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitZExtInst(ZExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitTruncInst(TruncInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitBitCastInst(BitCastInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitPtrToIntInst(PtrToIntInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
                                    "_msprop_ptrtoint"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitIntToPtrInst(IntToPtrInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
                                    "_msprop_inttoptr"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitFPToSIInst(CastInst &I) { handleShadowOr(I); }
  void visitFPToUIInst(CastInst &I) { handleShadowOr(I); }
  void visitSIToFPInst(CastInst &I) { handleShadowOr(I); }
  void visitUIToFPInst(CastInst &I) { handleShadowOr(I); }
  void visitFPExtInst(CastInst &I) { handleShadowOr(I); }
  void visitFPTruncInst(CastInst &I) { handleShadowOr(I); }

  /// \brief Propagate shadow for bitwise AND.
  ///
  /// This code is exact, i.e. if, for example, a bit in the left argument
  /// is defined and 0, then neither the value nor the definedness of the
  /// corresponding bit in the right argument affects the resulting shadow.
  void visitAnd(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // "And" of 0 and a poisoned value results in unpoisoned value.
    // 1&1 => 1; 0&1 => 0; p&1 => p;
    // 1&0 => 0; 0&0 => 0; p&0 => 0;
    // 1&p => p; 0&p => 0; p&p => p;
    // S = (S1 & S2) | (V1 & S2) | (S1 & V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }

  void visitOr(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // "Or" of 1 and a poisoned value results in unpoisoned value.
// 1|1 => 1; 0|1 => 1; p|1 => 1; // 1|0 => 1; 0|0 => 0; p|0 => p; // 1|p => 1; 0|p => p; p|p => p; // S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2) Value *S1 = getShadow(&I, 0); Value *S2 = getShadow(&I, 1); Value *V1 = IRB.CreateNot(I.getOperand(0)); Value *V2 = IRB.CreateNot(I.getOperand(1)); if (V1->getType() != S1->getType()) { V1 = IRB.CreateIntCast(V1, S1->getType(), false); V2 = IRB.CreateIntCast(V2, S2->getType(), false); } Value *S1S2 = IRB.CreateAnd(S1, S2); Value *V1S2 = IRB.CreateAnd(V1, S2); Value *S1V2 = IRB.CreateAnd(S1, V2); setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2))); setOriginForNaryOp(I); } /// \brief Default propagation of shadow and/or origin. /// /// This class implements the general case of shadow propagation, used in all /// cases where we don't know and/or don't care about what the operation /// actually does. It converts all input shadow values to a common type /// (extending or truncating as necessary), and bitwise OR's them. /// /// This is much cheaper than inserting checks (i.e. requiring inputs to be /// fully initialized), and less prone to false positives. /// /// This class also implements the general case of origin propagation. For a /// Nary operation, result origin is set to the origin of an argument that is /// not entirely initialized. If there is more than one such arguments, the /// rightmost of them is picked. It does not matter which one is picked if all /// arguments are initialized. template <bool CombineShadow> class Combiner { Value *Shadow; Value *Origin; IRBuilder<> &IRB; MemorySanitizerVisitor *MSV; public: Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB) : Shadow(nullptr), Origin(nullptr), IRB(IRB), MSV(MSV) {} /// \brief Add a pair of shadow and origin values to the mix. Combiner &Add(Value *OpShadow, Value *OpOrigin) { if (CombineShadow) { assert(OpShadow); if (!Shadow) Shadow = OpShadow; else { OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType()); Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop"); } } if (MSV->MS.TrackOrigins) { assert(OpOrigin); if (!Origin) { Origin = OpOrigin; } else { Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin); // No point in adding something that might result in 0 origin value. if (!ConstOrigin || !ConstOrigin->isNullValue()) { Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB); Value *Cond = IRB.CreateICmpNE(FlatShadow, MSV->getCleanShadow(FlatShadow)); Origin = IRB.CreateSelect(Cond, OpOrigin, Origin); } } } return *this; } /// \brief Add an application value to the mix. Combiner &Add(Value *V) { Value *OpShadow = MSV->getShadow(V); Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr; return Add(OpShadow, OpOrigin); } /// \brief Set the current combined values as the given instruction's shadow /// and origin. void Done(Instruction *I) { if (CombineShadow) { assert(Shadow); Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I)); MSV->setShadow(I, Shadow); } if (MSV->MS.TrackOrigins) { assert(Origin); MSV->setOrigin(I, Origin); } } }; typedef Combiner<true> ShadowAndOriginCombiner; typedef Combiner<false> OriginCombiner; /// \brief Propagate origin for arbitrary operation. 
void setOriginForNaryOp(Instruction &I) { if (!MS.TrackOrigins) return; IRBuilder<> IRB(&I); OriginCombiner OC(this, IRB); for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI) OC.Add(OI->get()); OC.Done(&I); } size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) { assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) && "Vector of pointers is not a valid shadow type"); return Ty->isVectorTy() ? Ty->getVectorNumElements() * Ty->getScalarSizeInBits() : Ty->getPrimitiveSizeInBits(); } /// \brief Cast between two shadow types, extending or truncating as /// necessary. Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy, bool Signed = false) { Type *srcTy = V->getType(); if (dstTy->isIntegerTy() && srcTy->isIntegerTy()) return IRB.CreateIntCast(V, dstTy, Signed); if (dstTy->isVectorTy() && srcTy->isVectorTy() && dstTy->getVectorNumElements() == srcTy->getVectorNumElements()) return IRB.CreateIntCast(V, dstTy, Signed); size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy); size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy); Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits)); Value *V2 = IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed); return IRB.CreateBitCast(V2, dstTy); // TODO: handle struct types. } /// \brief Cast an application value to the type of its own shadow. Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) { Type *ShadowTy = getShadowTy(V); if (V->getType() == ShadowTy) return V; if (V->getType()->isPtrOrPtrVectorTy()) return IRB.CreatePtrToInt(V, ShadowTy); else return IRB.CreateBitCast(V, ShadowTy); } /// \brief Propagate shadow for arbitrary operation. void handleShadowOr(Instruction &I) { IRBuilder<> IRB(&I); ShadowAndOriginCombiner SC(this, IRB); for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI) SC.Add(OI->get()); SC.Done(&I); } // \brief Handle multiplication by constant. // // Handle a special case of multiplication by constant that may have one or // more zeros in the lower bits. This makes corresponding number of lower bits // of the result zero as well. We model it by shifting the other operand // shadow left by the required number of bits. Effectively, we transform // (X * (A * 2**B)) to ((X << B) * A) and instrument (X << B) as (Sx << B). // We use multiplication by 2**N instead of shift to cover the case of // multiplication by 0, which may occur in some elements of a vector operand. 
void handleMulByConstant(BinaryOperator &I, Constant *ConstArg, Value *OtherArg) { Constant *ShadowMul; Type *Ty = ConstArg->getType(); if (Ty->isVectorTy()) { unsigned NumElements = Ty->getVectorNumElements(); Type *EltTy = Ty->getSequentialElementType(); SmallVector<Constant *, 16> Elements; for (unsigned Idx = 0; Idx < NumElements; ++Idx) { ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx)); APInt V = Elt->getValue(); APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros(); Elements.push_back(ConstantInt::get(EltTy, V2)); } ShadowMul = ConstantVector::get(Elements); } else { ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg); APInt V = Elt->getValue(); APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros(); ShadowMul = ConstantInt::get(Elt->getType(), V2); } IRBuilder<> IRB(&I); setShadow(&I, IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst")); setOrigin(&I, getOrigin(OtherArg)); } void visitMul(BinaryOperator &I) { Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0)); Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1)); if (constOp0 && !constOp1) handleMulByConstant(I, constOp0, I.getOperand(1)); else if (constOp1 && !constOp0) handleMulByConstant(I, constOp1, I.getOperand(0)); else handleShadowOr(I); } void visitFAdd(BinaryOperator &I) { handleShadowOr(I); } void visitFSub(BinaryOperator &I) { handleShadowOr(I); } void visitFMul(BinaryOperator &I) { handleShadowOr(I); } void visitAdd(BinaryOperator &I) { handleShadowOr(I); } void visitSub(BinaryOperator &I) { handleShadowOr(I); } void visitXor(BinaryOperator &I) { handleShadowOr(I); } void handleDiv(Instruction &I) { IRBuilder<> IRB(&I); // Strict on the second argument. insertShadowCheck(I.getOperand(1), &I); setShadow(&I, getShadow(&I, 0)); setOrigin(&I, getOrigin(&I, 0)); } void visitUDiv(BinaryOperator &I) { handleDiv(I); } void visitSDiv(BinaryOperator &I) { handleDiv(I); } void visitFDiv(BinaryOperator &I) { handleDiv(I); } void visitURem(BinaryOperator &I) { handleDiv(I); } void visitSRem(BinaryOperator &I) { handleDiv(I); } void visitFRem(BinaryOperator &I) { handleDiv(I); } /// \brief Instrument == and != comparisons. /// /// Sometimes the comparison result is known even if some of the bits of the /// arguments are not. void handleEqualityComparison(ICmpInst &I) { IRBuilder<> IRB(&I); Value *A = I.getOperand(0); Value *B = I.getOperand(1); Value *Sa = getShadow(A); Value *Sb = getShadow(B); // Get rid of pointers and vectors of pointers. // For ints (and vectors of ints), types of A and Sa match, // and this is a no-op. A = IRB.CreatePointerCast(A, Sa->getType()); B = IRB.CreatePointerCast(B, Sb->getType()); // A == B <==> (C = A^B) == 0 // A != B <==> (C = A^B) != 0 // Sc = Sa | Sb Value *C = IRB.CreateXor(A, B); Value *Sc = IRB.CreateOr(Sa, Sb); // Now dealing with i = (C == 0) comparison (or C != 0, does not matter now) // Result is defined if one of the following is true // * there is a defined 1 bit in C // * C is fully defined // Si = !(C & ~Sc) && Sc Value *Zero = Constant::getNullValue(Sc->getType()); Value *MinusOne = Constant::getAllOnesValue(Sc->getType()); Value *Si = IRB.CreateAnd( IRB.CreateICmpNE(Sc, Zero), IRB.CreateICmpEQ(IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero)); Si->setName("_msprop_icmp"); setShadow(&I, Si); setOriginForNaryOp(I); } /// \brief Build the lowest possible value of V, taking into account V's /// uninitialized bits. 
Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa, bool isSigned) { if (isSigned) { // Split shadow into sign bit and other bits. Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1); Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits); // Maximise the undefined shadow bit, minimize other undefined bits. return IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)), SaSignBit); } else { // Minimize undefined bits. return IRB.CreateAnd(A, IRB.CreateNot(Sa)); } } /// \brief Build the highest possible value of V, taking into account V's /// uninitialized bits. Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa, bool isSigned) { if (isSigned) { // Split shadow into sign bit and other bits. Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1); Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits); // Minimise the undefined shadow bit, maximise other undefined bits. return IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)), SaOtherBits); } else { // Maximize undefined bits. return IRB.CreateOr(A, Sa); } } /// \brief Instrument relational comparisons. /// /// This function does exact shadow propagation for all relational /// comparisons of integers, pointers and vectors of those. /// FIXME: output seems suboptimal when one of the operands is a constant void handleRelationalComparisonExact(ICmpInst &I) { IRBuilder<> IRB(&I); Value *A = I.getOperand(0); Value *B = I.getOperand(1); Value *Sa = getShadow(A); Value *Sb = getShadow(B); // Get rid of pointers and vectors of pointers. // For ints (and vectors of ints), types of A and Sa match, // and this is a no-op. A = IRB.CreatePointerCast(A, Sa->getType()); B = IRB.CreatePointerCast(B, Sb->getType()); // Let [a0, a1] be the interval of possible values of A, taking into account // its undefined bits. Let [b0, b1] be the interval of possible values of B. // Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0). bool IsSigned = I.isSigned(); Value *S1 = IRB.CreateICmp(I.getPredicate(), getLowestPossibleValue(IRB, A, Sa, IsSigned), getHighestPossibleValue(IRB, B, Sb, IsSigned)); Value *S2 = IRB.CreateICmp(I.getPredicate(), getHighestPossibleValue(IRB, A, Sa, IsSigned), getLowestPossibleValue(IRB, B, Sb, IsSigned)); Value *Si = IRB.CreateXor(S1, S2); setShadow(&I, Si); setOriginForNaryOp(I); } /// \brief Instrument signed relational comparisons. /// /// Handle (x<0) and (x>=0) comparisons (essentially, sign bit tests) by /// propagating the highest bit of the shadow. Everything else is delegated /// to handleShadowOr(). 
  void handleSignedRelationalComparison(ICmpInst &I) {
    Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
    Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
    Value *op = nullptr;
    CmpInst::Predicate pre = I.getPredicate();
    if (constOp0 && constOp0->isNullValue() &&
        (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE)) {
      op = I.getOperand(1);
    } else if (constOp1 && constOp1->isNullValue() &&
               (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) {
      op = I.getOperand(0);
    }
    if (op) {
      IRBuilder<> IRB(&I);
      Value *Shadow = IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op),
                                        "_msprop_icmpslt");
      setShadow(&I, Shadow);
      setOrigin(&I, getOrigin(op));
    } else {
      handleShadowOr(I);
    }
  }

  void visitICmpInst(ICmpInst &I) {
    if (!ClHandleICmp) {
      handleShadowOr(I);
      return;
    }
    if (I.isEquality()) {
      handleEqualityComparison(I);
      return;
    }

    assert(I.isRelational());
    if (ClHandleICmpExact) {
      handleRelationalComparisonExact(I);
      return;
    }
    if (I.isSigned()) {
      handleSignedRelationalComparison(I);
      return;
    }

    assert(I.isUnsigned());
    if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
      handleRelationalComparisonExact(I);
      return;
    }

    handleShadowOr(I);
  }

  void visitFCmpInst(FCmpInst &I) { handleShadowOr(I); }

  void handleShift(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S1.
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
                                   S2->getType());
    Value *V2 = I.getOperand(1);
    Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }

  void visitShl(BinaryOperator &I) { handleShift(I); }
  void visitAShr(BinaryOperator &I) { handleShift(I); }
  void visitLShr(BinaryOperator &I) { handleShift(I); }

  /// \brief Instrument llvm.memmove
  ///
  /// At this point we don't know if llvm.memmove will be inlined or not.
  /// If we don't instrument it and it gets inlined,
  /// our interceptor will not kick in and we will lose the memmove.
  /// If we instrument the call here, but it does not get inlined,
  /// we will memmove the shadow twice, which is bad in the case
  /// of overlapping regions. So, we simply lower the intrinsic to a call.
  ///
  /// Similar situation exists for memcpy and memset.
  void visitMemMoveInst(MemMoveInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall(
        MS.MemmoveFn,
        {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
    I.eraseFromParent();
  }

  // Similar to memmove: avoid copying shadow twice.
  // This is somewhat unfortunate as it may slow down small constant memcpys.
  // FIXME: consider doing manual inline for small constant sizes and proper
  // alignment.
  void visitMemCpyInst(MemCpyInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall(
        MS.MemcpyFn,
        {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
    I.eraseFromParent();
  }

  // Same as memcpy.
  void visitMemSetInst(MemSetInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall(
        MS.MemsetFn,
        {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
    I.eraseFromParent();
  }

  void visitVAStartInst(VAStartInst &I) { VAHelper->visitVAStartInst(I); }

  void visitVACopyInst(VACopyInst &I) { VAHelper->visitVACopyInst(I); }

  enum IntrinsicKind {
    IK_DoesNotAccessMemory,
    IK_OnlyReadsMemory,
    IK_WritesMemory
  };

  static IntrinsicKind getIntrinsicKind(Intrinsic::ID iid) {
    const int DoesNotAccessMemory = IK_DoesNotAccessMemory;
    const int OnlyReadsArgumentPointees = IK_OnlyReadsMemory;
    const int OnlyReadsMemory = IK_OnlyReadsMemory;
    const int OnlyAccessesArgumentPointees = IK_WritesMemory;
    const int UnknownModRefBehavior = IK_WritesMemory;
#define GET_INTRINSIC_MODREF_BEHAVIOR
#define ModRefBehavior IntrinsicKind
#include "llvm/IR/Intrinsics.gen"
#undef ModRefBehavior
#undef GET_INTRINSIC_MODREF_BEHAVIOR
  }

  /// \brief Handle vector store-like intrinsics.
  ///
  /// Instrument intrinsics that look like a simple SIMD store: writes memory,
  /// has 1 pointer argument and 1 vector argument, returns void.
  bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);
    Value *Shadow = getShadow(&I, 1);
    Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

    // We don't know the pointer alignment (could be unaligned SSE store!).
    // Have to assume the worst case.
    IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);

    if (ClCheckAccessAddress)
      insertShadowCheck(Addr, &I);

    // FIXME: use ClStoreCleanOrigin
    // FIXME: factor out common code from materializeStores
    if (MS.TrackOrigins)
      IRB.CreateStore(getOrigin(&I, 1), getOriginPtr(Addr, IRB, 1));
    return true;
  }

  /// \brief Handle vector load-like intrinsics.
  ///
  /// Instrument intrinsics that look like a simple SIMD load: reads memory,
  /// has 1 pointer argument, returns a vector.
  bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);

    Type *ShadowTy = getShadowTy(&I);
    if (PropagateShadow) {
      Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
      // We don't know the pointer alignment (could be unaligned SSE load!).
      // Have to assume the worst case.
      setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, 1, "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertShadowCheck(Addr, &I);

    if (MS.TrackOrigins) {
      if (PropagateShadow)
        setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB, 1)));
      else
        setOrigin(&I, getCleanOrigin());
    }
    return true;
  }

  /// \brief Handle (SIMD arithmetic)-like intrinsics.
  ///
  /// Instrument intrinsics with any number of arguments of the same type,
  /// equal to the return type. The type should be simple (no aggregates or
  /// pointers; vectors are fine).
  /// Caller guarantees that this intrinsic does not access memory.
  bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
    Type *RetTy = I.getType();
    if (!(RetTy->isIntOrIntVectorTy() || RetTy->isFPOrFPVectorTy() ||
          RetTy->isX86_MMXTy()))
      return false;

    unsigned NumArgOperands = I.getNumArgOperands();
    for (unsigned i = 0; i < NumArgOperands; ++i) {
      Type *Ty = I.getArgOperand(i)->getType();
      if (Ty != RetTy)
        return false;
    }

    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (unsigned i = 0; i < NumArgOperands; ++i)
      SC.Add(I.getArgOperand(i));
    SC.Done(&I);

    return true;
  }

  /// \brief Heuristically instrument unknown intrinsics.
  ///
  /// The main purpose of this code is to do something reasonable with all
  /// random intrinsics we might encounter, most importantly - SIMD intrinsics.
  /// We recognize several classes of intrinsics by their argument types and
  /// ModRefBehaviour and apply special instrumentation when we are reasonably
  /// sure that we know what the intrinsic does.
  ///
  /// We special-case intrinsics where this approach fails. See llvm.bswap
  /// handling as an example of that.
  bool handleUnknownIntrinsic(IntrinsicInst &I) {
    unsigned NumArgOperands = I.getNumArgOperands();
    if (NumArgOperands == 0)
      return false;

    Intrinsic::ID iid = I.getIntrinsicID();
    IntrinsicKind IK = getIntrinsicKind(iid);
    bool OnlyReadsMemory = IK == IK_OnlyReadsMemory;
    bool WritesMemory = IK == IK_WritesMemory;
    assert(!(OnlyReadsMemory && WritesMemory));

    if (NumArgOperands == 2 &&
        I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getArgOperand(1)->getType()->isVectorTy() &&
        I.getType()->isVoidTy() && WritesMemory) {
      // This looks like a vector store.
      return handleVectorStoreIntrinsic(I);
    }
    if (NumArgOperands == 1 &&
        I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getType()->isVectorTy() && OnlyReadsMemory) {
      // This looks like a vector load.
      return handleVectorLoadIntrinsic(I);
    }
    if (!OnlyReadsMemory && !WritesMemory)
      if (maybeHandleSimpleNomemIntrinsic(I))
        return true;

    // FIXME: detect and handle SSE maskstore/maskload
    return false;
  }

  void handleBswap(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Op = I.getArgOperand(0);
    Type *OpType = Op->getType();
    Function *BswapFunc = Intrinsic::getDeclaration(
        F.getParent(), Intrinsic::bswap, makeArrayRef(&OpType, 1));
    setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
    setOrigin(&I, getOrigin(Op));
  }

  // \brief Instrument vector convert intrinsic.
  //
  // This function instruments intrinsics like cvtsi2ss:
  // %Out = int_xxx_cvtyyy(%ConvertOp)
  // or
  // %Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp)
  // Intrinsic converts \p NumUsedElements elements of \p ConvertOp to the same
  // number of \p Out elements, and (if it has 2 arguments) copies the rest of
  // the elements from \p CopyOp.
  // In most cases conversion involves floating-point value which may trigger a
  // hardware exception when not fully initialized. For this reason we require
  // \p ConvertOp[0:NumUsedElements] to be fully initialized and trap otherwise.
  // We copy the shadow of \p CopyOp[NumUsedElements:] to \p
  // Out[NumUsedElements:]. This means that intrinsics without \p CopyOp always
  // return a fully initialized value.
  void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements) {
    IRBuilder<> IRB(&I);
    Value *CopyOp, *ConvertOp;
    switch (I.getNumArgOperands()) {
    case 3:
      assert(isa<ConstantInt>(I.getArgOperand(2)) && "Invalid rounding mode");
    case 2:
      CopyOp = I.getArgOperand(0);
      ConvertOp = I.getArgOperand(1);
      break;
    case 1:
      ConvertOp = I.getArgOperand(0);
      CopyOp = nullptr;
      break;
    default:
      llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
    }

    // The first *NumUsedElements* elements of ConvertOp are converted to the
    // same number of output elements. The rest of the output is copied from
    // CopyOp, or (if not available) filled with zeroes.
    // Combine shadow for elements of ConvertOp that are used in this operation,
    // and insert a check.
    // FIXME: consider propagating shadow of ConvertOp, at least in the case of
    // int->any conversion.
    Value *ConvertShadow = getShadow(ConvertOp);
    Value *AggShadow = nullptr;
    if (ConvertOp->getType()->isVectorTy()) {
      AggShadow = IRB.CreateExtractElement(
          ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
      for (int i = 1; i < NumUsedElements; ++i) {
        Value *MoreShadow = IRB.CreateExtractElement(
            ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
        AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
      }
    } else {
      AggShadow = ConvertShadow;
    }
    assert(AggShadow->getType()->isIntegerTy());
    insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);

    // Build result shadow by zero-filling parts of CopyOp shadow that come from
    // ConvertOp.
    if (CopyOp) {
      assert(CopyOp->getType() == I.getType());
      assert(CopyOp->getType()->isVectorTy());
      Value *ResultShadow = getShadow(CopyOp);
      Type *EltTy = ResultShadow->getType()->getVectorElementType();
      for (int i = 0; i < NumUsedElements; ++i) {
        ResultShadow = IRB.CreateInsertElement(
            ResultShadow, ConstantInt::getNullValue(EltTy),
            ConstantInt::get(IRB.getInt32Ty(), i));
      }
      setShadow(&I, ResultShadow);
      setOrigin(&I, getOrigin(CopyOp));
    } else {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
    }
  }

  // Given a scalar or vector, extract lower 64 bits (or less), and return all
  // zeroes if it is zero, and all ones otherwise.
  Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
    if (S->getType()->isVectorTy())
      S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
    assert(S->getType()->getPrimitiveSizeInBits() <= 64);
    Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
    return CreateShadowCast(IRB, S2, T, /* Signed */ true);
  }

  Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
    Type *T = S->getType();
    assert(T->isVectorTy());
    Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
    return IRB.CreateSExt(S2, T);
  }

  // \brief Instrument vector shift intrinsic.
  //
  // This function instruments intrinsics like int_x86_avx2_psll_w.
  // Intrinsic shifts %In by %ShiftSize bits.
  // %ShiftSize may be a vector. In that case the lower 64 bits determine shift
  // size, and the rest is ignored. Behavior is defined even if shift size is
  // greater than register (or field) width.
  void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
    assert(I.getNumArgOperands() == 2);
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S1.
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
                             : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    Value *Shift = IRB.CreateCall(I.getCalledValue(),
                                  {IRB.CreateBitCast(S1, V1->getType()), V2});
    Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }

  // \brief Get an X86_MMX-sized vector type.
  Type *getMMXVectorTy(unsigned EltSizeInBits) {
    const unsigned X86_MMXSizeInBits = 64;
    return VectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
                           X86_MMXSizeInBits / EltSizeInBits);
  }

  // \brief Returns a signed counterpart for an (un)signed-saturate-and-pack
  // intrinsic.
  Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
    switch (id) {
    case llvm::Intrinsic::x86_sse2_packsswb_128:
    case llvm::Intrinsic::x86_sse2_packuswb_128:
      return llvm::Intrinsic::x86_sse2_packsswb_128;

    case llvm::Intrinsic::x86_sse2_packssdw_128:
    case llvm::Intrinsic::x86_sse41_packusdw:
      return llvm::Intrinsic::x86_sse2_packssdw_128;

    case llvm::Intrinsic::x86_avx2_packsswb:
    case llvm::Intrinsic::x86_avx2_packuswb:
      return llvm::Intrinsic::x86_avx2_packsswb;

    case llvm::Intrinsic::x86_avx2_packssdw:
    case llvm::Intrinsic::x86_avx2_packusdw:
      return llvm::Intrinsic::x86_avx2_packssdw;

    case llvm::Intrinsic::x86_mmx_packsswb:
    case llvm::Intrinsic::x86_mmx_packuswb:
      return llvm::Intrinsic::x86_mmx_packsswb;

    case llvm::Intrinsic::x86_mmx_packssdw:
      return llvm::Intrinsic::x86_mmx_packssdw;
    default:
      llvm_unreachable("unexpected intrinsic id");
    }
  }

  // \brief Instrument vector pack intrinsic.
  //
  // This function instruments intrinsics like x86_mmx_packsswb, that
  // pack elements of 2 input vectors into half as many bits with saturation.
  // Shadow is propagated with the signed variant of the same intrinsic applied
  // to sext(Sa != zeroinitializer), sext(Sb != zeroinitializer).
  // EltSizeInBits is used only for x86mmx arguments.
  void handleVectorPackIntrinsic(IntrinsicInst &I, unsigned EltSizeInBits = 0) {
    assert(I.getNumArgOperands() == 2);
    bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
    IRBuilder<> IRB(&I);
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    assert(isX86_MMX || S1->getType()->isVectorTy());

    // SExt and ICmpNE below must apply to individual elements of input vectors.
    // In case of x86mmx arguments, cast them to appropriate vector types and
    // back.
    Type *T = isX86_MMX ? getMMXVectorTy(EltSizeInBits) : S1->getType();
    if (isX86_MMX) {
      S1 = IRB.CreateBitCast(S1, T);
      S2 = IRB.CreateBitCast(S2, T);
    }
    Value *S1_ext = IRB.CreateSExt(
        IRB.CreateICmpNE(S1, llvm::Constant::getNullValue(T)), T);
    Value *S2_ext = IRB.CreateSExt(
        IRB.CreateICmpNE(S2, llvm::Constant::getNullValue(T)), T);
    if (isX86_MMX) {
      Type *X86_MMXTy = Type::getX86_MMXTy(*MS.C);
      S1_ext = IRB.CreateBitCast(S1_ext, X86_MMXTy);
      S2_ext = IRB.CreateBitCast(S2_ext, X86_MMXTy);
    }

    Function *ShadowFn = Intrinsic::getDeclaration(
        F.getParent(), getSignedPackIntrinsic(I.getIntrinsicID()));

    Value *S = IRB.CreateCall(ShadowFn, {S1_ext, S2_ext}, "_msprop_vector_pack");
    if (isX86_MMX)
      S = IRB.CreateBitCast(S, getShadowTy(&I));
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }

  // \brief Instrument sum-of-absolute-differences intrinsic.
  void handleVectorSadIntrinsic(IntrinsicInst &I) {
    const unsigned SignificantBitsPerResultElement = 16;
    bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
    Type *ResTy = isX86_MMX ? IntegerType::get(*MS.C, 64) : I.getType();
    unsigned ZeroBitsPerResultElement =
        ResTy->getScalarSizeInBits() - SignificantBitsPerResultElement;

    IRBuilder<> IRB(&I);
    Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
    S = IRB.CreateBitCast(S, ResTy);
    S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
                       ResTy);
    S = IRB.CreateLShr(S, ZeroBitsPerResultElement);
    S = IRB.CreateBitCast(S, getShadowTy(&I));
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }

  // \brief Instrument multiply-add intrinsic.
  void handleVectorPmaddIntrinsic(IntrinsicInst &I,
                                  unsigned EltSizeInBits = 0) {
    bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
    Type *ResTy = isX86_MMX ?
getMMXVectorTy(EltSizeInBits * 2) : I.getType(); IRBuilder<> IRB(&I); Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1)); S = IRB.CreateBitCast(S, ResTy); S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)), ResTy); S = IRB.CreateBitCast(S, getShadowTy(&I)); setShadow(&I, S); setOriginForNaryOp(I); } void visitIntrinsicInst(IntrinsicInst &I) { switch (I.getIntrinsicID()) { case llvm::Intrinsic::bswap: handleBswap(I); break; case llvm::Intrinsic::x86_avx512_cvtsd2usi64: case llvm::Intrinsic::x86_avx512_cvtsd2usi: case llvm::Intrinsic::x86_avx512_cvtss2usi64: case llvm::Intrinsic::x86_avx512_cvtss2usi: case llvm::Intrinsic::x86_avx512_cvttss2usi64: case llvm::Intrinsic::x86_avx512_cvttss2usi: case llvm::Intrinsic::x86_avx512_cvttsd2usi64: case llvm::Intrinsic::x86_avx512_cvttsd2usi: case llvm::Intrinsic::x86_avx512_cvtusi2sd: case llvm::Intrinsic::x86_avx512_cvtusi2ss: case llvm::Intrinsic::x86_avx512_cvtusi642sd: case llvm::Intrinsic::x86_avx512_cvtusi642ss: case llvm::Intrinsic::x86_sse2_cvtsd2si64: case llvm::Intrinsic::x86_sse2_cvtsd2si: case llvm::Intrinsic::x86_sse2_cvtsd2ss: case llvm::Intrinsic::x86_sse2_cvtsi2sd: case llvm::Intrinsic::x86_sse2_cvtsi642sd: case llvm::Intrinsic::x86_sse2_cvtss2sd: case llvm::Intrinsic::x86_sse2_cvttsd2si64: case llvm::Intrinsic::x86_sse2_cvttsd2si: case llvm::Intrinsic::x86_sse_cvtsi2ss: case llvm::Intrinsic::x86_sse_cvtsi642ss: case llvm::Intrinsic::x86_sse_cvtss2si64: case llvm::Intrinsic::x86_sse_cvtss2si: case llvm::Intrinsic::x86_sse_cvttss2si64: case llvm::Intrinsic::x86_sse_cvttss2si: handleVectorConvertIntrinsic(I, 1); break; case llvm::Intrinsic::x86_sse2_cvtdq2pd: case llvm::Intrinsic::x86_sse2_cvtps2pd: case llvm::Intrinsic::x86_sse_cvtps2pi: case llvm::Intrinsic::x86_sse_cvttps2pi: handleVectorConvertIntrinsic(I, 2); break; case llvm::Intrinsic::x86_avx2_psll_w: case llvm::Intrinsic::x86_avx2_psll_d: case llvm::Intrinsic::x86_avx2_psll_q: case llvm::Intrinsic::x86_avx2_pslli_w: case llvm::Intrinsic::x86_avx2_pslli_d: case llvm::Intrinsic::x86_avx2_pslli_q: case llvm::Intrinsic::x86_avx2_psrl_w: case llvm::Intrinsic::x86_avx2_psrl_d: case llvm::Intrinsic::x86_avx2_psrl_q: case llvm::Intrinsic::x86_avx2_psra_w: case llvm::Intrinsic::x86_avx2_psra_d: case llvm::Intrinsic::x86_avx2_psrli_w: case llvm::Intrinsic::x86_avx2_psrli_d: case llvm::Intrinsic::x86_avx2_psrli_q: case llvm::Intrinsic::x86_avx2_psrai_w: case llvm::Intrinsic::x86_avx2_psrai_d: case llvm::Intrinsic::x86_sse2_psll_w: case llvm::Intrinsic::x86_sse2_psll_d: case llvm::Intrinsic::x86_sse2_psll_q: case llvm::Intrinsic::x86_sse2_pslli_w: case llvm::Intrinsic::x86_sse2_pslli_d: case llvm::Intrinsic::x86_sse2_pslli_q: case llvm::Intrinsic::x86_sse2_psrl_w: case llvm::Intrinsic::x86_sse2_psrl_d: case llvm::Intrinsic::x86_sse2_psrl_q: case llvm::Intrinsic::x86_sse2_psra_w: case llvm::Intrinsic::x86_sse2_psra_d: case llvm::Intrinsic::x86_sse2_psrli_w: case llvm::Intrinsic::x86_sse2_psrli_d: case llvm::Intrinsic::x86_sse2_psrli_q: case llvm::Intrinsic::x86_sse2_psrai_w: case llvm::Intrinsic::x86_sse2_psrai_d: case llvm::Intrinsic::x86_mmx_psll_w: case llvm::Intrinsic::x86_mmx_psll_d: case llvm::Intrinsic::x86_mmx_psll_q: case llvm::Intrinsic::x86_mmx_pslli_w: case llvm::Intrinsic::x86_mmx_pslli_d: case llvm::Intrinsic::x86_mmx_pslli_q: case llvm::Intrinsic::x86_mmx_psrl_w: case llvm::Intrinsic::x86_mmx_psrl_d: case llvm::Intrinsic::x86_mmx_psrl_q: case llvm::Intrinsic::x86_mmx_psra_w: case llvm::Intrinsic::x86_mmx_psra_d: case 
llvm::Intrinsic::x86_mmx_psrli_w: case llvm::Intrinsic::x86_mmx_psrli_d: case llvm::Intrinsic::x86_mmx_psrli_q: case llvm::Intrinsic::x86_mmx_psrai_w: case llvm::Intrinsic::x86_mmx_psrai_d: handleVectorShiftIntrinsic(I, /* Variable */ false); break; case llvm::Intrinsic::x86_avx2_psllv_d: case llvm::Intrinsic::x86_avx2_psllv_d_256: case llvm::Intrinsic::x86_avx2_psllv_q: case llvm::Intrinsic::x86_avx2_psllv_q_256: case llvm::Intrinsic::x86_avx2_psrlv_d: case llvm::Intrinsic::x86_avx2_psrlv_d_256: case llvm::Intrinsic::x86_avx2_psrlv_q: case llvm::Intrinsic::x86_avx2_psrlv_q_256: case llvm::Intrinsic::x86_avx2_psrav_d: case llvm::Intrinsic::x86_avx2_psrav_d_256: handleVectorShiftIntrinsic(I, /* Variable */ true); break; case llvm::Intrinsic::x86_sse2_packsswb_128: case llvm::Intrinsic::x86_sse2_packssdw_128: case llvm::Intrinsic::x86_sse2_packuswb_128: case llvm::Intrinsic::x86_sse41_packusdw: case llvm::Intrinsic::x86_avx2_packsswb: case llvm::Intrinsic::x86_avx2_packssdw: case llvm::Intrinsic::x86_avx2_packuswb: case llvm::Intrinsic::x86_avx2_packusdw: handleVectorPackIntrinsic(I); break; case llvm::Intrinsic::x86_mmx_packsswb: case llvm::Intrinsic::x86_mmx_packuswb: handleVectorPackIntrinsic(I, 16); break; case llvm::Intrinsic::x86_mmx_packssdw: handleVectorPackIntrinsic(I, 32); break; case llvm::Intrinsic::x86_mmx_psad_bw: case llvm::Intrinsic::x86_sse2_psad_bw: case llvm::Intrinsic::x86_avx2_psad_bw: handleVectorSadIntrinsic(I); break; case llvm::Intrinsic::x86_sse2_pmadd_wd: case llvm::Intrinsic::x86_avx2_pmadd_wd: case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw_128: case llvm::Intrinsic::x86_avx2_pmadd_ub_sw: handleVectorPmaddIntrinsic(I); break; case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw: handleVectorPmaddIntrinsic(I, 8); break; case llvm::Intrinsic::x86_mmx_pmadd_wd: handleVectorPmaddIntrinsic(I, 16); break; default: if (!handleUnknownIntrinsic(I)) visitInstruction(I); break; } } void visitCallSite(CallSite CS) { Instruction &I = *CS.getInstruction(); assert((CS.isCall() || CS.isInvoke()) && "Unknown type of CallSite"); if (CS.isCall()) { CallInst *Call = cast<CallInst>(&I); // For inline asm, do the usual thing: check argument shadow and mark all // outputs as clean. Note that any side effects of the inline asm that are // not immediately visible in its constraints are not handled. if (Call->isInlineAsm()) { visitInstruction(I); return; } assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere"); // We are going to insert code that relies on the fact that the callee // will become a non-readonly function after it is instrumented by us. To // prevent this code from being optimized out, mark that function // non-readonly in advance. if (Function *Func = Call->getCalledFunction()) { // Clear out readonly/readnone attributes. AttrBuilder B; B.addAttribute(Attribute::ReadOnly).addAttribute(Attribute::ReadNone); Func->removeAttributes(AttributeSet::FunctionIndex, AttributeSet::get(Func->getContext(), AttributeSet::FunctionIndex, B)); } } IRBuilder<> IRB(&I); unsigned ArgOffset = 0; DEBUG(dbgs() << " CallSite: " << I << "\n"); for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end(); ArgIt != End; ++ArgIt) { Value *A = *ArgIt; unsigned i = ArgIt - CS.arg_begin(); if (!A->getType()->isSized()) { DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n"); continue; } unsigned Size = 0; Value *Store = nullptr; // Compute the Shadow for arg even if it is ByVal, because // in that case getShadow() will copy the actual arg shadow to // __msan_param_tls. 
Value *ArgShadow = getShadow(A); Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset); DEBUG(dbgs() << " Arg#" << i << ": " << *A << " Shadow: " << *ArgShadow << "\n"); bool ArgIsInitialized = false; const DataLayout &DL = F.getParent()->getDataLayout(); if (CS.paramHasAttr(i + 1, Attribute::ByVal)) { assert(A->getType()->isPointerTy() && "ByVal argument is not a pointer!"); Size = DL.getTypeAllocSize(A->getType()->getPointerElementType()); if (ArgOffset + Size > kParamTLSSize) break; unsigned ParamAlignment = CS.getParamAlignment(i + 1); unsigned Alignment = std::min(ParamAlignment, kShadowTLSAlignment); Store = IRB.CreateMemCpy(ArgShadowBase, getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB), Size, Alignment); } else { Size = DL.getTypeAllocSize(A->getType()); if (ArgOffset + Size > kParamTLSSize) break; Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase, kShadowTLSAlignment); Constant *Cst = dyn_cast<Constant>(ArgShadow); if (Cst && Cst->isNullValue()) ArgIsInitialized = true; } if (MS.TrackOrigins && !ArgIsInitialized) IRB.CreateStore(getOrigin(A), getOriginPtrForArgument(A, IRB, ArgOffset)); (void)Store; assert(Size != 0 && Store != nullptr); DEBUG(dbgs() << " Param:" << *Store << "\n"); ArgOffset += RoundUpToAlignment(Size, 8); } DEBUG(dbgs() << " done with call args\n"); FunctionType *FT = cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0)); if (FT->isVarArg()) { VAHelper->visitCallSite(CS, IRB); } // Now, get the shadow for the RetVal. if (!I.getType()->isSized()) return; IRBuilder<> IRBBefore(&I); // Until we have full dynamic coverage, make sure the retval shadow is 0. Value *Base = getShadowPtrForRetval(&I, IRBBefore); IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment); Instruction *NextInsn = nullptr; if (CS.isCall()) { NextInsn = I.getNextNode(); } else { BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest(); if (!NormalDest->getSinglePredecessor()) { // FIXME: this case is tricky, so we are just conservative here. // Perhaps we need to split the edge between this BB and NormalDest, // but a naive attempt to use SplitEdge leads to a crash. 
setShadow(&I, getCleanShadow(&I)); setOrigin(&I, getCleanOrigin()); return; } NextInsn = NormalDest->getFirstInsertionPt(); assert(NextInsn && "Could not find insertion point for retval shadow load"); } IRBuilder<> IRBAfter(NextInsn); Value *RetvalShadow = IRBAfter.CreateAlignedLoad( getShadowPtrForRetval(&I, IRBAfter), kShadowTLSAlignment, "_msret"); setShadow(&I, RetvalShadow); if (MS.TrackOrigins) setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter))); } void visitReturnInst(ReturnInst &I) { IRBuilder<> IRB(&I); Value *RetVal = I.getReturnValue(); if (!RetVal) return; Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB); if (CheckReturnValue) { insertShadowCheck(RetVal, &I); Value *Shadow = getCleanShadow(RetVal); IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment); } else { Value *Shadow = getShadow(RetVal); IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment); // FIXME: make it conditional if ClStoreCleanOrigin==0 if (MS.TrackOrigins) IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB)); } } void visitPHINode(PHINode &I) { IRBuilder<> IRB(&I); if (!PropagateShadow) { setShadow(&I, getCleanShadow(&I)); setOrigin(&I, getCleanOrigin()); return; } ShadowPHINodes.push_back(&I); setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(), "_msphi_s")); if (MS.TrackOrigins) setOrigin( &I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(), "_msphi_o")); } void visitAllocaInst(AllocaInst &I) { setShadow(&I, getCleanShadow(&I)); setOrigin(&I, getCleanOrigin()); IRBuilder<> IRB(I.getNextNode()); const DataLayout &DL = F.getParent()->getDataLayout(); uint64_t Size = DL.getTypeAllocSize(I.getAllocatedType()); if (PoisonStack && ClPoisonStackWithCall) { IRB.CreateCall(MS.MsanPoisonStackFn, {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), ConstantInt::get(MS.IntptrTy, Size)}); } else { Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB); Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0); IRB.CreateMemSet(ShadowBase, PoisonValue, Size, I.getAlignment()); } if (PoisonStack && MS.TrackOrigins) { SmallString<2048> StackDescriptionStorage; raw_svector_ostream StackDescription(StackDescriptionStorage); // We create a string with a description of the stack allocation and // pass it into __msan_set_alloca_origin. // It will be printed by the run-time if stack-originated UMR is found. // The first 4 bytes of the string are set to '----' and will be replaced // by __msan_va_arg_overflow_size_tls at the first call. StackDescription << "----" << I.getName() << "@" << F.getName(); Value *Descr = createPrivateNonConstGlobalForString( *F.getParent(), StackDescription.str()); IRB.CreateCall(MS.MsanSetAllocaOrigin4Fn, {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), ConstantInt::get(MS.IntptrTy, Size), IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()), IRB.CreatePointerCast(&F, MS.IntptrTy)}); } } void visitSelectInst(SelectInst &I) { IRBuilder<> IRB(&I); // a = select b, c, d Value *B = I.getCondition(); Value *C = I.getTrueValue(); Value *D = I.getFalseValue(); Value *Sb = getShadow(B); Value *Sc = getShadow(C); Value *Sd = getShadow(D); // Result shadow if condition shadow is 0. Value *Sa0 = IRB.CreateSelect(B, Sc, Sd); Value *Sa1; if (I.getType()->isAggregateType()) { // To avoid "sign extending" i1 to an arbitrary aggregate type, we just do // an extra "select". This results in much more compact IR. 
// Sa = select Sb, poisoned, (select b, Sc, Sd) Sa1 = getPoisonedShadow(getShadowTy(I.getType())); } else { // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ] // If Sb (condition is poisoned), look for bits in c and d that are equal // and both unpoisoned. // If !Sb (condition is unpoisoned), simply pick one of Sc and Sd. // Cast arguments to shadow-compatible type. C = CreateAppToShadowCast(IRB, C); D = CreateAppToShadowCast(IRB, D); // Result shadow if condition shadow is 1. Sa1 = IRB.CreateOr(IRB.CreateXor(C, D), IRB.CreateOr(Sc, Sd)); } Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select"); setShadow(&I, Sa); if (MS.TrackOrigins) { // Origins are always i32, so any vector conditions must be flattened. // FIXME: consider tracking vector origins for app vectors? if (B->getType()->isVectorTy()) { Type *FlatTy = getShadowTyNoVec(B->getType()); B = IRB.CreateICmpNE(IRB.CreateBitCast(B, FlatTy), ConstantInt::getNullValue(FlatTy)); Sb = IRB.CreateICmpNE(IRB.CreateBitCast(Sb, FlatTy), ConstantInt::getNullValue(FlatTy)); } // a = select b, c, d // Oa = Sb ? Ob : (b ? Oc : Od) setOrigin( &I, IRB.CreateSelect(Sb, getOrigin(I.getCondition()), IRB.CreateSelect(B, getOrigin(I.getTrueValue()), getOrigin(I.getFalseValue())))); } } void visitLandingPadInst(LandingPadInst &I) { // Do nothing. // See http://code.google.com/p/memory-sanitizer/issues/detail?id=1 setShadow(&I, getCleanShadow(&I)); setOrigin(&I, getCleanOrigin()); } void visitGetElementPtrInst(GetElementPtrInst &I) { handleShadowOr(I); } void visitExtractValueInst(ExtractValueInst &I) { IRBuilder<> IRB(&I); Value *Agg = I.getAggregateOperand(); DEBUG(dbgs() << "ExtractValue: " << I << "\n"); Value *AggShadow = getShadow(Agg); DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n"); Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices()); DEBUG(dbgs() << " ResShadow: " << *ResShadow << "\n"); setShadow(&I, ResShadow); setOriginForNaryOp(I); } void visitInsertValueInst(InsertValueInst &I) { IRBuilder<> IRB(&I); DEBUG(dbgs() << "InsertValue: " << I << "\n"); Value *AggShadow = getShadow(I.getAggregateOperand()); Value *InsShadow = getShadow(I.getInsertedValueOperand()); DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n"); DEBUG(dbgs() << " InsShadow: " << *InsShadow << "\n"); Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices()); DEBUG(dbgs() << " Res: " << *Res << "\n"); setShadow(&I, Res); setOriginForNaryOp(I); } void dumpInst(Instruction &I) { if (CallInst *CI = dyn_cast<CallInst>(&I)) { errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n"; } else { errs() << "ZZZ " << I.getOpcodeName() << "\n"; } errs() << "QQQ " << I << "\n"; } void visitResumeInst(ResumeInst &I) { DEBUG(dbgs() << "Resume: " << I << "\n"); // Nothing to do here. } void visitInstruction(Instruction &I) { // Everything else: stop propagating and check for poisoned shadow. if (ClDumpStrictInstructions) dumpInst(I); DEBUG(dbgs() << "DEFAULT: " << I << "\n"); for (size_t i = 0, n = I.getNumOperands(); i < n; i++) insertShadowCheck(I.getOperand(i), &I); setShadow(&I, getCleanShadow(&I)); setOrigin(&I, getCleanOrigin()); } }; /// \brief AMD64-specific implementation of VarArgHelper. struct VarArgAMD64Helper : public VarArgHelper { // An unfortunate workaround for asymmetric lowering of va_arg stuff. // See a comment in visitCallSite for more details. 
static const unsigned AMD64GpEndOffset = 48; // AMD64 ABI Draft 0.99.6 p3.5.7 static const unsigned AMD64FpEndOffset = 176; Function &F; MemorySanitizer &MS; MemorySanitizerVisitor &MSV; Value *VAArgTLSCopy; Value *VAArgOverflowSize; SmallVector<CallInst *, 16> VAStartInstrumentationList; VarArgAMD64Helper(Function &F, MemorySanitizer &MS, MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr), VAArgOverflowSize(nullptr) {} enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory }; ArgKind classifyArgument(Value *arg) { // A very rough approximation of X86_64 argument classification rules. Type *T = arg->getType(); if (T->isFPOrFPVectorTy() || T->isX86_MMXTy()) return AK_FloatingPoint; if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64) return AK_GeneralPurpose; if (T->isPointerTy()) return AK_GeneralPurpose; return AK_Memory; } // For VarArg functions, store the argument shadow in an ABI-specific format // that corresponds to va_list layout. // We do this because Clang lowers va_arg in the frontend, and this pass // only sees the low level code that deals with va_list internals. // A much easier alternative (provided that Clang emits va_arg instructions) // would have been to associate each live instance of va_list with a copy of // MSanParamTLS, and extract shadow on va_arg() call in the argument list // order. void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override { unsigned GpOffset = 0; unsigned FpOffset = AMD64GpEndOffset; unsigned OverflowOffset = AMD64FpEndOffset; const DataLayout &DL = F.getParent()->getDataLayout(); for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end(); ArgIt != End; ++ArgIt) { Value *A = *ArgIt; unsigned ArgNo = CS.getArgumentNo(ArgIt); bool IsByVal = CS.paramHasAttr(ArgNo + 1, Attribute::ByVal); if (IsByVal) { // ByVal arguments always go to the overflow area. assert(A->getType()->isPointerTy()); Type *RealTy = A->getType()->getPointerElementType(); uint64_t ArgSize = DL.getTypeAllocSize(RealTy); Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset); OverflowOffset += RoundUpToAlignment(ArgSize, 8); IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB), ArgSize, kShadowTLSAlignment); } else { ArgKind AK = classifyArgument(A); if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset) AK = AK_Memory; if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset) AK = AK_Memory; Value *Base; switch (AK) { case AK_GeneralPurpose: Base = getShadowPtrForVAArgument(A->getType(), IRB, GpOffset); GpOffset += 8; break; case AK_FloatingPoint: Base = getShadowPtrForVAArgument(A->getType(), IRB, FpOffset); FpOffset += 16; break; default: assert(AK == AK_Memory); // HLSL Change - set as default case rather // than case AK_Memory uint64_t ArgSize = DL.getTypeAllocSize(A->getType()); Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset); OverflowOffset += RoundUpToAlignment(ArgSize, 8); } IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment); } } Constant *OverflowSize = ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset); IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS); } /// \brief Compute the shadow address for a given va_arg. 
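/// The result is simply MS.VAArgTLS advanced by ArgOffset bytes and cast to a
/// pointer to the shadow type of \p Ty; callers pass in the GP/FP/overflow
/// offsets computed in visitCallSite.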
Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB, int ArgOffset) { Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy); Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset)); return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0), "_msarg"); } void visitVAStartInst(VAStartInst &I) override { IRBuilder<> IRB(&I); VAStartInstrumentationList.push_back(&I); Value *VAListTag = I.getArgOperand(0); Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB); // Unpoison the whole __va_list_tag. // FIXME: magic ABI constants. IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()), /* size */ 24, /* alignment */ 8, false); } void visitVACopyInst(VACopyInst &I) override { IRBuilder<> IRB(&I); Value *VAListTag = I.getArgOperand(0); Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB); // Unpoison the whole __va_list_tag. // FIXME: magic ABI constants. IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()), /* size */ 24, /* alignment */ 8, false); } void finalizeInstrumentation() override { assert(!VAArgOverflowSize && !VAArgTLSCopy && "finalizeInstrumentation called twice"); if (!VAStartInstrumentationList.empty()) { // If there is a va_start in this function, make a backup copy of // va_arg_tls somewhere in the function entry block. IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI()); VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS); Value *CopySize = IRB.CreateAdd( ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), VAArgOverflowSize); VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize); IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8); } // Instrument va_start. // Copy va_list shadow from the backup copy of the TLS contents. for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) { CallInst *OrigInst = VAStartInstrumentationList[i]; IRBuilder<> IRB(OrigInst->getNextNode()); Value *VAListTag = OrigInst->getArgOperand(0); Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr( IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy), ConstantInt::get(MS.IntptrTy, 16)), Type::getInt64PtrTy(*MS.C)); Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr); Value *RegSaveAreaShadowPtr = MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB); IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy, AMD64FpEndOffset, 16); Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr( IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy), ConstantInt::get(MS.IntptrTy, 8)), Type::getInt64PtrTy(*MS.C)); Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr); Value *OverflowArgAreaShadowPtr = MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB); Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy, AMD64FpEndOffset); IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, 16); } } }; /// \brief MIPS64-specific implementation of VarArgHelper. 
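/// Unlike the AMD64 helper, this one models the variadic arguments as a single
/// contiguous area: it only tracks a running byte offset and stores the total
/// size in VAArgOverflowSizeTLS (reused here as a plain size slot).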
struct VarArgMIPS64Helper : public VarArgHelper { Function &F; MemorySanitizer &MS; MemorySanitizerVisitor &MSV; Value *VAArgTLSCopy; Value *VAArgSize; SmallVector<CallInst *, 16> VAStartInstrumentationList; VarArgMIPS64Helper(Function &F, MemorySanitizer &MS, MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr), VAArgSize(nullptr) {} void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override { unsigned VAArgOffset = 0; const DataLayout &DL = F.getParent()->getDataLayout(); for (CallSite::arg_iterator ArgIt = CS.arg_begin() + 1, End = CS.arg_end(); ArgIt != End; ++ArgIt) { Value *A = *ArgIt; Value *Base; uint64_t ArgSize = DL.getTypeAllocSize(A->getType()); #if defined(__MIPSEB__) || defined(MIPSEB) // Adjusting the shadow for argument with size < 8 to match the placement // of bits in big endian system if (ArgSize < 8) VAArgOffset += (8 - ArgSize); #endif Base = getShadowPtrForVAArgument(A->getType(), IRB, VAArgOffset); VAArgOffset += ArgSize; VAArgOffset = RoundUpToAlignment(VAArgOffset, 8); IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment); } Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(), VAArgOffset); // Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of // a new class member i.e. it is the total size of all VarArgs. IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS); } /// \brief Compute the shadow address for a given va_arg. Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB, int ArgOffset) { Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy); Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset)); return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0), "_msarg"); } void visitVAStartInst(VAStartInst &I) override { IRBuilder<> IRB(&I); VAStartInstrumentationList.push_back(&I); Value *VAListTag = I.getArgOperand(0); Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB); IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()), /* size */ 8, /* alignment */ 8, false); } void visitVACopyInst(VACopyInst &I) override { IRBuilder<> IRB(&I); Value *VAListTag = I.getArgOperand(0); Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB); // Unpoison the whole __va_list_tag. // FIXME: magic ABI constants. IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()), /* size */ 8, /* alignment */ 8, false); } void finalizeInstrumentation() override { assert(!VAArgSize && !VAArgTLSCopy && "finalizeInstrumentation called twice"); IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI()); VAArgSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS); Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0), VAArgSize); if (!VAStartInstrumentationList.empty()) { // If there is a va_start in this function, make a backup copy of // va_arg_tls somewhere in the function entry block. VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize); IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8); } // Instrument va_start. // Copy va_list shadow from the backup copy of the TLS contents. 
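// On MIPS64 the va_list is a single pointer into the argument area, so one
// memcpy of CopySize bytes is sufficient here, unlike the AMD64 helper, which
// has to patch reg_save_area and overflow_arg_area separately.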
for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) { CallInst *OrigInst = VAStartInstrumentationList[i]; IRBuilder<> IRB(OrigInst->getNextNode()); Value *VAListTag = OrigInst->getArgOperand(0); Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy), Type::getInt64PtrTy(*MS.C)); Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr); Value *RegSaveAreaShadowPtr = MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB); IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy, CopySize, 8); } } }; /// \brief A no-op implementation of VarArgHelper. struct VarArgNoOpHelper : public VarArgHelper { VarArgNoOpHelper(Function &F, MemorySanitizer &MS, MemorySanitizerVisitor &MSV) {} void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {} void visitVAStartInst(VAStartInst &I) override {} void visitVACopyInst(VACopyInst &I) override {} void finalizeInstrumentation() override {} }; VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan, MemorySanitizerVisitor &Visitor) { // VarArg handling is implemented only on AMD64 and MIPS64; all other targets // get the no-op helper, so false positives are possible there. llvm::Triple TargetTriple(Func.getParent()->getTargetTriple()); if (TargetTriple.getArch() == llvm::Triple::x86_64) return new VarArgAMD64Helper(Func, Msan, Visitor); else if (TargetTriple.getArch() == llvm::Triple::mips64 || TargetTriple.getArch() == llvm::Triple::mips64el) return new VarArgMIPS64Helper(Func, Msan, Visitor); else return new VarArgNoOpHelper(Func, Msan, Visitor); } } // namespace bool MemorySanitizer::runOnFunction(Function &F) { if (&F == MsanCtorFunction) return false; MemorySanitizerVisitor Visitor(F, *this); // Clear out readonly/readnone attributes. AttrBuilder B; B.addAttribute(Attribute::ReadOnly).addAttribute(Attribute::ReadNone); F.removeAttributes( AttributeSet::FunctionIndex, AttributeSet::get(F.getContext(), AttributeSet::FunctionIndex, B)); return Visitor.runOnFunction(); }
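// Note on the attribute stripping in runOnFunction above: instrumented code
// loads and stores shadow/origin memory, so a function that was readonly or
// readnone before instrumentation no longer is, and keeping those attributes
// would give later passes a license to mis-optimize calls to it.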
0
repos/DirectXShaderCompiler/lib/Transforms
repos/DirectXShaderCompiler/lib/Transforms/Instrumentation/InstrProfiling.cpp
//===-- InstrProfiling.cpp - Frontend instrumentation based profiling -----===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass lowers instrprof_increment intrinsics emitted by a frontend for // profiling. It also builds the data structures and initialization code needed // for updating execution counts and emitting the profile at runtime. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Instrumentation.h" #include "llvm/ADT/Triple.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Module.h" #include "llvm/Transforms/Utils/ModuleUtils.h" using namespace llvm; #define DEBUG_TYPE "instrprof" namespace { class InstrProfiling : public ModulePass { public: static char ID; InstrProfiling() : ModulePass(ID) {} InstrProfiling(const InstrProfOptions &Options) : ModulePass(ID), Options(Options) {} StringRef getPassName() const override { return "Frontend instrumentation-based coverage lowering"; } bool runOnModule(Module &M) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesCFG(); } private: InstrProfOptions Options; Module *M; DenseMap<GlobalVariable *, GlobalVariable *> RegionCounters; std::vector<Value *> UsedVars; bool isMachO() const { return Triple(M->getTargetTriple()).isOSBinFormatMachO(); } /// Get the section name for the counter variables. StringRef getCountersSection() const { return isMachO() ? "__DATA,__llvm_prf_cnts" : "__llvm_prf_cnts"; } /// Get the section name for the name variables. StringRef getNameSection() const { return isMachO() ? "__DATA,__llvm_prf_names" : "__llvm_prf_names"; } /// Get the section name for the profile data variables. StringRef getDataSection() const { return isMachO() ? "__DATA,__llvm_prf_data" : "__llvm_prf_data"; } /// Get the section name for the coverage mapping data. StringRef getCoverageSection() const { return isMachO() ? "__DATA,__llvm_covmap" : "__llvm_covmap"; } /// Replace instrprof_increment with an increment of the appropriate value. void lowerIncrement(InstrProfIncrementInst *Inc); /// Set up the section and uses for coverage data and its references. void lowerCoverageData(GlobalVariable *CoverageData); /// Get the region counters for an increment, creating them if necessary. /// /// If the counter array doesn't yet exist, the profile data variables /// referring to them will also be created. GlobalVariable *getOrCreateRegionCounters(InstrProfIncrementInst *Inc); /// Emit runtime registration functions for each profile data variable. void emitRegistration(); /// Emit the necessary plumbing to pull in the runtime initialization. void emitRuntimeHook(); /// Add uses of our data variables and runtime hook. void emitUses(); /// Create a static initializer for our data, on platforms that need it, /// and for any profile output file that was specified. 
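  /// The generated __llvm_profile_init calls the registration function (when
  /// one was emitted) and, if a profile file name was given, forwards it to
  /// the runtime; the initializer is then appended to llvm.global_ctors.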
void emitInitialization(); }; } // anonymous namespace char InstrProfiling::ID = 0; INITIALIZE_PASS(InstrProfiling, "instrprof", "Frontend instrumentation-based coverage lowering.", false, false) ModulePass *llvm::createInstrProfilingPass(const InstrProfOptions &Options) { return new InstrProfiling(Options); } bool InstrProfiling::runOnModule(Module &M) { bool MadeChange = false; this->M = &M; RegionCounters.clear(); UsedVars.clear(); for (Function &F : M) for (BasicBlock &BB : F) for (auto I = BB.begin(), E = BB.end(); I != E;) if (auto *Inc = dyn_cast<InstrProfIncrementInst>(I++)) { lowerIncrement(Inc); MadeChange = true; } if (GlobalVariable *Coverage = M.getNamedGlobal("__llvm_coverage_mapping")) { lowerCoverageData(Coverage); MadeChange = true; } if (!MadeChange) return false; emitRegistration(); emitRuntimeHook(); emitUses(); emitInitialization(); return true; } void InstrProfiling::lowerIncrement(InstrProfIncrementInst *Inc) { GlobalVariable *Counters = getOrCreateRegionCounters(Inc); IRBuilder<> Builder(Inc->getParent(), *Inc); uint64_t Index = Inc->getIndex()->getZExtValue(); Value *Addr = Builder.CreateConstInBoundsGEP2_64(Counters, 0, Index); Value *Count = Builder.CreateLoad(Addr, "pgocount"); Count = Builder.CreateAdd(Count, Builder.getInt64(1)); Inc->replaceAllUsesWith(Builder.CreateStore(Count, Addr)); Inc->eraseFromParent(); } void InstrProfiling::lowerCoverageData(GlobalVariable *CoverageData) { CoverageData->setSection(getCoverageSection()); CoverageData->setAlignment(8); Constant *Init = CoverageData->getInitializer(); // We're expecting { i32, i32, i32, i32, [n x { i8*, i32, i32 }], [m x i8] } // for some C. If not, the frontend's given us something broken. assert(Init->getNumOperands() == 6 && "bad number of fields in coverage map"); assert(isa<ConstantArray>(Init->getAggregateElement(4)) && "invalid function list in coverage map"); ConstantArray *Records = cast<ConstantArray>(Init->getAggregateElement(4)); for (unsigned I = 0, E = Records->getNumOperands(); I < E; ++I) { Constant *Record = Records->getOperand(I); Value *V = const_cast<Value *>(Record->getOperand(0))->stripPointerCasts(); assert(isa<GlobalVariable>(V) && "Missing reference to function name"); GlobalVariable *Name = cast<GlobalVariable>(V); // If we have region counters for this name, we've already handled it. auto It = RegionCounters.find(Name); if (It != RegionCounters.end()) continue; // Move the name variable to the right section. Name->setSection(getNameSection()); Name->setAlignment(1); } } /// Get the name of a profiling variable for a particular function. static std::string getVarName(InstrProfIncrementInst *Inc, StringRef VarName) { auto *Arr = cast<ConstantDataArray>(Inc->getName()->getInitializer()); StringRef Name = Arr->isCString() ? Arr->getAsCString() : Arr->getAsString(); return ("__llvm_profile_" + VarName + "_" + Name).str(); } GlobalVariable * InstrProfiling::getOrCreateRegionCounters(InstrProfIncrementInst *Inc) { GlobalVariable *Name = Inc->getName(); auto It = RegionCounters.find(Name); if (It != RegionCounters.end()) return It->second; // Move the name variable to the right section. Make sure it is placed in the // same comdat as its associated function. Otherwise, we may get multiple // counters for the same function in certain cases. 
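  // (This matters mainly for inline and linkonce_odr functions that get
  // emitted in several translation units: keeping the name, counter and data
  // variables in Fn's comdat lets the linker deduplicate the whole group
  // together with the function.)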
Function *Fn = Inc->getParent()->getParent(); Name->setSection(getNameSection()); Name->setAlignment(1); Name->setComdat(Fn->getComdat()); uint64_t NumCounters = Inc->getNumCounters()->getZExtValue(); LLVMContext &Ctx = M->getContext(); ArrayType *CounterTy = ArrayType::get(Type::getInt64Ty(Ctx), NumCounters); // Create the counters variable. auto *Counters = new GlobalVariable(*M, CounterTy, false, Name->getLinkage(), Constant::getNullValue(CounterTy), getVarName(Inc, "counters")); Counters->setVisibility(Name->getVisibility()); Counters->setSection(getCountersSection()); Counters->setAlignment(8); Counters->setComdat(Fn->getComdat()); RegionCounters[Inc->getName()] = Counters; // Create data variable. auto *NameArrayTy = Name->getType()->getPointerElementType(); auto *Int32Ty = Type::getInt32Ty(Ctx); auto *Int64Ty = Type::getInt64Ty(Ctx); auto *Int8PtrTy = Type::getInt8PtrTy(Ctx); auto *Int64PtrTy = Type::getInt64PtrTy(Ctx); Type *DataTypes[] = {Int32Ty, Int32Ty, Int64Ty, Int8PtrTy, Int64PtrTy}; auto *DataTy = StructType::get(Ctx, makeArrayRef(DataTypes)); Constant *DataVals[] = { ConstantInt::get(Int32Ty, NameArrayTy->getArrayNumElements()), ConstantInt::get(Int32Ty, NumCounters), ConstantInt::get(Int64Ty, Inc->getHash()->getZExtValue()), ConstantExpr::getBitCast(Name, Int8PtrTy), ConstantExpr::getBitCast(Counters, Int64PtrTy)}; auto *Data = new GlobalVariable(*M, DataTy, true, Name->getLinkage(), ConstantStruct::get(DataTy, DataVals), getVarName(Inc, "data")); Data->setVisibility(Name->getVisibility()); Data->setSection(getDataSection()); Data->setAlignment(8); Data->setComdat(Fn->getComdat()); // Mark the data variable as used so that it isn't stripped out. UsedVars.push_back(Data); return Counters; } void InstrProfiling::emitRegistration() { // Don't do this for Darwin. compiler-rt uses linker magic. if (Triple(M->getTargetTriple()).isOSDarwin()) return; // Construct the function. auto *VoidTy = Type::getVoidTy(M->getContext()); auto *VoidPtrTy = Type::getInt8PtrTy(M->getContext()); auto *RegisterFTy = FunctionType::get(VoidTy, false); auto *RegisterF = Function::Create(RegisterFTy, GlobalValue::InternalLinkage, "__llvm_profile_register_functions", M); RegisterF->setUnnamedAddr(true); if (Options.NoRedZone) RegisterF->addFnAttr(Attribute::NoRedZone); auto *RuntimeRegisterTy = FunctionType::get(VoidTy, VoidPtrTy, false); auto *RuntimeRegisterF = Function::Create(RuntimeRegisterTy, GlobalVariable::ExternalLinkage, "__llvm_profile_register_function", M); IRBuilder<> IRB(BasicBlock::Create(M->getContext(), "", RegisterF)); for (Value *Data : UsedVars) IRB.CreateCall(RuntimeRegisterF, IRB.CreateBitCast(Data, VoidPtrTy)); IRB.CreateRetVoid(); } void InstrProfiling::emitRuntimeHook() { const char *const RuntimeVarName = "__llvm_profile_runtime"; const char *const RuntimeUserName = "__llvm_profile_runtime_user"; // If the module's provided its own runtime, we don't need to do anything. if (M->getGlobalVariable(RuntimeVarName)) return; // Declare an external variable that will pull in the runtime initialization. auto *Int32Ty = Type::getInt32Ty(M->getContext()); auto *Var = new GlobalVariable(*M, Int32Ty, false, GlobalValue::ExternalLinkage, nullptr, RuntimeVarName); // Make a function that uses it. 
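  // The load of __llvm_profile_runtime below keeps that external reference
  // alive, which is what forces the linker to pull in the runtime object
  // defining it (and with it the runtime's initialization and output code).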
auto *User = Function::Create(FunctionType::get(Int32Ty, false), GlobalValue::LinkOnceODRLinkage, RuntimeUserName, M); User->addFnAttr(Attribute::NoInline); if (Options.NoRedZone) User->addFnAttr(Attribute::NoRedZone); User->setVisibility(GlobalValue::HiddenVisibility); IRBuilder<> IRB(BasicBlock::Create(M->getContext(), "", User)); auto *Load = IRB.CreateLoad(Var); IRB.CreateRet(Load); // Mark the user variable as used so that it isn't stripped out. UsedVars.push_back(User); } void InstrProfiling::emitUses() { if (UsedVars.empty()) return; GlobalVariable *LLVMUsed = M->getGlobalVariable("llvm.used"); std::vector<Constant *> MergedVars; if (LLVMUsed) { // Collect the existing members of llvm.used. ConstantArray *Inits = cast<ConstantArray>(LLVMUsed->getInitializer()); for (unsigned I = 0, E = Inits->getNumOperands(); I != E; ++I) MergedVars.push_back(Inits->getOperand(I)); LLVMUsed->eraseFromParent(); } Type *i8PTy = Type::getInt8PtrTy(M->getContext()); // Add uses for our data. for (auto *Value : UsedVars) MergedVars.push_back( ConstantExpr::getBitCast(cast<Constant>(Value), i8PTy)); // Recreate llvm.used. ArrayType *ATy = ArrayType::get(i8PTy, MergedVars.size()); LLVMUsed = new GlobalVariable(*M, ATy, false, GlobalValue::AppendingLinkage, ConstantArray::get(ATy, MergedVars), "llvm.used"); LLVMUsed->setSection("llvm.metadata"); } void InstrProfiling::emitInitialization() { std::string InstrProfileOutput = Options.InstrProfileOutput; Constant *RegisterF = M->getFunction("__llvm_profile_register_functions"); if (!RegisterF && InstrProfileOutput.empty()) return; // Create the initialization function. auto *VoidTy = Type::getVoidTy(M->getContext()); auto *F = Function::Create(FunctionType::get(VoidTy, false), GlobalValue::InternalLinkage, "__llvm_profile_init", M); F->setUnnamedAddr(true); F->addFnAttr(Attribute::NoInline); if (Options.NoRedZone) F->addFnAttr(Attribute::NoRedZone); // Add the basic block and the necessary calls. IRBuilder<> IRB(BasicBlock::Create(M->getContext(), "", F)); if (RegisterF) IRB.CreateCall(RegisterF, {}); if (!InstrProfileOutput.empty()) { auto *Int8PtrTy = Type::getInt8PtrTy(M->getContext()); auto *SetNameTy = FunctionType::get(VoidTy, Int8PtrTy, false); auto *SetNameF = Function::Create(SetNameTy, GlobalValue::ExternalLinkage, "__llvm_profile_override_default_filename", M); // Create variable for profile name. Constant *ProfileNameConst = ConstantDataArray::getString(M->getContext(), InstrProfileOutput, true); GlobalVariable *ProfileName = new GlobalVariable(*M, ProfileNameConst->getType(), true, GlobalValue::PrivateLinkage, ProfileNameConst); IRB.CreateCall(SetNameF, IRB.CreatePointerCast(ProfileName, Int8PtrTy)); } IRB.CreateRetVoid(); appendToGlobalCtors(*M, F, 0); }
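// Note: the llvm.used array rebuilt in emitUses is what keeps the per-function
// data variables (and the runtime hook user) alive through optimization and
// dead stripping, since nothing else in the module references them directly.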