Unnamed: 0
int64
0
0
repo_id
stringlengths
5
186
file_path
stringlengths
15
223
content
stringlengths
1
32.8M
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Transforms/ObjCARC.h
//===-- ObjCARC.h - ObjCARC Scalar Transformations --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This header file defines prototypes for accessor functions that expose passes // in the ObjCARC Scalar Transformations library. // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_OBJCARC_H #define LLVM_TRANSFORMS_OBJCARC_H namespace llvm { class Pass; //===----------------------------------------------------------------------===// // // ObjCARCAPElim - ObjC ARC autorelease pool elimination. // Pass *createObjCARCAPElimPass(); //===----------------------------------------------------------------------===// // // ObjCARCExpand - ObjC ARC preliminary simplifications. // Pass *createObjCARCExpandPass(); //===----------------------------------------------------------------------===// // // ObjCARCContract - Late ObjC ARC cleanups. // Pass *createObjCARCContractPass(); // // /////////////////////////////////////////////////////////////////////////////// // // ObjCARCOpt - ObjC ARC optimization. // Pass *createObjCARCOptPass(); } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Transforms/IPO.h
//===- llvm/Transforms/IPO.h - Interprocedural Transformations --*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This header file defines prototypes for accessor functions that expose passes // in the IPO transformations library. // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_IPO_H #define LLVM_TRANSFORMS_IPO_H #include "llvm/ADT/ArrayRef.h" namespace llvm { class ModulePass; class Pass; class Function; class BasicBlock; class GlobalValue; //===----------------------------------------------------------------------===// // // These functions removes symbols from functions and modules. If OnlyDebugInfo // is true, only debugging information is removed from the module. // ModulePass *createStripSymbolsPass(bool OnlyDebugInfo = false); //===----------------------------------------------------------------------===// // // These functions strips symbols from functions and modules. // Only debugging information is not stripped. // ModulePass *createStripNonDebugSymbolsPass(); //===----------------------------------------------------------------------===// // // These pass removes llvm.dbg.declare intrinsics. ModulePass *createStripDebugDeclarePass(); //===----------------------------------------------------------------------===// // // These pass removes unused symbols' debug info. ModulePass *createStripDeadDebugInfoPass(); //===----------------------------------------------------------------------===// /// createConstantMergePass - This function returns a new pass that merges /// duplicate global constants together into a single constant that is shared. 
/// This is useful because some passes (ie TraceValues) insert a lot of string /// constants into the program, regardless of whether or not they duplicate an /// existing string. /// ModulePass *createConstantMergePass(); //===----------------------------------------------------------------------===// /// createGlobalOptimizerPass - This function returns a new pass that optimizes /// non-address taken internal globals. /// ModulePass *createGlobalOptimizerPass(); //===----------------------------------------------------------------------===// /// createGlobalDCEPass - This transform is designed to eliminate unreachable /// internal globals (functions or global variables) /// ModulePass *createGlobalDCEPass(); //===----------------------------------------------------------------------===// /// This transform is designed to eliminate available external globals /// (functions or global variables) /// ModulePass *createEliminateAvailableExternallyPass(); //===----------------------------------------------------------------------===// /// createGVExtractionPass - If deleteFn is true, this pass deletes /// the specified global values. Otherwise, it deletes as much of the module as /// possible, except for the global values specified. /// ModulePass *createGVExtractionPass(std::vector<GlobalValue*>& GVs, bool deleteFn = false); //===----------------------------------------------------------------------===// /// createFunctionInliningPass - Return a new pass object that uses a heuristic /// to inline direct function calls to small functions. /// /// The Threshold can be passed directly, or asked to be computed from the /// given optimization and size optimization arguments. /// /// The -inline-threshold command line option takes precedence over the /// threshold given here. 
Pass *createFunctionInliningPass(); Pass *createFunctionInliningPass(int Threshold); Pass *createFunctionInliningPass(unsigned OptLevel, unsigned SizeOptLevel); //===----------------------------------------------------------------------===// /// createAlwaysInlinerPass - Return a new pass object that inlines only /// functions that are marked as "always_inline". Pass *createAlwaysInlinerPass(); Pass *createAlwaysInlinerPass(bool InsertLifetime); //===----------------------------------------------------------------------===// /// createPruneEHPass - Return a new pass object which transforms invoke /// instructions into calls, if the callee can _not_ unwind the stack. /// Pass *createPruneEHPass(); //===----------------------------------------------------------------------===// /// createInternalizePass - This pass loops over all of the functions in the /// input module, internalizing all globals (functions and variables) it can. //// /// The symbols in \p ExportList are never internalized. /// /// The symbol in DSOList are internalized if it is safe to drop them from /// the symbol table. /// /// Note that commandline options that are used with the above function are not /// used now! ModulePass *createInternalizePass(ArrayRef<const char *> ExportList); /// createInternalizePass - Same as above, but with an empty exportList. ModulePass *createInternalizePass(); //===----------------------------------------------------------------------===// /// createDeadArgEliminationPass - This pass removes arguments from functions /// which are not used by the body of the function. /// ModulePass *createDeadArgEliminationPass(); /// DeadArgHacking pass - Same as DAE, but delete arguments of external /// functions as well. This is definitely not safe, and should only be used by /// bugpoint. 
ModulePass *createDeadArgHackingPass(); //===----------------------------------------------------------------------===// /// createArgumentPromotionPass - This pass promotes "by reference" arguments to /// be passed by value if the number of elements passed is smaller or /// equal to maxElements (maxElements == 0 means always promote). /// Pass *createArgumentPromotionPass(unsigned maxElements = 3); //===----------------------------------------------------------------------===// /// createIPConstantPropagationPass - This pass propagates constants from call /// sites into the bodies of functions. /// ModulePass *createIPConstantPropagationPass(); //===----------------------------------------------------------------------===// /// createIPSCCPPass - This pass propagates constants from call sites into the /// bodies of functions, and keeps track of whether basic blocks are executable /// in the process. /// ModulePass *createIPSCCPPass(); //===----------------------------------------------------------------------===// // /// createLoopExtractorPass - This pass extracts all natural loops from the /// program into a function if it can. /// Pass *createLoopExtractorPass(); /// createSingleLoopExtractorPass - This pass extracts one natural loop from the /// program into a function if it can. This is used by bugpoint. /// Pass *createSingleLoopExtractorPass(); /// createBlockExtractorPass - This pass extracts all blocks (except those /// specified in the argument list) from the functions in the module. /// ModulePass *createBlockExtractorPass(); /// createStripDeadPrototypesPass - This pass removes any function declarations /// (prototypes) that are not used. ModulePass *createStripDeadPrototypesPass(); //===----------------------------------------------------------------------===// /// createFunctionAttrsPass - This pass discovers functions that do not access /// memory, or only read memory, and gives them the readnone/readonly attribute. 
/// It also discovers function arguments that are not captured by the function /// and marks them with the nocapture attribute. /// Pass *createFunctionAttrsPass(); //===----------------------------------------------------------------------===// /// createMergeFunctionsPass - This pass discovers identical functions and /// collapses them. /// ModulePass *createMergeFunctionsPass(); //===----------------------------------------------------------------------===// /// createPartialInliningPass - This pass inlines parts of functions. /// ModulePass *createPartialInliningPass(); //===----------------------------------------------------------------------===// // createMetaRenamerPass - Rename everything with metasyntatic names. // ModulePass *createMetaRenamerPass(); // // /////////////////////////////////////////////////////////////////////////////// /// createBarrierNoopPass - This pass is purely a module pass barrier in a pass /// manager. ModulePass *createBarrierNoopPass(); /// \brief This pass lowers bitset metadata and the llvm.bitset.test intrinsic /// to bitsets. ModulePass *createLowerBitSetsPass(); } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Transforms/Instrumentation.h
//===- Transforms/Instrumentation.h - Instrumentation passes ----*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines constructor functions for instrumentation passes. // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_INSTRUMENTATION_H #define LLVM_TRANSFORMS_INSTRUMENTATION_H #include "llvm/ADT/StringRef.h" #include <vector> #if defined(__GNUC__) && defined(__linux__) && !defined(ANDROID) inline void *getDFSanArgTLSPtrForJIT() { extern __thread __attribute__((tls_model("initial-exec"))) void *__dfsan_arg_tls; return (void *)&__dfsan_arg_tls; } inline void *getDFSanRetValTLSPtrForJIT() { extern __thread __attribute__((tls_model("initial-exec"))) void *__dfsan_retval_tls; return (void *)&__dfsan_retval_tls; } #endif namespace llvm { class ModulePass; class FunctionPass; // Insert GCOV profiling instrumentation struct GCOVOptions { static GCOVOptions getDefault(); // Specify whether to emit .gcno files. bool EmitNotes; // Specify whether to modify the program to emit .gcda files when run. bool EmitData; // A four-byte version string. The meaning of a version string is described in // gcc's gcov-io.h char Version[4]; // Emit a "cfg checksum" that follows the "line number checksum" of a // function. This affects both .gcno and .gcda files. bool UseCfgChecksum; // Add the 'noredzone' attribute to added runtime library calls. bool NoRedZone; // Emit the name of the function in the .gcda files. This is redundant, as // the function identifier can be used to find the name from the .gcno file. bool FunctionNamesInData; // Emit the exit block immediately after the start block, rather than after // all of the function body's blocks. 
bool ExitBlockBeforeBody; }; ModulePass *createGCOVProfilerPass(const GCOVOptions &Options = GCOVOptions::getDefault()); /// Options for the frontend instrumentation based profiling pass. struct InstrProfOptions { InstrProfOptions() : NoRedZone(false) {} // Add the 'noredzone' attribute to added runtime library calls. bool NoRedZone; // Name of the profile file to use as output std::string InstrProfileOutput; }; /// Insert frontend instrumentation based profiling. ModulePass *createInstrProfilingPass( const InstrProfOptions &Options = InstrProfOptions()); // Insert AddressSanitizer (address sanity checking) instrumentation FunctionPass *createAddressSanitizerFunctionPass(bool CompileKernel = false); ModulePass *createAddressSanitizerModulePass(bool CompileKernel = false); // Insert MemorySanitizer instrumentation (detection of uninitialized reads) FunctionPass *createMemorySanitizerPass(int TrackOrigins = 0); // Insert ThreadSanitizer (race detection) instrumentation FunctionPass *createThreadSanitizerPass(); // Insert DataFlowSanitizer (dynamic data flow analysis) instrumentation ModulePass *createDataFlowSanitizerPass( const std::vector<std::string> &ABIListFiles = std::vector<std::string>(), void *(*getArgTLS)() = nullptr, void *(*getRetValTLS)() = nullptr); // Options for sanitizer coverage instrumentation. struct SanitizerCoverageOptions { SanitizerCoverageOptions() : CoverageType(SCK_None), IndirectCalls(false), TraceBB(false), TraceCmp(false), Use8bitCounters(false) {} enum Type { SCK_None = 0, SCK_Function, SCK_BB, SCK_Edge } CoverageType; bool IndirectCalls; bool TraceBB; bool TraceCmp; bool Use8bitCounters; }; // Insert SanitizerCoverage instrumentation. 
ModulePass *createSanitizerCoverageModulePass( const SanitizerCoverageOptions &Options = SanitizerCoverageOptions()); #if defined(__GNUC__) && defined(__linux__) && !defined(ANDROID) inline ModulePass *createDataFlowSanitizerPassForJIT( const std::vector<std::string> &ABIListFiles = std::vector<std::string>()) { return createDataFlowSanitizerPass(ABIListFiles, getDFSanArgTLSPtrForJIT, getDFSanRetValTLSPtrForJIT); } #endif // BoundsChecking - This pass instruments the code to perform run-time bounds // checking on loads, stores, and other memory intrinsics. FunctionPass *createBoundsCheckingPass(); /// \brief This pass splits the stack into a safe stack and an unsafe stack to /// protect against stack-based overflow vulnerabilities. FunctionPass *createSafeStackPass(); } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Transforms/Vectorize.h
//===-- Vectorize.h - Vectorization Transformations -------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This header file defines prototypes for accessor functions that expose passes
// in the Vectorize transformations library.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_VECTORIZE_H
#define LLVM_TRANSFORMS_VECTORIZE_H

namespace llvm {
class BasicBlock;
class BasicBlockPass;
class Pass;

//===----------------------------------------------------------------------===//
/// @brief Vectorize configuration.
///
/// Tunable knobs consumed by the basic-block vectorizer; the default-
/// constructed value is initialized from command line options (see the
/// constructor declared at the bottom of the struct).
struct VectorizeConfig {
  //===--------------------------------------------------------------------===//
  // Target architecture related parameters

  /// @brief The size of the native vector registers.
  unsigned VectorBits;

  /// @brief Vectorize boolean values.
  bool VectorizeBools;

  /// @brief Vectorize integer values.
  bool VectorizeInts;

  /// @brief Vectorize floating-point values.
  bool VectorizeFloats;

  /// @brief Vectorize pointer values.
  bool VectorizePointers;

  /// @brief Vectorize casting (conversion) operations.
  bool VectorizeCasts;

  /// @brief Vectorize floating-point math intrinsics.
  bool VectorizeMath;

  /// @brief Vectorize bit intrinsics.
  bool VectorizeBitManipulations;

  /// @brief Vectorize the fused-multiply-add intrinsic.
  bool VectorizeFMA;

  /// @brief Vectorize select instructions.
  bool VectorizeSelect;

  /// @brief Vectorize comparison instructions.
  bool VectorizeCmp;

  /// @brief Vectorize getelementptr instructions.
  bool VectorizeGEP;

  /// @brief Vectorize loads and stores.
  bool VectorizeMemOps;

  /// @brief Only generate aligned loads and stores.
  bool AlignedOnly;

  //===--------------------------------------------------------------------===//
  // Misc parameters

  /// @brief The required chain depth for vectorization.
  unsigned ReqChainDepth;

  /// @brief The maximum search distance for instruction pairs.
  unsigned SearchLimit;

  /// @brief The maximum number of candidate pairs with which to use a full
  ///        cycle check.
  unsigned MaxCandPairsForCycleCheck;

  /// @brief Replicating one element to a pair breaks the chain.
  bool SplatBreaksChain;

  /// @brief The maximum number of pairable instructions per group.
  unsigned MaxInsts;

  /// @brief The maximum number of candidate instruction pairs per group.
  unsigned MaxPairs;

  /// @brief The maximum number of pairing iterations.
  unsigned MaxIter;

  /// @brief Don't try to form odd-length vectors.
  bool Pow2LenOnly;

  /// @brief Don't boost the chain-depth contribution of loads and stores.
  bool NoMemOpBoost;

  /// @brief Use a fast instruction dependency analysis.
  bool FastDep;

  /// @brief Initialize the VectorizeConfig from command line options.
  VectorizeConfig();
};

//===----------------------------------------------------------------------===//
//
// BBVectorize - A basic-block vectorization pass.
//
BasicBlockPass *
createBBVectorizePass(const VectorizeConfig &C = VectorizeConfig());

//===----------------------------------------------------------------------===//
//
// LoopVectorize - Create a loop vectorization pass.
//
Pass *createLoopVectorizePass(bool NoUnrolling = false,
                              bool AlwaysVectorize = true);

//===----------------------------------------------------------------------===//
//
// SLPVectorizer - Create a bottom-up SLP vectorizer pass.
//
Pass *createSLPVectorizerPass();

//===----------------------------------------------------------------------===//
/// @brief Vectorize the BasicBlock.
///
/// @param BB The BasicBlock to be vectorized
/// @param P  The current running pass, should require AliasAnalysis and
///           ScalarEvolution. After the vectorization, AliasAnalysis,
///           ScalarEvolution and CFG are preserved.
///
/// @return True if the BB is changed, false otherwise.
///
bool vectorizeBasicBlock(Pass *P, BasicBlock &BB,
                         const VectorizeConfig &C = VectorizeConfig());

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h
//===- LowerExpectIntrinsic.h - LowerExpectIntrinsic pass -------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// \file /// /// The header file for the LowerExpectIntrinsic pass as used by the new pass /// manager. /// //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_SCALAR_LOWEREXPECTINTRINSIC_H #define LLVM_TRANSFORMS_SCALAR_LOWEREXPECTINTRINSIC_H #include "llvm/IR/Function.h" #include "llvm/IR/PassManager.h" namespace llvm { class LowerExpectIntrinsicPass { public: static StringRef name() { return "LowerExpectIntrinsicPass"; } /// \brief Run the pass over the function. /// /// This will lower all of th expect intrinsic calls in this function into /// branch weight metadata. That metadata will subsequently feed the analysis /// of the probabilities and frequencies of the CFG. After running this pass, /// no more expect intrinsics remain, allowing the rest of the optimizer to /// ignore them. PreservedAnalyses run(Function &F); }; } #endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Scalar/SimplifyCFG.h
//===- SimplifyCFG.h - Simplify and canonicalize the CFG --------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// \file /// This file provides the interface for the pass responsible for both /// simplifying and canonicalizing the CFG. /// //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_SCALAR_SIMPLIFYCFG_H #define LLVM_TRANSFORMS_SCALAR_SIMPLIFYCFG_H #include "llvm/IR/Function.h" #include "llvm/IR/PassManager.h" namespace llvm { /// \brief A pass to simplify and canonicalize the CFG of a function. /// /// This pass iteratively simplifies the entire CFG of a function, removing /// unnecessary control flows and bringing it into the canonical form expected /// by the rest of the mid-level optimizer. class SimplifyCFGPass { int BonusInstThreshold; public: static StringRef name() { return "SimplifyCFGPass"; } /// \brief Construct a pass with the default thresholds. SimplifyCFGPass(); /// \brief Construct a pass with a specific bonus threshold. SimplifyCFGPass(int BonusInstThreshold); /// \brief Run the pass over the function. PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM); }; } #endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Scalar/EarlyCSE.h
//===- EarlyCSE.h - Simple and fast CSE pass --------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// \file /// This file provides the interface for a simple, fast CSE pass. /// //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_SCALAR_EARLYCSE_H #define LLVM_TRANSFORMS_SCALAR_EARLYCSE_H #include "llvm/IR/Function.h" #include "llvm/IR/PassManager.h" namespace llvm { /// \brief A simple and fast domtree-based CSE pass. /// /// This pass does a simple depth-first walk over the dominator tree, /// eliminating trivially redundant instructions and using instsimplify to /// canonicalize things as it goes. It is intended to be fast and catch obvious /// cases so that instcombine and other passes are more effective. It is /// expected that a later pass of GVN will catch the interesting/hard cases. class EarlyCSEPass { public: static StringRef name() { return "EarlyCSEPass"; } /// \brief Run the pass over the function. PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM); }; } #endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/IPO/LowerBitSets.h
//===- LowerBitSets.h - Bitset lowering pass --------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines parts of the bitset lowering pass implementation that may // be usefully unit tested. // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_IPO_LOWERBITSETS_H #define LLVM_TRANSFORMS_IPO_LOWERBITSETS_H #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallVector.h" #include <stdint.h> #include <limits> #include <set> #include <vector> namespace llvm { class DataLayout; class GlobalVariable; class Value; struct BitSetInfo { // The indices of the set bits in the bitset. std::set<uint64_t> Bits; // The byte offset into the combined global represented by the bitset. uint64_t ByteOffset; // The size of the bitset in bits. uint64_t BitSize; // Log2 alignment of the bit set relative to the combined global. // For example, a log2 alignment of 3 means that bits in the bitset // represent addresses 8 bytes apart. unsigned AlignLog2; bool isSingleOffset() const { return Bits.size() == 1; } bool isAllOnes() const { return Bits.size() == BitSize; } bool containsGlobalOffset(uint64_t Offset) const; bool containsValue(const DataLayout &DL, const DenseMap<GlobalVariable *, uint64_t> &GlobalLayout, Value *V, uint64_t COffset = 0) const; }; struct BitSetBuilder { SmallVector<uint64_t, 16> Offsets; uint64_t Min, Max; BitSetBuilder() : Min(std::numeric_limits<uint64_t>::max()), Max(0) {} void addOffset(uint64_t Offset) { if (Min > Offset) Min = Offset; if (Max < Offset) Max = Offset; Offsets.push_back(Offset); } BitSetInfo build(); }; /// This class implements a layout algorithm for globals referenced by bit sets /// that tries to keep members of small bit sets together. 
This can /// significantly reduce bit set sizes in many cases. /// /// It works by assembling fragments of layout from sets of referenced globals. /// Each set of referenced globals causes the algorithm to create a new /// fragment, which is assembled by appending each referenced global in the set /// into the fragment. If a referenced global has already been referenced by an /// fragment created earlier, we instead delete that fragment and append its /// contents into the fragment we are assembling. /// /// By starting with the smallest fragments, we minimize the size of the /// fragments that are copied into larger fragments. This is most intuitively /// thought about when considering the case where the globals are virtual tables /// and the bit sets represent their derived classes: in a single inheritance /// hierarchy, the optimum layout would involve a depth-first search of the /// class hierarchy (and in fact the computed layout ends up looking a lot like /// a DFS), but a naive DFS would not work well in the presence of multiple /// inheritance. This aspect of the algorithm ends up fitting smaller /// hierarchies inside larger ones where that would be beneficial. /// /// For example, consider this class hierarchy: /// /// A B /// \ / | \ /// C D E /// /// We have five bit sets: bsA (A, C), bsB (B, C, D, E), bsC (C), bsD (D) and /// bsE (E). If we laid out our objects by DFS traversing B followed by A, our /// layout would be {B, C, D, E, A}. This is optimal for bsB as it needs to /// cover the only 4 objects in its hierarchy, but not for bsA as it needs to /// cover 5 objects, i.e. the entire layout. Our algorithm proceeds as follows: /// /// Add bsC, fragments {{C}} /// Add bsD, fragments {{C}, {D}} /// Add bsE, fragments {{C}, {D}, {E}} /// Add bsA, fragments {{A, C}, {D}, {E}} /// Add bsB, fragments {{B, A, C, D, E}} /// /// This layout is optimal for bsA, as it now only needs to cover two (i.e. 
3 /// fewer) objects, at the cost of bsB needing to cover 1 more object. /// /// The bit set lowering pass assigns an object index to each object that needs /// to be laid out, and calls addFragment for each bit set passing the object /// indices of its referenced globals. It then assembles a layout from the /// computed layout in the Fragments field. struct GlobalLayoutBuilder { /// The computed layout. Each element of this vector contains a fragment of /// layout (which may be empty) consisting of object indices. std::vector<std::vector<uint64_t>> Fragments; /// Mapping from object index to fragment index. std::vector<uint64_t> FragmentMap; GlobalLayoutBuilder(uint64_t NumObjects) : Fragments(1), FragmentMap(NumObjects) {} /// Add F to the layout while trying to keep its indices contiguous. /// If a previously seen fragment uses any of F's indices, that /// fragment will be laid out inside F. void addFragment(const std::set<uint64_t> &F); }; /// This class is used to build a byte array containing overlapping bit sets. By /// loading from indexed offsets into the byte array and applying a mask, a /// program can test bits from the bit set with a relatively short instruction /// sequence. For example, suppose we have 15 bit sets to lay out: /// /// A (16 bits), B (15 bits), C (14 bits), D (13 bits), E (12 bits), /// F (11 bits), G (10 bits), H (9 bits), I (7 bits), J (6 bits), K (5 bits), /// L (4 bits), M (3 bits), N (2 bits), O (1 bit) /// /// These bits can be laid out in a 16-byte array like this: /// /// Byte Offset /// 0123456789ABCDEF /// Bit /// 7 HHHHHHHHHIIIIIII /// 6 GGGGGGGGGGJJJJJJ /// 5 FFFFFFFFFFFKKKKK /// 4 EEEEEEEEEEEELLLL /// 3 DDDDDDDDDDDDDMMM /// 2 CCCCCCCCCCCCCCNN /// 1 BBBBBBBBBBBBBBBO /// 0 AAAAAAAAAAAAAAAA /// /// For example, to test bit X of A, we evaluate ((bits[X] & 1) != 0), or to /// test bit X of I, we evaluate ((bits[9 + X] & 0x80) != 0). This can be done /// in 1-2 machine instructions on x86, or 4-6 instructions on ARM. 
/// /// This is a byte array, rather than (say) a 2-byte array or a 4-byte array, /// because for one thing it gives us better packing (the more bins there are, /// the less evenly they will be filled), and for another, the instruction /// sequences can be slightly shorter, both on x86 and ARM. struct ByteArrayBuilder { /// The byte array built so far. std::vector<uint8_t> Bytes; enum { BitsPerByte = 8 }; /// The number of bytes allocated so far for each of the bits. uint64_t BitAllocs[BitsPerByte]; ByteArrayBuilder() { memset(BitAllocs, 0, sizeof(BitAllocs)); } /// Allocate BitSize bits in the byte array where Bits contains the bits to /// set. AllocByteOffset is set to the offset within the byte array and /// AllocMask is set to the bitmask for those bits. This uses the LPT (Longest /// Processing Time) multiprocessor scheduling algorithm to lay out the bits /// efficiently; the pass allocates bit sets in decreasing size order. void allocate(const std::set<uint64_t> &Bits, uint64_t BitSize, uint64_t &AllocByteOffset, uint8_t &AllocMask); }; } // namespace llvm #endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/IPO/PassManagerBuilder.h
// llvm/Transforms/IPO/PassManagerBuilder.h - Build Standard Pass -*- C++ -*-=// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the PassManagerBuilder class, which is used to set up a // "standard" optimization sequence suitable for languages like C and C++. // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_IPO_PASSMANAGERBUILDER_H #define LLVM_TRANSFORMS_IPO_PASSMANAGERBUILDER_H #include <vector> namespace hlsl { class HLSLExtensionsCodegenHelper; } namespace llvm { class Pass; class TargetLibraryInfoImpl; class TargetMachine; // The old pass manager infrastructure is hidden in a legacy namespace now. namespace legacy { class FunctionPassManager; class PassManagerBase; } /// PassManagerBuilder - This class is used to set up a standard optimization /// sequence for languages like C and C++, allowing some APIs to customize the /// pass sequence in various ways. A simple example of using it would be: /// /// PassManagerBuilder Builder; /// Builder.OptLevel = 2; /// Builder.populateFunctionPassManager(FPM); /// Builder.populateModulePassManager(MPM); /// /// In addition to setting up the basic passes, PassManagerBuilder allows /// frontends to vend a plugin API, where plugins are allowed to add extensions /// to the default pass manager. They do this by specifying where in the pass /// pipeline they want to be added, along with a callback function that adds /// the pass(es). For example, a plugin that wanted to add a loop optimization /// could do something like this: /// /// static void addMyLoopPass(const PMBuilder &Builder, PassManagerBase &PM) { /// if (Builder.getOptLevel() > 2 && Builder.getOptSizeLevel() == 0) /// PM.add(createMyAwesomePass()); /// } /// ... 
/// Builder.addExtension(PassManagerBuilder::EP_LoopOptimizerEnd, /// addMyLoopPass); /// ... class PassManagerBuilder { public: /// Extensions are passed the builder itself (so they can see how it is /// configured) as well as the pass manager to add stuff to. typedef void (*ExtensionFn)(const PassManagerBuilder &Builder, legacy::PassManagerBase &PM); enum ExtensionPointTy { /// EP_EarlyAsPossible - This extension point allows adding passes before /// any other transformations, allowing them to see the code as it is coming /// out of the frontend. EP_EarlyAsPossible, /// EP_ModuleOptimizerEarly - This extension point allows adding passes /// just before the main module-level optimization passes. EP_ModuleOptimizerEarly, /// EP_LoopOptimizerEnd - This extension point allows adding loop passes to /// the end of the loop optimizer. EP_LoopOptimizerEnd, /// EP_ScalarOptimizerLate - This extension point allows adding optimization /// passes after most of the main optimizations, but before the last /// cleanup-ish optimizations. EP_ScalarOptimizerLate, /// EP_OptimizerLast -- This extension point allows adding passes that /// run after everything else. EP_OptimizerLast, /// EP_EnabledOnOptLevel0 - This extension point allows adding passes that /// should not be disabled by O0 optimization level. The passes will be /// inserted after the inlining pass. EP_EnabledOnOptLevel0, /// EP_Peephole - This extension point allows adding passes that perform /// peephole optimizations similar to the instruction combiner. These passes /// will be inserted after each instance of the instruction combiner pass. EP_Peephole, }; /// The Optimization Level - Specify the basic optimization level. /// 0 = -O0, 1 = -O1, 2 = -O2, 3 = -O3 unsigned OptLevel; /// SizeLevel - How much we're optimizing for size. /// 0 = none, 1 = -Os, 2 = -Oz unsigned SizeLevel; /// LibraryInfo - Specifies information about the runtime library for the /// optimizer. 
If this is non-null, it is added to both the function and /// per-module pass pipeline. TargetLibraryInfoImpl *LibraryInfo; /// Inliner - Specifies the inliner to use. If this is non-null, it is /// added to the per-module passes. Pass *Inliner; bool DisableTailCalls; bool DisableUnitAtATime; bool DisableUnrollLoops; bool BBVectorize; bool SLPVectorize; bool LoopVectorize; bool RerollLoops; bool LoadCombine; bool DisableGVNLoadPRE; bool VerifyInput; bool VerifyOutput; bool MergeFunctions; bool PrepareForLTO; bool HLSLHighLevel = false; // HLSL Change bool HLSLAllowPreserveValues = false; // HLSL Change bool HLSLOnlyWarnOnUnrollFail = false; // HLSL Change hlsl::HLSLExtensionsCodegenHelper *HLSLExtensionsCodeGen = nullptr; // HLSL Change bool HLSLResMayAlias = false; // HLSL Change unsigned ScanLimit = 0; // HLSL Change bool EnableGVN = true; // HLSL Change bool StructurizeLoopExitsForUnroll = false; // HLSL Change bool HLSLEnableAggressiveReassociation = true; // HLSL Change bool HLSLEnableLifetimeMarkers = false; // HLSL Change bool HLSLEnablePartialLifetimeMarkers = false; // HLSL Change bool HLSLEnableDebugNops = false; // HLSL Change bool HLSLEarlyInlining = true; // HLSL Change bool HLSLNoSink = false; // HLSL Change void addHLSLPasses(legacy::PassManagerBase &MPM); // HLSL Change private: /// ExtensionList - This is list of all of the extensions that are registered. std::vector<std::pair<ExtensionPointTy, ExtensionFn> > Extensions; public: PassManagerBuilder(); ~PassManagerBuilder(); /// Adds an extension that will be used by all PassManagerBuilder instances. /// This is intended to be used by plugins, to register a set of /// optimisations to run automatically. 
static void addGlobalExtension(ExtensionPointTy Ty, ExtensionFn Fn); void addExtension(ExtensionPointTy Ty, ExtensionFn Fn); private: void addExtensionsToPM(ExtensionPointTy ETy, legacy::PassManagerBase &PM) const; void addInitialAliasAnalysisPasses(legacy::PassManagerBase &PM) const; void addLTOOptimizationPasses(legacy::PassManagerBase &PM); void addLateLTOOptimizationPasses(legacy::PassManagerBase &PM); public: /// populateFunctionPassManager - This fills in the function pass manager, /// which is expected to be run on each function immediately as it is /// generated. The idea is to reduce the size of the IR in memory. void populateFunctionPassManager(legacy::FunctionPassManager &FPM); /// populateModulePassManager - This sets up the primary pass manager. void populateModulePassManager(legacy::PassManagerBase &MPM); void populateLTOPassManager(legacy::PassManagerBase &PM); }; /// Registers a function for adding a standard set of passes. This should be /// used by optimizer plugins to allow all front ends to transparently use /// them. Create a static instance of this class in your plugin, providing a /// private function that the PassManagerBuilder can use to add your passes. struct RegisterStandardPasses { RegisterStandardPasses(PassManagerBuilder::ExtensionPointTy Ty, PassManagerBuilder::ExtensionFn Fn) { PassManagerBuilder::addGlobalExtension(Ty, Fn); } }; } // end namespace llvm #endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/IPO/InlinerPass.h
//===- InlinerPass.h - Code common to all inliners --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines a simple policy-based bottom-up inliner. This file // implements all of the boring mechanics of the bottom-up inlining, while the // subclass determines WHAT to inline, which is the much more interesting // component. // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_IPO_INLINERPASS_H #define LLVM_TRANSFORMS_IPO_INLINERPASS_H #include "llvm/Analysis/CallGraphSCCPass.h" namespace llvm { class CallSite; class DataLayout; class InlineCost; template<class PtrType, unsigned SmallSize> class SmallPtrSet; /// Inliner - This class contains all of the helper code which is used to /// perform the inlining operations that do not depend on the policy. /// struct Inliner : public CallGraphSCCPass { explicit Inliner(char &ID); explicit Inliner(char &ID, int Threshold, bool InsertLifetime); /// getAnalysisUsage - For this class, we declare that we require and preserve /// the call graph. If the derived class implements this method, it should /// always explicitly call the implementation here. void getAnalysisUsage(AnalysisUsage &Info) const override; // Main run interface method, this implements the interface required by the // Pass class. bool runOnSCC(CallGraphSCC &SCC) override; using llvm::Pass::doFinalization; // doFinalization - Remove now-dead linkonce functions at the end of // processing to avoid breaking the SCC traversal. bool doFinalization(CallGraph &CG) override; /// This method returns the value specified by the -inline-threshold value, /// specified on the command line. This is typically not directly needed. 
/// unsigned getInlineThreshold() const { return InlineThreshold; } /// Calculate the inline threshold for given Caller. This threshold is lower /// if the caller is marked with OptimizeForSize and -inline-threshold is not /// given on the comand line. It is higher if the callee is marked with the /// inlinehint attribute. /// unsigned getInlineThreshold(CallSite CS) const; /// getInlineCost - This method must be implemented by the subclass to /// determine the cost of inlining the specified call site. If the cost /// returned is greater than the current inline threshold, the call site is /// not inlined. /// virtual InlineCost getInlineCost(CallSite CS) = 0; /// removeDeadFunctions - Remove dead functions. /// /// This also includes a hack in the form of the 'AlwaysInlineOnly' flag /// which restricts it to deleting functions with an 'AlwaysInline' /// attribute. This is useful for the InlineAlways pass that only wants to /// deal with that subset of the functions. bool removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly = false); // HLSL Change Starts void applyOptions(PassOptions O) override; void dumpConfig(raw_ostream &OS) override; // HLSL Change Ends private: // InlineThreshold - Cache the value here for easy access. unsigned InlineThreshold; // InsertLifetime - Insert @llvm.lifetime intrinsics. bool InsertLifetime; /// shouldInline - Return true if the inliner should attempt to /// inline at the given CallSite. bool shouldInline(CallSite CS); }; } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/InstCombine/InstCombineWorklist.h
//===- InstCombineWorklist.h - Worklist for InstCombine pass ----*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINEWORKLIST_H #define LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINEWORKLIST_H #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallVector.h" #include "llvm/IR/Instruction.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #define DEBUG_TYPE "instcombine" namespace llvm { /// InstCombineWorklist - This is the worklist management logic for /// InstCombine. class InstCombineWorklist { SmallVector<Instruction*, 256> Worklist; DenseMap<Instruction*, unsigned> WorklistMap; void operator=(const InstCombineWorklist&RHS) = delete; InstCombineWorklist(const InstCombineWorklist&) = delete; public: InstCombineWorklist() {} InstCombineWorklist(InstCombineWorklist &&Arg) : Worklist(std::move(Arg.Worklist)), WorklistMap(std::move(Arg.WorklistMap)) {} InstCombineWorklist &operator=(InstCombineWorklist &&RHS) { Worklist = std::move(RHS.Worklist); WorklistMap = std::move(RHS.WorklistMap); return *this; } bool isEmpty() const { return Worklist.empty(); } /// Add - Add the specified instruction to the worklist if it isn't already /// in it. void Add(Instruction *I) { if (WorklistMap.insert(std::make_pair(I, Worklist.size())).second) { DEBUG(dbgs() << "IC: ADD: " << *I << '\n'); Worklist.push_back(I); } } void AddValue(Value *V) { if (Instruction *I = dyn_cast<Instruction>(V)) Add(I); } /// AddInitialGroup - Add the specified batch of stuff in reverse order. /// which should only be done when the worklist is empty and when the group /// has no duplicates. 
void AddInitialGroup(Instruction *const *List, unsigned NumEntries) { assert(Worklist.empty() && "Worklist must be empty to add initial group"); Worklist.reserve(NumEntries+16); WorklistMap.resize(NumEntries); DEBUG(dbgs() << "IC: ADDING: " << NumEntries << " instrs to worklist\n"); for (unsigned Idx = 0; NumEntries; --NumEntries) { Instruction *I = List[NumEntries-1]; WorklistMap.insert(std::make_pair(I, Idx++)); Worklist.push_back(I); } } // Remove - remove I from the worklist if it exists. void Remove(Instruction *I) { DenseMap<Instruction*, unsigned>::iterator It = WorklistMap.find(I); if (It == WorklistMap.end()) return; // Not in worklist. // Don't bother moving everything down, just null out the slot. Worklist[It->second] = nullptr; WorklistMap.erase(It); } Instruction *RemoveOne() { Instruction *I = Worklist.pop_back_val(); WorklistMap.erase(I); return I; } /// AddUsersToWorkList - When an instruction is simplified, add all users of /// the instruction to the work lists because they might get more simplified /// now. /// void AddUsersToWorkList(Instruction &I) { for (User *U : I.users()) Add(cast<Instruction>(U)); } /// Zap - check that the worklist is empty and nuke the backing store for /// the map if it is large. void Zap() { assert(WorklistMap.empty() && "Worklist empty, but map not?"); // Do an explicit clear, this shrinks the map if needed. WorklistMap.clear(); } }; } // end namespace llvm. #undef DEBUG_TYPE #endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/InstCombine/InstCombine.h
//===- InstCombine.h - InstCombine pass -------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// \file /// /// This file provides the primary interface to the instcombine pass. This pass /// is suitable for use in the new pass manager. For a pass that works with the /// legacy pass manager, please look for \c createInstructionCombiningPass() in /// Scalar.h. /// //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINE_H #define LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINE_H #include "llvm/IR/Function.h" #include "llvm/IR/PassManager.h" #include "llvm/Transforms/InstCombine/InstCombineWorklist.h" namespace llvm { class InstCombinePass { InstCombineWorklist Worklist; public: static StringRef name() { return "InstCombinePass"; } // Explicitly define constructors for MSVC. InstCombinePass() {} InstCombinePass(InstCombinePass &&Arg) : Worklist(std::move(Arg.Worklist)) {} InstCombinePass &operator=(InstCombinePass &&RHS) { Worklist = std::move(RHS.Worklist); return *this; } PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM); }; } #endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
//===-- UnifyFunctionExitNodes.h - Ensure fn's have one return --*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This pass is used to ensure that functions have at most one return and one // unwind instruction in them. Additionally, it keeps track of which node is // the new exit node of the CFG. If there are no return or unwind instructions // in the function, the getReturnBlock/getUnwindBlock methods will return a null // pointer. // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_UTILS_UNIFYFUNCTIONEXITNODES_H #define LLVM_TRANSFORMS_UTILS_UNIFYFUNCTIONEXITNODES_H #include "llvm/Pass.h" namespace llvm { struct UnifyFunctionExitNodes : public FunctionPass { BasicBlock *ReturnBlock, *UnwindBlock, *UnreachableBlock; public: static char ID; // Pass identification, replacement for typeid UnifyFunctionExitNodes() : FunctionPass(ID), ReturnBlock(nullptr), UnwindBlock(nullptr) { initializeUnifyFunctionExitNodesPass(*PassRegistry::getPassRegistry()); } // We can preserve non-critical-edgeness when we unify function exit nodes void getAnalysisUsage(AnalysisUsage &AU) const override; // getReturn|Unwind|UnreachableBlock - Return the new single (or nonexistent) // return, unwind, or unreachable basic blocks in the CFG. // BasicBlock *getReturnBlock() const { return ReturnBlock; } BasicBlock *getUnwindBlock() const { return UnwindBlock; } BasicBlock *getUnreachableBlock() const { return UnreachableBlock; } bool runOnFunction(Function &F) override; }; Pass *createUnifyFunctionExitNodesPass(); } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Utils/ModuleUtils.h
//===-- ModuleUtils.h - Functions to manipulate Modules ---------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This family of functions perform manipulations on Modules. // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_UTILS_MODULEUTILS_H #define LLVM_TRANSFORMS_UTILS_MODULEUTILS_H #include "llvm/ADT/ArrayRef.h" #include <utility> // for std::pair namespace llvm { class Module; class Function; class GlobalValue; class GlobalVariable; class Constant; class StringRef; class Value; class Type; template <class PtrType> class SmallPtrSetImpl; /// Append F to the list of global ctors of module M with the given Priority. /// This wraps the function in the appropriate structure and stores it along /// side other global constructors. For details see /// http://llvm.org/docs/LangRef.html#intg_global_ctors void appendToGlobalCtors(Module &M, Function *F, int Priority); /// Same as appendToGlobalCtors(), but for global dtors. void appendToGlobalDtors(Module &M, Function *F, int Priority); /// \brief Given "llvm.used" or "llvm.compiler.used" as a global name, collect /// the initializer elements of that global in Set and return the global itself. GlobalVariable *collectUsedGlobalVariables(Module &M, SmallPtrSetImpl<GlobalValue *> &Set, bool CompilerUsed); // Validate the result of Module::getOrInsertFunction called for an interface // function of given sanitizer. If the instrumented module defines a function // with the same name, their prototypes must match, otherwise // getOrInsertFunction returns a bitcast. Function *checkSanitizerInterfaceFunction(Constant *FuncOrBitcast); /// \brief Creates sanitizer constructor function, and calls sanitizer's init /// function from it. 
/// \return Returns pair of pointers to constructor, and init functions /// respectively. std::pair<Function *, Function *> createSanitizerCtorAndInitFunctions( Module &M, StringRef CtorName, StringRef InitName, ArrayRef<Type *> InitArgTypes, ArrayRef<Value *> InitArgs); } // End llvm namespace #endif // LLVM_TRANSFORMS_UTILS_MODULEUTILS_H
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Utils/GlobalStatus.h
//===- GlobalStatus.h - Compute status info for globals ---------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_UTILS_GLOBALSTATUS_H #define LLVM_TRANSFORMS_UTILS_GLOBALSTATUS_H #include "llvm/IR/Instructions.h" namespace llvm { class Value; class Function; /// It is safe to destroy a constant iff it is only used by constants itself. /// Note that constants cannot be cyclic, so this test is pretty easy to /// implement recursively. /// bool isSafeToDestroyConstant(const Constant *C); /// As we analyze each global, keep track of some information about it. If we /// find out that the address of the global is taken, none of this info will be /// accurate. struct GlobalStatus { /// True if the global's address is used in a comparison. bool IsCompared; /// True if the global is ever loaded. If the global isn't ever loaded it /// can be deleted. bool IsLoaded; /// Keep track of what stores to the global look like. enum StoredType { /// There is no store to this global. It can thus be marked constant. NotStored, /// This global is stored to, but the only thing stored is the constant it /// was initialized with. This is only tracked for scalar globals. InitializerStored, /// This global is stored to, but only its initializer and one other value /// is ever stored to it. If this global isStoredOnce, we track the value /// stored to it in StoredOnceValue below. This is only tracked for scalar /// globals. StoredOnce, /// This global is stored to by multiple values or something else that we /// cannot track. Stored } StoredType; /// If only one value (besides the initializer constant) is ever stored to /// this global, keep track of what value it is. Value *StoredOnceValue; /// These start out null/false. 
When the first accessing function is noticed, /// it is recorded. When a second different accessing function is noticed, /// HasMultipleAccessingFunctions is set to true. const Function *AccessingFunction; bool HasMultipleAccessingFunctions; /// Set to true if this global has a user that is not an instruction (e.g. a /// constant expr or GV initializer). bool HasNonInstructionUser; /// Set to the strongest atomic ordering requirement. AtomicOrdering Ordering; /// Look at all uses of the global and fill in the GlobalStatus structure. If /// the global has its address taken, return true to indicate we can't do /// anything with it. static bool analyzeGlobal(const Value *V, GlobalStatus &GS); GlobalStatus(); }; } #endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Utils/LoopUtils.h
//===- llvm/Transforms/Utils/LoopUtils.h - Loop utilities -*- C++ -*-=========// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines some loop transformation utilities. // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_UTILS_LOOPUTILS_H #define LLVM_TRANSFORMS_UTILS_LOOPUTILS_H #include "llvm/ADT/SmallVector.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/IRBuilder.h" namespace llvm { class AliasAnalysis; class AliasSet; class AliasSetTracker; class AssumptionCache; class BasicBlock; class DataLayout; class DominatorTree; class Loop; class LoopInfo; class Pass; class PredIteratorCache; class ScalarEvolution; class TargetLibraryInfo; /// \brief Captures loop safety information. /// It keep information for loop & its header may throw exception. struct LICMSafetyInfo { bool MayThrow; // The current loop contains an instruction which // may throw. bool HeaderMayThrow; // Same as previous, but specific to loop header LICMSafetyInfo() : MayThrow(false), HeaderMayThrow(false) {} }; /// The RecurrenceDescriptor is used to identify recurrences variables in a /// loop. Reduction is a special case of recurrence that has uses of the /// recurrence variable outside the loop. The method isReductionPHI identifies /// reductions that are basic recurrences. /// /// Basic recurrences are defined as the summation, product, OR, AND, XOR, min, /// or max of a set of terms. For example: for(i=0; i<n; i++) { total += /// array[i]; } is a summation of array elements. Basic recurrences are a /// special case of chains of recurrences (CR). See ScalarEvolution for CR /// references. /// This struct holds information about recurrence variables. 
class RecurrenceDescriptor { public: /// This enum represents the kinds of recurrences that we support. enum RecurrenceKind { RK_NoRecurrence, ///< Not a recurrence. RK_IntegerAdd, ///< Sum of integers. RK_IntegerMult, ///< Product of integers. RK_IntegerOr, ///< Bitwise or logical OR of numbers. RK_IntegerAnd, ///< Bitwise or logical AND of numbers. RK_IntegerXor, ///< Bitwise or logical XOR of numbers. RK_IntegerMinMax, ///< Min/max implemented in terms of select(cmp()). RK_FloatAdd, ///< Sum of floats. RK_FloatMult, ///< Product of floats. RK_FloatMinMax ///< Min/max implemented in terms of select(cmp()). }; // This enum represents the kind of minmax recurrence. enum MinMaxRecurrenceKind { MRK_Invalid, MRK_UIntMin, MRK_UIntMax, MRK_SIntMin, MRK_SIntMax, MRK_FloatMin, MRK_FloatMax }; RecurrenceDescriptor() : StartValue(nullptr), LoopExitInstr(nullptr), Kind(RK_NoRecurrence), MinMaxKind(MRK_Invalid) {} RecurrenceDescriptor(Value *Start, Instruction *Exit, RecurrenceKind K, MinMaxRecurrenceKind MK) : StartValue(Start), LoopExitInstr(Exit), Kind(K), MinMaxKind(MK) {} /// This POD struct holds information about a potential recurrence operation. class InstDesc { public: InstDesc(bool IsRecur, Instruction *I) : IsRecurrence(IsRecur), PatternLastInst(I), MinMaxKind(MRK_Invalid) {} InstDesc(Instruction *I, MinMaxRecurrenceKind K) : IsRecurrence(true), PatternLastInst(I), MinMaxKind(K) {} bool isRecurrence() { return IsRecurrence; } MinMaxRecurrenceKind getMinMaxKind() { return MinMaxKind; } Instruction *getPatternInst() { return PatternLastInst; } private: // Is this instruction a recurrence candidate. bool IsRecurrence; // The last instruction in a min/max pattern (select of the select(icmp()) // pattern), or the current recurrence instruction otherwise. Instruction *PatternLastInst; // If this is a min/max pattern the comparison predicate. 
MinMaxRecurrenceKind MinMaxKind; }; /// Returns a struct describing if the instruction 'I' can be a recurrence /// variable of type 'Kind'. If the recurrence is a min/max pattern of /// select(icmp()) this function advances the instruction pointer 'I' from the /// compare instruction to the select instruction and stores this pointer in /// 'PatternLastInst' member of the returned struct. static InstDesc isRecurrenceInstr(Instruction *I, RecurrenceKind Kind, InstDesc &Prev, bool HasFunNoNaNAttr); /// Returns true if instuction I has multiple uses in Insts static bool hasMultipleUsesOf(Instruction *I, SmallPtrSetImpl<Instruction *> &Insts); /// Returns true if all uses of the instruction I is within the Set. static bool areAllUsesIn(Instruction *I, SmallPtrSetImpl<Instruction *> &Set); /// Returns a struct describing if the instruction if the instruction is a /// Select(ICmp(X, Y), X, Y) instruction pattern corresponding to a min(X, Y) /// or max(X, Y). static InstDesc isMinMaxSelectCmpPattern(Instruction *I, InstDesc &Prev); /// Returns identity corresponding to the RecurrenceKind. static Constant *getRecurrenceIdentity(RecurrenceKind K, Type *Tp); /// Returns the opcode of binary operation corresponding to the /// RecurrenceKind. static unsigned getRecurrenceBinOp(RecurrenceKind Kind); /// Returns a Min/Max operation corresponding to MinMaxRecurrenceKind. static Value *createMinMaxOp(IRBuilder<> &Builder, MinMaxRecurrenceKind RK, Value *Left, Value *Right); /// Returns true if Phi is a reduction of type Kind and adds it to the /// RecurrenceDescriptor. static bool AddReductionVar(PHINode *Phi, RecurrenceKind Kind, Loop *TheLoop, bool HasFunNoNaNAttr, RecurrenceDescriptor &RedDes); /// Returns true if Phi is a reduction in TheLoop. The RecurrenceDescriptor is /// returned in RedDes. 
static bool isReductionPHI(PHINode *Phi, Loop *TheLoop, RecurrenceDescriptor &RedDes); RecurrenceKind getRecurrenceKind() { return Kind; } MinMaxRecurrenceKind getMinMaxRecurrenceKind() { return MinMaxKind; } TrackingVH<Value> getRecurrenceStartValue() { return StartValue; } Instruction *getLoopExitInstr() { return LoopExitInstr; } private: // The starting value of the recurrence. // It does not have to be zero! TrackingVH<Value> StartValue; // The instruction who's value is used outside the loop. Instruction *LoopExitInstr; // The kind of the recurrence. RecurrenceKind Kind; // If this a min/max recurrence the kind of recurrence. MinMaxRecurrenceKind MinMaxKind; }; BasicBlock *InsertPreheaderForLoop(Loop *L, Pass *P); /// \brief Simplify each loop in a loop nest recursively. /// /// This takes a potentially un-simplified loop L (and its children) and turns /// it into a simplified loop nest with preheaders and single backedges. It /// will optionally update \c AliasAnalysis and \c ScalarEvolution analyses if /// passed into it. bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, Pass *PP, AliasAnalysis *AA = nullptr, ScalarEvolution *SE = nullptr, AssumptionCache *AC = nullptr); /// \brief Put loop into LCSSA form. /// /// Looks at all instructions in the loop which have uses outside of the /// current loop. For each, an LCSSA PHI node is inserted and the uses outside /// the loop are rewritten to use this node. /// /// LoopInfo and DominatorTree are required and preserved. /// /// If ScalarEvolution is passed in, it will be preserved. /// /// Returns true if any modifications are made to the loop. bool formLCSSA(Loop &L, DominatorTree &DT, LoopInfo *LI, ScalarEvolution *SE = nullptr); /// \brief Put a loop nest into LCSSA form. /// /// This recursively forms LCSSA for a loop nest. /// /// LoopInfo and DominatorTree are required and preserved. /// /// If ScalarEvolution is passed in, it will be preserved. 
/// /// Returns true if any modifications are made to the loop. bool formLCSSARecursively(Loop &L, DominatorTree &DT, LoopInfo *LI, ScalarEvolution *SE = nullptr); /// \brief Walk the specified region of the CFG (defined by all blocks /// dominated by the specified block, and that are in the current loop) in /// reverse depth first order w.r.t the DominatorTree. This allows us to visit /// uses before definitions, allowing us to sink a loop body in one pass without /// iteration. Takes DomTreeNode, AliasAnalysis, LoopInfo, DominatorTree, /// DataLayout, TargetLibraryInfo, Loop, AliasSet information for all /// instructions of the loop and loop safety information as arguments. /// It returns changed status. bool sinkRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *, TargetLibraryInfo *, Loop *, AliasSetTracker *, LICMSafetyInfo *); /// \brief Walk the specified region of the CFG (defined by all blocks /// dominated by the specified block, and that are in the current loop) in depth /// first order w.r.t the DominatorTree. This allows us to visit definitions /// before uses, allowing us to hoist a loop body in one pass without iteration. /// Takes DomTreeNode, AliasAnalysis, LoopInfo, DominatorTree, DataLayout, /// TargetLibraryInfo, Loop, AliasSet information for all instructions of the /// loop and loop safety information as arguments. It returns changed status. bool hoistRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *, TargetLibraryInfo *, Loop *, AliasSetTracker *, LICMSafetyInfo *); /// \brief Try to promote memory values to scalars by sinking stores out of /// the loop and moving loads to before the loop. We do this by looping over /// the stores in the loop, looking for stores to Must pointers which are /// loop invariant. 
It takes AliasSet, Loop exit blocks vector, loop exit blocks /// insertion point vector, PredIteratorCache, LoopInfo, DominatorTree, Loop, /// AliasSet information for all instructions of the loop and loop safety /// information as arguments. It returns changed status. bool promoteLoopAccessesToScalars(AliasSet &, SmallVectorImpl<BasicBlock*> &, SmallVectorImpl<Instruction*> &, PredIteratorCache &, LoopInfo *, DominatorTree *, Loop *, AliasSetTracker *, LICMSafetyInfo *); /// \brief Computes safety information for a loop /// checks loop body & header for the possiblity of may throw /// exception, it takes LICMSafetyInfo and loop as argument. /// Updates safety information in LICMSafetyInfo argument. void computeLICMSafetyInfo(LICMSafetyInfo *, Loop *); /// \brief Checks if the given PHINode in a loop header is an induction /// variable. Returns true if this is an induction PHI along with the step /// value. bool isInductionPHI(PHINode *, ScalarEvolution *, ConstantInt *&); } #endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Utils/SimplifyIndVar.h
//===-- llvm/Transforms/Utils/SimplifyIndVar.h - Indvar Utils ---*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an interface for induction variable simplification. It
// does not define any actual pass or policy, but provides a single function to
// simplify a loop's induction variables based on ScalarEvolution.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_SIMPLIFYINDVAR_H
#define LLVM_TRANSFORMS_UTILS_SIMPLIFYINDVAR_H

#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/CommandLine.h"

namespace llvm {

class CastInst;
class DominatorTree;
class IVUsers;
class Loop;
class LPPassManager;
class PHINode;
class ScalarEvolution;

/// Interface for visiting interesting IV users that are recognized but not
/// simplified by this utility.
class IVVisitor {
protected:
  const DominatorTree *DT;             // Optional dominator tree; required only
                                       // when splitting overflow intrinsics.
  bool ShouldSplitOverflowIntrinsics;  // Off by default; enabled via
                                       // setSplitOverflowIntrinsics().

  virtual void anchor();               // Out-of-line anchor to pin the vtable.

public:
  IVVisitor(): DT(nullptr), ShouldSplitOverflowIntrinsics(false) {}
  virtual ~IVVisitor() {}

  const DominatorTree *getDomTree() const { return DT; }

  // NOTE(review): the "Instrinsics" spelling is part of the public interface;
  // renaming it would break existing callers, so it is kept as-is.
  bool shouldSplitOverflowInstrinsics() const {
    return ShouldSplitOverflowIntrinsics;
  }
  void setSplitOverflowIntrinsics() {
    ShouldSplitOverflowIntrinsics = true;
    // Splitting rewrites control flow, which requires dominator information.
    assert(DT && "Splitting overflow intrinsics requires a DomTree.");
  }

  /// Called for IV-user casts that this utility recognizes but does not
  /// simplify itself; implementations decide what to do with them.
  virtual void visitCast(CastInst *Cast) = 0;
};

/// simplifyUsersOfIV - Simplify instructions that use this induction variable
/// by using ScalarEvolution to analyze the IV's recurrence.  Dead instructions
/// produced by the simplification are appended to \p Dead; \p V, if non-null,
/// is notified of interesting users that were recognized but not simplified.
bool simplifyUsersOfIV(PHINode *CurrIV, ScalarEvolution *SE, LPPassManager *LPM,
                       SmallVectorImpl<WeakTrackingVH> &Dead,
                       IVVisitor *V = nullptr);

/// SimplifyLoopIVs - Simplify users of induction variables within this
/// loop. This does not actually change or add IVs.
bool simplifyLoopIVs(Loop *L, ScalarEvolution *SE, LPPassManager *LPM,
                     SmallVectorImpl<WeakTrackingVH> &Dead);

} // namespace llvm

#endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Utils/UnrollLoop.h
//===- llvm/Transforms/Utils/UnrollLoop.h - Unrolling utilities -*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines some loop unrolling utilities. It does not define any // actual pass or policy, but provides a single function to perform loop // unrolling. // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H #define LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H #include "llvm/ADT/StringRef.h" namespace llvm { class AssumptionCache; class Loop; class LoopInfo; class LPPassManager; class MDNode; class Pass; bool UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool AllowRuntime, bool AllowExpensiveTripCount, unsigned TripMultiple, LoopInfo *LI, Pass *PP, LPPassManager *LPM, AssumptionCache *AC); bool UnrollRuntimeLoopProlog(Loop *L, unsigned Count, bool AllowExpensiveTripCount, LoopInfo *LI, LPPassManager *LPM); MDNode *GetUnrollMetadata(MDNode *LoopID, StringRef Name); } #endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Utils/SimplifyLibCalls.h
//===- SimplifyLibCalls.h - Library call simplifier -------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file exposes an interface to build some C language libcalls for
// optimization passes that need to call the various functions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_SIMPLIFYLIBCALLS_H
#define LLVM_TRANSFORMS_UTILS_SIMPLIFYLIBCALLS_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/IRBuilder.h"

namespace llvm {
class Value;
class CallInst;
class DataLayout;
class Instruction;
class TargetLibraryInfo;
class BasicBlock;
class Function;

/// \brief This class implements simplifications for calls to fortified library
/// functions (__st*cpy_chk, __memcpy_chk, __memmove_chk, __memset_chk), to,
/// when possible, replace them with their non-checking counterparts.
/// Other optimizations can also be done, but it's possible to disable them and
/// only simplify needless use of the checking versions (when the object size
/// is unknown) by passing true for OnlyLowerUnknownSize.
class FortifiedLibCallSimplifier {
private:
  const TargetLibraryInfo *TLI;  // Describes which libcalls are available.
  bool OnlyLowerUnknownSize;     // If true, only strip checks with unknown
                                 // object size; do no other optimization.

public:
  FortifiedLibCallSimplifier(const TargetLibraryInfo *TLI,
                             bool OnlyLowerUnknownSize = false);

  /// \brief Take the given call instruction and return a more
  /// optimal value to replace the instruction with or 0 if a more
  /// optimal form can't be found.
  /// The call must not be an indirect call.
  Value *optimizeCall(CallInst *CI);

private:
  Value *optimizeMemCpyChk(CallInst *CI, IRBuilder<> &B);
  Value *optimizeMemMoveChk(CallInst *CI, IRBuilder<> &B);
  Value *optimizeMemSetChk(CallInst *CI, IRBuilder<> &B);

  // Str/Stp cpy are similar enough to be handled in the same functions.
  Value *optimizeStrpCpyChk(CallInst *CI, IRBuilder<> &B, LibFunc::Func Func);
  Value *optimizeStrpNCpyChk(CallInst *CI, IRBuilder<> &B, LibFunc::Func Func);

  /// \brief Checks whether the call \p CI to a fortified libcall is foldable
  /// to the non-fortified version.
  bool isFortifiedCallFoldable(CallInst *CI, unsigned ObjSizeOp,
                               unsigned SizeOp, bool isString);
};

/// LibCallSimplifier - This class implements a collection of optimizations
/// that replace well formed calls to library functions with a more optimal
/// form.  For example, replacing 'printf("Hello!")' with 'puts("Hello!")'.
class LibCallSimplifier {
private:
  FortifiedLibCallSimplifier FortifiedSimplifier; // Handles *_chk variants.
  const DataLayout &DL;
  const TargetLibraryInfo *TLI;
  bool UnsafeFPShrink;
  function_ref<void(Instruction *, Value *)> Replacer; // RAUW hook.

  /// \brief Internal wrapper for RAUW that is the default implementation.
  ///
  /// Other users may provide an alternate function with this signature instead
  /// of this one.
  static void replaceAllUsesWithDefault(Instruction *I, Value *With);

  /// \brief Replace an instruction's uses with a value using our replacer.
  void replaceAllUsesWith(Instruction *I, Value *With);

public:
  LibCallSimplifier(const DataLayout &DL, const TargetLibraryInfo *TLI,
                    function_ref<void(Instruction *, Value *)> Replacer =
                        &replaceAllUsesWithDefault);

  /// optimizeCall - Take the given call instruction and return a more
  /// optimal value to replace the instruction with or 0 if a more
  /// optimal form can't be found.  Note that the returned value may
  /// be equal to the instruction being optimized.  In this case all
  /// other instructions that use the given instruction were modified
  /// and the given instruction is dead.
  /// The call must not be an indirect call.
  Value *optimizeCall(CallInst *CI);

private:
  // String and Memory Library Call Optimizations
  Value *optimizeStrCat(CallInst *CI, IRBuilder<> &B);
  Value *optimizeStrNCat(CallInst *CI, IRBuilder<> &B);
  Value *optimizeStrChr(CallInst *CI, IRBuilder<> &B);
  Value *optimizeStrRChr(CallInst *CI, IRBuilder<> &B);
  Value *optimizeStrCmp(CallInst *CI, IRBuilder<> &B);
  Value *optimizeStrNCmp(CallInst *CI, IRBuilder<> &B);
  Value *optimizeStrCpy(CallInst *CI, IRBuilder<> &B);
  Value *optimizeStpCpy(CallInst *CI, IRBuilder<> &B);
  Value *optimizeStrNCpy(CallInst *CI, IRBuilder<> &B);
  Value *optimizeStrLen(CallInst *CI, IRBuilder<> &B);
  Value *optimizeStrPBrk(CallInst *CI, IRBuilder<> &B);
  Value *optimizeStrTo(CallInst *CI, IRBuilder<> &B);
  Value *optimizeStrSpn(CallInst *CI, IRBuilder<> &B);
  Value *optimizeStrCSpn(CallInst *CI, IRBuilder<> &B);
  Value *optimizeStrStr(CallInst *CI, IRBuilder<> &B);
  Value *optimizeMemChr(CallInst *CI, IRBuilder<> &B);
  Value *optimizeMemCmp(CallInst *CI, IRBuilder<> &B);
  Value *optimizeMemCpy(CallInst *CI, IRBuilder<> &B);
  Value *optimizeMemMove(CallInst *CI, IRBuilder<> &B);
  Value *optimizeMemSet(CallInst *CI, IRBuilder<> &B);
  // Wrapper for all String/Memory Library Call Optimizations
  Value *optimizeStringMemoryLibCall(CallInst *CI, IRBuilder<> &B);

  // Math Library Optimizations
  Value *optimizeUnaryDoubleFP(CallInst *CI, IRBuilder<> &B,
                               bool CheckRetType);
  Value *optimizeBinaryDoubleFP(CallInst *CI, IRBuilder<> &B);
  Value *optimizeCos(CallInst *CI, IRBuilder<> &B);
  Value *optimizePow(CallInst *CI, IRBuilder<> &B);
  Value *optimizeExp2(CallInst *CI, IRBuilder<> &B);
  Value *optimizeFabs(CallInst *CI, IRBuilder<> &B);
  Value *optimizeSqrt(CallInst *CI, IRBuilder<> &B);
  Value *optimizeSinCosPi(CallInst *CI, IRBuilder<> &B);

  // Integer Library Call Optimizations
  Value *optimizeFFS(CallInst *CI, IRBuilder<> &B);
  Value *optimizeAbs(CallInst *CI, IRBuilder<> &B);
  Value *optimizeIsDigit(CallInst *CI, IRBuilder<> &B);
  Value *optimizeIsAscii(CallInst *CI, IRBuilder<> &B);
  Value *optimizeToAscii(CallInst *CI, IRBuilder<> &B);

  // Formatting and IO Library Call Optimizations
  Value *optimizeErrorReporting(CallInst *CI, IRBuilder<> &B,
                                int StreamArg = -1);
  Value *optimizePrintF(CallInst *CI, IRBuilder<> &B);
  Value *optimizeSPrintF(CallInst *CI, IRBuilder<> &B);
  Value *optimizeFPrintF(CallInst *CI, IRBuilder<> &B);
  Value *optimizeFWrite(CallInst *CI, IRBuilder<> &B);
  Value *optimizeFPuts(CallInst *CI, IRBuilder<> &B);
  Value *optimizePuts(CallInst *CI, IRBuilder<> &B);

  // Helper methods
  Value *emitStrLenMemCpy(Value *Src, Value *Dst, uint64_t Len,
                          IRBuilder<> &B);
  void classifyArgUse(Value *Val, BasicBlock *BB, bool IsFloat,
                      SmallVectorImpl<CallInst *> &SinCalls,
                      SmallVectorImpl<CallInst *> &CosCalls,
                      SmallVectorImpl<CallInst *> &SinCosCalls);
  void replaceTrigInsts(SmallVectorImpl<CallInst *> &Calls, Value *Res);
  Value *optimizePrintFString(CallInst *CI, IRBuilder<> &B);
  Value *optimizeSPrintFString(CallInst *CI, IRBuilder<> &B);
  Value *optimizeFPrintFString(CallInst *CI, IRBuilder<> &B);

  /// hasFloatVersion - Checks if there is a float version of the specified
  /// function by checking for an existing function with name FuncName + f
  bool hasFloatVersion(StringRef FuncName);
};
} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
//===-- SSAUpdaterImpl.h - SSA Updater Implementation -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides a template that implements the core algorithm for the
// SSAUpdater and MachineSSAUpdater.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H
#define LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Debug.h"

namespace llvm {

#define DEBUG_TYPE "ssaupdater"

class CastInst;
class PHINode;

// Clients specialize this traits class to adapt the algorithm to either IR
// (SSAUpdater) or MachineIR (MachineSSAUpdater).
template<typename T> class SSAUpdaterTraits;

template<typename UpdaterT>
class SSAUpdaterImpl {
private:
  UpdaterT *Updater;

  typedef SSAUpdaterTraits<UpdaterT> Traits;
  typedef typename Traits::BlkT BlkT;   // Basic-block type.
  typedef typename Traits::ValT ValT;   // Value type.
  typedef typename Traits::PhiT PhiT;   // PHI-node type.

  /// BBInfo - Per-basic block information used internally by SSAUpdaterImpl.
  /// The predecessors of each block are cached here since pred_iterator is
  /// slow and we need to iterate over the blocks at least a few times.
  class BBInfo {
  public:
    BlkT *BB;          // Back-pointer to the corresponding block.
    ValT AvailableVal; // Value to use in this block.
    BBInfo *DefBB;     // Block that defines the available value.
    int BlkNum;        // Postorder number.
    BBInfo *IDom;      // Immediate dominator.
    unsigned NumPreds; // Number of predecessor blocks.
    BBInfo **Preds;    // Array[NumPreds] of predecessor blocks.
    PhiT *PHITag;      // Marker for existing PHIs that match.

    // A block with a known value is its own DefBB; otherwise DefBB is filled
    // in later by FindPHIPlacement/FindAvailableVals.
    BBInfo(BlkT *ThisBB, ValT V)
      : BB(ThisBB), AvailableVal(V), DefBB(V ? this : nullptr), BlkNum(0),
        IDom(nullptr), NumPreds(0), Preds(nullptr), PHITag(nullptr) {}
  };

  typedef DenseMap<BlkT*, ValT> AvailableValsTy;
  AvailableValsTy *AvailableVals;   // Client-owned map of known values.

  SmallVectorImpl<PhiT*> *InsertedPHIs; // Optional out-list of new PHIs.

  typedef SmallVectorImpl<BBInfo*> BlockListTy;
  typedef DenseMap<BlkT*, BBInfo*> BBMapTy;
  BBMapTy BBMap;
  BumpPtrAllocator Allocator;       // Owns all BBInfo objects and Preds arrays.

public:
  explicit SSAUpdaterImpl(UpdaterT *U, AvailableValsTy *A,
                          SmallVectorImpl<PhiT*> *Ins) :
    Updater(U), AvailableVals(A), InsertedPHIs(Ins) { }

  /// GetValue - Check to see if AvailableVals has an entry for the specified
  /// BB and if so, return it. If not, construct SSA form by first
  /// calculating the required placement of PHIs and then inserting new PHIs
  /// where needed.
  ValT GetValue(BlkT *BB) {
    SmallVector<BBInfo*, 100> BlockList;
    BBInfo *PseudoEntry = BuildBlockList(BB, &BlockList);

    // Special case: bail out if BB is unreachable.
    if (BlockList.size() == 0) {
      ValT V = Traits::GetUndefVal(BB, Updater);
      (*AvailableVals)[BB] = V;
      return V;
    }

    FindDominators(&BlockList, PseudoEntry);
    FindPHIPlacement(&BlockList);
    FindAvailableVals(&BlockList);

    return BBMap[BB]->DefBB->AvailableVal;
  }

  /// BuildBlockList - Starting from the specified basic block, traverse back
  /// through its predecessors until reaching blocks with known values.
  /// Create BBInfo structures for the blocks and append them to the block
  /// list.
  BBInfo *BuildBlockList(BlkT *BB, BlockListTy *BlockList) {
    SmallVector<BBInfo*, 10> RootList;
    SmallVector<BBInfo*, 64> WorkList;

    BBInfo *Info = new (Allocator) BBInfo(BB, 0);
    BBMap[BB] = Info;
    WorkList.push_back(Info);

    // Search backward from BB, creating BBInfos along the way and stopping
    // when reaching blocks that define the value.  Record those defining
    // blocks on the RootList.
    SmallVector<BlkT*, 10> Preds;
    while (!WorkList.empty()) {
      Info = WorkList.pop_back_val();
      Preds.clear();
      Traits::FindPredecessorBlocks(Info->BB, &Preds);
      Info->NumPreds = Preds.size();
      if (Info->NumPreds == 0)
        Info->Preds = nullptr;
      else
        Info->Preds = static_cast<BBInfo**>
          (Allocator.Allocate(Info->NumPreds * sizeof(BBInfo*),
                              AlignOf<BBInfo*>::Alignment));

      for (unsigned p = 0; p != Info->NumPreds; ++p) {
        BlkT *Pred = Preds[p];
        // Check if BBMap already has a BBInfo for the predecessor block.
        typename BBMapTy::value_type &BBMapBucket =
          BBMap.FindAndConstruct(Pred);
        if (BBMapBucket.second) {
          Info->Preds[p] = BBMapBucket.second;
          continue;
        }

        // Create a new BBInfo for the predecessor.
        ValT PredVal = AvailableVals->lookup(Pred);
        BBInfo *PredInfo = new (Allocator) BBInfo(Pred, PredVal);
        BBMapBucket.second = PredInfo;
        Info->Preds[p] = PredInfo;

        if (PredInfo->AvailableVal) {
          // Known value: this is a root of the backward search.
          RootList.push_back(PredInfo);
          continue;
        }
        WorkList.push_back(PredInfo);
      }
    }

    // Now that we know what blocks are backwards-reachable from the starting
    // block, do a forward depth-first traversal to assign postorder numbers
    // to those blocks.
    BBInfo *PseudoEntry = new (Allocator) BBInfo(nullptr, 0);
    unsigned BlkNum = 1;

    // Initialize the worklist with the roots from the backward traversal.
    // BlkNum -1 marks "on the worklist"; -2 marks "successors pushed".
    while (!RootList.empty()) {
      Info = RootList.pop_back_val();
      Info->IDom = PseudoEntry;
      Info->BlkNum = -1;
      WorkList.push_back(Info);
    }

    while (!WorkList.empty()) {
      Info = WorkList.back();

      if (Info->BlkNum == -2) {
        // All the successors have been handled; assign the postorder number.
        Info->BlkNum = BlkNum++;
        // If not a root, put it on the BlockList.
        if (!Info->AvailableVal)
          BlockList->push_back(Info);
        WorkList.pop_back();
        continue;
      }

      // Leave this entry on the worklist, but set its BlkNum to mark that its
      // successors have been put on the worklist.  When it returns to the top
      // the list, after handling its successors, it will be assigned a
      // number.
      Info->BlkNum = -2;

      // Add unvisited successors to the work list.
      for (typename Traits::BlkSucc_iterator SI =
             Traits::BlkSucc_begin(Info->BB),
             E = Traits::BlkSucc_end(Info->BB); SI != E; ++SI) {
        BBInfo *SuccInfo = BBMap[*SI];
        if (!SuccInfo || SuccInfo->BlkNum)
          continue;
        SuccInfo->BlkNum = -1;
        WorkList.push_back(SuccInfo);
      }
    }
    PseudoEntry->BlkNum = BlkNum;
    return PseudoEntry;
  }

  /// IntersectDominators - This is the dataflow lattice "meet" operation for
  /// finding dominators.  Given two basic blocks, it walks up the dominator
  /// tree until it finds a common dominator of both.  It uses the postorder
  /// number of the blocks to determine how to do that.
  BBInfo *IntersectDominators(BBInfo *Blk1, BBInfo *Blk2) {
    while (Blk1 != Blk2) {
      while (Blk1->BlkNum < Blk2->BlkNum) {
        Blk1 = Blk1->IDom;
        if (!Blk1)
          return Blk2;
      }
      while (Blk2->BlkNum < Blk1->BlkNum) {
        Blk2 = Blk2->IDom;
        if (!Blk2)
          return Blk1;
      }
    }
    return Blk1;
  }

  /// FindDominators - Calculate the dominator tree for the subset of the CFG
  /// corresponding to the basic blocks on the BlockList.  This uses the
  /// algorithm from: "A Simple, Fast Dominance Algorithm" by Cooper, Harvey
  /// and Kennedy, published in Software--Practice and Experience, 2001,
  /// 4:1-10.  Because the CFG subset does not include any edges leading into
  /// blocks that define the value, the results are not the usual dominator
  /// tree.  The CFG subset has a single pseudo-entry node with edges to a set
  /// of root nodes for blocks that define the value.  The dominators for this
  /// subset CFG are not the standard dominators but they are adequate for
  /// placing PHIs within the subset CFG.
  void FindDominators(BlockListTy *BlockList, BBInfo *PseudoEntry) {
    bool Changed;
    do {
      Changed = false;
      // Iterate over the list in reverse order, i.e., forward on CFG edges.
      for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
             E = BlockList->rend(); I != E; ++I) {
        BBInfo *Info = *I;
        BBInfo *NewIDom = nullptr;

        // Iterate through the block's predecessors.
        for (unsigned p = 0; p != Info->NumPreds; ++p) {
          BBInfo *Pred = Info->Preds[p];

          // Treat an unreachable predecessor as a definition with 'undef'.
          if (Pred->BlkNum == 0) {
            Pred->AvailableVal = Traits::GetUndefVal(Pred->BB, Updater);
            (*AvailableVals)[Pred->BB] = Pred->AvailableVal;
            Pred->DefBB = Pred;
            Pred->BlkNum = PseudoEntry->BlkNum;
            PseudoEntry->BlkNum++;
          }

          if (!NewIDom)
            NewIDom = Pred;
          else
            NewIDom = IntersectDominators(NewIDom, Pred);
        }

        // Check if the IDom value has changed.
        if (NewIDom && NewIDom != Info->IDom) {
          Info->IDom = NewIDom;
          Changed = true;
        }
      }
    } while (Changed);
  }

  /// IsDefInDomFrontier - Search up the dominator tree from Pred to IDom for
  /// any blocks containing definitions of the value.  If one is found, then
  /// the successor of Pred is in the dominance frontier for the definition,
  /// and this function returns true.
  bool IsDefInDomFrontier(const BBInfo *Pred, const BBInfo *IDom) {
    for (; Pred != IDom; Pred = Pred->IDom) {
      if (Pred->DefBB == Pred)
        return true;
    }
    return false;
  }

  /// FindPHIPlacement - PHIs are needed in the iterated dominance frontiers
  /// of the known definitions.  Iteratively add PHIs in the dom frontiers
  /// until nothing changes.  Along the way, keep track of the nearest
  /// dominating definitions for non-PHI blocks.
  void FindPHIPlacement(BlockListTy *BlockList) {
    bool Changed;
    do {
      Changed = false;
      // Iterate over the list in reverse order, i.e., forward on CFG edges.
      for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
             E = BlockList->rend(); I != E; ++I) {
        BBInfo *Info = *I;

        // If this block already needs a PHI, there is nothing to do here.
        if (Info->DefBB == Info)
          continue;

        // Default to use the same def as the immediate dominator.
        BBInfo *NewDefBB = Info->IDom->DefBB;
        for (unsigned p = 0; p != Info->NumPreds; ++p) {
          if (IsDefInDomFrontier(Info->Preds[p], Info->IDom)) {
            // Need a PHI here.
            NewDefBB = Info;
            break;
          }
        }

        // Check if anything changed.
        if (NewDefBB != Info->DefBB) {
          Info->DefBB = NewDefBB;
          Changed = true;
        }
      }
    } while (Changed);
  }

  /// FindAvailableVal - If this block requires a PHI, first check if an
  /// existing PHI matches the PHI placement and reaching definitions computed
  /// earlier, and if not, create a new PHI.  Visit all the block's
  /// predecessors to calculate the available value for each one and fill in
  /// the incoming values for a new PHI.
  void FindAvailableVals(BlockListTy *BlockList) {
    // Go through the worklist in forward order (i.e., backward through the CFG)
    // and check if existing PHIs can be used.  If not, create empty PHIs where
    // they are needed.
    for (typename BlockListTy::iterator I = BlockList->begin(),
           E = BlockList->end(); I != E; ++I) {
      BBInfo *Info = *I;
      // Check if there needs to be a PHI in BB.
      if (Info->DefBB != Info)
        continue;

      // Look for an existing PHI.
      FindExistingPHI(Info->BB, BlockList);
      if (Info->AvailableVal)
        continue;

      ValT PHI = Traits::CreateEmptyPHI(Info->BB, Info->NumPreds, Updater);
      Info->AvailableVal = PHI;
      (*AvailableVals)[Info->BB] = PHI;
    }

    // Now go back through the worklist in reverse order to fill in the
    // arguments for any new PHIs added in the forward traversal.
    for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
           E = BlockList->rend(); I != E; ++I) {
      BBInfo *Info = *I;

      if (Info->DefBB != Info) {
        // Record the available value at join nodes to speed up subsequent
        // uses of this SSAUpdater for the same value.
        if (Info->NumPreds > 1)
          (*AvailableVals)[Info->BB] = Info->DefBB->AvailableVal;
        continue;
      }

      // Check if this block contains a newly added PHI.
      PhiT *PHI = Traits::ValueIsNewPHI(Info->AvailableVal, Updater);
      if (!PHI)
        continue;

      // Iterate through the block's predecessors.
      for (unsigned p = 0; p != Info->NumPreds; ++p) {
        BBInfo *PredInfo = Info->Preds[p];
        BlkT *Pred = PredInfo->BB;
        // Skip to the nearest preceding definition.
        if (PredInfo->DefBB != PredInfo)
          PredInfo = PredInfo->DefBB;
        Traits::AddPHIOperand(PHI, PredInfo->AvailableVal, Pred);
      }

      DEBUG(dbgs() << " Inserted PHI: " << *PHI << "\n");

      // If the client wants to know about all new instructions, tell it.
      if (InsertedPHIs)
        InsertedPHIs->push_back(PHI);
    }
  }

  /// FindExistingPHI - Look through the PHI nodes in a block to see if any of
  /// them match what is needed.
  void FindExistingPHI(BlkT *BB, BlockListTy *BlockList) {
    for (typename BlkT::iterator BBI = BB->begin(), BBE = BB->end();
         BBI != BBE; ++BBI) {
      PhiT *SomePHI = Traits::InstrIsPHI(BBI);
      if (!SomePHI)
        break;
      if (CheckIfPHIMatches(SomePHI)) {
        RecordMatchingPHIs(BlockList);
        break;
      }
      // Match failed: clear all the PHITag values.
      for (typename BlockListTy::iterator I = BlockList->begin(),
             E = BlockList->end(); I != E; ++I)
        (*I)->PHITag = nullptr;
    }
  }

  /// CheckIfPHIMatches - Check if a PHI node matches the placement and values
  /// in the BBMap.
  bool CheckIfPHIMatches(PhiT *PHI) {
    SmallVector<PhiT*, 20> WorkList;
    WorkList.push_back(PHI);

    // Mark that the block containing this PHI has been visited.
    BBMap[PHI->getParent()]->PHITag = PHI;

    while (!WorkList.empty()) {
      PHI = WorkList.pop_back_val();

      // Iterate through the PHI's incoming values.
      for (typename Traits::PHI_iterator I = Traits::PHI_begin(PHI),
             E = Traits::PHI_end(PHI); I != E; ++I) {
        ValT IncomingVal = I.getIncomingValue();
        BBInfo *PredInfo = BBMap[I.getIncomingBlock()];
        // Skip to the nearest preceding definition.
        if (PredInfo->DefBB != PredInfo)
          PredInfo = PredInfo->DefBB;

        // Check if it matches the expected value.
        if (PredInfo->AvailableVal) {
          if (IncomingVal == PredInfo->AvailableVal)
            continue;
          return false;
        }

        // Check if the value is a PHI in the correct block.
        PhiT *IncomingPHIVal = Traits::ValueIsPHI(IncomingVal, Updater);
        if (!IncomingPHIVal || IncomingPHIVal->getParent() != PredInfo->BB)
          return false;

        // If this block has already been visited, check if this PHI matches.
        if (PredInfo->PHITag) {
          if (IncomingPHIVal == PredInfo->PHITag)
            continue;
          return false;
        }
        PredInfo->PHITag = IncomingPHIVal;

        WorkList.push_back(IncomingPHIVal);
      }
    }
    return true;
  }

  /// RecordMatchingPHIs - For each PHI node that matches, record it in both
  /// the BBMap and the AvailableVals mapping.
  void RecordMatchingPHIs(BlockListTy *BlockList) {
    for (typename BlockListTy::iterator I = BlockList->begin(),
           E = BlockList->end(); I != E; ++I)
      if (PhiT *PHI = (*I)->PHITag) {
        BlkT *BB = PHI->getParent();
        ValT PHIVal = Traits::GetPHIValue(PHI);
        (*AvailableVals)[BB] = PHIVal;
        BBMap[BB]->AvailableVal = PHIVal;
      }
  }
};

#undef DEBUG_TYPE // "ssaupdater"

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Utils/PromoteMemToReg.h
//===- PromoteMemToReg.h - Promote Allocas to Scalars -----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file exposes an interface to promote alloca instructions to SSA // registers, by using the SSA construction algorithm. // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_UTILS_PROMOTEMEMTOREG_H #define LLVM_TRANSFORMS_UTILS_PROMOTEMEMTOREG_H #include "llvm/ADT/ArrayRef.h" namespace llvm { class AllocaInst; class DominatorTree; class AliasSetTracker; class AssumptionCache; /// \brief Return true if this alloca is legal for promotion. /// /// This is true if there are only loads, stores, and lifetime markers /// (transitively) using this alloca. This also enforces that there is only /// ever one layer of bitcasts or GEPs between the alloca and the lifetime /// markers. bool isAllocaPromotable(const AllocaInst *AI); /// \brief Promote the specified list of alloca instructions into scalar /// registers, inserting PHI nodes as appropriate. /// /// This function makes use of DominanceFrontier information. This function /// does not modify the CFG of the function at all. All allocas must be from /// the same function. /// /// If AST is specified, the specified tracker is updated to reflect changes /// made to the IR. void PromoteMemToReg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT, AliasSetTracker *AST = nullptr, AssumptionCache *AC = nullptr); } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Utils/BypassSlowDivision.h
//===- llvm/Transforms/Utils/BypassSlowDivision.h --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains an optimization for div and rem on architectures that // execute short instructions significantly faster than longer instructions. // For example, on Intel Atom 32-bit divides are slow enough that during // runtime it is profitable to check the value of the operands, and if they are // positive and less than 256 use an unsigned 8-bit divide. // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_UTILS_BYPASSSLOWDIVISION_H #define LLVM_TRANSFORMS_UTILS_BYPASSSLOWDIVISION_H #include "llvm/ADT/DenseMap.h" #include "llvm/IR/Function.h" namespace llvm { /// This optimization identifies DIV instructions that can be /// profitably bypassed and carried out with a shorter, faster divide. bool bypassSlowDivision(Function &F, Function::iterator &I, const DenseMap<unsigned int, unsigned int> &BypassWidth); } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Utils/Local.h
//===-- Local.h - Functions to perform local transformations ----*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This family of functions perform various local transformations to the // program. // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_UTILS_LOCAL_H #define LLVM_TRANSFORMS_UTILS_LOCAL_H #include "llvm/IR/DataLayout.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/GetElementPtrTypeIterator.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Operator.h" namespace llvm { class User; class BasicBlock; class Function; class BranchInst; class Instruction; class DbgDeclareInst; class StoreInst; class LoadInst; class Value; class PHINode; class AllocaInst; class AssumptionCache; class ConstantExpr; class DataLayout; class TargetLibraryInfo; class TargetTransformInfo; class DIBuilder; class AliasAnalysis; class DominatorTree; template<typename T> class SmallVectorImpl; //===----------------------------------------------------------------------===// // Local constant propagation. // /// ConstantFoldTerminator - If a terminator instruction is predicated on a /// constant value, convert it into an unconditional branch to the constant /// destination. This is a nontrivial operation because the successors of this /// basic block must have their PHI nodes updated. /// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch /// conditions and indirectbr addresses this might make dead if /// DeleteDeadConditions is true. bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false, const TargetLibraryInfo *TLI = nullptr); //===----------------------------------------------------------------------===// // Local dead code elimination. 
// /// isInstructionTriviallyDead - Return true if the result produced by the /// instruction is not used, and the instruction has no side effects. /// bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI = nullptr); /// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a /// trivially dead instruction, delete it. If that makes any of its operands /// trivially dead, delete them too, recursively. Return true if any /// instructions were deleted. bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI = nullptr); /// RecursivelyDeleteDeadPHINode - If the specified value is an effectively /// dead PHI node, due to being a def-use chain of single-use nodes that /// either forms a cycle or is terminated by a trivially dead instruction, /// delete it. If that makes any of its operands trivially dead, delete them /// too, recursively. Return true if a change was made. bool RecursivelyDeleteDeadPHINode(PHINode *PN, const TargetLibraryInfo *TLI = nullptr); /// SimplifyInstructionsInBlock - Scan the specified basic block and try to /// simplify any instructions in it and recursively delete dead instructions. /// /// This returns true if it changed the code, note that it can delete /// instructions in other blocks as well in this block. bool SimplifyInstructionsInBlock(BasicBlock *BB, const TargetLibraryInfo *TLI = nullptr); // // /////////////////////////////////////////////////////////////////////////////// // Control Flow Graph Restructuring. // /// RemovePredecessorAndSimplify - Like BasicBlock::removePredecessor, this /// method is called when we're about to delete Pred as a predecessor of BB. If /// BB contains any PHI nodes, this drops the entries in the PHI nodes for Pred. /// /// Unlike the removePredecessor method, this attempts to simplify uses of PHI /// nodes that collapse into identity values. For example, if we have: /// x = phi(1, 0, 0, 0) /// y = and x, z /// /// .. 
and delete the predecessor corresponding to the '1', this will attempt to /// recursively fold the 'and' to 0. void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred); /// MergeBasicBlockIntoOnlyPred - BB is a block with one predecessor and its /// predecessor is known to have one successor (BB!). Eliminate the edge /// between them, moving the instructions in the predecessor into BB. This /// deletes the predecessor block. /// void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, DominatorTree *DT = nullptr); /// TryToSimplifyUncondBranchFromEmptyBlock - BB is known to contain an /// unconditional branch, and contains no instructions other than PHI nodes, /// potential debug intrinsics and the branch. If possible, eliminate BB by /// rewriting all the predecessors to branch to the successor block and return /// true. If we can't transform, return false. bool TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB); /// EliminateDuplicatePHINodes - Check for and eliminate duplicate PHI /// nodes in this block. This doesn't try to be clever about PHI nodes /// which differ only in the order of the incoming values, but instcombine /// orders them so it usually won't matter. /// bool EliminateDuplicatePHINodes(BasicBlock *BB); /// SimplifyCFG - This function is used to do simplification of a CFG. For /// example, it adjusts branches to branches to eliminate the extra hop, it /// eliminates unreachable basic blocks, and does other "peephole" optimization /// of the CFG. It returns true if a modification was made, possibly deleting /// the basic block that was pointed to. /// bool SimplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI, unsigned BonusInstThreshold, AssumptionCache *AC = nullptr); /// FlatternCFG - This function is used to flatten a CFG. For /// example, it uses parallel-and and parallel-or mode to collapse // if-conditions and merge if-regions with identical statements. 
/// bool FlattenCFG(BasicBlock *BB, AliasAnalysis *AA = nullptr); /// FoldBranchToCommonDest - If this basic block is ONLY a setcc and a branch, /// and if a predecessor branches to us and one of our successors, fold the /// setcc into the predecessor and use logical operations to pick the right /// destination. bool FoldBranchToCommonDest(BranchInst *BI, unsigned BonusInstThreshold = 1); /// DemoteRegToStack - This function takes a virtual register computed by an /// Instruction and replaces it with a slot in the stack frame, allocated via /// alloca. This allows the CFG to be changed around without fear of /// invalidating the SSA information for the value. It returns the pointer to /// the alloca inserted to create a stack slot for X. /// AllocaInst *DemoteRegToStack(Instruction &X, bool VolatileLoads = false, Instruction *AllocaPoint = nullptr); /// DemotePHIToStack - This function takes a virtual register computed by a phi /// node and replaces it with a slot in the stack frame, allocated via alloca. /// The phi node is deleted and it returns the pointer to the alloca inserted. AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = nullptr); /// getOrEnforceKnownAlignment - If the specified pointer has an alignment that /// we can determine, return it, otherwise return 0. If PrefAlign is specified, /// and it is more than the alignment of the ultimate object, see if we can /// increase the alignment of the ultimate object, making this check succeed. unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign, const DataLayout &DL, const Instruction *CxtI = nullptr, AssumptionCache *AC = nullptr, const DominatorTree *DT = nullptr); /// getKnownAlignment - Try to infer an alignment for the specified pointer. 
static inline unsigned getKnownAlignment(Value *V, const DataLayout &DL,
                                         const Instruction *CxtI = nullptr,
                                         AssumptionCache *AC = nullptr,
                                         const DominatorTree *DT = nullptr) {
  // PrefAlign == 0: query only, never try to raise the object's alignment.
  return getOrEnforceKnownAlignment(V, 0, DL, CxtI, AC, DT);
}

/// EmitGEPOffset - Given a getelementptr instruction/constantexpr, emit the
/// code necessary to compute the offset from the base pointer (without adding
/// in the base pointer). Return the result as a signed integer of intptr size.
/// When NoAssumptions is true, no assumptions about index computation not
/// overflowing is made.
template <typename IRBuilderTy>
Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
                     bool NoAssumptions = false) {
  GEPOperator *GEPOp = cast<GEPOperator>(GEP);
  Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
  // Offset accumulator, starting at 0 in the pointer-sized integer type.
  Value *Result = Constant::getNullValue(IntPtrTy);

  // If the GEP is inbounds, we know that none of the addressing operations will
  // overflow in an unsigned sense.
  bool isInBounds = GEPOp->isInBounds() && !NoAssumptions;

  // Build a mask for high order bits, so element sizes are truncated to the
  // pointer width before being folded into the offset.
  unsigned IntPtrWidth = IntPtrTy->getScalarType()->getIntegerBitWidth();
  uint64_t PtrSizeMask = ~0ULL >> (64 - IntPtrWidth);

  // Walk the indices (operands 1..N) in parallel with the types they step
  // through.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
       ++i, ++GTI) {
    Value *Op = *i;
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
    if (Constant *OpC = dyn_cast<Constant>(Op)) {
      // A zero index contributes nothing to the offset.
      if (OpC->isZeroValue())
        continue;

      // Handle a struct index, which adds its field offset to the pointer.
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        if (OpC->getType()->isVectorTy())
          OpC = OpC->getSplatValue();

        uint64_t OpValue = cast<ConstantInt>(OpC)->getZExtValue();
        Size = DL.getStructLayout(STy)->getElementOffset(OpValue);

        if (Size)
          Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
                                      GEP->getName()+".offs");
        continue;
      }

      // Constant array/vector index: fold index * element-size at compile time.
      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
      Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
      Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/);
      // Emit an add instruction.
      Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
      continue;
    }
    // Convert to correct type (GEP indices are sign-extended to pointer width).
    if (Op->getType() != IntPtrTy)
      Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
    if (Size != 1) {
      // We'll let instcombine(mul) convert this to a shl if possible.
      Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
                              GEP->getName()+".idx", isInBounds /*NUW*/);
    }

    // Emit an add instruction.
    Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
  }
  return Result;
}

///===---------------------------------------------------------------------===//
///  Dbg Intrinsic utilities
///

/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
/// that has an associated llvm.dbg.decl intrinsic.
bool ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI, StoreInst *SI,
                                     DIBuilder &Builder);

/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
/// that has an associated llvm.dbg.decl intrinsic.
bool ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI, LoadInst *LI,
                                     DIBuilder &Builder);

/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
/// of llvm.dbg.value intrinsics.
bool LowerDbgDeclare(Function &F);

/// FindAllocaDbgDeclare - Finds the llvm.dbg.declare intrinsic corresponding to
/// an alloca, if any.
DbgDeclareInst *FindAllocaDbgDeclare(Value *V);

// HLSL Change - Begin
/// FindAllocaDbgDeclare - Finds all llvm.dbg.declare intrinsics corresponding
/// to an alloca, if any, appending them to Declares.
void FindAllocaDbgDeclare(Value *V, SmallVectorImpl<DbgDeclareInst *> &Declares);
// HLSL Change - End

/// \brief Replaces llvm.dbg.declare instruction when an alloca is replaced with
/// a new value. If Deref is true, an additional DW_OP_deref is prepended to
/// the expression.
bool replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
                                DIBuilder &Builder, bool Deref);

/// \brief Remove all blocks that can not be reached from the function's entry.
///
/// Returns true if any basic block was removed.
bool removeUnreachableBlocks(Function &F);

/// \brief Combine the metadata of two instructions so that K can replace J
///
/// Metadata not listed as known via KnownIDs is removed
void combineMetadata(Instruction *K, const Instruction *J,
                     ArrayRef<unsigned> KnownIDs);

/// \brief Replace each use of 'From' with 'To' if that use is dominated by
/// the given edge. Returns the number of replacements made.
unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT,
                                  const BasicBlockEdge &Edge);

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Utils/BuildLibCalls.h
//===- BuildLibCalls.h - Utility builder for libcalls -----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file exposes an interface to build some C language libcalls for // optimization passes that need to call the various functions. // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_UTILS_BUILDLIBCALLS_H #define LLVM_TRANSFORMS_UTILS_BUILDLIBCALLS_H #include "llvm/IR/IRBuilder.h" namespace llvm { class Value; class DataLayout; class TargetLibraryInfo; /// CastToCStr - Return V if it is an i8*, otherwise cast it to i8*. Value *CastToCStr(Value *V, IRBuilder<> &B); /// EmitStrLen - Emit a call to the strlen function to the builder, for the /// specified pointer. Ptr is required to be some pointer type, and the /// return value has 'intptr_t' type. Value *EmitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout &DL, const TargetLibraryInfo *TLI); /// EmitStrNLen - Emit a call to the strnlen function to the builder, for the /// specified pointer. Ptr is required to be some pointer type, MaxLen must /// be of size_t type, and the return value has 'intptr_t' type. Value *EmitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B, const DataLayout &DL, const TargetLibraryInfo *TLI); /// EmitStrChr - Emit a call to the strchr function to the builder, for the /// specified pointer and character. Ptr is required to be some pointer type, /// and the return value has 'i8*' type. Value *EmitStrChr(Value *Ptr, char C, IRBuilder<> &B, const TargetLibraryInfo *TLI); /// EmitStrNCmp - Emit a call to the strncmp function to the builder. 
Value *EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
                   const DataLayout &DL, const TargetLibraryInfo *TLI);

/// EmitStrCpy - Emit a call to the strcpy function to the builder, for the
/// specified pointer arguments.
Value *EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
                  const TargetLibraryInfo *TLI, StringRef Name = "strcpy");

/// EmitStrNCpy - Emit a call to the strncpy function to the builder, for the
/// specified pointer arguments and length.
Value *EmitStrNCpy(Value *Dst, Value *Src, Value *Len, IRBuilder<> &B,
                   const TargetLibraryInfo *TLI, StringRef Name = "strncpy");

/// EmitMemCpyChk - Emit a call to the __memcpy_chk function to the builder.
/// This expects that the Len and ObjSize have type 'intptr_t' and Dst/Src
/// are pointers.
Value *EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
                     IRBuilder<> &B, const DataLayout &DL,
                     const TargetLibraryInfo *TLI);

/// EmitMemChr - Emit a call to the memchr function. This assumes that Ptr is
/// a pointer, Val is an i32 value, and Len is an 'intptr_t' value.
Value *EmitMemChr(Value *Ptr, Value *Val, Value *Len, IRBuilder<> &B,
                  const DataLayout &DL, const TargetLibraryInfo *TLI);

/// EmitMemCmp - Emit a call to the memcmp function.
Value *EmitMemCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
                  const DataLayout &DL, const TargetLibraryInfo *TLI);

/// EmitUnaryFloatFnCall - Emit a call to the unary function named 'Name'
/// (e.g. 'floor'). This function is known to take a single of type matching
/// 'Op' and returns one value with the same type. If 'Op' is a long double,
/// 'l' is added as the suffix of name, if 'Op' is a float, we add a 'f'
/// suffix.
Value *EmitUnaryFloatFnCall(Value *Op, StringRef Name, IRBuilder<> &B,
                            const AttributeSet &Attrs);

/// EmitBinaryFloatFnCall - Emit a call to the binary function named 'Name'
/// (e.g. 'fmin'). This function is known to take type matching 'Op1' and
/// 'Op2' and return one value with the same type. If 'Op1/Op2' are long
/// double, 'l' is added as the suffix of name, if 'Op1/Op2' are float, we
/// add a 'f' suffix.
Value *EmitBinaryFloatFnCall(Value *Op1, Value *Op2, StringRef Name,
                             IRBuilder<> &B, const AttributeSet &Attrs);

/// EmitPutChar - Emit a call to the putchar function. This assumes that Char
/// is an integer.
Value *EmitPutChar(Value *Char, IRBuilder<> &B, const TargetLibraryInfo *TLI);

/// EmitPutS - Emit a call to the puts function. This assumes that Str is
/// some pointer.
Value *EmitPutS(Value *Str, IRBuilder<> &B, const TargetLibraryInfo *TLI);

/// EmitFPutC - Emit a call to the fputc function. This assumes that Char is
/// an i32, and File is a pointer to FILE.
Value *EmitFPutC(Value *Char, Value *File, IRBuilder<> &B,
                 const TargetLibraryInfo *TLI);

/// EmitFPutS - Emit a call to the fputs function. Str is required to be a
/// pointer and File is a pointer to FILE.
Value *EmitFPutS(Value *Str, Value *File, IRBuilder<> &B,
                 const TargetLibraryInfo *TLI);

/// EmitFWrite - Emit a call to the fwrite function. This assumes that Ptr is
/// a pointer, Size is an 'intptr_t', and File is a pointer to FILE.
Value *EmitFWrite(Value *Ptr, Value *Size, Value *File, IRBuilder<> &B,
                  const DataLayout &DL, const TargetLibraryInfo *TLI);
}

#endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Utils/Cloning.h
//===- Cloning.h - Clone various parts of LLVM programs ---------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines various functions that are used to clone chunks of LLVM
// code for various purposes. This varies from copying whole modules into new
// modules, to cloning functions with different arguments, to inlining
// functions, to copying basic blocks to support loop unrolling or superblock
// formation, etc.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_CLONING_H
#define LLVM_TRANSFORMS_UTILS_CLONING_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Transforms/Utils/ValueMapper.h"

namespace llvm {

class Module;
class Function;
class Instruction;
class Pass;
class LPPassManager;
class BasicBlock;
class Value;
class CallInst;
class InvokeInst;
class ReturnInst;
class CallSite;
class Trace;
class CallGraph;
class DataLayout;
class Loop;
class LoopInfo;
class AllocaInst;
class AliasAnalysis;
class AssumptionCacheTracker;
class DominatorTree;

/// CloneModule - Return an exact copy of the specified module. The second
/// overload additionally records the correspondence between original and
/// cloned values in VMap.
///
Module *CloneModule(const Module *M);
Module *CloneModule(const Module *M, ValueToValueMapTy &VMap);

/// ClonedCodeInfo - This struct can be used to capture information about code
/// being cloned, while it is being cloned.
struct ClonedCodeInfo {
  /// ContainsCalls - This is set to true if the cloned code contains a normal
  /// call instruction.
  bool ContainsCalls;

  /// ContainsDynamicAllocas - This is set to true if the cloned code contains
  /// a 'dynamic' alloca. Dynamic allocas are allocas that are either not in
  /// the entry block or they are in the entry block but are not a constant
  /// size.
  bool ContainsDynamicAllocas;

  ClonedCodeInfo() : ContainsCalls(false), ContainsDynamicAllocas(false) {}
};

/// CloneBasicBlock - Return a copy of the specified basic block, but without
/// embedding the block into a particular function. The block returned is an
/// exact copy of the specified basic block, without any remapping having been
/// performed. Because of this, this is only suitable for applications where
/// the basic block will be inserted into the same function that it was cloned
/// from (loop unrolling would use this, for example).
///
/// Also, note that this function makes a direct copy of the basic block, and
/// can thus produce illegal LLVM code. In particular, it will copy any PHI
/// nodes from the original block, even though there are no predecessors for the
/// newly cloned block (thus, phi nodes will have to be updated). Also, this
/// block will branch to the old successors of the original block: these
/// successors will have to have any PHI nodes updated to account for the new
/// incoming edges.
///
/// The correlation between instructions in the source and result basic blocks
/// is recorded in the VMap map.
///
/// If you have a particular suffix you'd like to use to add to any cloned
/// names, specify it as the optional third parameter.
///
/// If you would like the basic block to be auto-inserted into the end of a
/// function, you can specify it as the optional fourth parameter.
///
/// If you would like to collect additional information about the cloned
/// function, you can specify a ClonedCodeInfo object with the optional fifth
/// parameter.
/// BasicBlock *CloneBasicBlock(const BasicBlock *BB, ValueToValueMapTy &VMap, const Twine &NameSuffix = "", Function *F = nullptr, ClonedCodeInfo *CodeInfo = nullptr); /// CloneFunction - Return a copy of the specified function, but without /// embedding the function into another module. Also, any references specified /// in the VMap are changed to refer to their mapped value instead of the /// original one. If any of the arguments to the function are in the VMap, /// the arguments are deleted from the resultant function. The VMap is /// updated to include mappings from all of the instructions and basicblocks in /// the function from their old to new values. The final argument captures /// information about the cloned code if non-null. /// /// If ModuleLevelChanges is false, VMap contains no non-identity GlobalValue /// mappings, and debug info metadata will not be cloned. /// Function *CloneFunction(const Function *F, ValueToValueMapTy &VMap, bool ModuleLevelChanges, ClonedCodeInfo *CodeInfo = nullptr); /// Clone OldFunc into NewFunc, transforming the old arguments into references /// to VMap values. Note that if NewFunc already has basic blocks, the ones /// cloned into it will be added to the end of the function. This function /// fills in a list of return instructions, and can optionally remap types /// and/or append the specified suffix to all values cloned. /// /// If ModuleLevelChanges is false, VMap contains no non-identity GlobalValue /// mappings. /// void CloneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, bool ModuleLevelChanges, SmallVectorImpl<ReturnInst*> &Returns, const char *NameSuffix = "", ClonedCodeInfo *CodeInfo = nullptr, ValueMapTypeRemapper *TypeMapper = nullptr, ValueMaterializer *Materializer = nullptr); /// A helper class used with CloneAndPruneIntoFromInst to change the default /// behavior while instructions are being cloned. 
class CloningDirector {
public:
  /// This enumeration describes the way CloneAndPruneIntoFromInst should
  /// proceed after the CloningDirector has examined an instruction.
  enum CloningAction {
    /// Continue cloning the instruction (default behavior).
    CloneInstruction,
    /// Skip this instruction but continue cloning the current basic block.
    SkipInstruction,
    /// Skip this instruction and stop cloning the current basic block.
    StopCloningBB,
    /// Don't clone the terminator but clone the current block's successors.
    CloneSuccessors
  };

  virtual ~CloningDirector() {}

  /// Subclasses must override this function to customize cloning behavior.
  virtual CloningAction handleInstruction(ValueToValueMapTy &VMap,
                                          const Instruction *Inst,
                                          BasicBlock *NewBB) = 0;

  /// Optional hook: a non-null remapper is used to translate types while
  /// cloning; the default provides none.
  virtual ValueMapTypeRemapper *getTypeRemapper() { return nullptr; }
  /// Optional hook: a non-null materializer is used to materialize values
  /// while cloning; the default provides none.
  virtual ValueMaterializer *getValueMaterializer() { return nullptr; }
};

/// Like CloneAndPruneFunctionInto (below), but begins cloning at
/// \p StartingInst and allows an optional CloningDirector to customize how
/// individual instructions are cloned.
void CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
                               const Instruction *StartingInst,
                               ValueToValueMapTy &VMap, bool ModuleLevelChanges,
                               SmallVectorImpl<ReturnInst*> &Returns,
                               const char *NameSuffix = "",
                               ClonedCodeInfo *CodeInfo = nullptr,
                               CloningDirector *Director = nullptr);

/// CloneAndPruneFunctionInto - This works exactly like CloneFunctionInto,
/// except that it does some simple constant prop and DCE on the fly. The
/// effect of this is to copy significantly less code in cases where (for
/// example) a function call with constant arguments is inlined, and those
/// constant arguments cause a significant amount of code in the callee to be
/// dead. Since this doesn't produce an exactly copy of the input, it can't be
/// used for things like CloneFunction or CloneModule.
///
/// If ModuleLevelChanges is false, VMap contains no non-identity GlobalValue
/// mappings.
/// void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, bool ModuleLevelChanges, SmallVectorImpl<ReturnInst*> &Returns, const char *NameSuffix = "", ClonedCodeInfo *CodeInfo = nullptr, Instruction *TheCall = nullptr); /// InlineFunctionInfo - This class captures the data input to the /// InlineFunction call, and records the auxiliary results produced by it. class InlineFunctionInfo { public: explicit InlineFunctionInfo(CallGraph *cg = nullptr, AliasAnalysis *AA = nullptr, AssumptionCacheTracker *ACT = nullptr) : CG(cg), AA(AA), ACT(ACT) {} /// CG - If non-null, InlineFunction will update the callgraph to reflect the /// changes it makes. CallGraph *CG; AliasAnalysis *AA; AssumptionCacheTracker *ACT; /// StaticAllocas - InlineFunction fills this in with all static allocas that /// get copied into the caller. SmallVector<AllocaInst *, 4> StaticAllocas; /// InlinedCalls - InlineFunction fills this in with callsites that were /// inlined from the callee. This is only filled in if CG is non-null. SmallVector<WeakTrackingVH, 8> InlinedCalls; void reset() { StaticAllocas.clear(); InlinedCalls.clear(); } }; /// InlineFunction - This function inlines the called function into the basic /// block of the caller. This returns false if it is not possible to inline /// this call. The program is still in a well defined state if this occurs /// though. /// /// Note that this only does one level of inlining. For example, if the /// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now /// exists in the instruction stream. Similarly this will inline a recursive /// function by one level. /// bool InlineFunction(CallInst *C, InlineFunctionInfo &IFI, bool InsertLifetime = true); bool InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI, bool InsertLifetime = true); bool InlineFunction(CallSite CS, InlineFunctionInfo &IFI, bool InsertLifetime = true); /// \brief Clones a loop \p OrigLoop. 
Returns the loop and the blocks in \p /// Blocks. /// /// Updates LoopInfo and DominatorTree assuming the loop is dominated by block /// \p LoopDomBB. Insert the new blocks before block specified in \p Before. Loop *cloneLoopWithPreheader(BasicBlock *Before, BasicBlock *LoopDomBB, Loop *OrigLoop, ValueToValueMapTy &VMap, const Twine &NameSuffix, LoopInfo *LI, DominatorTree *DT, SmallVectorImpl<BasicBlock *> &Blocks); /// \brief Remaps instructions in \p Blocks using the mapping in \p VMap. void remapInstructionsInBlocks(const SmallVectorImpl<BasicBlock *> &Blocks, ValueToValueMapTy &VMap); } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Utils/SSAUpdater.h
//===-- SSAUpdater.h - Unstructured SSA Update Tool -------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the SSAUpdater class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_SSAUPDATER_H
#define LLVM_TRANSFORMS_UTILS_SSAUPDATER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Compiler.h"

namespace llvm {
  class BasicBlock;
  class Instruction;
  class LoadInst;
  template<typename T> class SmallVectorImpl;
  template<typename T> class SSAUpdaterTraits;
  class PHINode;
  class Type;
  class Use;
  class Value;

/// \brief Helper class for SSA formation on a set of values defined in
/// multiple blocks.
///
/// This is used when code duplication or another unstructured
/// transformation wants to rewrite a set of uses of one value with uses of a
/// set of values.
class SSAUpdater {
  friend class SSAUpdaterTraits<SSAUpdater>;

private:
  /// This keeps track of which value to use on a per-block basis. When we
  /// insert PHI nodes, we keep track of them here. Held as an opaque pointer
  /// so the map type need not be exposed in this header (see the
  /// commented-out typedef below).
  //typedef DenseMap<BasicBlock*, Value*> AvailableValsTy;
  void *AV;

  /// ProtoType holds the type of the values being rewritten.
  Type *ProtoType;

  /// PHI nodes are given a name based on ProtoName.
  std::string ProtoName;

  /// If this is non-null, the SSAUpdater adds all PHI nodes that it creates to
  /// the vector.
  SmallVectorImpl<PHINode*> *InsertedPHIs;

public:
  /// If InsertedPHIs is specified, it will be filled
  /// in with all PHI Nodes created by rewriting.
  explicit SSAUpdater(SmallVectorImpl<PHINode*> *InsertedPHIs = nullptr);
  ~SSAUpdater();

  /// \brief Reset this object to get ready for a new set of SSA updates with
  /// type 'Ty'.
  ///
  /// PHI nodes get a name based on 'Name'.
  void Initialize(Type *Ty, StringRef Name);

  /// \brief Indicate that a rewritten value is available in the specified block
  /// with the specified value.
  void AddAvailableValue(BasicBlock *BB, Value *V);

  /// \brief Return true if the SSAUpdater already has a value for the specified
  /// block.
  bool HasValueForBlock(BasicBlock *BB) const;

  /// \brief Construct SSA form, materializing a value that is live at the end
  /// of the specified block.
  Value *GetValueAtEndOfBlock(BasicBlock *BB);

  /// \brief Construct SSA form, materializing a value that is live in the
  /// middle of the specified block.
  ///
  /// \c GetValueInMiddleOfBlock is the same as \c GetValueAtEndOfBlock except
  /// in one important case: if there is a definition of the rewritten value
  /// after the 'use' in BB. Consider code like this:
  ///
  /// \code
  ///   X1 = ...
  /// SomeBB:
  ///   use(X)
  ///   X2 = ...
  ///   br Cond, SomeBB, OutBB
  /// \endcode
  ///
  /// In this case, there are two values (X1 and X2) added to the AvailableVals
  /// set by the client of the rewriter, and those values are both live out of
  /// their respective blocks. However, the use of X happens in the *middle* of
  /// a block. Because of this, we need to insert a new PHI node in SomeBB to
  /// merge the appropriate values, and this value isn't live out of the block.
  Value *GetValueInMiddleOfBlock(BasicBlock *BB);

  /// \brief Rewrite a use of the symbolic value.
  ///
  /// This handles PHI nodes, which use their value in the corresponding
  /// predecessor. Note that this will not work if the use is supposed to be
  /// rewritten to a value defined in the same block as the use, but above it.
  /// Any 'AddAvailableValue's added for the use's block will be considered to
  /// be below it.
  void RewriteUse(Use &U);

  /// \brief Rewrite a use like \c RewriteUse but handling in-block definitions.
  ///
  /// This version of the method can rewrite uses in the same block as
  /// a definition, because it assumes that all uses of a value are below any
  /// inserted values.
  void RewriteUseAfterInsertions(Use &U);

private:
  Value *GetValueAtEndOfBlockInternal(BasicBlock *BB);

  // Non-copyable.
  void operator=(const SSAUpdater&) = delete;
  SSAUpdater(const SSAUpdater&) = delete;
};

/// \brief Helper class for promoting a collection of loads and stores into SSA
/// Form using the SSAUpdater.
///
/// This handles complexities that SSAUpdater doesn't, such as multiple loads
/// and stores in one block.
///
/// Clients of this class are expected to subclass this and implement the
/// virtual methods.
class LoadAndStorePromoter {
protected:
  /// The updater used to build SSA form for the promoted values.
  SSAUpdater &SSA;

public:
  LoadAndStorePromoter(ArrayRef<const Instruction*> Insts,
                       SSAUpdater &S, StringRef Name = StringRef());
  virtual ~LoadAndStorePromoter() {}

  /// \brief This does the promotion.
  ///
  /// Insts is a list of loads and stores to promote, and Name is the basename
  /// for the PHIs to insert. After this is complete, the loads and stores are
  /// removed from the code.
  void run(const SmallVectorImpl<Instruction*> &Insts) const;

  /// \brief Return true if the specified instruction is in the Inst list.
  ///
  /// The Insts list is the one passed into the constructor. Clients should
  /// implement this with a more efficient version if possible.
  virtual bool isInstInList(Instruction *I,
                            const SmallVectorImpl<Instruction*> &Insts) const;

  /// \brief This hook is invoked after all the stores are found and inserted as
  /// available values.
  virtual void doExtraRewritesBeforeFinalDeletion() const {
  }

  /// \brief Clients can choose to implement this to get notified right before
  /// a load is RAUW'd another value.
  virtual void replaceLoadWithValue(LoadInst *LI, Value *V) const {
  }

  /// \brief Called before each instruction is deleted.
  virtual void instructionDeleted(Instruction *I) const {
  }

  /// \brief Called to update debug info associated with the instruction.
  virtual void updateDebugInfo(Instruction *I) const {
  }
};

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Utils/CmpInstAnalysis.h
//===-- CmpInstAnalysis.h - Utils to help fold compare insts ----*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file holds routines to help analyse compare instructions // and fold them into constants or other compare instructions // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_UTILS_CMPINSTANALYSIS_H #define LLVM_TRANSFORMS_UTILS_CMPINSTANALYSIS_H #include "llvm/IR/InstrTypes.h" namespace llvm { class ICmpInst; class Value; /// getICmpCode - Encode a icmp predicate into a three bit mask. These bits /// are carefully arranged to allow folding of expressions such as: /// /// (A < B) | (A > B) --> (A != B) /// /// Note that this is only valid if the first and second predicates have the /// same sign. Is illegal to do: (A u< B) | (A s> B) /// /// Three bits are used to represent the condition, as follows: /// 0 A > B /// 1 A == B /// 2 A < B /// /// <=> Value Definition /// 000 0 Always false /// 001 1 A > B /// 010 2 A == B /// 011 3 A >= B /// 100 4 A < B /// 101 5 A != B /// 110 6 A <= B /// 111 7 Always true /// unsigned getICmpCode(const ICmpInst *ICI, bool InvertPred = false); /// getICmpValue - This is the complement of getICmpCode, which turns an /// opcode and two operands into either a constant true or false, or the /// predicate for a new ICmp instruction. The sign is passed in to determine /// which kind of predicate to use in the new icmp instruction. /// Non-NULL return value will be a true or false constant. /// NULL return means a new ICmp is needed. The predicate for which is /// output in NewICmpPred. 
Value *getICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS, CmpInst::Predicate &NewICmpPred); /// PredicatesFoldable - Return true if both predicates match sign or if at /// least one of them is an equality comparison (which is signless). bool PredicatesFoldable(CmpInst::Predicate p1, CmpInst::Predicate p2); } // end namespace llvm #endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Utils/BasicBlockUtils.h
//===-- Transform/Utils/BasicBlockUtils.h - BasicBlock Utils ----*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This family of functions perform manipulations on basic blocks, and // instructions contained within basic blocks. // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_UTILS_BASICBLOCKUTILS_H #define LLVM_TRANSFORMS_UTILS_BASICBLOCKUTILS_H // FIXME: Move to this file: BasicBlock::removePredecessor, BB::splitBasicBlock #include "llvm/IR/BasicBlock.h" #include "llvm/IR/CFG.h" namespace llvm { class AliasAnalysis; class MemoryDependenceAnalysis; class DominatorTree; class LoopInfo; class Instruction; class MDNode; class ReturnInst; class TargetLibraryInfo; class TerminatorInst; /// DeleteDeadBlock - Delete the specified block, which must have no /// predecessors. void DeleteDeadBlock(BasicBlock *BB); /// FoldSingleEntryPHINodes - We know that BB has one predecessor. If there are /// any single-entry PHI nodes in it, fold them away. This handles the case /// when all entries to the PHI nodes in a block are guaranteed equal, such as /// when the block has exactly one predecessor. void FoldSingleEntryPHINodes(BasicBlock *BB, AliasAnalysis *AA = nullptr, MemoryDependenceAnalysis *MemDep = nullptr); /// DeleteDeadPHIs - Examine each PHI in the given block and delete it if it /// is dead. Also recursively delete any operands that become dead as /// a result. This includes tracing the def-use list from the PHI to see if /// it is ultimately unused or if it reaches an unused cycle. Return true /// if any PHIs were deleted. bool DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI = nullptr); /// MergeBlockIntoPredecessor - Attempts to merge a block into its predecessor, /// if possible. 
/// The return value indicates success or failure.
bool MergeBlockIntoPredecessor(BasicBlock *BB, DominatorTree *DT = nullptr,
                               LoopInfo *LI = nullptr,
                               AliasAnalysis *AA = nullptr,
                               MemoryDependenceAnalysis *MemDep = nullptr);

// ReplaceInstWithValue - Replace all uses of an instruction (specified by BI)
// with a value, then remove and delete the original instruction.
//
void ReplaceInstWithValue(BasicBlock::InstListType &BIL,
                          BasicBlock::iterator &BI, Value *V);

// ReplaceInstWithInst - Replace the instruction specified by BI with the
// instruction specified by I. Copies DebugLoc from BI to I, if I doesn't
// already have a DebugLoc. The original instruction is deleted and BI is
// updated to point to the new instruction.
//
void ReplaceInstWithInst(BasicBlock::InstListType &BIL,
                         BasicBlock::iterator &BI, Instruction *I);

// ReplaceInstWithInst - Replace the instruction specified by From with the
// instruction specified by To. Copies DebugLoc from From to To, if To doesn't
// already have a DebugLoc.
//
void ReplaceInstWithInst(Instruction *From, Instruction *To);

/// \brief Option class for critical edge splitting.
///
/// This provides a builder interface for overriding the default options used
/// during critical edge splitting.
struct CriticalEdgeSplittingOptions {
  AliasAnalysis *AA;
  DominatorTree *DT;
  LoopInfo *LI;
  bool MergeIdenticalEdges;
  bool DontDeleteUselessPHIs;
  bool PreserveLCSSA;

  CriticalEdgeSplittingOptions()
      : AA(nullptr), DT(nullptr), LI(nullptr), MergeIdenticalEdges(false),
        DontDeleteUselessPHIs(false), PreserveLCSSA(false) {}

  /// \brief Basic case of setting up all the analysis.
  CriticalEdgeSplittingOptions(AliasAnalysis *AA, DominatorTree *DT = nullptr,
                               LoopInfo *LI = nullptr)
      : AA(AA), DT(DT), LI(LI), MergeIdenticalEdges(false),
        DontDeleteUselessPHIs(false), PreserveLCSSA(false) {}

  /// \brief A common pattern is to preserve the dominator tree and loop
  /// info but not care about AA.
  CriticalEdgeSplittingOptions(DominatorTree *DT, LoopInfo *LI)
      : AA(nullptr), DT(DT), LI(LI), MergeIdenticalEdges(false),
        DontDeleteUselessPHIs(false), PreserveLCSSA(false) {}

  CriticalEdgeSplittingOptions &setMergeIdenticalEdges() {
    MergeIdenticalEdges = true;
    return *this;
  }

  CriticalEdgeSplittingOptions &setDontDeleteUselessPHIs() {
    DontDeleteUselessPHIs = true;
    return *this;
  }

  CriticalEdgeSplittingOptions &setPreserveLCSSA() {
    PreserveLCSSA = true;
    return *this;
  }
};

/// SplitCriticalEdge - If this edge is a critical edge, insert a new node to
/// split the critical edge. This will update the analyses passed in through
/// the option struct. This returns the new block if the edge was split, null
/// otherwise.
///
/// If MergeIdenticalEdges in the options struct is true (not the default),
/// *all* edges from TI to the specified successor will be merged into the same
/// critical edge block. This is most commonly interesting with switch
/// instructions, which may have many edges to any one destination. This
/// ensures that all edges to that dest go to one block instead of each going
/// to a different block, but isn't the standard definition of a "critical
/// edge".
///
/// It is invalid to call this function on a critical edge that starts at an
/// IndirectBrInst. Splitting these edges will almost always create an invalid
/// program because the address of the new block won't be the one that is jumped
/// to.
///
BasicBlock *SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
                              const CriticalEdgeSplittingOptions &Options =
                                  CriticalEdgeSplittingOptions());

/// Convenience overload: split the edge leaving BB through successor
/// iterator SI.
inline BasicBlock *
SplitCriticalEdge(BasicBlock *BB, succ_iterator SI,
                  const CriticalEdgeSplittingOptions &Options =
                      CriticalEdgeSplittingOptions()) {
  return SplitCriticalEdge(BB->getTerminator(), SI.getSuccessorIndex(),
                           Options);
}

/// SplitCriticalEdge - If the edge from *PI to BB is not critical, return
/// false. Otherwise, split all edges between the two blocks and return true.
/// This updates all of the same analyses as the other SplitCriticalEdge
/// function; the analyses updated are the ones supplied in the options
/// struct.
inline bool SplitCriticalEdge(BasicBlock *Succ, pred_iterator PI,
                              const CriticalEdgeSplittingOptions &Options =
                                  CriticalEdgeSplittingOptions()) {
  bool MadeChange = false;
  TerminatorInst *TI = (*PI)->getTerminator();
  // A predecessor may reach Succ through several successor slots (e.g. a
  // switch); try each one.
  for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
    if (TI->getSuccessor(i) == Succ)
      MadeChange |= !!SplitCriticalEdge(TI, i, Options);
  return MadeChange;
}

/// SplitCriticalEdge - If an edge from Src to Dst is critical, split the edge
/// and return the new split-off block; otherwise return null. This method
/// requires that there be an edge between the two blocks. It updates the
/// analyses passed in the options struct.
inline BasicBlock *
SplitCriticalEdge(BasicBlock *Src, BasicBlock *Dst,
                  const CriticalEdgeSplittingOptions &Options =
                      CriticalEdgeSplittingOptions()) {
  TerminatorInst *TI = Src->getTerminator();
  unsigned i = 0;
  while (1) {
    assert(i != TI->getNumSuccessors() && "Edge doesn't exist!");
    if (TI->getSuccessor(i) == Dst)
      return SplitCriticalEdge(TI, i, Options);
    ++i;
  }
}

// SplitAllCriticalEdges - Loop over all of the edges in the CFG,
// breaking critical edges as they are found.
// Returns the number of broken edges.
unsigned SplitAllCriticalEdges(Function &F,
                               const CriticalEdgeSplittingOptions &Options =
                                   CriticalEdgeSplittingOptions());

/// SplitEdge - Split the edge connecting specified block.
BasicBlock *SplitEdge(BasicBlock *From, BasicBlock *To,
                      DominatorTree *DT = nullptr, LoopInfo *LI = nullptr);

/// SplitBlock - Split the specified block at the specified instruction - every
/// thing before SplitPt stays in Old and everything starting with SplitPt moves
/// to a new block. The two blocks are joined by an unconditional branch and
/// the loop info is updated.
/// BasicBlock *SplitBlock(BasicBlock *Old, Instruction *SplitPt, DominatorTree *DT = nullptr, LoopInfo *LI = nullptr); /// SplitBlockPredecessors - This method introduces at least one new basic block /// into the function and moves some of the predecessors of BB to be /// predecessors of the new block. The new predecessors are indicated by the /// Preds array. The new block is given a suffix of 'Suffix'. Returns new basic /// block to which predecessors from Preds are now pointing. /// /// If BB is a landingpad block then additional basicblock might be introduced. /// It will have Suffix+".split_lp". See SplitLandingPadPredecessors for more /// details on this case. /// /// This currently updates the LLVM IR, AliasAnalysis, DominatorTree, /// DominanceFrontier, LoopInfo, and LCCSA but no other analyses. /// In particular, it does not preserve LoopSimplify (because it's /// complicated to handle the case where one of the edges being split /// is an exit of a loop with other exits). /// BasicBlock *SplitBlockPredecessors(BasicBlock *BB, ArrayRef<BasicBlock *> Preds, const char *Suffix, AliasAnalysis *AA = nullptr, DominatorTree *DT = nullptr, LoopInfo *LI = nullptr, bool PreserveLCSSA = false); /// SplitLandingPadPredecessors - This method transforms the landing pad, /// OrigBB, by introducing two new basic blocks into the function. One of those /// new basic blocks gets the predecessors listed in Preds. The other basic /// block gets the remaining predecessors of OrigBB. The landingpad instruction /// OrigBB is clone into both of the new basic blocks. The new blocks are given /// the suffixes 'Suffix1' and 'Suffix2', and are returned in the NewBBs vector. /// /// This currently updates the LLVM IR, AliasAnalysis, DominatorTree, /// DominanceFrontier, LoopInfo, and LCCSA but no other analyses. 
In particular, /// it does not preserve LoopSimplify (because it's complicated to handle the /// case where one of the edges being split is an exit of a loop with other /// exits). /// void SplitLandingPadPredecessors(BasicBlock *OrigBB, ArrayRef<BasicBlock *> Preds, const char *Suffix, const char *Suffix2, SmallVectorImpl<BasicBlock *> &NewBBs, AliasAnalysis *AA = nullptr, DominatorTree *DT = nullptr, LoopInfo *LI = nullptr, bool PreserveLCSSA = false); /// FoldReturnIntoUncondBranch - This method duplicates the specified return /// instruction into a predecessor which ends in an unconditional branch. If /// the return instruction returns a value defined by a PHI, propagate the /// right value into the return. It returns the new return instruction in the /// predecessor. ReturnInst *FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB, BasicBlock *Pred); /// SplitBlockAndInsertIfThen - Split the containing block at the /// specified instruction - everything before and including SplitBefore stays /// in the old basic block, and everything after SplitBefore is moved to a /// new block. The two blocks are connected by a conditional branch /// (with value of Cmp being the condition). /// Before: /// Head /// SplitBefore /// Tail /// After: /// Head /// if (Cond) /// ThenBlock /// SplitBefore /// Tail /// /// If Unreachable is true, then ThenBlock ends with /// UnreachableInst, otherwise it branches to Tail. /// Returns the NewBasicBlock's terminator. /// /// Updates DT if given. TerminatorInst *SplitBlockAndInsertIfThen(Value *Cond, Instruction *SplitBefore, bool Unreachable, MDNode *BranchWeights = nullptr, DominatorTree *DT = nullptr); /// SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen, /// but also creates the ElseBlock. 
/// Before: /// Head /// SplitBefore /// Tail /// After: /// Head /// if (Cond) /// ThenBlock /// else /// ElseBlock /// SplitBefore /// Tail void SplitBlockAndInsertIfThenElse(Value *Cond, Instruction *SplitBefore, TerminatorInst **ThenTerm, TerminatorInst **ElseTerm, MDNode *BranchWeights = nullptr); /// /// GetIfCondition - Check whether BB is the merge point of a if-region. /// If so, return the boolean condition that determines which entry into /// BB will be taken. Also, return by references the block that will be /// entered from if the condition is true, and the block that will be /// entered if the condition is false. Value *GetIfCondition(BasicBlock *BB, BasicBlock *&IfTrue, BasicBlock *&IfFalse); } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Utils/LoopVersioning.h
//===- LoopVersioning.h - Utility to version a loop -------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines a utility class to perform loop versioning. The versioned // loop speculates that otherwise may-aliasing memory accesses don't overlap and // emits checks to prove this. // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_UTILS_LOOPVERSIONING_H #define LLVM_TRANSFORMS_UTILS_LOOPVERSIONING_H #include "llvm/Transforms/Utils/ValueMapper.h" namespace llvm { class Loop; class LoopAccessInfo; class LoopInfo; /// \brief This class emits a version of the loop where run-time checks ensure /// that may-alias pointers can't overlap. /// /// It currently only supports single-exit loops and assumes that the loop /// already has a preheader. class LoopVersioning { public: LoopVersioning(const LoopAccessInfo &LAI, Loop *L, LoopInfo *LI, DominatorTree *DT, const SmallVector<int, 8> *PtrToPartition = nullptr); /// \brief Returns true if we need memchecks to disambiguate may-aliasing /// accesses. bool needsRuntimeChecks() const; /// \brief Performs the CFG manipulation part of versioning the loop including /// the DominatorTree and LoopInfo updates. /// /// The loop that was used to construct the class will be the "versioned" loop /// i.e. the loop that will receive control if all the memchecks pass. /// /// This allows the loop transform pass to operate on the same loop regardless /// of whether versioning was necessary or not: /// /// for each loop L: /// analyze L /// if versioning is necessary version L /// transform L void versionLoop(Pass *P); /// \brief Adds the necessary PHI nodes for the versioned loops based on the /// loop-defined values used outside of the loop. 
/// /// This needs to be called after versionLoop if there are defs in the loop /// that are used outside the loop. FIXME: this should be invoked internally /// by versionLoop and made private. void addPHINodes(const SmallVectorImpl<Instruction *> &DefsUsedOutside); /// \brief Returns the versioned loop. Control flows here if pointers in the /// loop don't alias (i.e. all memchecks passed). (This loop is actually the /// same as the original loop that we got constructed with.) Loop *getVersionedLoop() { return VersionedLoop; } /// \brief Returns the fall-back loop. Control flows here if pointers in the /// loop may alias (i.e. one of the memchecks failed). Loop *getNonVersionedLoop() { return NonVersionedLoop; } private: /// \brief The original loop. This becomes the "versioned" one. I.e., /// control flows here if pointers in the loop don't alias. Loop *VersionedLoop; /// \brief The fall-back loop. I.e. control flows here if pointers in the /// loop may alias (memchecks failed). Loop *NonVersionedLoop; /// \brief For each memory pointer it contains the partitionId it is used in. /// If nullptr, no partitioning is used. /// /// The I-th entry corresponds to I-th entry in LAI.getRuntimePointerCheck(). /// If the pointer is used in multiple partitions the entry is set to -1. const SmallVector<int, 8> *PtrToPartition; /// \brief This maps the instructions from VersionedLoop to their counterpart /// in NonVersionedLoop. ValueToValueMapTy VMap; /// \brief Analyses used. const LoopAccessInfo &LAI; LoopInfo *LI; DominatorTree *DT; }; } #endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Utils/IntegerDivision.h
//===- llvm/Transforms/Utils/IntegerDivision.h ------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains an implementation of 32bit and 64bit scalar integer // division for targets that don't have native support. It's largely derived // from compiler-rt's implementations of __udivsi3 and __udivmoddi4, // but hand-tuned for targets that prefer less control flow. // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_UTILS_INTEGERDIVISION_H #define LLVM_TRANSFORMS_UTILS_INTEGERDIVISION_H namespace llvm { class BinaryOperator; } namespace llvm { /// Generate code to calculate the remainder of two integers, replacing Rem /// with the generated code. This currently generates code using the udiv /// expansion, but future work includes generating more specialized code, /// e.g. when more information about the operands are known. Implements both /// 32bit and 64bit scalar division. /// /// @brief Replace Rem with generated code. bool expandRemainder(BinaryOperator *Rem); /// Generate code to divide two integers, replacing Div with the generated /// code. This currently generates code similarly to compiler-rt's /// implementations, but future work includes generating more specialized code /// when more information about the operands are known. Implements both /// 32bit and 64bit scalar division. /// /// @brief Replace Div with generated code. bool expandDivision(BinaryOperator* Div); /// Generate code to calculate the remainder of two integers, replacing Rem /// with the generated code. Uses ExpandReminder with a 32bit Rem which /// makes it useful for targets with little or no support for less than /// 32 bit arithmetic. /// /// @brief Replace Rem with generated code. 
bool expandRemainderUpTo32Bits(BinaryOperator *Rem); /// Generate code to calculate the remainder of two integers, replacing Rem /// with the generated code. Uses ExpandReminder with a 64bit Rem. /// /// @brief Replace Rem with generated code. bool expandRemainderUpTo64Bits(BinaryOperator *Rem); /// Generate code to divide two integers, replacing Div with the generated /// code. Uses ExpandDivision with a 32bit Div which makes it useful for /// targets with little or no support for less than 32 bit arithmetic. /// /// @brief Replace Rem with generated code. bool expandDivisionUpTo32Bits(BinaryOperator *Div); /// Generate code to divide two integers, replacing Div with the generated /// code. Uses ExpandDivision with a 64bit Div. /// /// @brief Replace Rem with generated code. bool expandDivisionUpTo64Bits(BinaryOperator *Div); } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Utils/ValueMapper.h
//===- ValueMapper.h - Remapping for constants and metadata -----*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the MapValue interface which is used by various parts of // the Transforms/Utils library to implement cloning and linking facilities. // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_UTILS_VALUEMAPPER_H #define LLVM_TRANSFORMS_UTILS_VALUEMAPPER_H #include "llvm/IR/ValueMap.h" namespace llvm { class Value; class Instruction; typedef ValueMap<const Value *, WeakTrackingVH> ValueToValueMapTy; /// ValueMapTypeRemapper - This is a class that can be implemented by clients /// to remap types when cloning constants and instructions. class ValueMapTypeRemapper { virtual void anchor(); // Out of line method. public: virtual ~ValueMapTypeRemapper() {} /// remapType - The client should implement this method if they want to /// remap types while mapping values. virtual Type *remapType(Type *SrcTy) = 0; }; /// ValueMaterializer - This is a class that can be implemented by clients /// to materialize Values on demand. class ValueMaterializer { virtual void anchor(); // Out of line method. public: virtual ~ValueMaterializer() {} /// materializeValueFor - The client should implement this method if they /// want to generate a mapped Value on demand. For example, if linking /// lazily. virtual Value *materializeValueFor(Value *V) = 0; }; /// RemapFlags - These are flags that the value mapping APIs allow. enum RemapFlags { RF_None = 0, /// RF_NoModuleLevelChanges - If this flag is set, the remapper knows that /// only local values within a function (such as an instruction or argument) /// are mapped, not global values like functions and global metadata. 
RF_NoModuleLevelChanges = 1, /// RF_IgnoreMissingEntries - If this flag is set, the remapper ignores /// entries that are not in the value map. If it is unset, it aborts if an /// operand is asked to be remapped which doesn't exist in the mapping. RF_IgnoreMissingEntries = 2 }; static inline RemapFlags operator|(RemapFlags LHS, RemapFlags RHS) { return RemapFlags(unsigned(LHS)|unsigned(RHS)); } Value *MapValue(const Value *V, ValueToValueMapTy &VM, RemapFlags Flags = RF_None, ValueMapTypeRemapper *TypeMapper = nullptr, ValueMaterializer *Materializer = nullptr); Metadata *MapMetadata(const Metadata *MD, ValueToValueMapTy &VM, RemapFlags Flags = RF_None, ValueMapTypeRemapper *TypeMapper = nullptr, ValueMaterializer *Materializer = nullptr); /// MapMetadata - provide versions that preserve type safety for MDNodes. MDNode *MapMetadata(const MDNode *MD, ValueToValueMapTy &VM, RemapFlags Flags = RF_None, ValueMapTypeRemapper *TypeMapper = nullptr, ValueMaterializer *Materializer = nullptr); void RemapInstruction(Instruction *I, ValueToValueMapTy &VM, RemapFlags Flags = RF_None, ValueMapTypeRemapper *TypeMapper = nullptr, ValueMaterializer *Materializer = nullptr); /// MapValue - provide versions that preserve type safety for Constants. inline Constant *MapValue(const Constant *V, ValueToValueMapTy &VM, RemapFlags Flags = RF_None, ValueMapTypeRemapper *TypeMapper = nullptr, ValueMaterializer *Materializer = nullptr) { return cast<Constant>(MapValue((const Value*)V, VM, Flags, TypeMapper, Materializer)); } } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Utils/LoopSimplify.h
//===- LoopSimplify.h - Loop Canonicalization Pass ----------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // See LoopSimplify.cpp for description of the pass. // //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar.h" using namespace llvm; struct LoopSimplify : public FunctionPass { static char ID; // Pass identification, replacement for typeid LoopSimplify(); // AA - If we have an alias analysis object to update, this is it, otherwise // this is null. AliasAnalysis *AA; DominatorTree *DT; LoopInfo *LI; ScalarEvolution *SE; AssumptionCache *AC; bool runOnFunction(llvm::Function &F) override; void getAnalysisUsage(llvm::AnalysisUsage &AU) const override; /// verifyAnalysis() - Verify LoopSimplifyForm's guarantees. void verifyAnalysis() const override; };
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Utils/CodeExtractor.h
//===-- Transform/Utils/CodeExtractor.h - Code extraction util --*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // A utility to support extracting code from one function into its own // stand-alone function. // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_UTILS_CODEEXTRACTOR_H #define LLVM_TRANSFORMS_UTILS_CODEEXTRACTOR_H #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SetVector.h" namespace llvm { class BasicBlock; class DominatorTree; class Function; class Loop; class Module; class RegionNode; class Type; class Value; /// \brief Utility class for extracting code into a new function. /// /// This utility provides a simple interface for extracting some sequence of /// code into its own function, replacing it with a call to that function. It /// also provides various methods to query about the nature and result of /// such a transformation. /// /// The rough algorithm used is: /// 1) Find both the inputs and outputs for the extracted region. /// 2) Pass the inputs as arguments, remapping them within the extracted /// function to arguments. /// 3) Add allocas for any scalar outputs, adding all of the outputs' allocas /// as arguments, and inserting stores to the arguments for any scalars. class CodeExtractor { typedef SetVector<Value *> ValueSet; // Various bits of state computed on construction. DominatorTree *const DT; const bool AggregateArgs; // Bits of intermediate state computed at various phases of extraction. SetVector<BasicBlock *> Blocks; unsigned NumExitBlocks; Type *RetTy; public: /// \brief Create a code extractor for a single basic block. /// /// In this formation, we don't require a dominator tree. The given basic /// block is set up for extraction. 
CodeExtractor(BasicBlock *BB, bool AggregateArgs = false); /// \brief Create a code extractor for a sequence of blocks. /// /// Given a sequence of basic blocks where the first block in the sequence /// dominates the rest, prepare a code extractor object for pulling this /// sequence out into its new function. When a DominatorTree is also given, /// extra checking and transformations are enabled. CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT = nullptr, bool AggregateArgs = false); /// \brief Create a code extractor for a loop body. /// /// Behaves just like the generic code sequence constructor, but uses the /// block sequence of the loop. CodeExtractor(DominatorTree &DT, Loop &L, bool AggregateArgs = false); /// \brief Create a code extractor for a region node. /// /// Behaves just like the generic code sequence constructor, but uses the /// block sequence of the region node passed in. CodeExtractor(DominatorTree &DT, const RegionNode &RN, bool AggregateArgs = false); /// \brief Perform the extraction, returning the new function. /// /// Returns zero when called on a CodeExtractor instance where isEligible /// returns false. Function *extractCodeRegion(); /// \brief Test whether this code extractor is eligible. /// /// Based on the blocks used when constructing the code extractor, /// determine whether it is eligible for extraction. bool isEligible() const { return !Blocks.empty(); } /// \brief Compute the set of input values and output values for the code. /// /// These can be used either when performing the extraction or to evaluate /// the expected size of a call to the extracted function. Note that this /// work cannot be cached between the two as once we decide to extract /// a code sequence, that sequence is modified, including changing these /// sets, before extraction occurs. These modifications won't have any /// significant impact on the cost however. 
void findInputsOutputs(ValueSet &Inputs, ValueSet &Outputs) const; private: void severSplitPHINodes(BasicBlock *&Header); void splitReturnBlocks(); Function *constructFunction(const ValueSet &inputs, const ValueSet &outputs, BasicBlock *header, BasicBlock *newRootNode, BasicBlock *newHeader, Function *oldFunction, Module *M); void moveCodeToFunction(Function *newFunction); void emitCallAndSwitchStatement(Function *newFunction, BasicBlock *newHeader, ValueSet &inputs, ValueSet &outputs); }; } #endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Utils/CtorUtils.h
//===- CtorUtils.h - Helpers for working with global_ctors ------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines functions that are used to process llvm.global_ctors. // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_UTILS_CTORUTILS_H #define LLVM_TRANSFORMS_UTILS_CTORUTILS_H #include "llvm/ADT/STLExtras.h" namespace llvm { class GlobalVariable; class Function; class Module; /// Call "ShouldRemove" for every entry in M's global_ctor list and remove the /// entries for which it returns true. Return true if anything changed. bool optimizeGlobalCtorsList(Module &M, function_ref<bool(Function *)> ShouldRemove); } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Utils/ASanStackFrameLayout.h
//===- ASanStackFrameLayout.h - ComputeASanStackFrameLayout -----*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This header defines ComputeASanStackFrameLayout and auxiliary data structs. // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_UTILS_ASANSTACKFRAMELAYOUT_H #define LLVM_TRANSFORMS_UTILS_ASANSTACKFRAMELAYOUT_H #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" namespace llvm { class AllocaInst; // These magic constants should be the same as in // in asan_internal.h from ASan runtime in compiler-rt. static const int kAsanStackLeftRedzoneMagic = 0xf1; static const int kAsanStackMidRedzoneMagic = 0xf2; static const int kAsanStackRightRedzoneMagic = 0xf3; // Input/output data struct for ComputeASanStackFrameLayout. struct ASanStackVariableDescription { const char *Name; // Name of the variable that will be displayed by asan // if a stack-related bug is reported. uint64_t Size; // Size of the variable in bytes. size_t Alignment; // Alignment of the variable (power of 2). AllocaInst *AI; // The actual AllocaInst. size_t Offset; // Offset from the beginning of the frame; // set by ComputeASanStackFrameLayout. }; // Output data struct for ComputeASanStackFrameLayout. struct ASanStackFrameLayout { // Frame description, see DescribeAddressIfStack in ASan runtime. SmallString<64> DescriptionString; // The contents of the shadow memory for the stack frame that we need // to set at function entry. SmallVector<uint8_t, 64> ShadowBytes; size_t FrameAlignment; // Alignment for the entire frame. size_t FrameSize; // Size of the frame in bytes. }; void ComputeASanStackFrameLayout( // The array of stack variables. The elements may get reordered and changed. 
SmallVectorImpl<ASanStackVariableDescription> &Vars, // AddressSanitizer's shadow granularity. Usually 8, may also be 16, 32, 64. size_t Granularity, // The minimal size of the left-most redzone (header). // At least 4 pointer sizes, power of 2, and >= Granularity. // The resulting FrameSize should be multiple of MinHeaderSize. size_t MinHeaderSize, // The result is put here. ASanStackFrameLayout *Layout); } // llvm namespace #endif // LLVM_TRANSFORMS_UTILS_ASANSTACKFRAMELAYOUT_H
0
repos/DirectXShaderCompiler/include/llvm/Transforms
repos/DirectXShaderCompiler/include/llvm/Transforms/Utils/SymbolRewriter.h
//===-- SymbolRewriter.h - Symbol Rewriting Pass ----------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file provides the prototypes and definitions related to the Symbol // Rewriter pass. // // The Symbol Rewriter pass takes a set of rewrite descriptors which define // transformations for symbol names. These can be either single name to name // trnsformation or more broad regular expression based transformations. // // All the functions are re-written at the IR level. The Symbol Rewriter itself // is exposed as a module level pass. All symbols at the module level are // iterated. For any matching symbol, the requested transformation is applied, // updating references to it as well (a la RAUW). The resulting binary will // only contain the rewritten symbols. // // By performing this operation in the compiler, we are able to catch symbols // that would otherwise not be possible to catch (e.g. inlined symbols). // // This makes it possible to cleanly transform symbols without resorting to // overly-complex macro tricks and the pre-processor. An example of where this // is useful is the sanitizers where we would like to intercept a well-defined // set of functions across the module. // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_UTILS_SYMBOL_REWRITER_H #define LLVM_TRANSFORMS_UTILS_SYMBOL_REWRITER_H #include "llvm/ADT/ilist.h" #include "llvm/ADT/ilist_node.h" #include "llvm/IR/Module.h" namespace llvm { class MemoryBuffer; namespace yaml { class KeyValueNode; class MappingNode; class ScalarNode; class Stream; } namespace SymbolRewriter { /// The basic entity representing a rewrite operation. It serves as the base /// class for any rewrite descriptor. 
It has a certain set of specializations /// which describe a particular rewrite. /// /// The RewriteMapParser can be used to parse a mapping file that provides the /// mapping for rewriting the symbols. The descriptors individually describe /// whether to rewrite a function, global variable, or global alias. Each of /// these can be selected either by explicitly providing a name for the ones to /// be rewritten or providing a (posix compatible) regular expression that will /// select the symbols to rewrite. This descriptor list is passed to the /// SymbolRewriter pass. class RewriteDescriptor : public ilist_node<RewriteDescriptor> { RewriteDescriptor(const RewriteDescriptor &) = delete; const RewriteDescriptor & operator=(const RewriteDescriptor &) = delete; public: enum class Type { Invalid, /// invalid Function, /// function - descriptor rewrites a function GlobalVariable, /// global variable - descriptor rewrites a global variable NamedAlias, /// named alias - descriptor rewrites a global alias }; virtual ~RewriteDescriptor() {} Type getType() const { return Kind; } virtual bool performOnModule(Module &M) = 0; protected: explicit RewriteDescriptor(Type T) : Kind(T) {} private: const Type Kind; }; typedef iplist<RewriteDescriptor> RewriteDescriptorList; class RewriteMapParser { public: bool parse(const std::string &MapFile, RewriteDescriptorList *Descriptors); private: bool parse(std::unique_ptr<MemoryBuffer> &MapFile, RewriteDescriptorList *DL); bool parseEntry(yaml::Stream &Stream, yaml::KeyValueNode &Entry, RewriteDescriptorList *DL); bool parseRewriteFunctionDescriptor(yaml::Stream &Stream, yaml::ScalarNode *Key, yaml::MappingNode *Value, RewriteDescriptorList *DL); bool parseRewriteGlobalVariableDescriptor(yaml::Stream &Stream, yaml::ScalarNode *Key, yaml::MappingNode *Value, RewriteDescriptorList *DL); bool parseRewriteGlobalAliasDescriptor(yaml::Stream &YS, yaml::ScalarNode *K, yaml::MappingNode *V, RewriteDescriptorList *DL); }; } template <> struct 
ilist_traits<SymbolRewriter::RewriteDescriptor> : public ilist_default_traits<SymbolRewriter::RewriteDescriptor> { mutable ilist_half_node<SymbolRewriter::RewriteDescriptor> Sentinel; public: // createSentinel is used to get a reference to a node marking the end of // the list. Because the sentinel is relative to this instance, use a // non-static method. // HLSL Change Starts // Temporarily disable "downcast of address" UBSAN runtime error // https://github.com/microsoft/DirectXShaderCompiler/issues/6446 #ifdef __has_feature #if __has_feature(undefined_behavior_sanitizer) __attribute__((no_sanitize("undefined"))) #endif // __has_feature(address_sanitizer) #endif // defined(__has_feature) // HLSL Change Ends SymbolRewriter::RewriteDescriptor * createSentinel() const { // since i[p] lists always publicly derive from the corresponding // traits, placing a data member in this class will augment the // i[p]list. Since the NodeTy is expected to publicly derive from // ilist_node<NodeTy>, there is a legal viable downcast from it to // NodeTy. We use this trick to superpose i[p]list with a "ghostly" // NodeTy, which becomes the sentinel. Dereferencing the sentinel is // forbidden (save the ilist_node<NodeTy>) so no one will ever notice // the superposition. return static_cast<SymbolRewriter::RewriteDescriptor *>(&Sentinel); } void destroySentinel(SymbolRewriter::RewriteDescriptor *) {} SymbolRewriter::RewriteDescriptor *provideInitialHead() const { return createSentinel(); } SymbolRewriter::RewriteDescriptor * ensureHead(SymbolRewriter::RewriteDescriptor *&) const { return createSentinel(); } static void noteHead(SymbolRewriter::RewriteDescriptor *, SymbolRewriter::RewriteDescriptor *) {} }; ModulePass *createRewriteSymbolsPass(); ModulePass *createRewriteSymbolsPass(SymbolRewriter::RewriteDescriptorList &); } #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/PredIteratorCache.h
//===- PredIteratorCache.h - pred_iterator Cache ----------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the PredIteratorCache class. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_PREDITERATORCACHE_H #define LLVM_IR_PREDITERATORCACHE_H #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallVector.h" #include "llvm/IR/CFG.h" #include "llvm/Support/Allocator.h" namespace llvm { /// PredIteratorCache - This class is an extremely trivial cache for /// predecessor iterator queries. This is useful for code that repeatedly /// wants the predecessor list for the same blocks. class PredIteratorCache { /// BlockToPredsMap - Pointer to null-terminated list. DenseMap<BasicBlock *, BasicBlock **> BlockToPredsMap; DenseMap<BasicBlock *, unsigned> BlockToPredCountMap; /// Memory - This is the space that holds cached preds. BumpPtrAllocator Memory; private: /// GetPreds - Get a cached list for the null-terminated predecessor list of /// the specified block. This can be used in a loop like this: /// for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) /// use(*PI); /// instead of: /// for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) BasicBlock **GetPreds(BasicBlock *BB) { BasicBlock **&Entry = BlockToPredsMap[BB]; if (Entry) return Entry; SmallVector<BasicBlock *, 32> PredCache(pred_begin(BB), pred_end(BB)); PredCache.push_back(nullptr); // null terminator. 
BlockToPredCountMap[BB] = PredCache.size() - 1; Entry = Memory.Allocate<BasicBlock *>(PredCache.size()); std::copy(PredCache.begin(), PredCache.end(), Entry); return Entry; } unsigned GetNumPreds(BasicBlock *BB) { GetPreds(BB); return BlockToPredCountMap[BB]; } public: size_t size(BasicBlock *BB) { return GetNumPreds(BB); } ArrayRef<BasicBlock *> get(BasicBlock *BB) { return makeArrayRef(GetPreds(BB), GetNumPreds(BB)); } /// clear - Remove all information. void clear() { BlockToPredsMap.clear(); BlockToPredCountMap.clear(); Memory.Reset(); } }; } // end namespace llvm #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/Instruction.h
//===-- llvm/Instruction.h - Instruction class definition -------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains the declaration of the Instruction class, which is the // base class for all of the LLVM instructions. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_INSTRUCTION_H #define LLVM_IR_INSTRUCTION_H #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/ilist_node.h" #include "llvm/IR/DebugLoc.h" #include "llvm/IR/SymbolTableListTraits.h" #include "llvm/IR/User.h" #include <memory> // HLSL Change #include <utility> // HLSL Change namespace llvm { class FastMathFlags; class LLVMContext; class MDNode; class BasicBlock; struct AAMDNodes; template <> struct ilist_traits<Instruction> : public SymbolTableListTraits<Instruction, BasicBlock> { /// \brief Return a node that marks the end of a list. /// /// The sentinel is relative to this instance, so we use a non-static /// method. // HLSL Change Starts Instruction *createSentinel() const { return Sentinel.get(); } static void destroySentinel(Instruction *) {} Instruction *provideInitialHead() const { return Sentinel.get(); } Instruction *ensureHead(Instruction *) const { return Sentinel.get(); } static void noteHead(Instruction *, Instruction *) {} public: // Saves and takes ownership of the sentinel. // Must be called before the other accessors above. void setSentinel(std::unique_ptr<Instruction> &&s) { Sentinel = std::move(s); } private: std::unique_ptr<Instruction> Sentinel; // HLSL Change Ends }; class Instruction : public User, public ilist_node<Instruction> { void operator=(const Instruction &) = delete; Instruction(const Instruction &) = delete; BasicBlock *Parent; DebugLoc DbgLoc; // 'dbg' Metadata cache. 
enum { /// HasMetadataBit - This is a bit stored in the SubClassData field which /// indicates whether this instruction has metadata attached to it or not. HasMetadataBit = 1 << 15 }; public: // Out of line virtual method, so the vtable, etc has a home. ~Instruction() override; /// user_back - Specialize the methods defined in Value, as we know that an /// instruction can only be used by other instructions. Instruction *user_back() { return cast<Instruction>(*user_begin());} const Instruction *user_back() const { return cast<Instruction>(*user_begin());} inline const BasicBlock *getParent() const { return Parent; } inline BasicBlock *getParent() { return Parent; } /// \brief Return the module owning the function this instruction belongs to /// or nullptr it the function does not have a module. /// /// Note: this is undefined behavior if the instruction does not have a /// parent, or the parent basic block does not have a parent function. const Module *getModule() const; Module *getModule(); /// removeFromParent - This method unlinks 'this' from the containing basic /// block, but does not delete it. /// void removeFromParent(); /// eraseFromParent - This method unlinks 'this' from the containing basic /// block and deletes it. /// /// \returns an iterator pointing to the element after the erased one iplist<Instruction>::iterator eraseFromParent(); /// Insert an unlinked instruction into a basic block immediately before /// the specified instruction. void insertBefore(Instruction *InsertPos); /// Insert an unlinked instruction into a basic block immediately after the /// specified instruction. void insertAfter(Instruction *InsertPos); /// moveBefore - Unlink this instruction from its current basic block and /// insert it into the basic block that MovePos lives in, right before /// MovePos. void moveBefore(Instruction *MovePos); //===--------------------------------------------------------------------===// // Subclass classification. 
//===--------------------------------------------------------------------===// /// getOpcode() returns a member of one of the enums like Instruction::Add. unsigned getOpcode() const { return getValueID() - InstructionVal; } const char *getOpcodeName() const { return getOpcodeName(getOpcode()); } bool isTerminator() const { return isTerminator(getOpcode()); } bool isBinaryOp() const { return isBinaryOp(getOpcode()); } bool isShift() { return isShift(getOpcode()); } bool isCast() const { return isCast(getOpcode()); } static const char* getOpcodeName(unsigned OpCode); static inline bool isTerminator(unsigned OpCode) { return OpCode >= TermOpsBegin && OpCode < TermOpsEnd; } static inline bool isBinaryOp(unsigned Opcode) { return Opcode >= BinaryOpsBegin && Opcode < BinaryOpsEnd; } /// @brief Determine if the Opcode is one of the shift instructions. static inline bool isShift(unsigned Opcode) { return Opcode >= Shl && Opcode <= AShr; } /// isLogicalShift - Return true if this is a logical shift left or a logical /// shift right. inline bool isLogicalShift() const { return getOpcode() == Shl || getOpcode() == LShr; } /// isArithmeticShift - Return true if this is an arithmetic shift right. inline bool isArithmeticShift() const { return getOpcode() == AShr; } /// @brief Determine if the OpCode is one of the CastInst instructions. static inline bool isCast(unsigned OpCode) { return OpCode >= CastOpsBegin && OpCode < CastOpsEnd; } //===--------------------------------------------------------------------===// // Metadata manipulation. //===--------------------------------------------------------------------===// /// hasMetadata() - Return true if this instruction has any metadata attached /// to it. bool hasMetadata() const { return DbgLoc || hasMetadataHashEntry(); } /// hasMetadataOtherThanDebugLoc - Return true if this instruction has /// metadata attached to it other than a debug location. 
bool hasMetadataOtherThanDebugLoc() const { return hasMetadataHashEntry(); } /// getMetadata - Get the metadata of given kind attached to this Instruction. /// If the metadata is not found then return null. MDNode *getMetadata(unsigned KindID) const { if (!hasMetadata()) return nullptr; return getMetadataImpl(KindID); } /// getMetadata - Get the metadata of given kind attached to this Instruction. /// If the metadata is not found then return null. MDNode *getMetadata(StringRef Kind) const { if (!hasMetadata()) return nullptr; return getMetadataImpl(Kind); } /// getAllMetadata - Get all metadata attached to this Instruction. The first /// element of each pair returned is the KindID, the second element is the /// metadata value. This list is returned sorted by the KindID. void getAllMetadata(SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const { if (hasMetadata()) getAllMetadataImpl(MDs); } /// getAllMetadataOtherThanDebugLoc - This does the same thing as /// getAllMetadata, except that it filters out the debug location. void getAllMetadataOtherThanDebugLoc( SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const { if (hasMetadataOtherThanDebugLoc()) getAllMetadataOtherThanDebugLocImpl(MDs); } /// getAAMetadata - Fills the AAMDNodes structure with AA metadata from /// this instruction. When Merge is true, the existing AA metadata is /// merged with that from this instruction providing the most-general result. void getAAMetadata(AAMDNodes &N, bool Merge = false) const; /// setMetadata - Set the metadata of the specified kind to the specified /// node. This updates/replaces metadata if already present, or removes it if /// Node is null. void setMetadata(unsigned KindID, MDNode *Node); void setMetadata(StringRef Kind, MDNode *Node); /// \brief Drop unknown metadata. /// Passes are required to drop metadata they don't understand. This is a /// convenience method for passes to do so. 
void dropUnknownMetadata(ArrayRef<unsigned> KnownIDs); void dropUnknownMetadata() { return dropUnknownMetadata(None); } void dropUnknownMetadata(unsigned ID1) { return dropUnknownMetadata(makeArrayRef(ID1)); } void dropUnknownMetadata(unsigned ID1, unsigned ID2) { unsigned IDs[] = {ID1, ID2}; return dropUnknownMetadata(IDs); } /// setAAMetadata - Sets the metadata on this instruction from the /// AAMDNodes structure. void setAAMetadata(const AAMDNodes &N); /// setDebugLoc - Set the debug location information for this instruction. void setDebugLoc(DebugLoc Loc) { DbgLoc = std::move(Loc); } /// getDebugLoc - Return the debug location for this node as a DebugLoc. const DebugLoc &getDebugLoc() const { return DbgLoc; } /// Set or clear the unsafe-algebra flag on this instruction, which must be an /// operator which supports this flag. See LangRef.html for the meaning of /// this flag. void setHasUnsafeAlgebra(bool B); /// Set or clear the no-nans flag on this instruction, which must be an /// operator which supports this flag. See LangRef.html for the meaning of /// this flag. void setHasNoNaNs(bool B); /// Set or clear the no-infs flag on this instruction, which must be an /// operator which supports this flag. See LangRef.html for the meaning of /// this flag. void setHasNoInfs(bool B); /// Set or clear the no-signed-zeros flag on this instruction, which must be /// an operator which supports this flag. See LangRef.html for the meaning of /// this flag. void setHasNoSignedZeros(bool B); /// Set or clear the allow-reciprocal flag on this instruction, which must be /// an operator which supports this flag. See LangRef.html for the meaning of /// this flag. void setHasAllowReciprocal(bool B); /// Convenience function for setting multiple fast-math flags on this /// instruction, which must be an operator which supports these flags. See /// LangRef.html for the meaning of these flags. 
void setFastMathFlags(FastMathFlags FMF); /// Convenience function for transferring all fast-math flag values to this /// instruction, which must be an operator which supports these flags. See /// LangRef.html for the meaning of these flags. void copyFastMathFlags(FastMathFlags FMF); /// Determine whether the unsafe-algebra flag is set. bool hasUnsafeAlgebra() const; /// Determine whether the no-NaNs flag is set. bool hasNoNaNs() const; /// Determine whether the no-infs flag is set. bool hasNoInfs() const; /// Determine whether the no-signed-zeros flag is set. bool hasNoSignedZeros() const; /// Determine whether the allow-reciprocal flag is set. bool hasAllowReciprocal() const; /// Convenience function for getting all the fast-math flags, which must be an /// operator which supports these flags. See LangRef.html for the meaning of /// these flags. FastMathFlags getFastMathFlags() const; /// Copy I's fast-math flags void copyFastMathFlags(const Instruction *I); private: /// hasMetadataHashEntry - Return true if we have an entry in the on-the-side /// metadata hash. bool hasMetadataHashEntry() const { return (getSubclassDataFromValue() & HasMetadataBit) != 0; } // These are all implemented in Metadata.cpp. MDNode *getMetadataImpl(unsigned KindID) const; MDNode *getMetadataImpl(StringRef Kind) const; void getAllMetadataImpl(SmallVectorImpl<std::pair<unsigned, MDNode *>> &) const; void getAllMetadataOtherThanDebugLocImpl( SmallVectorImpl<std::pair<unsigned, MDNode *>> &) const; void clearMetadataHashEntries(); public: //===--------------------------------------------------------------------===// // Predicates and helper methods. //===--------------------------------------------------------------------===// /// isAssociative - Return true if the instruction is associative: /// /// Associative operators satisfy: x op (y op z) === (x op y) op z /// /// In LLVM, the Add, Mul, And, Or, and Xor operators are associative. 
/// bool isAssociative() const; static bool isAssociative(unsigned op); /// isCommutative - Return true if the instruction is commutative: /// /// Commutative operators satisfy: (x op y) === (y op x) /// /// In LLVM, these are the associative operators, plus SetEQ and SetNE, when /// applied to any type. /// bool isCommutative() const { return isCommutative(getOpcode()); } static bool isCommutative(unsigned op); /// isIdempotent - Return true if the instruction is idempotent: /// /// Idempotent operators satisfy: x op x === x /// /// In LLVM, the And and Or operators are idempotent. /// bool isIdempotent() const { return isIdempotent(getOpcode()); } static bool isIdempotent(unsigned op); /// isNilpotent - Return true if the instruction is nilpotent: /// /// Nilpotent operators satisfy: x op x === Id, /// /// where Id is the identity for the operator, i.e. a constant such that /// x op Id === x and Id op x === x for all x. /// /// In LLVM, the Xor operator is nilpotent. /// bool isNilpotent() const { return isNilpotent(getOpcode()); } static bool isNilpotent(unsigned op); /// mayWriteToMemory - Return true if this instruction may modify memory. /// bool mayWriteToMemory() const; /// mayReadFromMemory - Return true if this instruction may read memory. /// bool mayReadFromMemory() const; /// mayReadOrWriteMemory - Return true if this instruction may read or /// write memory. /// bool mayReadOrWriteMemory() const { return mayReadFromMemory() || mayWriteToMemory(); } /// isAtomic - Return true if this instruction has an /// AtomicOrdering of unordered or higher. /// bool isAtomic() const; /// mayThrow - Return true if this instruction may throw an exception. /// bool mayThrow() const; /// mayReturn - Return true if this is a function that may return. /// this is true for all normal instructions. The only exception /// is functions that are marked with the 'noreturn' attribute. 
/// bool mayReturn() const; /// mayHaveSideEffects - Return true if the instruction may have side effects. /// /// Note that this does not consider malloc and alloca to have side /// effects because the newly allocated memory is completely invisible to /// instructions which don't use the returned value. For cases where this /// matters, isSafeToSpeculativelyExecute may be more appropriate. bool mayHaveSideEffects() const { return mayWriteToMemory() || mayThrow() || !mayReturn(); } /// clone() - Create a copy of 'this' instruction that is identical in all /// ways except the following: /// * The instruction has no parent /// * The instruction has no name /// Instruction *clone() const; /// isIdenticalTo - Return true if the specified instruction is exactly /// identical to the current one. This means that all operands match and any /// extra information (e.g. load is volatile) agree. bool isIdenticalTo(const Instruction *I) const; /// isIdenticalToWhenDefined - This is like isIdenticalTo, except that it /// ignores the SubclassOptionalData flags, which specify conditions /// under which the instruction's result is undefined. bool isIdenticalToWhenDefined(const Instruction *I) const; /// When checking for operation equivalence (using isSameOperationAs) it is /// sometimes useful to ignore certain attributes. enum OperationEquivalenceFlags { /// Check for equivalence ignoring load/store alignment. CompareIgnoringAlignment = 1<<0, /// Check for equivalence treating a type and a vector of that type /// as equivalent. CompareUsingScalarTypes = 1<<1 }; /// This function determines if the specified instruction executes the same /// operation as the current one. This means that the opcodes, type, operand /// types and any other factors affecting the operation must be the same. This /// is similar to isIdenticalTo except the operands themselves don't have to /// be identical. /// @returns true if the specified instruction is the same operation as /// the current one. 
/// @brief Determine if one instruction is the same operation as another. bool isSameOperationAs(const Instruction *I, unsigned flags = 0) const; /// isUsedOutsideOfBlock - Return true if there are any uses of this /// instruction in blocks other than the specified block. Note that PHI nodes /// are considered to evaluate their operands in the corresponding predecessor /// block. bool isUsedOutsideOfBlock(const BasicBlock *BB) const; /// Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Value *V) { return V->getValueID() >= Value::InstructionVal; } //---------------------------------------------------------------------- // Exported enumerations. // enum TermOps { // These terminate basic blocks #define FIRST_TERM_INST(N) TermOpsBegin = N, #define HANDLE_TERM_INST(N, OPC, CLASS) OPC = N, #define LAST_TERM_INST(N) TermOpsEnd = N+1 #include "llvm/IR/Instruction.def" }; enum BinaryOps { #define FIRST_BINARY_INST(N) BinaryOpsBegin = N, #define HANDLE_BINARY_INST(N, OPC, CLASS) OPC = N, #define LAST_BINARY_INST(N) BinaryOpsEnd = N+1 #include "llvm/IR/Instruction.def" }; enum MemoryOps { #define FIRST_MEMORY_INST(N) MemoryOpsBegin = N, #define HANDLE_MEMORY_INST(N, OPC, CLASS) OPC = N, #define LAST_MEMORY_INST(N) MemoryOpsEnd = N+1 #include "llvm/IR/Instruction.def" }; enum CastOps { #define FIRST_CAST_INST(N) CastOpsBegin = N, #define HANDLE_CAST_INST(N, OPC, CLASS) OPC = N, #define LAST_CAST_INST(N) CastOpsEnd = N+1 #include "llvm/IR/Instruction.def" }; enum OtherOps { #define FIRST_OTHER_INST(N) OtherOpsBegin = N, #define HANDLE_OTHER_INST(N, OPC, CLASS) OPC = N, #define LAST_OTHER_INST(N) OtherOpsEnd = N+1 #include "llvm/IR/Instruction.def" }; private: // Shadow Value::setValueSubclassData with a private forwarding method so that // subclasses cannot accidentally use it. 
void setValueSubclassData(unsigned short D) { Value::setValueSubclassData(D); } unsigned short getSubclassDataFromValue() const { return Value::getSubclassDataFromValue(); } void setHasMetadataHashEntry(bool V) { setValueSubclassData((getSubclassDataFromValue() & ~HasMetadataBit) | (V ? HasMetadataBit : 0)); } friend class SymbolTableListTraits<Instruction, BasicBlock>; void setParent(BasicBlock *P); protected: // Instruction subclasses can stick up to 15 bits of stuff into the // SubclassData field of instruction with these members. // Verify that only the low 15 bits are used. void setInstructionSubclassData(unsigned short D) { assert((D & HasMetadataBit) == 0 && "Out of range value put into field"); setValueSubclassData((getSubclassDataFromValue() & HasMetadataBit) | D); } unsigned getSubclassDataFromInstruction() const { return getSubclassDataFromValue() & ~HasMetadataBit; } Instruction(Type *Ty, unsigned iType, Use *Ops, unsigned NumOps, Instruction *InsertBefore = nullptr); Instruction(Type *Ty, unsigned iType, Use *Ops, unsigned NumOps, BasicBlock *InsertAtEnd); private: /// Create a copy of this instruction. Instruction *cloneImpl() const; }; // HLSL Change Starts // Temporarily disable "downcast of address" UBSAN runtime error // https://github.com/microsoft/DirectXShaderCompiler/issues/6446 #if 0 inline Instruction * ilist_traits<Instruction>::createSentinel() const { // Since i(p)lists always publicly derive from their corresponding traits, // placing a data member in this class will augment the i(p)list. But since // the NodeTy is expected to be publicly derive from ilist_node<NodeTy>, // there is a legal viable downcast from it to NodeTy. We use this trick to // superimpose an i(p)list with a "ghostly" NodeTy, which becomes the // sentinel. Dereferencing the sentinel is forbidden (save the // ilist_node<NodeTy>), so no one will ever notice the superposition. 
return static_cast<Instruction *>(&Sentinel); } #endif // HLSL Change Ends // Instruction* is only 4-byte aligned. template<> class PointerLikeTypeTraits<Instruction*> { typedef Instruction* PT; public: static inline void *getAsVoidPointer(PT P) { return P; } static inline PT getFromVoidPointer(void *P) { return static_cast<PT>(P); } enum { NumLowBitsAvailable = 2 }; }; } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/UseListOrder.h
//===- llvm/IR/UseListOrder.h - LLVM Use List Order -------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file has structures and command-line options for preserving use-list // order. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_USELISTORDER_H #define LLVM_IR_USELISTORDER_H #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallVector.h" #include <vector> namespace llvm { class Module; class Function; class Value; /// \brief Structure to hold a use-list order. struct UseListOrder { const Value *V; const Function *F; std::vector<unsigned> Shuffle; UseListOrder(const Value *V, const Function *F, size_t ShuffleSize) : V(V), F(F), Shuffle(ShuffleSize) {} UseListOrder() : V(0), F(0) {} UseListOrder(UseListOrder &&X) : V(X.V), F(X.F), Shuffle(std::move(X.Shuffle)) {} UseListOrder &operator=(UseListOrder &&X) { V = X.V; F = X.F; Shuffle = std::move(X.Shuffle); return *this; } private: UseListOrder(const UseListOrder &X) = delete; UseListOrder &operator=(const UseListOrder &X) = delete; }; typedef std::vector<UseListOrder> UseListOrderStack; } // end namespace llvm #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/LegacyPassNameParser.h
//===- LegacyPassNameParser.h -----------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the PassNameParser and FilteredPassNameParser<> classes,
// which are used to add command line arguments to a utility for all of the
// passes that have been registered into the system.
//
// The PassNameParser class adds ALL passes linked into the system (that are
// creatable) as command line arguments to the tool (when instantiated with the
// appropriate command line option template). The FilteredPassNameParser<>
// template is used for the same purposes as PassNameParser, except that it only
// includes passes that have a PassType that are compatible with the filter
// (which is the template argument).
//
// Note that this is part of the legacy pass manager infrastructure and will be
// (eventually) going away.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_LEGACYPASSNAMEPARSER_H
#define LLVM_IR_LEGACYPASSNAMEPARSER_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cstring>

namespace llvm {

//===----------------------------------------------------------------------===//
// PassNameParser class - Make use of the pass registration mechanism to
// automatically add a command line argument to opt for each pass.
//
class PassNameParser : public PassRegistrationListener,
                       public cl::parser<const PassInfo *> {
public:
  PassNameParser(cl::Option &O);
  ~PassNameParser() override;

  void initialize() {
    cl::parser<const PassInfo *>::initialize();

    // Add all of the passes to the map that got initialized before 'this' did.
    enumeratePasses();
  }

  // ignorablePassImpl - Can be overriden in subclasses to refine the list of
  // which passes we want to include.
  //
  virtual bool ignorablePassImpl(const PassInfo *P) const { return false; }

  inline bool ignorablePass(const PassInfo *P) const {
    // Ignore non-selectable and non-constructible passes!  Ignore
    // non-optimizations.
    return P->getPassArgument() == nullptr || *P->getPassArgument() == 0 ||
           P->getNormalCtor() == nullptr || ignorablePassImpl(P);
  }

  // Implement the PassRegistrationListener callbacks used to populate our map
  //
  void passRegistered(const PassInfo *P) override {
    if (ignorablePass(P)) return;
    // Duplicate pass arguments are a registration error, not a user error.
    if (findOption(P->getPassArgument()) != getNumOptions()) {
      errs() << "Two passes with the same argument (-" << P->getPassArgument()
             << ") attempted to be registered!\n";
      llvm_unreachable(nullptr);
    }
    addLiteralOption(P->getPassArgument(), P, P->getPassName().data());
  }
  // Enumeration of already-registered passes reuses the same registration path.
  void passEnumerate(const PassInfo *P) override { passRegistered(P); }

  // printOptionInfo - Print out information about this option.  Override the
  // default implementation to sort the table before we print...
  void printOptionInfo(const cl::Option &O, size_t GlobalWidth) const override {
    PassNameParser *PNP = const_cast<PassNameParser *>(this);
    array_pod_sort(PNP->Values.begin(), PNP->Values.end(), ValLessThan);
    cl::parser<const PassInfo *>::printOptionInfo(O, GlobalWidth);
  }

private:
  // ValLessThan - Provide a sorting comparator for Values elements...
  //
  // HLSL Change: changed calling convention to __cdecl
  static int __cdecl ValLessThan(const PassNameParser::OptionInfo *VT1,
                                 const PassNameParser::OptionInfo *VT2) {
    return std::strcmp(VT1->Name, VT2->Name);
  }
};

///===----------------------------------------------------------------------===//
/// FilteredPassNameParser class - Make use of the pass registration
/// mechanism to automatically add a command line argument to opt for
/// each pass that satisfies a filter criteria.  Filter should return
/// true for passes to be registered as command-line options.
///
template <typename Filter>
class FilteredPassNameParser : public PassNameParser {
private:
  Filter filter;

public:
  bool ignorablePassImpl(const PassInfo *P) const override {
    return !filter(*P);
  }
};

///===----------------------------------------------------------------------===//
/// PassArgFilter - A filter for use with PassNameFilterParser that only
/// accepts a Pass whose Arg matches certain strings.
///
/// Use like this:
///
/// extern const char AllowedPassArgs[] = "-anders_aa -dse";
///
/// static cl::list<
///   const PassInfo*,
///   bool,
///   FilteredPassNameParser<PassArgFilter<AllowedPassArgs> > >
/// PassList(cl::desc("Passes available:"));
///
/// Only the -anders_aa and -dse options will be available to the user.
///
template <const char *Args>
class PassArgFilter {
public:
  bool operator()(const PassInfo &P) const {
    return (std::strstr(Args, P.getPassArgument()));
  }
};

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/GlobalObject.h
//===-- llvm/GlobalObject.h - Class to represent global objects -*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This represents an independent object. That is, a function or a global
// variable, but not an alias.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_GLOBALOBJECT_H
#define LLVM_IR_GLOBALOBJECT_H

#include "llvm/IR/Constant.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"

namespace llvm {
class Comdat;
class Module;

/// \brief An independent global object: a Function or a GlobalVariable, but
/// not a GlobalAlias. Adds section, comdat, and alignment state on top of
/// GlobalValue.
class GlobalObject : public GlobalValue {
  GlobalObject(const GlobalObject &) = delete;

protected:
  GlobalObject(PointerType *Ty, ValueTy VTy, Use *Ops, unsigned NumOps,
               LinkageTypes Linkage, const Twine &Name)
      : GlobalValue(Ty, VTy, Ops, NumOps, Linkage, Name), ObjComdat(nullptr) {
    setGlobalValueSubClassData(0);
  }

  std::string Section; // Section to emit this into, empty means default
  Comdat *ObjComdat;   // Comdat this object is a member of, if any.
  // Low AlignmentBits of the GlobalValue subclass data encode the alignment;
  // the remaining bits are available to subclasses.
  static const unsigned AlignmentBits = 5;
  static const unsigned GlobalObjectSubClassDataBits =
      GlobalValueSubClassDataBits - AlignmentBits;

private:
  static const unsigned AlignmentMask = (1 << AlignmentBits) - 1;

public:
  /// Return the alignment in bytes. The stored field holds log2(Align) + 1,
  /// so a stored value of 0 decodes to "no alignment" (returns 0).
  unsigned getAlignment() const {
    unsigned Data = getGlobalValueSubClassData();
    unsigned AlignmentData = Data & AlignmentMask;
    return (1u << AlignmentData) >> 1;
  }
  void setAlignment(unsigned Align);

  unsigned getGlobalObjectSubClassData() const;
  void setGlobalObjectSubClassData(unsigned Val);

  bool hasSection() const { return !StringRef(getSection()).empty(); }
  const char *getSection() const { return Section.c_str(); }
  void setSection(StringRef S);

  bool hasComdat() const { return getComdat() != nullptr; }
  const Comdat *getComdat() const { return ObjComdat; }
  Comdat *getComdat() { return ObjComdat; }
  void setComdat(Comdat *C) { ObjComdat = C; }

  void copyAttributesFrom(const GlobalValue *Src) override;

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Value *V) {
    return V->getValueID() == Value::FunctionVal ||
           V->getValueID() == Value::GlobalVariableVal;
  }
};

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/TrackingMDRef.h
//===- llvm/IR/TrackingMDRef.h - Tracking Metadata references ---*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// References to metadata that track RAUW.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_TRACKINGMDREF_H
#define LLVM_IR_TRACKINGMDREF_H

#include "llvm/IR/MetadataTracking.h"
#include "llvm/Support/Casting.h"

namespace llvm {

class Metadata;
class MDNode;
class ValueAsMetadata;

/// \brief Tracking metadata reference.
///
/// This class behaves like \a TrackingVH, but for metadata.  While this
/// reference is alive it registers itself with \a MetadataTracking, so the
/// pointer it holds is updated automatically when the referenced metadata is
/// RAUW'd or deleted.
class TrackingMDRef {
  Metadata *MD;

public:
  TrackingMDRef() : MD(nullptr) {}
  explicit TrackingMDRef(Metadata *MD) : MD(MD) { track(); }

  // Move construction transfers the registered tracking slot from \p X to
  // this object; \p X is left null and no longer tracked (see retrack()).
  TrackingMDRef(TrackingMDRef &&X) : MD(X.MD) { retrack(X); }
  TrackingMDRef(const TrackingMDRef &X) : MD(X.MD) { track(); }
  TrackingMDRef &operator=(TrackingMDRef &&X) {
    if (&X == this)
      return *this;

    untrack();
    MD = X.MD;
    retrack(X);
    return *this;
  }
  TrackingMDRef &operator=(const TrackingMDRef &X) {
    if (&X == this)
      return *this;

    untrack();
    MD = X.MD;
    track();
    return *this;
  }
  ~TrackingMDRef() { untrack(); }

  Metadata *get() const { return MD; }
  operator Metadata *() const { return get(); }
  Metadata *operator->() const { return get(); }
  Metadata &operator*() const { return *get(); }

  void reset() {
    untrack();
    MD = nullptr;
  }
  void reset(Metadata *MD) {
    untrack();
    this->MD = MD;
    track();
  }

  /// \brief Check whether this has a trivial destructor.
  ///
  /// If \c MD isn't replaceable, the destructor will be a no-op.
  bool hasTrivialDestructor() const {
    return !MD || !MetadataTracking::isReplaceable(*MD);
  }

  bool operator==(const TrackingMDRef &X) const { return MD == X.MD; }
  bool operator!=(const TrackingMDRef &X) const { return MD != X.MD; }

private:
  // Register/unregister this reference with the metadata tracking machinery.
  // Null references are never tracked.
  void track() {
    if (MD)
      MetadataTracking::track(MD);
  }
  void untrack() {
    if (MD)
      MetadataTracking::untrack(MD);
  }
  // Move \p X's tracking registration over to this object.  Callers must have
  // already copied X.MD into MD; on exit X.MD is null so X's destructor (and
  // untrack()) become no-ops.
  void retrack(TrackingMDRef &X) {
    assert(MD == X.MD && "Expected values to match");
    if (X.MD) {
      MetadataTracking::retrack(X.MD, MD);
      X.MD = nullptr;
    }
  }
};

/// \brief Typed tracking ref.
///
/// Track refererences of a particular type.  It's useful to use this for \a
/// MDNode and \a ValueAsMetadata.  This is a thin typed wrapper over
/// \a TrackingMDRef; all tracking behavior is delegated to it.
template <class T> class TypedTrackingMDRef {
  TrackingMDRef Ref;

public:
  TypedTrackingMDRef() {}
  explicit TypedTrackingMDRef(T *MD) : Ref(static_cast<Metadata *>(MD)) {}

  TypedTrackingMDRef(TypedTrackingMDRef &&X) : Ref(std::move(X.Ref)) {}
  TypedTrackingMDRef(const TypedTrackingMDRef &X) : Ref(X.Ref) {}
  TypedTrackingMDRef &operator=(TypedTrackingMDRef &&X) {
    Ref = std::move(X.Ref);
    return *this;
  }
  TypedTrackingMDRef &operator=(const TypedTrackingMDRef &X) {
    Ref = X.Ref;
    return *this;
  }

  T *get() const { return (T *)Ref.get(); }
  operator T *() const { return get(); }
  T *operator->() const { return get(); }
  T &operator*() const { return *get(); }

  bool operator==(const TypedTrackingMDRef &X) const { return Ref == X.Ref; }
  bool operator!=(const TypedTrackingMDRef &X) const { return Ref != X.Ref; }

  void reset() { Ref.reset(); }
  void reset(T *MD) { Ref.reset(static_cast<Metadata *>(MD)); }

  /// \brief Check whether this has a trivial destructor.
  bool hasTrivialDestructor() const { return Ref.hasTrivialDestructor(); }
};

typedef TypedTrackingMDRef<MDNode> TrackingMDNodeRef;
typedef TypedTrackingMDRef<ValueAsMetadata> TrackingValueAsMetadataRef;

// Expose the underlying metadata to casting (isa/cast/dyn_cast see through
// the tracking wrapper to the raw Metadata pointer).
template <> struct simplify_type<TrackingMDRef> {
  typedef Metadata *SimpleType;
  static SimpleType getSimplifiedValue(TrackingMDRef &MD) { return MD.get(); }
};

template <> struct simplify_type<const TrackingMDRef> {
  typedef Metadata *SimpleType;
  static SimpleType getSimplifiedValue(const TrackingMDRef &MD) {
    return MD.get();
  }
};

template <class T> struct simplify_type<TypedTrackingMDRef<T>> {
  typedef T *SimpleType;
  static SimpleType getSimplifiedValue(TypedTrackingMDRef<T> &MD) {
    return MD.get();
  }
};

template <class T> struct simplify_type<const TypedTrackingMDRef<T>> {
  typedef T *SimpleType;
  static SimpleType getSimplifiedValue(const TypedTrackingMDRef<T> &MD) {
    return MD.get();
  }
};

} // end namespace llvm

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/DataLayout.h
//===--------- llvm/DataLayout.h - Data size & alignment info ---*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines layout properties related to datatype size/offset/alignment
// information.  It uses lazy annotations to cache information about how
// structure types are laid out and used.
//
// This structure should be created once, filled in if the defaults are not
// correct and then passed around by const&.  None of the members functions
// require modification to the object.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_DATALAYOUT_H
#define LLVM_IR_DATALAYOUT_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Pass.h"
#include "llvm/Support/DataTypes.h"

// This needs to be outside of the namespace, to avoid conflict with llvm-c
// decl.
typedef struct LLVMOpaqueTargetData *LLVMTargetDataRef;

namespace llvm {

class Value;
class Type;
class IntegerType;
class StructType;
class StructLayout;
class Triple;
class GlobalVariable;
class LLVMContext;
template<typename T>
class ArrayRef;

/// Enum used to categorize the alignment types stored by LayoutAlignElem
enum AlignTypeEnum {
  INVALID_ALIGN = 0,
  INTEGER_ALIGN = 'i',
  VECTOR_ALIGN = 'v',
  FLOAT_ALIGN = 'f',
  AGGREGATE_ALIGN = 'a'
};

// FIXME: Currently the DataLayout string carries a "preferred alignment"
// for types. As the DataLayout is module/global, this should likely be
// sunk down to an FTTI element that is queried rather than a global
// preference.

/// \brief Layout alignment element.
///
/// Stores the alignment data associated with a given alignment type (integer,
/// vector, float) and type bit width.
///
/// \note The unusual order of elements in the structure attempts to reduce
/// padding and make the structure slightly more cache friendly.
struct LayoutAlignElem {
  /// \brief Alignment type from \c AlignTypeEnum
  unsigned AlignType : 8;
  unsigned TypeBitWidth : 24;
  unsigned ABIAlign : 16;
  unsigned PrefAlign : 16;

  static LayoutAlignElem get(AlignTypeEnum align_type, unsigned abi_align,
                             unsigned pref_align, uint32_t bit_width);
  bool operator==(const LayoutAlignElem &rhs) const;
};

/// \brief Layout pointer alignment element.
///
/// Stores the alignment data associated with a given pointer and address space.
///
/// \note The unusual order of elements in the structure attempts to reduce
/// padding and make the structure slightly more cache friendly.
struct PointerAlignElem {
  unsigned ABIAlign;
  unsigned PrefAlign;
  uint32_t TypeByteWidth;
  uint32_t AddressSpace;

  /// Initializer
  static PointerAlignElem get(uint32_t AddressSpace, unsigned ABIAlign,
                              unsigned PrefAlign, uint32_t TypeByteWidth);
  bool operator==(const PointerAlignElem &rhs) const;
};

/// \brief A parsed version of the target data layout string in and methods for
/// querying it.
///
/// The target data layout string is specified *by the target* - a frontend
/// generating LLVM IR is required to generate the right target data for the
/// target being codegen'd to.
class DataLayout {
private:
  /// Defaults to false.
  bool BigEndian;

  unsigned StackNaturalAlign;

  enum ManglingModeT {
    MM_None,
    MM_ELF,
    MM_MachO,
    MM_WinCOFF,
    MM_WinCOFFX86,
    MM_Mips
  };
  ManglingModeT ManglingMode;

  SmallVector<unsigned char, 8> LegalIntWidths;

  /// \brief Primitive type alignment data.
  SmallVector<LayoutAlignElem, 16> Alignments;

  /// \brief The string representation used to create this DataLayout
  std::string StringRepresentation;

  typedef SmallVector<PointerAlignElem, 8> PointersTy;
  PointersTy Pointers;

  // Const overload forwards to the non-const lower-bound lookup below.
  PointersTy::const_iterator
  findPointerLowerBound(uint32_t AddressSpace) const {
    return const_cast<DataLayout *>(this)->findPointerLowerBound(AddressSpace);
  }

  PointersTy::iterator findPointerLowerBound(uint32_t AddressSpace);

  /// This member is a signal that a requested alignment type and bit width were
  /// not found in the SmallVector.
  static const LayoutAlignElem InvalidAlignmentElem;

  /// This member is a signal that a requested pointer type and bit width were
  /// not found in the DenseSet.
  static const PointerAlignElem InvalidPointerElem;

  // The StructType -> StructLayout map.  Lazily populated by getStructLayout()
  // and released by clear(); intentionally not copied by operator=.
  mutable void *LayoutMap;

  void setAlignment(AlignTypeEnum align_type, unsigned abi_align,
                    unsigned pref_align, uint32_t bit_width);
  unsigned getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width,
                            bool ABIAlign, Type *Ty) const;
  void setPointerAlignment(uint32_t AddrSpace, unsigned ABIAlign,
                           unsigned PrefAlign, uint32_t TypeByteWidth);

  /// Internal helper method that returns requested alignment for type.
  unsigned getAlignment(Type *Ty, bool abi_or_pref) const;

  /// \brief Valid alignment predicate.
  ///
  /// Predicate that tests a LayoutAlignElem reference returned by get() against
  /// InvalidAlignmentElem.
  bool validAlignment(const LayoutAlignElem &align) const {
    return &align != &InvalidAlignmentElem;
  }

  /// \brief Valid pointer predicate.
  ///
  /// Predicate that tests a PointerAlignElem reference returned by get()
  /// against \c InvalidPointerElem.
  bool validPointer(const PointerAlignElem &align) const {
    return &align != &InvalidPointerElem;
  }

  /// Parses a target data specification string. Assert if the string is
  /// malformed.
  void parseSpecifier(StringRef LayoutDescription);

  // Free all internal data structures.
  void clear();

public:
  /// Constructs a DataLayout from a specification string. See reset().
  explicit DataLayout(StringRef LayoutDescription) : LayoutMap(nullptr) {
    reset(LayoutDescription);
  }

  /// Initialize target data from properties stored in the module.
  explicit DataLayout(const Module *M);

  void init(const Module *M);

  DataLayout(const DataLayout &DL) : LayoutMap(nullptr) { *this = DL; }

  // Copies the parsed layout properties but not the lazy struct-layout cache;
  // clear() drops this object's existing cache first.
  DataLayout &operator=(const DataLayout &DL) {
    clear();
    StringRepresentation = DL.StringRepresentation;
    BigEndian = DL.isBigEndian();
    StackNaturalAlign = DL.StackNaturalAlign;
    ManglingMode = DL.ManglingMode;
    LegalIntWidths = DL.LegalIntWidths;
    Alignments = DL.Alignments;
    Pointers = DL.Pointers;
    return *this;
  }

  bool operator==(const DataLayout &Other) const;
  bool operator!=(const DataLayout &Other) const { return !(*this == Other); }

  ~DataLayout(); // Not virtual, do not subclass this class

  /// Parse a data layout string (with fallback to default values).
  void reset(StringRef LayoutDescription);

  /// Layout endianness...
  bool isLittleEndian() const { return !BigEndian; }
  bool isBigEndian() const { return BigEndian; }

  /// \brief Returns the string representation of the DataLayout.
  ///
  /// This representation is in the same format accepted by the string
  /// constructor above. This should not be used to compare two DataLayout as
  /// different string can represent the same layout.
  const std::string &getStringRepresentation() const {
    return StringRepresentation;
  }

  /// \brief Test if the DataLayout was constructed from an empty string.
  bool isDefault() const { return StringRepresentation.empty(); }

  /// \brief Returns true if the specified type is known to be a native integer
  /// type supported by the CPU.
  ///
  /// For example, i64 is not native on most 32-bit CPUs and i37 is not native
  /// on any known one. This returns false if the integer width is not legal.
  ///
  /// The width is specified in bits.
  bool isLegalInteger(unsigned Width) const {
    for (unsigned LegalIntWidth : LegalIntWidths)
      if (LegalIntWidth == Width)
        return true;
    return false;
  }

  bool isIllegalInteger(unsigned Width) const { return !isLegalInteger(Width); }

  /// Returns true if the given alignment exceeds the natural stack alignment.
  /// A StackNaturalAlign of 0 means no natural alignment was specified, so
  /// nothing "exceeds" it.
  bool exceedsNaturalStackAlignment(unsigned Align) const {
    return (StackNaturalAlign != 0) && (Align > StackNaturalAlign);
  }

  unsigned getStackAlignment() const { return StackNaturalAlign; }

  bool hasMicrosoftFastStdCallMangling() const {
    return ManglingMode == MM_WinCOFFX86;
  }

  bool hasLinkerPrivateGlobalPrefix() const { return ManglingMode == MM_MachO; }

  const char *getLinkerPrivateGlobalPrefix() const {
    if (ManglingMode == MM_MachO)
      return "l";
    return "";
  }

  char getGlobalPrefix() const {
    switch (ManglingMode) {
    case MM_None:
    case MM_ELF:
    case MM_Mips:
    case MM_WinCOFF:
      return '\0';
    case MM_MachO:
    case MM_WinCOFFX86:
      return '_';
    }
    llvm_unreachable("invalid mangling mode");
  }

  const char *getPrivateGlobalPrefix() const {
    switch (ManglingMode) {
    case MM_None:
      return "";
    case MM_ELF:
      return ".L";
    case MM_Mips:
      return "$";
    case MM_MachO:
    case MM_WinCOFF:
    case MM_WinCOFFX86:
      return "L";
    }
    llvm_unreachable("invalid mangling mode");
  }

  static const char *getManglingComponent(const Triple &T);

  /// \brief Returns true if the specified type fits in a native integer type
  /// supported by the CPU.
  ///
  /// For example, if the CPU only supports i32 as a native integer type, then
  /// i27 fits in a legal integer type but i45 does not.
  bool fitsInLegalInteger(unsigned Width) const {
    for (unsigned LegalIntWidth : LegalIntWidths)
      if (Width <= LegalIntWidth)
        return true;
    return false;
  }

  /// Layout pointer alignment
  /// FIXME: The defaults need to be removed once all of
  /// the backends/clients are updated.
  unsigned getPointerABIAlignment(unsigned AS = 0) const;

  /// Return target's alignment for stack-based pointers
  /// FIXME: The defaults need to be removed once all of
  /// the backends/clients are updated.
  unsigned getPointerPrefAlignment(unsigned AS = 0) const;

  /// Layout pointer size
  /// FIXME: The defaults need to be removed once all of
  /// the backends/clients are updated.
  unsigned getPointerSize(unsigned AS = 0) const;

  /// Layout pointer size, in bits
  /// FIXME: The defaults need to be removed once all of
  /// the backends/clients are updated.
  unsigned getPointerSizeInBits(unsigned AS = 0) const {
    return getPointerSize(AS) * 8;
  }

  /// Layout pointer size, in bits, based on the type.  If this function is
  /// called with a pointer type, then the type size of the pointer is returned.
  /// If this function is called with a vector of pointers, then the type size
  /// of the pointer is returned.  This should only be called with a pointer or
  /// vector of pointers.
  unsigned getPointerTypeSizeInBits(Type *) const;

  unsigned getPointerTypeSize(Type *Ty) const {
    return getPointerTypeSizeInBits(Ty) / 8;
  }

  /// Size examples:
  ///
  /// Type        SizeInBits  StoreSizeInBits  AllocSizeInBits[*]
  /// ----        ----------  ---------------  ---------------
  ///  i1            1           8                8
  ///  i8            8           8                8
  ///  i19          19          24               32
  ///  i32          32          32               32
  ///  i100        100         104              128
  ///  i128        128         128              128
  ///  Float        32          32               32
  ///  Double       64          64               64
  ///  X86_FP80     80          80               96
  ///
  /// [*] The alloc size depends on the alignment, and thus on the target.
  ///     These values are for x86-32 linux.

  /// \brief Returns the number of bits necessary to hold the specified type.
  ///
  /// For example, returns 36 for i36 and 80 for x86_fp80. The type passed must
  /// have a size (Type::isSized() must return true).
  uint64_t getTypeSizeInBits(Type *Ty) const;

  /// \brief Returns the maximum number of bytes that may be overwritten by
  /// storing the specified type.
  ///
  /// For example, returns 5 for i36 and 10 for x86_fp80.
  uint64_t getTypeStoreSize(Type *Ty) const {
    return (getTypeSizeInBits(Ty) + 7) / 8;
  }

  /// \brief Returns the maximum number of bits that may be overwritten by
  /// storing the specified type; always a multiple of 8.
  ///
  /// For example, returns 40 for i36 and 80 for x86_fp80.
  uint64_t getTypeStoreSizeInBits(Type *Ty) const {
    return 8 * getTypeStoreSize(Ty);
  }

  /// \brief Returns the offset in bytes between successive objects of the
  /// specified type, including alignment padding.
  ///
  /// This is the amount that alloca reserves for this type. For example,
  /// returns 12 or 16 for x86_fp80, depending on alignment.
  uint64_t getTypeAllocSize(Type *Ty) const {
    // Round up to the next alignment boundary.
    return RoundUpToAlignment(getTypeStoreSize(Ty), getABITypeAlignment(Ty));
  }

  /// \brief Returns the offset in bits between successive objects of the
  /// specified type, including alignment padding; always a multiple of 8.
  ///
  /// This is the amount that alloca reserves for this type. For example,
  /// returns 96 or 128 for x86_fp80, depending on alignment.
  uint64_t getTypeAllocSizeInBits(Type *Ty) const {
    return 8 * getTypeAllocSize(Ty);
  }

  /// \brief Returns the minimum ABI-required alignment for the specified type.
  unsigned getABITypeAlignment(Type *Ty) const;

  /// \brief Returns the minimum ABI-required alignment for an integer type of
  /// the specified bitwidth.
  unsigned getABIIntegerTypeAlignment(unsigned BitWidth) const;

  /// \brief Returns the preferred stack/global alignment for the specified
  /// type.
  ///
  /// This is always at least as good as the ABI alignment.
  unsigned getPrefTypeAlignment(Type *Ty) const;

  /// \brief Returns the preferred alignment for the specified type, returned as
  /// log2 of the value (a shift amount).
  unsigned getPreferredTypeAlignmentShift(Type *Ty) const;

  /// \brief Returns an integer type with size at least as big as that of a
  /// pointer in the given address space.
  IntegerType *getIntPtrType(LLVMContext &C, unsigned AddressSpace = 0) const;

  /// \brief Returns an integer (vector of integer) type with size at least as
  /// big as that of a pointer of the given pointer (vector of pointer) type.
  Type *getIntPtrType(Type *) const;

  /// \brief Returns the smallest integer type with size at least as big as
  /// Width bits.
  Type *getSmallestLegalIntType(LLVMContext &C, unsigned Width = 0) const;

  /// \brief Returns the largest legal integer type, or null if none are set.
  Type *getLargestLegalIntType(LLVMContext &C) const {
    unsigned LargestSize = getLargestLegalIntTypeSize();
    return (LargestSize == 0) ? nullptr : Type::getIntNTy(C, LargestSize);
  }

  /// \brief Returns the size of largest legal integer type size, or 0 if none
  /// are set.
  unsigned getLargestLegalIntTypeSize() const;

  /// \brief Returns the offset from the beginning of the type for the specified
  /// indices.
  ///
  /// This is used to implement getelementptr.
  uint64_t getIndexedOffset(Type *Ty, ArrayRef<Value *> Indices) const;

  /// \brief Returns a StructLayout object, indicating the alignment of the
  /// struct, its size, and the offsets of its fields.
  ///
  /// Note that this information is lazily cached.
  const StructLayout *getStructLayout(StructType *Ty) const;

  /// \brief Returns the preferred alignment of the specified global.
  ///
  /// This includes an explicitly requested alignment (if the global has one).
  unsigned getPreferredAlignment(const GlobalVariable *GV) const;

  /// \brief Returns the preferred alignment of the specified global, returned
  /// in log form.
  ///
  /// This includes an explicitly requested alignment (if the global has one).
  unsigned getPreferredAlignmentLog(const GlobalVariable *GV) const;
};

inline DataLayout *unwrap(LLVMTargetDataRef P) {
  return reinterpret_cast<DataLayout *>(P);
}

inline LLVMTargetDataRef wrap(const DataLayout *P) {
  return reinterpret_cast<LLVMTargetDataRef>(const_cast<DataLayout *>(P));
}

/// Used to lazily calculate structure layout information for a target machine,
/// based on the DataLayout structure.
class StructLayout {
  uint64_t StructSize;
  unsigned StructAlignment;
  unsigned NumElements;
  // Trailing flexible storage: the object is over-allocated so this holds
  // NumElements offsets, one per struct field.
  uint64_t MemberOffsets[1]; // variable sized array!

public:
  uint64_t getSizeInBytes() const { return StructSize; }

  uint64_t getSizeInBits() const { return 8 * StructSize; }

  unsigned getAlignment() const { return StructAlignment; }

  /// \brief Given a valid byte offset into the structure, returns the structure
  /// index that contains it.
  unsigned getElementContainingOffset(uint64_t Offset) const;

  uint64_t getElementOffset(unsigned Idx) const {
    assert(Idx < NumElements && "Invalid element idx!");
    return MemberOffsets[Idx];
  }

  uint64_t getElementOffsetInBits(unsigned Idx) const {
    return getElementOffset(Idx) * 8;
  }

private:
  friend class DataLayout; // Only DataLayout can create this class
  StructLayout(StructType *ST, const DataLayout &DL);
};

// The implementation of this method is provided inline as it is particularly
// well suited to constant folding when called on a specific Type subclass.
inline uint64_t DataLayout::getTypeSizeInBits(Type *Ty) const {
  assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
  switch (Ty->getTypeID()) {
  case Type::LabelTyID:
    return getPointerSizeInBits(0);
  case Type::PointerTyID:
    return getPointerSizeInBits(Ty->getPointerAddressSpace());
  case Type::ArrayTyID: {
    ArrayType *ATy = cast<ArrayType>(Ty);
    return ATy->getNumElements() *
           getTypeAllocSizeInBits(ATy->getElementType());
  }
  case Type::StructTyID:
    // Get the layout annotation... which is lazily created on demand.
    return getStructLayout(cast<StructType>(Ty))->getSizeInBits();
  case Type::IntegerTyID:
    return Ty->getIntegerBitWidth();
  case Type::HalfTyID:
    return 16;
  case Type::FloatTyID:
    return 32;
  case Type::DoubleTyID:
  case Type::X86_MMXTyID:
    return 64;
  case Type::PPC_FP128TyID:
  case Type::FP128TyID:
    return 128;
  // In memory objects this is always aligned to a higher boundary, but
  // only 80 bits contain information.
  case Type::X86_FP80TyID:
    return 80;
  case Type::VectorTyID: {
    VectorType *VTy = cast<VectorType>(Ty);
    // HLSL Change Begins.
    // HLSL vector use aligned size.
    // NOTE: this deliberately diverges from upstream LLVM (which uses the
    // element's raw bit size) by multiplying by the element's *alloc* size.
    return VTy->getNumElements() *
           getTypeAllocSizeInBits(VTy->getElementType());
    // HLSL Change Ends.
  }
  default:
    llvm_unreachable("DataLayout::getTypeSizeInBits(): Unsupported type");
  }
}

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/BasicBlock.h
//===-- llvm/BasicBlock.h - Represent a basic block in the VM ---*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains the declaration of the BasicBlock class. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_BASICBLOCK_H #define LLVM_IR_BASICBLOCK_H #include "llvm/ADT/Twine.h" #include "llvm/ADT/ilist.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/SymbolTableListTraits.h" #include "llvm/Support/CBindingWrapping.h" #include "llvm/Support/DataTypes.h" namespace llvm { class CallInst; class LandingPadInst; class TerminatorInst; class LLVMContext; class BlockAddress; class Function; // Traits for intrusive list of basic blocks... template<> struct ilist_traits<BasicBlock> : public SymbolTableListTraits<BasicBlock, Function> { BasicBlock *createSentinel() const; static void destroySentinel(BasicBlock*) {} BasicBlock *provideInitialHead() const { return createSentinel(); } BasicBlock *ensureHead(BasicBlock*) const { return createSentinel(); } static void noteHead(BasicBlock*, BasicBlock*) {} static ValueSymbolTable *getSymTab(Function *ItemParent); private: mutable ilist_half_node<BasicBlock> Sentinel; }; /// \brief LLVM Basic Block Representation /// /// This represents a single basic block in LLVM. A basic block is simply a /// container of instructions that execute sequentially. Basic blocks are Values /// because they are referenced by instructions such as branches and switch /// tables. The type of a BasicBlock is "Type::LabelTy" because the basic block /// represents a label to which a branch can jump. /// /// A well formed basic block is formed of a list of non-terminating /// instructions followed by a single TerminatorInst instruction. 
/// TerminatorInst's may not occur in the middle of basic blocks, and must /// terminate the blocks. The BasicBlock class allows malformed basic blocks to /// occur because it may be useful in the intermediate stage of constructing or /// modifying a program. However, the verifier will ensure that basic blocks /// are "well formed". class BasicBlock : public Value, // Basic blocks are data objects also public ilist_node<BasicBlock> { friend class BlockAddress; public: typedef iplist<Instruction> InstListType; private: InstListType InstList; Function *Parent; void setParent(Function *parent); friend class SymbolTableListTraits<BasicBlock, Function>; BasicBlock(const BasicBlock &) = delete; void operator=(const BasicBlock &) = delete; /// \brief Constructor. /// /// If the function parameter is specified, the basic block is automatically /// inserted at either the end of the function (if InsertBefore is null), or /// before the specified basic block. explicit BasicBlock(LLVMContext &C, const Twine &Name = "", Function *Parent = nullptr, BasicBlock *InsertBefore = nullptr); public: /// \brief Get the context in which this basic block lives. LLVMContext &getContext() const; /// Instruction iterators... typedef InstListType::iterator iterator; typedef InstListType::const_iterator const_iterator; typedef InstListType::reverse_iterator reverse_iterator; typedef InstListType::const_reverse_iterator const_reverse_iterator; /// \brief Creates a new BasicBlock. /// /// If the Parent parameter is specified, the basic block is automatically /// inserted at either the end of the function (if InsertBefore is 0), or /// before the specified basic block. static BasicBlock *Create(LLVMContext &Context, const Twine &Name = "", Function *Parent = nullptr, BasicBlock *InsertBefore = nullptr) { return new BasicBlock(Context, Name, Parent, InsertBefore); } ~BasicBlock() override; /// \brief Return the enclosing method, or null if none. 
const Function *getParent() const { return Parent; } Function *getParent() { return Parent; } /// \brief Return the module owning the function this basic block belongs to, /// or nullptr it the function does not have a module. /// /// Note: this is undefined behavior if the block does not have a parent. const Module *getModule() const; Module *getModule(); /// \brief Returns the terminator instruction if the block is well formed or /// null if the block is not well formed. TerminatorInst *getTerminator(); const TerminatorInst *getTerminator() const; /// \brief Returns the call instruction marked 'musttail' prior to the /// terminating return instruction of this basic block, if such a call is /// present. Otherwise, returns null. CallInst *getTerminatingMustTailCall(); const CallInst *getTerminatingMustTailCall() const { return const_cast<BasicBlock *>(this)->getTerminatingMustTailCall(); } /// \brief Returns a pointer to the first instruction in this block that is /// not a PHINode instruction. /// /// When adding instructions to the beginning of the basic block, they should /// be added before the returned value, not before the first instruction, /// which might be PHI. Returns 0 is there's no non-PHI instruction. Instruction* getFirstNonPHI(); const Instruction* getFirstNonPHI() const { return const_cast<BasicBlock*>(this)->getFirstNonPHI(); } /// \brief Returns a pointer to the first instruction in this block that is not /// a PHINode or a debug intrinsic. Instruction* getFirstNonPHIOrDbg(); const Instruction* getFirstNonPHIOrDbg() const { return const_cast<BasicBlock*>(this)->getFirstNonPHIOrDbg(); } /// \brief Returns a pointer to the first instruction in this block that is not /// a PHINode, a debug intrinsic, or a lifetime intrinsic. 
Instruction* getFirstNonPHIOrDbgOrLifetime(); const Instruction* getFirstNonPHIOrDbgOrLifetime() const { return const_cast<BasicBlock*>(this)->getFirstNonPHIOrDbgOrLifetime(); } /// \brief Returns an iterator to the first instruction in this block that is /// suitable for inserting a non-PHI instruction. /// /// In particular, it skips all PHIs and LandingPad instructions. iterator getFirstInsertionPt(); const_iterator getFirstInsertionPt() const { return const_cast<BasicBlock*>(this)->getFirstInsertionPt(); } /// \brief Unlink 'this' from the containing function, but do not delete it. void removeFromParent(); /// \brief Unlink 'this' from the containing function and delete it. /// // \returns an iterator pointing to the element after the erased one. iplist<BasicBlock>::iterator eraseFromParent(); /// \brief Unlink this basic block from its current function and insert it /// into the function that \p MovePos lives in, right before \p MovePos. void moveBefore(BasicBlock *MovePos); /// \brief Unlink this basic block from its current function and insert it /// right after \p MovePos in the function \p MovePos lives in. void moveAfter(BasicBlock *MovePos); /// \brief Insert unlinked basic block into a function. /// /// Inserts an unlinked basic block into \c Parent. If \c InsertBefore is /// provided, inserts before that basic block, otherwise inserts at the end. /// /// \pre \a getParent() is \c nullptr. void insertInto(Function *Parent, BasicBlock *InsertBefore = nullptr); /// \brief Return the predecessor of this block if it has a single predecessor /// block. Otherwise return a null pointer. BasicBlock *getSinglePredecessor(); const BasicBlock *getSinglePredecessor() const { return const_cast<BasicBlock*>(this)->getSinglePredecessor(); } /// \brief Return the predecessor of this block if it has a unique predecessor /// block. Otherwise return a null pointer. 
/// /// Note that unique predecessor doesn't mean single edge, there can be /// multiple edges from the unique predecessor to this block (for example a /// switch statement with multiple cases having the same destination). BasicBlock *getUniquePredecessor(); const BasicBlock *getUniquePredecessor() const { return const_cast<BasicBlock*>(this)->getUniquePredecessor(); } /// \brief Return the successor of this block if it has a single successor. /// Otherwise return a null pointer. /// /// This method is analogous to getSinglePredecessor above. BasicBlock *getSingleSuccessor(); const BasicBlock *getSingleSuccessor() const { return const_cast<BasicBlock*>(this)->getSingleSuccessor(); } /// \brief Return the successor of this block if it has a unique successor. /// Otherwise return a null pointer. /// /// This method is analogous to getUniquePredecessor above. BasicBlock *getUniqueSuccessor(); const BasicBlock *getUniqueSuccessor() const { return const_cast<BasicBlock*>(this)->getUniqueSuccessor(); } //===--------------------------------------------------------------------===// /// Instruction iterator methods /// inline iterator begin() { return InstList.begin(); } inline const_iterator begin() const { return InstList.begin(); } inline iterator end () { return InstList.end(); } inline const_iterator end () const { return InstList.end(); } inline reverse_iterator rbegin() { return InstList.rbegin(); } inline const_reverse_iterator rbegin() const { return InstList.rbegin(); } inline reverse_iterator rend () { return InstList.rend(); } inline const_reverse_iterator rend () const { return InstList.rend(); } inline size_t size() const { return InstList.size(); } inline bool empty() const { return InstList.empty(); } inline const Instruction &front() const { return InstList.front(); } inline Instruction &front() { return InstList.front(); } inline const Instruction &back() const { return InstList.back(); } inline Instruction &back() { return InstList.back(); } size_t 
compute_size_no_dbg() const; // HLSL Change - Get the size of the block without the debug insts /// \brief Return the underlying instruction list container. /// /// Currently you need to access the underlying instruction list container /// directly if you want to modify it. const InstListType &getInstList() const { return InstList; } InstListType &getInstList() { return InstList; } /// \brief Returns a pointer to a member of the instruction list. static iplist<Instruction> BasicBlock::*getSublistAccess(Instruction*) { return &BasicBlock::InstList; } /// \brief Returns a pointer to the symbol table if one exists. ValueSymbolTable *getValueSymbolTable(); /// \brief Methods for support type inquiry through isa, cast, and dyn_cast. static inline bool classof(const Value *V) { return V->getValueID() == Value::BasicBlockVal; } /// \brief Cause all subinstructions to "let go" of all the references that /// said subinstructions are maintaining. /// /// This allows one to 'delete' a whole class at a time, even though there may /// be circular references... first all references are dropped, and all use /// counts go to zero. Then everything is delete'd for real. Note that no /// operations are valid on an object that has "dropped all references", /// except operator delete. void dropAllReferences(); /// \brief Notify the BasicBlock that the predecessor \p Pred is no longer /// able to reach it. /// /// This is actually not used to update the Predecessor list, but is actually /// used to update the PHI nodes that reside in the block. Note that this /// should be called while the predecessor still refers to this block. void removePredecessor(BasicBlock *Pred, bool DontDeleteUselessPHIs = false); /// \brief Split the basic block into two basic blocks at the specified /// instruction. 
/// /// Note that all instructions BEFORE the specified iterator stay as part of /// the original basic block, an unconditional branch is added to the original /// BB, and the rest of the instructions in the BB are moved to the new BB, /// including the old terminator. The newly formed BasicBlock is returned. /// This function invalidates the specified iterator. /// /// Note that this only works on well formed basic blocks (must have a /// terminator), and 'I' must not be the end of instruction list (which would /// cause a degenerate basic block to be formed, having a terminator inside of /// the basic block). /// /// Also note that this doesn't preserve any passes. To split blocks while /// keeping loop information consistent, use the SplitBlock utility function. BasicBlock *splitBasicBlock(iterator I, const Twine &BBName = ""); /// \brief Returns true if there are any uses of this basic block other than /// direct branches, switches, etc. to it. bool hasAddressTaken() const { return getSubclassDataFromValue() != 0; } /// \brief Update all phi nodes in this basic block's successors to refer to /// basic block \p New instead of to it. void replaceSuccessorsPhiUsesWith(BasicBlock *New); /// \brief Return true if this basic block is a landing pad. /// /// Being a ``landing pad'' means that the basic block is the destination of /// the 'unwind' edge of an invoke instruction. bool isLandingPad() const; /// \brief Return the landingpad instruction associated with the landing pad. LandingPadInst *getLandingPadInst(); const LandingPadInst *getLandingPadInst() const; private: /// \brief Increment the internal refcount of the number of BlockAddresses /// referencing this BasicBlock by \p Amt. /// /// This is almost always 0, sometimes one possibly, but almost never 2, and /// inconceivably 3 or more. 
  void AdjustBlockAddressRefCount(int Amt) {
    setValueSubclassData(getSubclassDataFromValue() + Amt);
    // The count lives in a signed-char-sized slice of the subclass data; a
    // negative result means the refcount over/underflowed.
    assert((int)(signed char)getSubclassDataFromValue() >= 0 &&
           "Refcount wrap-around");
  }

  /// \brief Shadow Value::setValueSubclassData with a private forwarding method
  /// so that any future subclasses cannot accidentally use it.
  void setValueSubclassData(unsigned short D) {
    Value::setValueSubclassData(D);
  }
};

// createSentinel is used to get hold of the node that marks the end of the
// list... (same trick used here as in ilist_traits<Instruction>)
// HLSL Change Starts
// Temporarily disable "downcast of address" UBSAN runtime error
// https://github.com/microsoft/DirectXShaderCompiler/issues/6446
#ifdef __has_feature
#if __has_feature(undefined_behavior_sanitizer)
__attribute__((no_sanitize("undefined")))
#endif // __has_feature(undefined_behavior_sanitizer)
#endif // defined(__has_feature)
// HLSL Change Ends
inline BasicBlock *ilist_traits<BasicBlock>::createSentinel() const {
  // The sentinel is an embedded half-node downcast to BasicBlock; only its
  // ilist link fields are ever touched (hence the UBSan suppression above).
  return static_cast<BasicBlock *>(&Sentinel);
}

// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(BasicBlock, LLVMBasicBlockRef)

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/Mangler.h
//===-- llvm/IR/Mangler.h - Self-contained name mangler ---------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Unified name mangler for various backends. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_MANGLER_H #define LLVM_IR_MANGLER_H #include "llvm/ADT/DenseMap.h" #include "llvm/Support/raw_ostream.h" namespace llvm { class DataLayout; class GlobalValue; template <typename T> class SmallVectorImpl; class Twine; class Mangler { /// We need to give global values the same name every time they are mangled. /// This keeps track of the number we give to anonymous ones. mutable DenseMap<const GlobalValue*, unsigned> AnonGlobalIDs; /// This simple counter is used to unique value names. mutable unsigned NextAnonGlobalID; public: Mangler() : NextAnonGlobalID(1) {} /// Print the appropriate prefix and the specified global variable's name. /// If the global variable doesn't have a name, this fills in a unique name /// for the global. void getNameWithPrefix(raw_ostream &OS, const GlobalValue *GV, bool CannotUsePrivateLabel) const; void getNameWithPrefix(SmallVectorImpl<char> &OutName, const GlobalValue *GV, bool CannotUsePrivateLabel) const; /// Print the appropriate prefix and the specified name as the global variable /// name. GVName must not be empty. static void getNameWithPrefix(raw_ostream &OS, const Twine &GVName, const DataLayout &DL); static void getNameWithPrefix(SmallVectorImpl<char> &OutName, const Twine &GVName, const DataLayout &DL); }; } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/TypeBuilder.h
//===---- llvm/TypeBuilder.h - Builder for LLVM types -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the TypeBuilder class, which is used as a convenient way to
// create LLVM types with a consistent and simplified interface.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_TYPEBUILDER_H
#define LLVM_IR_TYPEBUILDER_H

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include <climits>

namespace llvm {

/// TypeBuilder - This provides a uniform API for looking up types
/// known at compile time.  To support cross-compilation, we define a
/// series of tag types in the llvm::types namespace, like i<N>,
/// ieee_float, ppc_fp128, etc.  TypeBuilder<T, false> allows T to be
/// any of these, a native C type (whose size may depend on the host
/// compiler), or a pointer, function, or struct type built out of
/// these.  TypeBuilder<T, true> removes native C types from this set
/// to guarantee that its result is suitable for cross-compilation.
/// We define the primitive types, pointer types, and functions up to
/// 5 arguments here, but to use this class with your own types,
/// you'll need to specialize it.  For example, say you want to call a
/// function defined externally as:
///
/// \code{.cpp}
///
///   struct MyType {
///     int32 a;
///     int32 *b;
///     void *array[1];  // Intended as a flexible array.
///   };
///   int8 AFunction(struct MyType *value);
///
/// \endcode
///
/// You'll want to use
///   Function::Create(TypeBuilder<types::i<8>(MyType*), true>::get(), ...)
/// to declare the function, but when you first try this, your compiler will
/// complain that TypeBuilder<MyType, true>::get() doesn't exist.  To fix this,
/// write:
///
/// \code{.cpp}
///
///   namespace llvm {
///   template<bool xcompile> class TypeBuilder<MyType, xcompile> {
///   public:
///     static StructType *get(LLVMContext &Context) {
///       // If you cache this result, be sure to cache it separately
///       // for each LLVMContext.
///       return StructType::get(
///         TypeBuilder<types::i<32>, xcompile>::get(Context),
///         TypeBuilder<types::i<32>*, xcompile>::get(Context),
///         TypeBuilder<types::i<8>*[], xcompile>::get(Context),
///         nullptr);
///     }
///
///     // You may find this a convenient place to put some constants
///     // to help with getelementptr.  They don't have any effect on
///     // the operation of TypeBuilder.
///     enum Fields {
///       FIELD_A,
///       FIELD_B,
///       FIELD_ARRAY
///     };
///   }
///   }  // namespace llvm
///
/// \endcode
///
/// TypeBuilder cannot handle recursive types or types you only know at runtime.
/// If you try to give it a recursive type, it will deadlock, infinitely
/// recurse, or do something similarly undesirable.
template<typename T, bool cross_compilable> class TypeBuilder {};

// Types for use with cross-compilable TypeBuilders.  These correspond
// exactly with an LLVM-native type.
namespace types {
/// i<N> corresponds to the LLVM IntegerType with N bits.
template<uint32_t num_bits> class i {};

// The following classes represent the LLVM floating types.
class ieee_float {};
class ieee_double {};
class x86_fp80 {};
class fp128 {};
class ppc_fp128 {};
// X86 MMX.
class x86_mmx {};
}  // namespace types

// LLVM doesn't have const or volatile types.
// cv-qualifiers are simply stripped: each qualified form forwards to the
// unqualified TypeBuilder<T>.
template<typename T, bool cross> class TypeBuilder<const T, cross>
  : public TypeBuilder<T, cross> {};
template<typename T, bool cross> class TypeBuilder<volatile T, cross>
  : public TypeBuilder<T, cross> {};
template<typename T, bool cross> class TypeBuilder<const volatile T, cross>
  : public TypeBuilder<T, cross> {};

// Pointers
template<typename T, bool cross> class TypeBuilder<T*, cross> {
public:
  static PointerType *get(LLVMContext &Context) {
    return PointerType::getUnqual(TypeBuilder<T,cross>::get(Context));
  }
};

/// There is no support for references
template<typename T, bool cross> class TypeBuilder<T&, cross> {};

// Arrays
template<typename T, size_t N, bool cross> class TypeBuilder<T[N], cross> {
public:
  static ArrayType *get(LLVMContext &Context) {
    return ArrayType::get(TypeBuilder<T, cross>::get(Context), N);
  }
};
/// LLVM uses an array of length 0 to represent an unknown-length array.
template<typename T, bool cross> class TypeBuilder<T[], cross> {
public:
  static ArrayType *get(LLVMContext &Context) {
    return ArrayType::get(TypeBuilder<T, cross>::get(Context), 0);
  }
};

// Define the C integral types only for TypeBuilder<T, false>.
//
// C integral types do not have a defined size. It would be nice to use the
// stdint.h-defined typedefs that do have defined sizes, but we'd run into the
// following problem:
//
// On an ILP32 machine, stdint.h might define:
//
//   typedef int int32_t;
//   typedef long long int64_t;
//   typedef long size_t;
//
// If we defined TypeBuilder<int32_t> and TypeBuilder<int64_t>, then any use of
// TypeBuilder<size_t> would fail.  We couldn't define TypeBuilder<size_t> in
// addition to the defined-size types because we'd get duplicate definitions on
// platforms where stdint.h instead defines:
//
//   typedef int int32_t;
//   typedef long long int64_t;
//   typedef int size_t;
//
// So we define all the primitive C types and nothing else.
// Each host C integral type maps to an IntegerType whose width is the host's
// sizeof(T) * CHAR_BIT; intentionally only defined for the non-cross-compilable
// (false) variant, with an empty 'true' variant to block accidental use.
#define DEFINE_INTEGRAL_TYPEBUILDER(T) \
template<> class TypeBuilder<T, false> { \
public: \
  static IntegerType *get(LLVMContext &Context) { \
    return IntegerType::get(Context, sizeof(T) * CHAR_BIT); \
  } \
}; \
template<> class TypeBuilder<T, true> { \
  /* We provide a definition here so users don't accidentally */ \
  /* define these types to work. */ \
}
DEFINE_INTEGRAL_TYPEBUILDER(char);
DEFINE_INTEGRAL_TYPEBUILDER(signed char);
DEFINE_INTEGRAL_TYPEBUILDER(unsigned char);
DEFINE_INTEGRAL_TYPEBUILDER(short);
DEFINE_INTEGRAL_TYPEBUILDER(unsigned short);
DEFINE_INTEGRAL_TYPEBUILDER(int);
DEFINE_INTEGRAL_TYPEBUILDER(unsigned int);
DEFINE_INTEGRAL_TYPEBUILDER(long);
DEFINE_INTEGRAL_TYPEBUILDER(unsigned long);
#ifdef _MSC_VER
DEFINE_INTEGRAL_TYPEBUILDER(__int64);
DEFINE_INTEGRAL_TYPEBUILDER(unsigned __int64);
#else /* _MSC_VER */
DEFINE_INTEGRAL_TYPEBUILDER(long long);
DEFINE_INTEGRAL_TYPEBUILDER(unsigned long long);
#endif /* _MSC_VER */
#undef DEFINE_INTEGRAL_TYPEBUILDER

template<uint32_t num_bits, bool cross>
class TypeBuilder<types::i<num_bits>, cross> {
public:
  static IntegerType *get(LLVMContext &C) {
    return IntegerType::get(C, num_bits);
  }
};

// Host 'float'/'double' are only available non-cross-compilably; use the
// types::ieee_* tags below for the cross-compilable equivalents.
template<> class TypeBuilder<float, false> {
public:
  static Type *get(LLVMContext& C) {
    return Type::getFloatTy(C);
  }
};
template<> class TypeBuilder<float, true> {};

template<> class TypeBuilder<double, false> {
public:
  static Type *get(LLVMContext& C) {
    return Type::getDoubleTy(C);
  }
};
template<> class TypeBuilder<double, true> {};

template<bool cross> class TypeBuilder<types::ieee_float, cross> {
public:
  static Type *get(LLVMContext& C) { return Type::getFloatTy(C); }
};
template<bool cross> class TypeBuilder<types::ieee_double, cross> {
public:
  static Type *get(LLVMContext& C) { return Type::getDoubleTy(C); }
};
template<bool cross> class TypeBuilder<types::x86_fp80, cross> {
public:
  static Type *get(LLVMContext& C) { return Type::getX86_FP80Ty(C); }
};
template<bool cross> class TypeBuilder<types::fp128, cross> {
public:
  static Type *get(LLVMContext& C) { return Type::getFP128Ty(C); }
};
template<bool cross> class TypeBuilder<types::ppc_fp128, cross> {
public:
  static Type *get(LLVMContext& C) { return Type::getPPC_FP128Ty(C); }
};
template<bool cross> class TypeBuilder<types::x86_mmx, cross> {
public:
  static Type *get(LLVMContext& C) { return Type::getX86_MMXTy(C); }
};

template<bool cross> class TypeBuilder<void, cross> {
public:
  static Type *get(LLVMContext &C) {
    return Type::getVoidTy(C);
  }
};

/// void* is disallowed in LLVM types, but it occurs often enough in C code that
/// we special case it.
template<> class TypeBuilder<void*, false>
  : public TypeBuilder<types::i<8>*, false> {};
template<> class TypeBuilder<const void*, false>
  : public TypeBuilder<types::i<8>*, false> {};
template<> class TypeBuilder<volatile void*, false>
  : public TypeBuilder<types::i<8>*, false> {};
template<> class TypeBuilder<const volatile void*, false>
  : public TypeBuilder<types::i<8>*, false> {};

// Function types: fixed arity from zero to five parameters, followed by the
// variadic (...) forms.  The bool argument to FunctionType::get is isVarArg.
template<typename R, bool cross> class TypeBuilder<R(), cross> {
public:
  static FunctionType *get(LLVMContext &Context) {
    return FunctionType::get(TypeBuilder<R, cross>::get(Context), false);
  }
};
template<typename R, typename A1, bool cross>
class TypeBuilder<R(A1), cross> {
public:
  static FunctionType *get(LLVMContext &Context) {
    Type *params[] = {
      TypeBuilder<A1, cross>::get(Context),
    };
    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
                             params, false);
  }
};
template<typename R, typename A1, typename A2, bool cross>
class TypeBuilder<R(A1, A2), cross> {
public:
  static FunctionType *get(LLVMContext &Context) {
    Type *params[] = {
      TypeBuilder<A1, cross>::get(Context),
      TypeBuilder<A2, cross>::get(Context),
    };
    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
                             params, false);
  }
};
template<typename R, typename A1, typename A2, typename A3, bool cross>
class TypeBuilder<R(A1, A2, A3), cross> {
public:
  static FunctionType *get(LLVMContext &Context) {
    Type *params[] = {
      TypeBuilder<A1, cross>::get(Context),
      TypeBuilder<A2, cross>::get(Context),
      TypeBuilder<A3, cross>::get(Context),
    };
    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
                             params, false);
  }
};

template<typename R, typename A1, typename A2, typename A3, typename A4,
         bool cross>
class TypeBuilder<R(A1, A2, A3, A4), cross> {
public:
  static FunctionType *get(LLVMContext &Context) {
    Type *params[] = {
      TypeBuilder<A1, cross>::get(Context),
      TypeBuilder<A2, cross>::get(Context),
      TypeBuilder<A3, cross>::get(Context),
      TypeBuilder<A4, cross>::get(Context),
    };
    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
                             params, false);
  }
};

template<typename R, typename A1, typename A2, typename A3, typename A4,
         typename A5, bool cross>
class TypeBuilder<R(A1, A2, A3, A4, A5), cross> {
public:
  static FunctionType *get(LLVMContext &Context) {
    Type *params[] = {
      TypeBuilder<A1, cross>::get(Context),
      TypeBuilder<A2, cross>::get(Context),
      TypeBuilder<A3, cross>::get(Context),
      TypeBuilder<A4, cross>::get(Context),
      TypeBuilder<A5, cross>::get(Context),
    };
    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
                             params, false);
  }
};

template<typename R, bool cross> class TypeBuilder<R(...), cross> {
public:
  static FunctionType *get(LLVMContext &Context) {
    return FunctionType::get(TypeBuilder<R, cross>::get(Context), true);
  }
};
template<typename R, typename A1, bool cross>
class TypeBuilder<R(A1, ...), cross> {
public:
  static FunctionType *get(LLVMContext &Context) {
    Type *params[] = {
      TypeBuilder<A1, cross>::get(Context),
    };
    return FunctionType::get(TypeBuilder<R, cross>::get(Context), params, true);
  }
};
template<typename R, typename A1, typename A2, bool cross>
class TypeBuilder<R(A1, A2, ...), cross> {
public:
  static FunctionType *get(LLVMContext &Context) {
    Type *params[] = {
      TypeBuilder<A1, cross>::get(Context),
      TypeBuilder<A2, cross>::get(Context),
    };
    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
                             params, true);
  }
};
template<typename R, typename A1, typename A2, typename A3, bool cross>
class TypeBuilder<R(A1, A2, A3, ...), cross> {
public:
  static FunctionType *get(LLVMContext &Context) {
    Type *params[] = {
      TypeBuilder<A1, cross>::get(Context),
      TypeBuilder<A2, cross>::get(Context),
      TypeBuilder<A3, cross>::get(Context),
    };
    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
                             params, true);
  }
};

template<typename R, typename A1, typename A2, typename A3, typename A4,
         bool cross>
class TypeBuilder<R(A1, A2, A3, A4, ...), cross> {
public:
  static FunctionType *get(LLVMContext &Context) {
    Type *params[] = {
      TypeBuilder<A1, cross>::get(Context),
      TypeBuilder<A2, cross>::get(Context),
      TypeBuilder<A3, cross>::get(Context),
      TypeBuilder<A4, cross>::get(Context),
    };
    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
                             params, true);
  }
};

template<typename R, typename A1, typename A2, typename A3, typename A4,
         typename A5, bool cross>
class TypeBuilder<R(A1, A2, A3, A4, A5, ...), cross> {
public:
  static FunctionType *get(LLVMContext &Context) {
    Type *params[] = {
      TypeBuilder<A1, cross>::get(Context),
      TypeBuilder<A2, cross>::get(Context),
      TypeBuilder<A3, cross>::get(Context),
      TypeBuilder<A4, cross>::get(Context),
      TypeBuilder<A5, cross>::get(Context),
    };
    return FunctionType::get(TypeBuilder<R, cross>::get(Context),
                             params, true);
  }
};

}  // namespace llvm

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/AssemblyAnnotationWriter.h
//===-- AssemblyAnnotationWriter.h - Annotation .ll files -------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Clients of the assembly writer can use this interface to add their own
// special-purpose annotations to LLVM assembly language printouts.  Note that
// the assembly parser won't be able to parse these, in general, so
// implementations are advised to print stuff as LLVM comments.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_ASSEMBLYANNOTATIONWRITER_H
#define LLVM_IR_ASSEMBLYANNOTATIONWRITER_H

namespace llvm {

class Function;
class BasicBlock;
class Instruction;
class Value;
class formatted_raw_ostream;

/// Hook interface for annotating printed IR.  Every hook defaults to a no-op,
/// so subclasses override only the positions they care about.
class AssemblyAnnotationWriter {
public:
  virtual ~AssemblyAnnotationWriter();

  /// emitFunctionAnnot - This may be implemented to emit a string right before
  /// the start of a function.
  virtual void emitFunctionAnnot(const Function *,
                                 formatted_raw_ostream &) {}

  /// emitBasicBlockStartAnnot - This may be implemented to emit a string right
  /// after the basic block label, but before the first instruction in the
  /// block.
  virtual void emitBasicBlockStartAnnot(const BasicBlock *,
                                        formatted_raw_ostream &) {
  }

  /// emitBasicBlockEndAnnot - This may be implemented to emit a string right
  /// after the basic block.
  virtual void emitBasicBlockEndAnnot(const BasicBlock *,
                                      formatted_raw_ostream &) {
  }

  /// emitInstructionAnnot - This may be implemented to emit a string right
  /// before an instruction is emitted.
  virtual void emitInstructionAnnot(const Instruction *,
                                    formatted_raw_ostream &) {}

  /// printInfoComment - This may be implemented to emit a comment to the
  /// right of an instruction or global value.
  virtual void printInfoComment(const Value &, formatted_raw_ostream &) {}
};

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/Type.h
//===-- llvm/Type.h - Classes for handling data types -----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains the declaration of the Type class. For more "Type" // stuff, look in DerivedTypes.h. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_TYPE_H #define LLVM_IR_TYPE_H #include "llvm-c/Core.h" #include "llvm/ADT/APFloat.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/Support/CBindingWrapping.h" #include "llvm/Support/Casting.h" #include "llvm/Support/DataTypes.h" #include "llvm/Support/ErrorHandling.h" namespace llvm { class PointerType; class IntegerType; class raw_ostream; class Module; class LLVMContext; class LLVMContextImpl; class StringRef; template<class GraphType> struct GraphTraits; /// The instances of the Type class are immutable: once they are created, /// they are never changed. Also note that only one instance of a particular /// type is ever created. Thus seeing if two types are equal is a matter of /// doing a trivial pointer comparison. To enforce that no two equal instances /// are created, Type instances can only be created via static factory methods /// in class Type and in derived classes. Once allocated, Types are never /// free'd. /// class Type { public: //===--------------------------------------------------------------------===// /// Definitions of all of the base types for the Type system. Based on this /// value, you can cast to a class defined in DerivedTypes.h. /// Note: If you add an element to this, you need to add an element to the /// Type::getPrimitiveType function, or else things will break! /// Also update LLVMTypeKind and LLVMGetTypeKind () in the C binding. 
/// enum TypeID { // PrimitiveTypes - make sure LastPrimitiveTyID stays up to date. VoidTyID = 0, ///< 0: type with no size HalfTyID, ///< 1: 16-bit floating point type FloatTyID, ///< 2: 32-bit floating point type DoubleTyID, ///< 3: 64-bit floating point type X86_FP80TyID, ///< 4: 80-bit floating point type (X87) FP128TyID, ///< 5: 128-bit floating point type (112-bit mantissa) PPC_FP128TyID, ///< 6: 128-bit floating point type (two 64-bits, PowerPC) LabelTyID, ///< 7: Labels MetadataTyID, ///< 8: Metadata X86_MMXTyID, ///< 9: MMX vectors (64 bits, X86 specific) // Derived types... see DerivedTypes.h file. // Make sure FirstDerivedTyID stays up to date! IntegerTyID, ///< 10: Arbitrary bit width integers FunctionTyID, ///< 11: Functions StructTyID, ///< 12: Structures ArrayTyID, ///< 13: Arrays PointerTyID, ///< 14: Pointers VectorTyID ///< 15: SIMD 'packed' format, or other vector type }; private: /// Context - This refers to the LLVMContext in which this type was uniqued. LLVMContext &Context; // Due to Ubuntu GCC bug 910363: // https://bugs.launchpad.net/ubuntu/+source/gcc-4.5/+bug/910363 // Bitpack ID and SubclassData manually. // Note: TypeID : low 8 bit; SubclassData : high 24 bit. uint32_t IDAndSubclassData; protected: friend class LLVMContextImpl; explicit Type(LLVMContext &C, TypeID tid) : Context(C), IDAndSubclassData(0), NumContainedTys(0), ContainedTys(nullptr) { setTypeID(tid); } ~Type() = default; void setTypeID(TypeID ID) { IDAndSubclassData = (ID & 0xFF) | (IDAndSubclassData & 0xFFFFFF00); assert(getTypeID() == ID && "TypeID data too large for field"); } unsigned getSubclassData() const { return IDAndSubclassData >> 8; } void setSubclassData(unsigned val) { IDAndSubclassData = (IDAndSubclassData & 0xFF) | (val << 8); // Ensure we don't have any accidental truncation. assert(getSubclassData() == val && "Subclass data too large for field"); } /// NumContainedTys - Keeps track of how many Type*'s there are in the /// ContainedTys list. 
  unsigned NumContainedTys;

  /// ContainedTys - A pointer to the array of Types contained by this Type.
  /// For example, this includes the arguments of a function type, the elements
  /// of a structure, the pointee of a pointer, the element type of an array,
  /// etc.  This pointer may be 0 for types that don't contain other types
  /// (Integer, Double, Float).
  Type * const *ContainedTys;

public:
  void print(raw_ostream &O) const;
  LLVM_DUMP_METHOD void dump() const; // HLSL Change - Add LLVM_DUMP_METHOD

  /// getContext - Return the LLVMContext in which this type was uniqued.
  LLVMContext &getContext() const { return Context; }

  //===--------------------------------------------------------------------===//
  // Accessors for working with types.
  //

  /// getTypeID - Return the type id for the type.  This will return one
  /// of the TypeID enum elements defined above.
  ///
  TypeID getTypeID() const { return (TypeID)(IDAndSubclassData & 0xFF); }

  /// isVoidTy - Return true if this is 'void'.
  bool isVoidTy() const { return getTypeID() == VoidTyID; }

  /// isHalfTy - Return true if this is 'half', a 16-bit IEEE fp type.
  bool isHalfTy() const { return getTypeID() == HalfTyID; }

  /// isFloatTy - Return true if this is 'float', a 32-bit IEEE fp type.
  bool isFloatTy() const { return getTypeID() == FloatTyID; }

  /// isDoubleTy - Return true if this is 'double', a 64-bit IEEE fp type.
  bool isDoubleTy() const { return getTypeID() == DoubleTyID; }

  /// isX86_FP80Ty - Return true if this is x86 long double.
  bool isX86_FP80Ty() const { return getTypeID() == X86_FP80TyID; }

  /// isFP128Ty - Return true if this is 'fp128'.
  bool isFP128Ty() const { return getTypeID() == FP128TyID; }

  /// isPPC_FP128Ty - Return true if this is powerpc long double.
  bool isPPC_FP128Ty() const { return getTypeID() == PPC_FP128TyID; }

  /// isFloatingPointTy - Return true if this is one of the six floating point
  /// types
  bool isFloatingPointTy() const {
    return getTypeID() == HalfTyID || getTypeID() == FloatTyID ||
           getTypeID() == DoubleTyID ||
           getTypeID() == X86_FP80TyID || getTypeID() == FP128TyID ||
           getTypeID() == PPC_FP128TyID;
  }

  /// Return the APFloat semantics for this floating-point type.
  /// Asserts (llvm_unreachable) on non-FP types.
  const fltSemantics &getFltSemantics() const {
    switch (getTypeID()) {
    case HalfTyID: return APFloat::IEEEhalf;
    case FloatTyID: return APFloat::IEEEsingle;
    case DoubleTyID: return APFloat::IEEEdouble;
    case X86_FP80TyID: return APFloat::x87DoubleExtended;
    case FP128TyID: return APFloat::IEEEquad;
    case PPC_FP128TyID: return APFloat::PPCDoubleDouble;
    default: llvm_unreachable("Invalid floating type");
    }
  }

  /// isX86_MMXTy - Return true if this is X86 MMX.
  bool isX86_MMXTy() const { return getTypeID() == X86_MMXTyID; }

  /// isFPOrFPVectorTy - Return true if this is a FP type or a vector of FP.
  ///
  bool isFPOrFPVectorTy() const { return getScalarType()->isFloatingPointTy(); }

  /// isLabelTy - Return true if this is 'label'.
  bool isLabelTy() const { return getTypeID() == LabelTyID; }

  /// isMetadataTy - Return true if this is 'metadata'.
  bool isMetadataTy() const { return getTypeID() == MetadataTyID; }

  /// isIntegerTy - True if this is an instance of IntegerType.
  ///
  bool isIntegerTy() const { return getTypeID() == IntegerTyID; }

  /// isIntegerTy - Return true if this is an IntegerType of the given width.
  bool isIntegerTy(unsigned Bitwidth) const;

  /// isIntOrIntVectorTy - Return true if this is an integer type or a vector of
  /// integer types.
  ///
  bool isIntOrIntVectorTy() const { return getScalarType()->isIntegerTy(); }

  /// isFunctionTy - True if this is an instance of FunctionType.
  ///
  bool isFunctionTy() const { return getTypeID() == FunctionTyID; }

  /// isStructTy - True if this is an instance of StructType.
  ///
  bool isStructTy() const { return getTypeID() == StructTyID; }

  /// isArrayTy - True if this is an instance of ArrayType.
  ///
  bool isArrayTy() const { return getTypeID() == ArrayTyID; }

  /// isPointerTy - True if this is an instance of PointerType.
  ///
  bool isPointerTy() const { return getTypeID() == PointerTyID; }

  /// isPtrOrPtrVectorTy - Return true if this is a pointer type or a vector of
  /// pointer types.
  ///
  bool isPtrOrPtrVectorTy() const { return getScalarType()->isPointerTy(); }

  /// isVectorTy - True if this is an instance of VectorType.
  ///
  bool isVectorTy() const { return getTypeID() == VectorTyID; }

  /// canLosslesslyBitCastTo - Return true if this type could be converted
  /// with a lossless BitCast to type 'Ty'. For example, i8* to i32*. BitCasts
  /// are valid for types of the same size only where no re-interpretation of
  /// the bits is done.
  /// @brief Determine if this type could be losslessly bitcast to Ty
  bool canLosslesslyBitCastTo(Type *Ty) const;

  /// isEmptyTy - Return true if this type is empty, that is, it has no
  /// elements or all its elements are empty.
  bool isEmptyTy() const;

  /// isFirstClassType - Return true if the type is "first class", meaning it
  /// is a valid type for a Value.
  ///
  bool isFirstClassType() const {
    return getTypeID() != FunctionTyID && getTypeID() != VoidTyID;
  }

  /// isSingleValueType - Return true if the type is a valid type for a
  /// register in codegen.  This includes all first-class types except struct
  /// and array types.
  ///
  bool isSingleValueType() const {
    return isFloatingPointTy() || isX86_MMXTy() || isIntegerTy() ||
           isPointerTy() || isVectorTy();
  }

  /// isAggregateType - Return true if the type is an aggregate type. This
  /// means it is valid as the first operand of an insertvalue or
  /// extractvalue instruction. This includes struct and array types, but
  /// does not include vector types.
  ///
  bool isAggregateType() const {
    return getTypeID() == StructTyID || getTypeID() == ArrayTyID;
  }

  /// isSized - Return true if it makes sense to take the size of this type.  To
  /// get the actual size for a particular target, it is reasonable to use the
  /// DataLayout subsystem to do this.
  ///
  /// \p Visited guards against cycles when recursing into derived types.
  bool isSized(SmallPtrSetImpl<const Type*> *Visited = nullptr) const {
    // If it's a primitive, it is always sized.
    if (getTypeID() == IntegerTyID || isFloatingPointTy() ||
        getTypeID() == PointerTyID ||
        getTypeID() == X86_MMXTyID)
      return true;
    // If it is not something that can have a size (e.g. a function or label),
    // it doesn't have a size.
    if (getTypeID() != StructTyID && getTypeID() != ArrayTyID &&
        getTypeID() != VectorTyID)
      return false;
    // Otherwise we have to try harder to decide.
    return isSizedDerivedType(Visited);
  }

  /// getPrimitiveSizeInBits - Return the basic size of this type if it is a
  /// primitive type.  These are fixed by LLVM and are not target dependent.
  /// This will return zero if the type does not have a size or is not a
  /// primitive type.
  ///
  /// Note that this may not reflect the size of memory allocated for an
  /// instance of the type or the number of bytes that are written when an
  /// instance of the type is stored to memory. The DataLayout class provides
  /// additional query functions to provide this information.
  ///
  unsigned getPrimitiveSizeInBits() const LLVM_READONLY;

  /// getScalarSizeInBits - If this is a vector type, return the
  /// getPrimitiveSizeInBits value for the element type. Otherwise return the
  /// getPrimitiveSizeInBits value for this type.
  unsigned getScalarSizeInBits() const LLVM_READONLY;

  /// getFPMantissaWidth - Return the width of the mantissa of this type.  This
  /// is only valid on floating point types.  If the FP type does not
  /// have a stable mantissa (e.g. ppc long double), this method returns -1.
  int getFPMantissaWidth() const;

  /// getScalarType - If this is a vector type, return the element type,
  /// otherwise return 'this'.
  const Type *getScalarType() const LLVM_READONLY;
  Type *getScalarType() LLVM_READONLY;

  //===--------------------------------------------------------------------===//
  // Type Iteration support.
  //
  typedef Type * const *subtype_iterator;
  subtype_iterator subtype_begin() const { return ContainedTys; }
  subtype_iterator subtype_end() const { return &ContainedTys[NumContainedTys];}
  ArrayRef<Type*> subtypes() const {
    return makeArrayRef(subtype_begin(), subtype_end());
  }

  typedef std::reverse_iterator<subtype_iterator> subtype_reverse_iterator;
  subtype_reverse_iterator subtype_rbegin() const {
    return subtype_reverse_iterator(subtype_end());
  }
  subtype_reverse_iterator subtype_rend() const {
    return subtype_reverse_iterator(subtype_begin());
  }

  /// getContainedType - This method is used to implement the type iterator
  /// (defined at the end of the file).  For derived types, this returns the
  /// types 'contained' in the derived type.
  ///
  Type *getContainedType(unsigned i) const {
    assert(i < NumContainedTys && "Index out of range!");
    return ContainedTys[i];
  }

  /// getNumContainedTypes - Return the number of types in the derived type.
  ///
  unsigned getNumContainedTypes() const { return NumContainedTys; }

  //===--------------------------------------------------------------------===//
  // Helper methods corresponding to subclass methods.  This forces a cast to
  // the specified subclass and calls its accessor. "getVectorNumElements" (for
  // example) is shorthand for cast<VectorType>(Ty)->getNumElements(). This is
  // only intended to cover the core methods that are frequently used, helper
  // methods should not be added here.
  unsigned getIntegerBitWidth() const;

  Type *getFunctionParamType(unsigned i) const;
  unsigned getFunctionNumParams() const;
  bool isFunctionVarArg() const;

  StringRef getStructName() const;
  unsigned getStructNumElements() const;
  Type *getStructElementType(unsigned N) const;

  Type *getSequentialElementType() const;

  uint64_t getArrayNumElements() const;
  Type *getArrayElementType() const { return getSequentialElementType(); }

  unsigned getVectorNumElements() const;
  Type *getVectorElementType() const { return getSequentialElementType(); }

  Type *getPointerElementType() const { return getSequentialElementType(); }

  /// \brief Get the address space of this pointer or pointer vector type.
  unsigned getPointerAddressSpace() const;

  //===--------------------------------------------------------------------===//
  // Static members exported by the Type class itself.  Useful for getting
  // instances of Type.
  //

  /// getPrimitiveType - Return a type based on an identifier.
  static Type *getPrimitiveType(LLVMContext &C, TypeID IDNumber);

  //===--------------------------------------------------------------------===//
  // These are the builtin types that are always available.
  //
  static Type *getVoidTy(LLVMContext &C);
  static Type *getLabelTy(LLVMContext &C);
  static Type *getHalfTy(LLVMContext &C);
  static Type *getFloatTy(LLVMContext &C);
  static Type *getDoubleTy(LLVMContext &C);
  static Type *getMetadataTy(LLVMContext &C);
  static Type *getX86_FP80Ty(LLVMContext &C);
  static Type *getFP128Ty(LLVMContext &C);
  static Type *getPPC_FP128Ty(LLVMContext &C);
  static Type *getX86_MMXTy(LLVMContext &C);
  static IntegerType *getIntNTy(LLVMContext &C, unsigned N);
  static IntegerType *getInt1Ty(LLVMContext &C);
  static IntegerType *getInt8Ty(LLVMContext &C);
  static IntegerType *getInt16Ty(LLVMContext &C);
  static IntegerType *getInt32Ty(LLVMContext &C);
  static IntegerType *getInt64Ty(LLVMContext &C);
  static IntegerType *getInt128Ty(LLVMContext &C);

  //===--------------------------------------------------------------------===//
  // Convenience methods for getting pointer types with one of the above builtin
  // types as pointee.
  //
  static PointerType *getHalfPtrTy(LLVMContext &C, unsigned AS = 0);
  static PointerType *getFloatPtrTy(LLVMContext &C, unsigned AS = 0);
  static PointerType *getDoublePtrTy(LLVMContext &C, unsigned AS = 0);
  static PointerType *getX86_FP80PtrTy(LLVMContext &C, unsigned AS = 0);
  static PointerType *getFP128PtrTy(LLVMContext &C, unsigned AS = 0);
  static PointerType *getPPC_FP128PtrTy(LLVMContext &C, unsigned AS = 0);
  static PointerType *getX86_MMXPtrTy(LLVMContext &C, unsigned AS = 0);
  static PointerType *getIntNPtrTy(LLVMContext &C, unsigned N, unsigned AS = 0);
  static PointerType *getInt1PtrTy(LLVMContext &C, unsigned AS = 0);
  static PointerType *getInt8PtrTy(LLVMContext &C, unsigned AS = 0);
  static PointerType *getInt16PtrTy(LLVMContext &C, unsigned AS = 0);
  static PointerType *getInt32PtrTy(LLVMContext &C, unsigned AS = 0);
  static PointerType *getInt64PtrTy(LLVMContext &C, unsigned AS = 0);

  /// getPointerTo - Return a pointer to the current type.  This is equivalent
  /// to PointerType::get(Foo, AddrSpace).
  PointerType *getPointerTo(unsigned AddrSpace = 0);

private:
  /// isSizedDerivedType - Derived types like structures and arrays are sized
  /// iff all of the members of the type are sized as well.  Since asking for
  /// their size is relatively uncommon, move this operation out of line.
  bool isSizedDerivedType(SmallPtrSetImpl<const Type*> *Visited = nullptr) const;
};

// Printing of types.
static inline raw_ostream &operator<<(raw_ostream &OS, Type &T) {
  T.print(OS);
  return OS;
}

// allow isa<PointerType>(x) to work without DerivedTypes.h included.
template <> struct isa_impl<PointerType, Type> {
  static inline bool doit(const Type &Ty) {
    return Ty.getTypeID() == Type::PointerTyID;
  }
};

//
//
///////////////////////////////////////////////////////////////////////////////
// Provide specializations of GraphTraits to be able to treat a type as a
// graph of sub types.

template <> struct GraphTraits<Type*> {
  typedef Type NodeType;
  typedef Type::subtype_iterator ChildIteratorType;

  static inline NodeType *getEntryNode(Type *T) { return T; }
  static inline ChildIteratorType child_begin(NodeType *N) {
    return N->subtype_begin();
  }
  static inline ChildIteratorType child_end(NodeType *N) {
    return N->subtype_end();
  }
};

template <> struct GraphTraits<const Type*> {
  typedef const Type NodeType;
  typedef Type::subtype_iterator ChildIteratorType;

  static inline NodeType *getEntryNode(NodeType *T) { return T; }
  static inline ChildIteratorType child_begin(NodeType *N) {
    return N->subtype_begin();
  }
  static inline ChildIteratorType child_end(NodeType *N) {
    return N->subtype_end();
  }
};

// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_ISA_CONVERSION_FUNCTIONS(Type, LLVMTypeRef)

/* Specialized opaque type conversions. */
inline Type **unwrap(LLVMTypeRef* Tys) {
  return reinterpret_cast<Type**>(Tys);
}

inline LLVMTypeRef *wrap(Type **Tys) {
  return reinterpret_cast<LLVMTypeRef*>(const_cast<Type**>(Tys));
}

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/AutoUpgrade.h
//===- AutoUpgrade.h - AutoUpgrade Helpers ----------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These functions are implemented by lib/IR/AutoUpgrade.cpp.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_AUTOUPGRADE_H
#define LLVM_IR_AUTOUPGRADE_H

#include <string>

namespace llvm {
  class CallInst;
  class Constant;
  class Function;
  class Instruction;
  class Module;
  class GlobalVariable;
  class Type;
  class Value;

  /// This is a more granular function that simply checks an intrinsic function
  /// for upgrading, and returns true if it requires upgrading. It may return
  /// null in NewFn if all calls to the original intrinsic function
  /// should be transformed to non-function-call instructions.
  bool UpgradeIntrinsicFunction(Function *F, Function *&NewFn);

  /// This is the complement to the above, replacing a specific call to an
  /// intrinsic function with a call to the specified new function.
  void UpgradeIntrinsicCall(CallInst *CI, Function *NewFn);

  /// This is an auto-upgrade hook for any old intrinsic function syntaxes
  /// which need to have both the function updated as well as all calls updated
  /// to the new function. This should only be run in a post-processing fashion
  /// so that it can update all calls to the old function.
  void UpgradeCallsToIntrinsic(Function* F);

  /// This checks for global variables which should be upgraded. It returns
  /// true if it requires upgrading.
  bool UpgradeGlobalVariable(GlobalVariable *GV);

  /// If the TBAA tag for the given instruction uses the scalar TBAA format,
  /// we upgrade it to the struct-path aware TBAA format.
  void UpgradeInstWithTBAATag(Instruction *I);

  /// This is an auto-upgrade for bitcast between pointers with different
  /// address spaces: the instruction is replaced by a pair ptrtoint+inttoptr.
  Instruction *UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy,
                                  Instruction *&Temp);

  /// This is an auto-upgrade for bitcast constant expression between pointers
  /// with different address spaces: the instruction is replaced by a pair
  /// ptrtoint+inttoptr.
  Value *UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy);

  /// Check the debug info version number, if it is out-dated, drop the debug
  /// info. Return true if module is modified.
  bool UpgradeDebugInfo(Module &M);

  /// Upgrade a metadata string constant in place.
  void UpgradeMDStringConstant(std::string &String);
} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/GlobalAlias.h
//===-------- llvm/GlobalAlias.h - GlobalAlias class ------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the GlobalAlias class, which
// represents a single function or variable alias in the IR.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_GLOBALALIAS_H
#define LLVM_IR_GLOBALALIAS_H

#include "llvm/ADT/Twine.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/OperandTraits.h"

namespace llvm {

class Module;
template<typename ValueSubClass, typename ItemParentClass>
  class SymbolTableListTraits;

/// An alias to another global value; holds exactly one operand, the aliasee.
class GlobalAlias : public GlobalValue, public ilist_node<GlobalAlias> {
  friend class SymbolTableListTraits<GlobalAlias, Module>;
  void operator=(const GlobalAlias &) = delete;
  GlobalAlias(const GlobalAlias &) = delete;

  void setParent(Module *parent);

  GlobalAlias(PointerType *Ty, LinkageTypes Linkage, const Twine &Name,
              Constant *Aliasee, Module *Parent);

public:
  // allocate space for exactly one operand
  void *operator new(size_t s) {
    return User::operator new(s, 1);
  }

  /// If a parent module is specified, the alias is automatically inserted into
  /// the end of the specified module's alias list.
  static GlobalAlias *create(PointerType *Ty, LinkageTypes Linkage,
                             const Twine &Name, Constant *Aliasee,
                             Module *Parent);

  // Without the Aliasee.
  static GlobalAlias *create(PointerType *Ty, LinkageTypes Linkage,
                             const Twine &Name, Module *Parent);

  // The module is taken from the Aliasee.
  static GlobalAlias *create(PointerType *Ty, LinkageTypes Linkage,
                             const Twine &Name, GlobalValue *Aliasee);

  // Type, Parent and AddressSpace taken from the Aliasee.
  static GlobalAlias *create(LinkageTypes Linkage, const Twine &Name,
                             GlobalValue *Aliasee);

  // Linkage, Type, Parent and AddressSpace taken from the Aliasee.
  static GlobalAlias *create(const Twine &Name, GlobalValue *Aliasee);

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);

  /// removeFromParent - This method unlinks 'this' from the containing module,
  /// but does not delete it.
  ///
  void removeFromParent() override;

  /// eraseFromParent - This method unlinks 'this' from the containing module
  /// and deletes it.
  ///
  void eraseFromParent() override;

  /// These methods retrieve and set alias target.
  void setAliasee(Constant *Aliasee);
  const Constant *getAliasee() const {
    return const_cast<GlobalAlias *>(this)->getAliasee();
  }
  // The aliasee is operand 0 of this User.
  Constant *getAliasee() {
    return getOperand(0);
  }

  const GlobalObject *getBaseObject() const {
    return const_cast<GlobalAlias *>(this)->getBaseObject();
  }
  // Strip in-bounds offsets from the aliasee to find the underlying object;
  // returns null if the stripped value is not a GlobalObject.
  GlobalObject *getBaseObject() {
    return dyn_cast<GlobalObject>(getAliasee()->stripInBoundsOffsets());
  }

  const GlobalObject *getBaseObject(const DataLayout &DL, APInt &Offset) const {
    return const_cast<GlobalAlias *>(this)->getBaseObject(DL, Offset);
  }
  // As above, but accumulates the stripped constant offset into Offset.
  GlobalObject *getBaseObject(const DataLayout &DL, APInt &Offset) {
    return dyn_cast<GlobalObject>(
        getAliasee()->stripAndAccumulateInBoundsConstantOffsets(DL, Offset));
  }

  static bool isValidLinkage(LinkageTypes L) {
    return isExternalLinkage(L) || isLocalLinkage(L) ||
      isWeakLinkage(L) || isLinkOnceLinkage(L);
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Value *V) {
    return V->getValueID() == Value::GlobalAliasVal;
  }
};

template <>
struct OperandTraits<GlobalAlias> :
  public FixedNumOperandTraits<GlobalAlias, 1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GlobalAlias, Constant)

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/PassManager.h
//===- PassManager.h - Pass management infrastructure -----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// \file /// /// This header defines various interfaces for pass management in LLVM. There /// is no "pass" interface in LLVM per se. Instead, an instance of any class /// which supports a method to 'run' it over a unit of IR can be used as /// a pass. A pass manager is generally a tool to collect a sequence of passes /// which run over a particular IR construct, and run each of them in sequence /// over each such construct in the containing IR construct. As there is no /// containing IR construct for a Module, a manager for passes over modules /// forms the base case which runs its managed passes in sequence over the /// single module provided. /// /// The core IR library provides managers for running passes over /// modules and functions. /// /// * FunctionPassManager can run over a Module, runs each pass over /// a Function. /// * ModulePassManager must be directly run, runs each pass over the Module. 
/// /// Note that the implementations of the pass managers use concept-based /// polymorphism as outlined in the "Value Semantics and Concept-based /// Polymorphism" talk (or its abbreviated sibling "Inheritance Is The Base /// Class of Evil") by Sean Parent: /// * http://github.com/sean-parent/sean-parent.github.com/wiki/Papers-and-Presentations /// * http://www.youtube.com/watch?v=_BpMYeUFXv8 /// * http://channel9.msdn.com/Events/GoingNative/2013/Inheritance-Is-The-Base-Class-of-Evil /// //===----------------------------------------------------------------------===// #ifndef LLVM_IR_PASSMANAGER_H #define LLVM_IR_PASSMANAGER_H #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/IR/Function.h" #include "llvm/IR/Module.h" #include "llvm/IR/PassManagerInternal.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Support/type_traits.h" #include <list> #include <memory> #include <vector> namespace llvm { class Module; class Function; /// \brief An abstract set of preserved analyses following a transformation pass /// run. /// /// When a transformation pass is run, it can return a set of analyses whose /// results were preserved by that transformation. The default set is "none", /// and preserving analyses must be done explicitly. /// /// There is also an explicit all state which can be used (for example) when /// the IR is not mutated at all. class PreservedAnalyses { public: // We have to explicitly define all the special member functions because MSVC // refuses to generate them. 
PreservedAnalyses() {} PreservedAnalyses(const PreservedAnalyses &Arg) : PreservedPassIDs(Arg.PreservedPassIDs) {} PreservedAnalyses(PreservedAnalyses &&Arg) : PreservedPassIDs(std::move(Arg.PreservedPassIDs)) {} friend void swap(PreservedAnalyses &LHS, PreservedAnalyses &RHS) { using std::swap; swap(LHS.PreservedPassIDs, RHS.PreservedPassIDs); } PreservedAnalyses &operator=(PreservedAnalyses RHS) { swap(*this, RHS); return *this; } /// \brief Convenience factory function for the empty preserved set. static PreservedAnalyses none() { return PreservedAnalyses(); } /// \brief Construct a special preserved set that preserves all passes. static PreservedAnalyses all() { PreservedAnalyses PA; PA.PreservedPassIDs.insert((void *)AllPassesID); return PA; } /// \brief Mark a particular pass as preserved, adding it to the set. template <typename PassT> void preserve() { preserve(PassT::ID()); } /// \brief Mark an abstract PassID as preserved, adding it to the set. void preserve(void *PassID) { if (!areAllPreserved()) PreservedPassIDs.insert(PassID); } /// \brief Intersect this set with another in place. /// /// This is a mutating operation on this preserved set, removing all /// preserved passes which are not also preserved in the argument. void intersect(const PreservedAnalyses &Arg) { if (Arg.areAllPreserved()) return; if (areAllPreserved()) { PreservedPassIDs = Arg.PreservedPassIDs; return; } for (void *P : PreservedPassIDs) if (!Arg.PreservedPassIDs.count(P)) PreservedPassIDs.erase(P); } /// \brief Intersect this set with a temporary other set in place. /// /// This is a mutating operation on this preserved set, removing all /// preserved passes which are not also preserved in the argument. 
void intersect(PreservedAnalyses &&Arg) { if (Arg.areAllPreserved()) return; if (areAllPreserved()) { PreservedPassIDs = std::move(Arg.PreservedPassIDs); return; } for (void *P : PreservedPassIDs) if (!Arg.PreservedPassIDs.count(P)) PreservedPassIDs.erase(P); } /// \brief Query whether a pass is marked as preserved by this set. template <typename PassT> bool preserved() const { return preserved(PassT::ID()); } /// \brief Query whether an abstract pass ID is marked as preserved by this /// set. bool preserved(void *PassID) const { return PreservedPassIDs.count((void *)AllPassesID) || PreservedPassIDs.count(PassID); } /// \brief Test whether all passes are preserved. /// /// This is used primarily to optimize for the case of no changes which will /// common in many scenarios. bool areAllPreserved() const { return PreservedPassIDs.count((void *)AllPassesID); } private: // Note that this must not be -1 or -2 as those are already used by the // SmallPtrSet. static const uintptr_t AllPassesID = (intptr_t)(-3); SmallPtrSet<void *, 2> PreservedPassIDs; }; // Forward declare the analysis manager template. template <typename IRUnitT> class AnalysisManager; /// \brief Manages a sequence of passes over units of IR. /// /// A pass manager contains a sequence of passes to run over units of IR. It is /// itself a valid pass over that unit of IR, and when over some given IR will /// run each pass in sequence. This is the primary and most basic building /// block of a pass pipeline. /// /// If it is run with an \c AnalysisManager<IRUnitT> argument, it will propagate /// that analysis manager to each pass it runs, as well as calling the analysis /// manager's invalidation routine with the PreservedAnalyses of each pass it /// runs. template <typename IRUnitT> class PassManager { public: /// \brief Construct a pass manager. /// /// It can be passed a flag to get debug logging as the passes are run. 
PassManager(bool DebugLogging = false) : DebugLogging(DebugLogging) {} // We have to explicitly define all the special member functions because MSVC // refuses to generate them. PassManager(PassManager &&Arg) : Passes(std::move(Arg.Passes)), DebugLogging(std::move(Arg.DebugLogging)) {} PassManager &operator=(PassManager &&RHS) { Passes = std::move(RHS.Passes); DebugLogging = std::move(RHS.DebugLogging); return *this; } /// \brief Run all of the passes in this manager over the IR. PreservedAnalyses run(IRUnitT &IR, AnalysisManager<IRUnitT> *AM = nullptr) { PreservedAnalyses PA = PreservedAnalyses::all(); if (DebugLogging) dbgs() << "Starting pass manager run.\n"; for (unsigned Idx = 0, Size = Passes.size(); Idx != Size; ++Idx) { if (DebugLogging) dbgs() << "Running pass: " << Passes[Idx]->name() << "\n"; PreservedAnalyses PassPA = Passes[Idx]->run(IR, AM); // If we have an active analysis manager at this level we want to ensure // we update it as each pass runs and potentially invalidates analyses. // We also update the preserved set of analyses based on what analyses we // have already handled the invalidation for here and don't need to // invalidate when finished. if (AM) PassPA = AM->invalidate(IR, std::move(PassPA)); // Finally, we intersect the final preserved analyses to compute the // aggregate preserved set for this pass manager. PA.intersect(std::move(PassPA)); // FIXME: Historically, the pass managers all called the LLVM context's // yield function here. We don't have a generic way to acquire the // context and it isn't yet clear what the right pattern is for yielding // in the new pass manager so it is currently omitted. 
//IR.getContext().yield(); } if (DebugLogging) dbgs() << "Finished pass manager run.\n"; return PA; } template <typename PassT> void addPass(PassT Pass) { typedef detail::PassModel<IRUnitT, PassT> PassModelT; Passes.emplace_back(new PassModelT(std::move(Pass))); } static StringRef name() { return "PassManager"; } private: typedef detail::PassConcept<IRUnitT> PassConceptT; PassManager(const PassManager &) = delete; PassManager &operator=(const PassManager &) = delete; std::vector<std::unique_ptr<PassConceptT>> Passes; /// \brief Flag indicating whether we should do debug logging. bool DebugLogging; }; /// \brief Convenience typedef for a pass manager over modules. typedef PassManager<Module> ModulePassManager; /// \brief Convenience typedef for a pass manager over functions. typedef PassManager<Function> FunctionPassManager; namespace detail { /// \brief A CRTP base used to implement analysis managers. /// /// This class template serves as the boiler plate of an analysis manager. Any /// analysis manager can be implemented on top of this base class. Any /// implementation will be required to provide specific hooks: /// /// - getResultImpl /// - getCachedResultImpl /// - invalidateImpl /// /// The details of the call pattern are within. /// /// Note that there is also a generic analysis manager template which implements /// the above required functions along with common datastructures used for /// managing analyses. This base class is factored so that if you need to /// customize the handling of a specific IR unit, you can do so without /// replicating *all* of the boilerplate. 
template <typename DerivedT, typename IRUnitT> class AnalysisManagerBase {
  // CRTP downcasts: forward the *Impl hooks to the derived manager.
  DerivedT *derived_this() { return static_cast<DerivedT *>(this); }
  const DerivedT *derived_this() const {
    return static_cast<const DerivedT *>(this);
  }

  AnalysisManagerBase(const AnalysisManagerBase &) = delete;
  AnalysisManagerBase &operator=(const AnalysisManagerBase &) = delete;

protected:
  typedef detail::AnalysisResultConcept<IRUnitT> ResultConceptT;
  typedef detail::AnalysisPassConcept<IRUnitT> PassConceptT;

  // FIXME: Provide template aliases for the models when we're using C++11 in
  // a mode supporting them.

  // We have to explicitly define all the special member functions because MSVC
  // refuses to generate them.
  AnalysisManagerBase() {}
  AnalysisManagerBase(AnalysisManagerBase &&Arg)
      : AnalysisPasses(std::move(Arg.AnalysisPasses)) {}
  AnalysisManagerBase &operator=(AnalysisManagerBase &&RHS) {
    AnalysisPasses = std::move(RHS.AnalysisPasses);
    return *this;
  }

public:
  /// \brief Get the result of an analysis pass for this module.
  ///
  /// If there is not a valid cached result in the manager already, this will
  /// re-run the analysis to produce a valid result.
  template <typename PassT> typename PassT::Result &getResult(IRUnitT &IR) {
    assert(AnalysisPasses.count(PassT::ID()) &&
           "This analysis pass was not registered prior to being queried");

    ResultConceptT &ResultConcept =
        derived_this()->getResultImpl(PassT::ID(), IR);
    // Downcast the type-erased result back to the concrete PassT::Result.
    typedef detail::AnalysisResultModel<IRUnitT, PassT, typename PassT::Result>
        ResultModelT;
    return static_cast<ResultModelT &>(ResultConcept).Result;
  }

  /// \brief Get the cached result of an analysis pass for this module.
  ///
  /// This method never runs the analysis.
  ///
  /// \returns null if there is no cached result.
  template <typename PassT>
  typename PassT::Result *getCachedResult(IRUnitT &IR) const {
    assert(AnalysisPasses.count(PassT::ID()) &&
           "This analysis pass was not registered prior to being queried");

    ResultConceptT *ResultConcept =
        derived_this()->getCachedResultImpl(PassT::ID(), IR);
    if (!ResultConcept)
      return nullptr;

    typedef detail::AnalysisResultModel<IRUnitT, PassT, typename PassT::Result>
        ResultModelT;
    return &static_cast<ResultModelT *>(ResultConcept)->Result;
  }

  /// \brief Register an analysis pass with the manager.
  ///
  /// This provides an initialized and set-up analysis pass to the analysis
  /// manager. Whomever is setting up analysis passes must use this to populate
  /// the manager with all of the analysis passes available.
  template <typename PassT> void registerPass(PassT Pass) {
    assert(!AnalysisPasses.count(PassT::ID()) &&
           "Registered the same analysis pass twice!");
    typedef detail::AnalysisPassModel<IRUnitT, PassT> PassModelT;
    AnalysisPasses[PassT::ID()].reset(new PassModelT(std::move(Pass)));
  }

  /// \brief Invalidate a specific analysis pass for an IR module.
  ///
  /// Note that the analysis result can disregard invalidation.
  template <typename PassT> void invalidate(IRUnitT &IR) {
    assert(AnalysisPasses.count(PassT::ID()) &&
           "This analysis pass was not registered prior to being invalidated");
    derived_this()->invalidateImpl(PassT::ID(), IR);
  }

  /// \brief Invalidate analyses cached for an IR unit.
  ///
  /// Walk through all of the analyses pertaining to this unit of IR and
  /// invalidate them unless they are preserved by the PreservedAnalyses set.
  /// We accept the PreservedAnalyses set by value and update it with each
  /// analysis pass which has been successfully invalidated and thus can be
  /// preserved going forward. The updated set is returned.
  PreservedAnalyses invalidate(IRUnitT &IR, PreservedAnalyses PA) {
    return derived_this()->invalidateImpl(IR, std::move(PA));
  }

protected:
  /// \brief Lookup a registered analysis pass.
  PassConceptT &lookupPass(void *PassID) {
    typename AnalysisPassMapT::iterator PI = AnalysisPasses.find(PassID);
    assert(PI != AnalysisPasses.end() &&
           "Analysis passes must be registered prior to being queried!");
    return *PI->second;
  }

  /// \brief Lookup a registered analysis pass.
  const PassConceptT &lookupPass(void *PassID) const {
    typename AnalysisPassMapT::const_iterator PI = AnalysisPasses.find(PassID);
    assert(PI != AnalysisPasses.end() &&
           "Analysis passes must be registered prior to being queried!");
    return *PI->second;
  }

private:
  /// \brief Map type from module analysis pass ID to pass concept pointer.
  typedef DenseMap<void *, std::unique_ptr<PassConceptT>> AnalysisPassMapT;

  /// \brief Collection of module analysis passes, indexed by ID.
  AnalysisPassMapT AnalysisPasses;
};

} // End namespace detail

/// \brief A generic analysis pass manager with lazy running and caching of
/// results.
///
/// This analysis manager can be used for any IR unit where the address of the
/// IR unit suffices as its identity. It manages the cache for a unit of IR via
/// the address of each unit of IR cached.
template <typename IRUnitT>
class AnalysisManager
    : public detail::AnalysisManagerBase<AnalysisManager<IRUnitT>, IRUnitT> {
  friend class detail::AnalysisManagerBase<AnalysisManager<IRUnitT>, IRUnitT>;
  typedef detail::AnalysisManagerBase<AnalysisManager<IRUnitT>, IRUnitT> BaseT;
  typedef typename BaseT::ResultConceptT ResultConceptT;
  typedef typename BaseT::PassConceptT PassConceptT;

public:
  // Most public APIs are inherited from the CRTP base class.

  /// \brief Construct an empty analysis manager.
  ///
  /// A flag can be passed to indicate that the manager should perform debug
  /// logging.
  AnalysisManager(bool DebugLogging = false) : DebugLogging(DebugLogging) {}

  // We have to explicitly define all the special member functions because MSVC
  // refuses to generate them.
  AnalysisManager(AnalysisManager &&Arg)
      : BaseT(std::move(static_cast<BaseT &>(Arg))),
        AnalysisResults(std::move(Arg.AnalysisResults)),
        DebugLogging(std::move(Arg.DebugLogging)) {}
  AnalysisManager &operator=(AnalysisManager &&RHS) {
    BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
    AnalysisResults = std::move(RHS.AnalysisResults);
    DebugLogging = std::move(RHS.DebugLogging);
    return *this;
  }

  /// \brief Returns true if the analysis manager has an empty results cache.
  bool empty() const {
    assert(AnalysisResults.empty() == AnalysisResultLists.empty() &&
           "The storage and index of analysis results disagree on how many "
           "there are!");
    return AnalysisResults.empty();
  }

  /// \brief Clear the analysis result cache.
  ///
  /// This routine allows cleaning up when the set of IR units itself has
  /// potentially changed, and thus we can't even look up a result and
  /// invalidate it directly. Notably, this does *not* call invalidate functions
  /// as there is nothing to be done for them.
  void clear() {
    AnalysisResults.clear();
    AnalysisResultLists.clear();
  }

private:
  AnalysisManager(const AnalysisManager &) = delete;
  AnalysisManager &operator=(const AnalysisManager &) = delete;

  /// \brief Get an analysis result, running the pass if necessary.
  ResultConceptT &getResultImpl(void *PassID, IRUnitT &IR) {
    typename AnalysisResultMapT::iterator RI;
    bool Inserted;
    std::tie(RI, Inserted) = AnalysisResults.insert(std::make_pair(
        std::make_pair(PassID, &IR), typename AnalysisResultListT::iterator()));

    // If we don't have a cached result for this function, look up the pass and
    // run it to produce a result, which we then add to the cache.
    if (Inserted) {
      auto &P = this->lookupPass(PassID);
      if (DebugLogging)
        dbgs() << "Running analysis: " << P.name() << "\n";
      AnalysisResultListT &ResultList = AnalysisResultLists[&IR];
      ResultList.emplace_back(PassID, P.run(IR, this));

      // P.run may have inserted elements into AnalysisResults and invalidated
      // RI.
      RI = AnalysisResults.find(std::make_pair(PassID, &IR));
      assert(RI != AnalysisResults.end() && "we just inserted it!");

      RI->second = std::prev(ResultList.end());
    }

    return *RI->second->second;
  }

  /// \brief Get a cached analysis result or return null.
  ResultConceptT *getCachedResultImpl(void *PassID, IRUnitT &IR) const {
    typename AnalysisResultMapT::const_iterator RI =
        AnalysisResults.find(std::make_pair(PassID, &IR));
    return RI == AnalysisResults.end() ? nullptr : &*RI->second->second;
  }

  /// \brief Invalidate a function pass result.
  void invalidateImpl(void *PassID, IRUnitT &IR) {
    typename AnalysisResultMapT::iterator RI =
        AnalysisResults.find(std::make_pair(PassID, &IR));
    if (RI == AnalysisResults.end())
      return;

    if (DebugLogging)
      dbgs() << "Invalidating analysis: " << this->lookupPass(PassID).name()
             << "\n";
    // Erase from both the per-unit list and the (ID, unit) index.
    AnalysisResultLists[&IR].erase(RI->second);
    AnalysisResults.erase(RI);
  }

  /// \brief Invalidate the results for a function.
  PreservedAnalyses invalidateImpl(IRUnitT &IR, PreservedAnalyses PA) {
    // Short circuit for a common case of all analyses being preserved.
    if (PA.areAllPreserved())
      return PA;

    if (DebugLogging)
      dbgs() << "Invalidating all non-preserved analyses for: " << IR.getName()
             << "\n";

    // Clear all the invalidated results associated specifically with this
    // function.
    SmallVector<void *, 8> InvalidatedPassIDs;
    AnalysisResultListT &ResultsList = AnalysisResultLists[&IR];
    for (typename AnalysisResultListT::iterator I = ResultsList.begin(),
                                                E = ResultsList.end();
         I != E;) {
      void *PassID = I->first;

      // Pass the invalidation down to the pass itself to see if it thinks it is
      // necessary. The analysis pass can return false if no action on the part
      // of the analysis manager is required for this invalidation event.
      if (I->second->invalidate(IR, PA)) {
        if (DebugLogging)
          dbgs() << "Invalidating analysis: " << this->lookupPass(PassID).name()
                 << "\n";

        InvalidatedPassIDs.push_back(I->first);
        // std::list::erase returns the next valid iterator, so erasing while
        // walking the list is safe here.
        I = ResultsList.erase(I);
      } else {
        ++I;
      }

      // After handling each pass, we mark it as preserved. Once we've
      // invalidated any stale results, the rest of the system is allowed to
      // start preserving this analysis again.
      PA.preserve(PassID);
    }
    // Now drop the corresponding entries from the (ID, unit) index map.
    while (!InvalidatedPassIDs.empty())
      AnalysisResults.erase(
          std::make_pair(InvalidatedPassIDs.pop_back_val(), &IR));
    if (ResultsList.empty())
      AnalysisResultLists.erase(&IR);

    return PA;
  }

  /// \brief List of function analysis pass IDs and associated concept pointers.
  ///
  /// Requires iterators to be valid across appending new entries and arbitrary
  /// erases. Provides both the pass ID and concept pointer such that it is
  /// half of a bijection and provides storage for the actual result concept.
  typedef std::list<std::pair<
      void *, std::unique_ptr<detail::AnalysisResultConcept<IRUnitT>>>>
      AnalysisResultListT;

  /// \brief Map type from function pointer to our custom list type.
  typedef DenseMap<IRUnitT *, AnalysisResultListT> AnalysisResultListMapT;

  /// \brief Map from function to a list of function analysis results.
  ///
  /// Provides linear time removal of all analysis results for a function and
  /// the ultimate storage for a particular cached analysis result.
  AnalysisResultListMapT AnalysisResultLists;

  /// \brief Map type from a pair of analysis ID and function pointer to an
  /// iterator into a particular result list.
  typedef DenseMap<std::pair<void *, IRUnitT *>,
                   typename AnalysisResultListT::iterator> AnalysisResultMapT;

  /// \brief Map from an analysis ID and function to a particular cached
  /// analysis result.
  AnalysisResultMapT AnalysisResults;

  /// \brief A flag indicating whether debug logging is enabled.
  bool DebugLogging;
};

/// \brief Convenience typedef for the Module analysis manager.
typedef AnalysisManager<Module> ModuleAnalysisManager;

/// \brief Convenience typedef for the Function analysis manager.
typedef AnalysisManager<Function> FunctionAnalysisManager;

/// \brief A module analysis which acts as a proxy for a function analysis
/// manager.
///
/// This primarily proxies invalidation information from the module analysis
/// manager and module pass manager to a function analysis manager. You should
/// never use a function analysis manager from within (transitively) a module
/// pass manager unless your parent module pass has received a proxy result
/// object for it.
class FunctionAnalysisManagerModuleProxy {
public:
  class Result;

  static void *ID() { return (void *)&PassID; }

  static StringRef name() { return "FunctionAnalysisManagerModuleProxy"; }

  explicit FunctionAnalysisManagerModuleProxy(FunctionAnalysisManager &FAM)
      : FAM(&FAM) {}
  // We have to explicitly define all the special member functions because MSVC
  // refuses to generate them.
  FunctionAnalysisManagerModuleProxy(
      const FunctionAnalysisManagerModuleProxy &Arg)
      : FAM(Arg.FAM) {}
  FunctionAnalysisManagerModuleProxy(FunctionAnalysisManagerModuleProxy &&Arg)
      : FAM(std::move(Arg.FAM)) {}
  FunctionAnalysisManagerModuleProxy &
  operator=(FunctionAnalysisManagerModuleProxy RHS) {
    std::swap(FAM, RHS.FAM);
    return *this;
  }

  /// \brief Run the analysis pass and create our proxy result object.
  ///
  /// This doesn't do any interesting work, it is primarily used to insert our
  /// proxy result object into the module analysis cache so that we can proxy
  /// invalidation to the function analysis manager.
  ///
  /// In debug builds, it will also assert that the analysis manager is empty
  /// as no queries should arrive at the function analysis manager prior to
  /// this analysis being requested.
  Result run(Module &M);

private:
  static char PassID;

  // Non-owning; the proxied manager outlives this proxy.
  FunctionAnalysisManager *FAM;
};

/// \brief The result proxy object for the
/// \c FunctionAnalysisManagerModuleProxy.
///
/// See its documentation for more information.
class FunctionAnalysisManagerModuleProxy::Result {
public:
  explicit Result(FunctionAnalysisManager &FAM) : FAM(&FAM) {}
  // We have to explicitly define all the special member functions because MSVC
  // refuses to generate them.
  Result(const Result &Arg) : FAM(Arg.FAM) {}
  Result(Result &&Arg) : FAM(std::move(Arg.FAM)) {}
  Result &operator=(Result RHS) {
    std::swap(FAM, RHS.FAM);
    return *this;
  }
  ~Result();

  /// \brief Accessor for the \c FunctionAnalysisManager.
  FunctionAnalysisManager &getManager() { return *FAM; }

  /// \brief Handler for invalidation of the module.
  ///
  /// If this analysis itself is preserved, then we assume that the set of \c
  /// Function objects in the \c Module hasn't changed and thus we don't need
  /// to invalidate *all* cached data associated with a \c Function* in the \c
  /// FunctionAnalysisManager.
  ///
  /// Regardless of whether this analysis is marked as preserved, all of the
  /// analyses in the \c FunctionAnalysisManager are potentially invalidated
  /// based on the set of preserved analyses.
  bool invalidate(Module &M, const PreservedAnalyses &PA);

private:
  FunctionAnalysisManager *FAM;
};

/// \brief A function analysis which acts as a proxy for a module analysis
/// manager.
///
/// This primarily provides an accessor to a parent module analysis manager to
/// function passes. Only the const interface of the module analysis manager is
/// provided to indicate that once inside of a function analysis pass you
/// cannot request a module analysis to actually run. Instead, the user must
/// rely on the \c getCachedResult API.
///
/// This proxy *doesn't* manage the invalidation in any way. That is handled by
/// the recursive return path of each layer of the pass manager and the
/// returned PreservedAnalysis set.
class ModuleAnalysisManagerFunctionProxy {
public:
  /// \brief Result proxy object for \c ModuleAnalysisManagerFunctionProxy.
  class Result {
  public:
    explicit Result(const ModuleAnalysisManager &MAM) : MAM(&MAM) {}
    // We have to explicitly define all the special member functions because
    // MSVC refuses to generate them.
    Result(const Result &Arg) : MAM(Arg.MAM) {}
    Result(Result &&Arg) : MAM(std::move(Arg.MAM)) {}
    Result &operator=(Result RHS) {
      std::swap(MAM, RHS.MAM);
      return *this;
    }

    const ModuleAnalysisManager &getManager() const { return *MAM; }

    /// \brief Handle invalidation by ignoring it, this pass is immutable.
    bool invalidate(Function &) { return false; }

  private:
    const ModuleAnalysisManager *MAM;
  };

  static void *ID() { return (void *)&PassID; }

  static StringRef name() { return "ModuleAnalysisManagerFunctionProxy"; }

  ModuleAnalysisManagerFunctionProxy(const ModuleAnalysisManager &MAM)
      : MAM(&MAM) {}
  // We have to explicitly define all the special member functions because MSVC
  // refuses to generate them.
  ModuleAnalysisManagerFunctionProxy(
      const ModuleAnalysisManagerFunctionProxy &Arg)
      : MAM(Arg.MAM) {}
  ModuleAnalysisManagerFunctionProxy(ModuleAnalysisManagerFunctionProxy &&Arg)
      : MAM(std::move(Arg.MAM)) {}
  ModuleAnalysisManagerFunctionProxy &
  operator=(ModuleAnalysisManagerFunctionProxy RHS) {
    std::swap(MAM, RHS.MAM);
    return *this;
  }

  /// \brief Run the analysis pass and create our proxy result object.
  /// Nothing to see here, it just forwards the \c MAM reference into the
  /// result.
  Result run(Function &) { return Result(*MAM); }

private:
  static char PassID;

  const ModuleAnalysisManager *MAM;
};

/// \brief Trivial adaptor that maps from a module to its functions.
///
/// Designed to allow composition of a FunctionPass(Manager) and
/// a ModulePassManager. Note that if this pass is constructed with a pointer
/// to a \c ModuleAnalysisManager it will run the
/// \c FunctionAnalysisManagerModuleProxy analysis prior to running the function
/// pass over the module to enable a \c FunctionAnalysisManager to be used
/// within this run safely.
/// /// Function passes run within this adaptor can rely on having exclusive access /// to the function they are run over. They should not read or modify any other /// functions! Other threads or systems may be manipulating other functions in /// the module, and so their state should never be relied on. /// FIXME: Make the above true for all of LLVM's actual passes, some still /// violate this principle. /// /// Function passes can also read the module containing the function, but they /// should not modify that module outside of the use lists of various globals. /// For example, a function pass is not permitted to add functions to the /// module. /// FIXME: Make the above true for all of LLVM's actual passes, some still /// violate this principle. template <typename FunctionPassT> class ModuleToFunctionPassAdaptor { public: explicit ModuleToFunctionPassAdaptor(FunctionPassT Pass) : Pass(std::move(Pass)) {} // We have to explicitly define all the special member functions because MSVC // refuses to generate them. ModuleToFunctionPassAdaptor(const ModuleToFunctionPassAdaptor &Arg) : Pass(Arg.Pass) {} ModuleToFunctionPassAdaptor(ModuleToFunctionPassAdaptor &&Arg) : Pass(std::move(Arg.Pass)) {} friend void swap(ModuleToFunctionPassAdaptor &LHS, ModuleToFunctionPassAdaptor &RHS) { using std::swap; swap(LHS.Pass, RHS.Pass); } ModuleToFunctionPassAdaptor &operator=(ModuleToFunctionPassAdaptor RHS) { swap(*this, RHS); return *this; } /// \brief Runs the function pass across every function in the module. PreservedAnalyses run(Module &M, ModuleAnalysisManager *AM) { FunctionAnalysisManager *FAM = nullptr; if (AM) // Setup the function analysis manager from its proxy. 
FAM = &AM->getResult<FunctionAnalysisManagerModuleProxy>(M).getManager(); PreservedAnalyses PA = PreservedAnalyses::all(); for (Function &F : M) { if (F.isDeclaration()) continue; PreservedAnalyses PassPA = Pass.run(F, FAM); // We know that the function pass couldn't have invalidated any other // function's analyses (that's the contract of a function pass), so // directly handle the function analysis manager's invalidation here and // update our preserved set to reflect that these have already been // handled. if (FAM) PassPA = FAM->invalidate(F, std::move(PassPA)); // Then intersect the preserved set so that invalidation of module // analyses will eventually occur when the module pass completes. PA.intersect(std::move(PassPA)); } // By definition we preserve the proxy. This precludes *any* invalidation // of function analyses by the proxy, but that's OK because we've taken // care to invalidate analyses in the function analysis manager // incrementally above. PA.preserve<FunctionAnalysisManagerModuleProxy>(); return PA; } static StringRef name() { return "ModuleToFunctionPassAdaptor"; } private: FunctionPassT Pass; }; /// \brief A function to deduce a function pass type and wrap it in the /// templated adaptor. template <typename FunctionPassT> ModuleToFunctionPassAdaptor<FunctionPassT> createModuleToFunctionPassAdaptor(FunctionPassT Pass) { return ModuleToFunctionPassAdaptor<FunctionPassT>(std::move(Pass)); } /// \brief A template utility pass to force an analysis result to be available. /// /// This is a no-op pass which simply forces a specific analysis pass's result /// to be available when it is run. template <typename AnalysisT> struct RequireAnalysisPass { /// \brief Run this pass over some unit of IR. /// /// This pass can be run over any unit of IR and use any analysis manager /// provided they satisfy the basic API requirements. When this pass is /// created, these methods can be instantiated to satisfy whatever the /// context requires. 
template <typename IRUnitT> PreservedAnalyses run(IRUnitT &Arg, AnalysisManager<IRUnitT> *AM) { if (AM) (void)AM->template getResult<AnalysisT>(Arg); return PreservedAnalyses::all(); } static StringRef name() { return "RequireAnalysisPass"; } }; /// \brief A template utility pass to force an analysis result to be /// invalidated. /// /// This is a no-op pass which simply forces a specific analysis result to be /// invalidated when it is run. template <typename AnalysisT> struct InvalidateAnalysisPass { /// \brief Run this pass over some unit of IR. /// /// This pass can be run over any unit of IR and use any analysis manager /// provided they satisfy the basic API requirements. When this pass is /// created, these methods can be instantiated to satisfy whatever the /// context requires. template <typename IRUnitT> PreservedAnalyses run(IRUnitT &Arg, AnalysisManager<IRUnitT> *AM) { if (AM) // We have to directly invalidate the analysis result as we can't // enumerate all other analyses and use the preserved set to control it. (void)AM->template invalidate<AnalysisT>(Arg); return PreservedAnalyses::all(); } static StringRef name() { return "InvalidateAnalysisPass"; } }; /// \brief A utility pass that does nothing but preserves no analyses. /// /// As a consequence fo not preserving any analyses, this pass will force all /// analysis passes to be re-run to produce fresh results if any are needed. struct InvalidateAllAnalysesPass { /// \brief Run this pass over some unit of IR. template <typename IRUnitT> PreservedAnalyses run(IRUnitT &Arg) { return PreservedAnalyses::none(); } static StringRef name() { return "InvalidateAllAnalysesPass"; } }; } #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/Instructions.h
//===-- llvm/Instructions.h - Instruction subclass definitions --*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file exposes the class definitions of all of the subclasses of the // Instruction class. This is meant to be an easy way to get access to all // instruction subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_INSTRUCTIONS_H #define LLVM_IR_INSTRUCTIONS_H #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/iterator_range.h" #include "llvm/IR/Attributes.h" #include "llvm/IR/CallingConv.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Function.h" #include "llvm/IR/InstrTypes.h" #include "llvm/Support/ErrorHandling.h" #include <iterator> namespace llvm { class APInt; class ConstantInt; class ConstantRange; class DataLayout; class LLVMContext; enum AtomicOrdering { NotAtomic = 0, Unordered = 1, Monotonic = 2, // Consume = 3, // Not specified yet. Acquire = 4, Release = 5, AcquireRelease = 6, SequentiallyConsistent = 7 }; enum SynchronizationScope { SingleThread = 0, CrossThread = 1 }; /// Returns true if the ordering is at least as strong as acquire /// (i.e. acquire, acq_rel or seq_cst) inline bool isAtLeastAcquire(AtomicOrdering Ord) { return (Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent); } /// Returns true if the ordering is at least as strong as release /// (i.e. 
release, acq_rel or seq_cst) inline bool isAtLeastRelease(AtomicOrdering Ord) { return (Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent); } //===----------------------------------------------------------------------===// // AllocaInst Class //===----------------------------------------------------------------------===// /// AllocaInst - an instruction to allocate memory on the stack /// class AllocaInst : public UnaryInstruction { Type *AllocatedType; protected: // Note: Instruction needs to be a friend here to call cloneImpl. friend class Instruction; AllocaInst *cloneImpl() const; public: explicit AllocaInst(Type *Ty, Value *ArraySize = nullptr, const Twine &Name = "", Instruction *InsertBefore = nullptr); AllocaInst(Type *Ty, Value *ArraySize, const Twine &Name, BasicBlock *InsertAtEnd); AllocaInst(Type *Ty, const Twine &Name, Instruction *InsertBefore = nullptr); AllocaInst(Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd); AllocaInst(Type *Ty, Value *ArraySize, unsigned Align, const Twine &Name = "", Instruction *InsertBefore = nullptr); AllocaInst(Type *Ty, Value *ArraySize, unsigned Align, const Twine &Name, BasicBlock *InsertAtEnd); // Out of line virtual method, so the vtable, etc. has a home. ~AllocaInst() override; /// isArrayAllocation - Return true if there is an allocation size parameter /// to the allocation instruction that is not 1. /// bool isArrayAllocation() const; /// getArraySize - Get the number of elements allocated. For a simple /// allocation of a single element, this will return a constant 1 value. /// const Value *getArraySize() const { return getOperand(0); } Value *getArraySize() { return getOperand(0); } /// getType - Overload to return most specific pointer type /// PointerType *getType() const { return cast<PointerType>(Instruction::getType()); } /// getAllocatedType - Return the type that is being allocated by the /// instruction. 
/// Type *getAllocatedType() const { return AllocatedType; } /// \brief for use only in special circumstances that need to generically /// transform a whole instruction (eg: IR linking and vectorization). void setAllocatedType(Type *Ty) { AllocatedType = Ty; } /// getAlignment - Return the alignment of the memory that is being allocated /// by the instruction. /// unsigned getAlignment() const { return (1u << (getSubclassDataFromInstruction() & 31)) >> 1; } void setAlignment(unsigned Align); /// isStaticAlloca - Return true if this alloca is in the entry block of the /// function and is a constant size. If so, the code generator will fold it /// into the prolog/epilog code, so it is basically free. bool isStaticAlloca() const; /// \brief Return true if this alloca is used as an inalloca argument to a /// call. Such allocas are never considered static even if they are in the /// entry block. bool isUsedWithInAlloca() const { return getSubclassDataFromInstruction() & 32; } /// \brief Specify whether this alloca is used to represent the arguments to /// a call. void setUsedWithInAlloca(bool V) { setInstructionSubclassData((getSubclassDataFromInstruction() & ~32) | (V ? 32 : 0)); } // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return (I->getOpcode() == Instruction::Alloca); } static inline bool classof(const Value *V) { return isa<Instruction>(V) && classof(cast<Instruction>(V)); } private: // Shadow Instruction::setInstructionSubclassData with a private forwarding // method so that subclasses cannot accidentally use it. void setInstructionSubclassData(unsigned short D) { Instruction::setInstructionSubclassData(D); } }; //===----------------------------------------------------------------------===// // LoadInst Class //===----------------------------------------------------------------------===// /// LoadInst - an instruction for reading from memory. 
This uses the /// SubclassData field in Value to store whether or not the load is volatile. /// class LoadInst : public UnaryInstruction { void AssertOK(); protected: // Note: Instruction needs to be a friend here to call cloneImpl. friend class Instruction; LoadInst *cloneImpl() const; public: LoadInst(Value *Ptr, const Twine &NameStr, Instruction *InsertBefore); LoadInst(Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd); LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile = false, Instruction *InsertBefore = nullptr); LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile = false, Instruction *InsertBefore = nullptr) : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr, NameStr, isVolatile, InsertBefore) {} LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, BasicBlock *InsertAtEnd); LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align, Instruction *InsertBefore = nullptr) : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr, NameStr, isVolatile, Align, InsertBefore) {} LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align, Instruction *InsertBefore = nullptr); LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align, BasicBlock *InsertAtEnd); LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align, AtomicOrdering Order, SynchronizationScope SynchScope = CrossThread, Instruction *InsertBefore = nullptr) : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr, NameStr, isVolatile, Align, Order, SynchScope, InsertBefore) {} LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align, AtomicOrdering Order, SynchronizationScope SynchScope = CrossThread, Instruction *InsertBefore = nullptr); LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align, AtomicOrdering Order, SynchronizationScope SynchScope, BasicBlock *InsertAtEnd); LoadInst(Value *Ptr, const char 
*NameStr, Instruction *InsertBefore); LoadInst(Value *Ptr, const char *NameStr, BasicBlock *InsertAtEnd); LoadInst(Type *Ty, Value *Ptr, const char *NameStr = nullptr, bool isVolatile = false, Instruction *InsertBefore = nullptr); explicit LoadInst(Value *Ptr, const char *NameStr = nullptr, bool isVolatile = false, Instruction *InsertBefore = nullptr) : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr, NameStr, isVolatile, InsertBefore) {} LoadInst(Value *Ptr, const char *NameStr, bool isVolatile, BasicBlock *InsertAtEnd); /// isVolatile - Return true if this is a load from a volatile memory /// location. /// bool isVolatile() const { return getSubclassDataFromInstruction() & 1; } /// setVolatile - Specify whether this is a volatile load or not. /// void setVolatile(bool V) { setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) | (V ? 1 : 0)); } /// getAlignment - Return the alignment of the access that is being performed /// unsigned getAlignment() const { return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1; } void setAlignment(unsigned Align); /// Returns the ordering effect of this fence. AtomicOrdering getOrdering() const { return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7); } /// Set the ordering constraint on this load. May not be Release or /// AcquireRelease. void setOrdering(AtomicOrdering Ordering) { setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) | (Ordering << 7)); } SynchronizationScope getSynchScope() const { return SynchronizationScope((getSubclassDataFromInstruction() >> 6) & 1); } /// Specify whether this load is ordered with respect to all /// concurrently executing threads, or only with respect to signal handlers /// executing in the same thread. 
void setSynchScope(SynchronizationScope xthread) { setInstructionSubclassData((getSubclassDataFromInstruction() & ~(1 << 6)) | (xthread << 6)); } void setAtomic(AtomicOrdering Ordering, SynchronizationScope SynchScope = CrossThread) { setOrdering(Ordering); setSynchScope(SynchScope); } bool isSimple() const { return !isAtomic() && !isVolatile(); } bool isUnordered() const { return getOrdering() <= Unordered && !isVolatile(); } Value *getPointerOperand() { return getOperand(0); } const Value *getPointerOperand() const { return getOperand(0); } static unsigned getPointerOperandIndex() { return 0U; } /// \brief Returns the address space of the pointer operand. unsigned getPointerAddressSpace() const { return getPointerOperand()->getType()->getPointerAddressSpace(); } // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::Load; } static inline bool classof(const Value *V) { return isa<Instruction>(V) && classof(cast<Instruction>(V)); } private: // Shadow Instruction::setInstructionSubclassData with a private forwarding // method so that subclasses cannot accidentally use it. void setInstructionSubclassData(unsigned short D) { Instruction::setInstructionSubclassData(D); } }; //===----------------------------------------------------------------------===// // StoreInst Class //===----------------------------------------------------------------------===// /// StoreInst - an instruction for storing to memory /// class StoreInst : public Instruction { void *operator new(size_t, unsigned) = delete; void AssertOK(); protected: // Note: Instruction needs to be a friend here to call cloneImpl. 
friend class Instruction; StoreInst *cloneImpl() const; public: // allocate space for exactly two operands void *operator new(size_t s) { return User::operator new(s, 2); } StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore); StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd); StoreInst(Value *Val, Value *Ptr, bool isVolatile = false, Instruction *InsertBefore = nullptr); StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd); StoreInst(Value *Val, Value *Ptr, bool isVolatile, unsigned Align, Instruction *InsertBefore = nullptr); StoreInst(Value *Val, Value *Ptr, bool isVolatile, unsigned Align, BasicBlock *InsertAtEnd); StoreInst(Value *Val, Value *Ptr, bool isVolatile, unsigned Align, AtomicOrdering Order, SynchronizationScope SynchScope = CrossThread, Instruction *InsertBefore = nullptr); StoreInst(Value *Val, Value *Ptr, bool isVolatile, unsigned Align, AtomicOrdering Order, SynchronizationScope SynchScope, BasicBlock *InsertAtEnd); /// isVolatile - Return true if this is a store to a volatile memory /// location. /// bool isVolatile() const { return getSubclassDataFromInstruction() & 1; } /// setVolatile - Specify whether this is a volatile store or not. /// void setVolatile(bool V) { setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) | (V ? 1 : 0)); } /// Transparently provide more efficient getOperand methods. DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); /// getAlignment - Return the alignment of the access that is being performed /// unsigned getAlignment() const { return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1; } void setAlignment(unsigned Align); /// Returns the ordering effect of this store. AtomicOrdering getOrdering() const { return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7); } /// Set the ordering constraint on this store. May not be Acquire or /// AcquireRelease. 
void setOrdering(AtomicOrdering Ordering) { setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) | (Ordering << 7)); } SynchronizationScope getSynchScope() const { return SynchronizationScope((getSubclassDataFromInstruction() >> 6) & 1); } /// Specify whether this store instruction is ordered with respect to all /// concurrently executing threads, or only with respect to signal handlers /// executing in the same thread. void setSynchScope(SynchronizationScope xthread) { setInstructionSubclassData((getSubclassDataFromInstruction() & ~(1 << 6)) | (xthread << 6)); } void setAtomic(AtomicOrdering Ordering, SynchronizationScope SynchScope = CrossThread) { setOrdering(Ordering); setSynchScope(SynchScope); } bool isSimple() const { return !isAtomic() && !isVolatile(); } bool isUnordered() const { return getOrdering() <= Unordered && !isVolatile(); } Value *getValueOperand() { return getOperand(0); } const Value *getValueOperand() const { return getOperand(0); } Value *getPointerOperand() { return getOperand(1); } const Value *getPointerOperand() const { return getOperand(1); } static unsigned getPointerOperandIndex() { return 1U; } /// \brief Returns the address space of the pointer operand. unsigned getPointerAddressSpace() const { return getPointerOperand()->getType()->getPointerAddressSpace(); } // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::Store; } static inline bool classof(const Value *V) { return isa<Instruction>(V) && classof(cast<Instruction>(V)); } private: // Shadow Instruction::setInstructionSubclassData with a private forwarding // method so that subclasses cannot accidentally use it. 
void setInstructionSubclassData(unsigned short D) { Instruction::setInstructionSubclassData(D); } }; template <> struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> { }; DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value) //===----------------------------------------------------------------------===// // FenceInst Class //===----------------------------------------------------------------------===// /// FenceInst - an instruction for ordering other memory operations /// class FenceInst : public Instruction { void *operator new(size_t, unsigned) = delete; void Init(AtomicOrdering Ordering, SynchronizationScope SynchScope); protected: // Note: Instruction needs to be a friend here to call cloneImpl. friend class Instruction; FenceInst *cloneImpl() const; public: // allocate space for exactly zero operands void *operator new(size_t s) { return User::operator new(s, 0); } // Ordering may only be Acquire, Release, AcquireRelease, or // SequentiallyConsistent. FenceInst(LLVMContext &C, AtomicOrdering Ordering, SynchronizationScope SynchScope = CrossThread, Instruction *InsertBefore = nullptr); FenceInst(LLVMContext &C, AtomicOrdering Ordering, SynchronizationScope SynchScope, BasicBlock *InsertAtEnd); /// Returns the ordering effect of this fence. AtomicOrdering getOrdering() const { return AtomicOrdering(getSubclassDataFromInstruction() >> 1); } /// Set the ordering constraint on this fence. May only be Acquire, Release, /// AcquireRelease, or SequentiallyConsistent. void setOrdering(AtomicOrdering Ordering) { setInstructionSubclassData((getSubclassDataFromInstruction() & 1) | (Ordering << 1)); } SynchronizationScope getSynchScope() const { return SynchronizationScope(getSubclassDataFromInstruction() & 1); } /// Specify whether this fence orders other operations with respect to all /// concurrently executing threads, or only with respect to signal handlers /// executing in the same thread. 
void setSynchScope(SynchronizationScope xthread) { setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) | xthread); } // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::Fence; } static inline bool classof(const Value *V) { return isa<Instruction>(V) && classof(cast<Instruction>(V)); } private: // Shadow Instruction::setInstructionSubclassData with a private forwarding // method so that subclasses cannot accidentally use it. void setInstructionSubclassData(unsigned short D) { Instruction::setInstructionSubclassData(D); } }; //===----------------------------------------------------------------------===// // AtomicCmpXchgInst Class //===----------------------------------------------------------------------===// /// AtomicCmpXchgInst - an instruction that atomically checks whether a /// specified value is in a memory location, and, if it is, stores a new value /// there. Returns the value that was loaded. /// class AtomicCmpXchgInst : public Instruction { void *operator new(size_t, unsigned) = delete; void Init(Value *Ptr, Value *Cmp, Value *NewVal, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SynchronizationScope SynchScope); protected: // Note: Instruction needs to be a friend here to call cloneImpl. friend class Instruction; AtomicCmpXchgInst *cloneImpl() const; public: // allocate space for exactly three operands void *operator new(size_t s) { return User::operator new(s, 3); } AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SynchronizationScope SynchScope, Instruction *InsertBefore = nullptr); AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SynchronizationScope SynchScope, BasicBlock *InsertAtEnd); /// isVolatile - Return true if this is a cmpxchg from a volatile memory /// location. 
/// bool isVolatile() const { return getSubclassDataFromInstruction() & 1; } /// setVolatile - Specify whether this is a volatile cmpxchg. /// void setVolatile(bool V) { setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) | (unsigned)V); } /// Return true if this cmpxchg may spuriously fail. bool isWeak() const { return getSubclassDataFromInstruction() & 0x100; } void setWeak(bool IsWeak) { setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x100) | (IsWeak << 8)); } /// Transparently provide more efficient getOperand methods. DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); /// Set the ordering constraint on this cmpxchg. void setSuccessOrdering(AtomicOrdering Ordering) { assert(Ordering != NotAtomic && "CmpXchg instructions can only be atomic."); setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x1c) | (Ordering << 2)); } void setFailureOrdering(AtomicOrdering Ordering) { assert(Ordering != NotAtomic && "CmpXchg instructions can only be atomic."); setInstructionSubclassData((getSubclassDataFromInstruction() & ~0xe0) | (Ordering << 5)); } /// Specify whether this cmpxchg is atomic and orders other operations with /// respect to all concurrently executing threads, or only with respect to /// signal handlers executing in the same thread. void setSynchScope(SynchronizationScope SynchScope) { setInstructionSubclassData((getSubclassDataFromInstruction() & ~2) | (SynchScope << 1)); } /// Returns the ordering constraint on this cmpxchg. AtomicOrdering getSuccessOrdering() const { return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7); } /// Returns the ordering constraint on this cmpxchg. AtomicOrdering getFailureOrdering() const { return AtomicOrdering((getSubclassDataFromInstruction() >> 5) & 7); } /// Returns whether this cmpxchg is atomic between threads or only within a /// single thread. 
SynchronizationScope getSynchScope() const { return SynchronizationScope((getSubclassDataFromInstruction() & 2) >> 1); } Value *getPointerOperand() { return getOperand(0); } const Value *getPointerOperand() const { return getOperand(0); } static unsigned getPointerOperandIndex() { return 0U; } Value *getCompareOperand() { return getOperand(1); } const Value *getCompareOperand() const { return getOperand(1); } Value *getNewValOperand() { return getOperand(2); } const Value *getNewValOperand() const { return getOperand(2); } /// \brief Returns the address space of the pointer operand. unsigned getPointerAddressSpace() const { return getPointerOperand()->getType()->getPointerAddressSpace(); } /// \brief Returns the strongest permitted ordering on failure, given the /// desired ordering on success. /// /// If the comparison in a cmpxchg operation fails, there is no atomic store /// so release semantics cannot be provided. So this function drops explicit /// Release requests from the AtomicOrdering. A SequentiallyConsistent /// operation would remain SequentiallyConsistent. static AtomicOrdering getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) { switch (SuccessOrdering) { default: llvm_unreachable("invalid cmpxchg success ordering"); case Release: case Monotonic: return Monotonic; case AcquireRelease: case Acquire: return Acquire; case SequentiallyConsistent: return SequentiallyConsistent; } } // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::AtomicCmpXchg; } static inline bool classof(const Value *V) { return isa<Instruction>(V) && classof(cast<Instruction>(V)); } private: // Shadow Instruction::setInstructionSubclassData with a private forwarding // method so that subclasses cannot accidentally use it. 
  void setInstructionSubclassData(unsigned short D) {
    Instruction::setInstructionSubclassData(D);
  }
};

template <>
struct OperandTraits<AtomicCmpXchgInst> :
    public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)

//===----------------------------------------------------------------------===//
//                                AtomicRMWInst Class
//===----------------------------------------------------------------------===//

/// AtomicRMWInst - an instruction that atomically reads a memory location,
/// combines it with another value, and then stores the result back.  Returns
/// the old value.
///
class AtomicRMWInst : public Instruction {
  void *operator new(size_t, unsigned) = delete;

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;
  AtomicRMWInst *cloneImpl() const;

public:
  /// This enumeration lists the possible modifications atomicrmw can make.  In
  /// the descriptions, 'p' is the pointer to the instruction's memory location,
  /// 'old' is the initial value of *p, and 'v' is the other value passed to the
  /// instruction.  These instructions always return 'old'.
  enum BinOp {
    /// *p = v
    Xchg,
    /// *p = old + v
    Add,
    /// *p = old - v
    Sub,
    /// *p = old & v
    And,
    /// *p = ~(old & v)
    Nand,
    /// *p = old | v
    Or,
    /// *p = old ^ v
    Xor,
    /// *p = old >signed v ? old : v
    Max,
    /// *p = old <signed v ? old : v
    Min,
    /// *p = old >unsigned v ? old : v
    UMax,
    /// *p = old <unsigned v ? old : v
    UMin,

    FIRST_BINOP = Xchg,
    LAST_BINOP = UMin,
    BAD_BINOP
  };

  // allocate space for exactly two operands
  void *operator new(size_t s) {
    return User::operator new(s, 2);
  }
  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                AtomicOrdering Ordering, SynchronizationScope SynchScope,
                Instruction *InsertBefore = nullptr);
  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                AtomicOrdering Ordering, SynchronizationScope SynchScope,
                BasicBlock *InsertAtEnd);

  // Bits 5 and up of the instruction subclass data encode the BinOp.
  BinOp getOperation() const {
    return static_cast<BinOp>(getSubclassDataFromInstruction() >> 5);
  }

  void setOperation(BinOp Operation) {
    unsigned short SubclassData = getSubclassDataFromInstruction();
    // Keep the low 5 bits (volatile, scope, ordering) intact.
    setInstructionSubclassData((SubclassData & 31) |
                               (Operation << 5));
  }

  /// isVolatile - Return true if this is a RMW on a volatile memory location.
  ///
  // Bit 0 of the subclass data holds the volatile flag.
  bool isVolatile() const {
    return getSubclassDataFromInstruction() & 1;
  }

  /// setVolatile - Specify whether this is a volatile RMW or not.
  ///
  void setVolatile(bool V) {
     setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
                                (unsigned)V);
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Set the ordering constraint on this RMW.
  // Bits 2-4 of the subclass data hold the ordering.
  void setOrdering(AtomicOrdering Ordering) {
    assert(Ordering != NotAtomic &&
           "atomicrmw instructions can only be atomic.");
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 2)) |
                               (Ordering << 2));
  }

  /// Specify whether this RMW orders other operations with respect to all
  /// concurrently executing threads, or only with respect to signal handlers
  /// executing in the same thread.
  // Bit 1 of the subclass data holds the synchronization scope.
  void setSynchScope(SynchronizationScope SynchScope) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~2) |
                               (SynchScope << 1));
  }

  /// Returns the ordering constraint on this RMW.
  AtomicOrdering getOrdering() const {
    return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
  }

  /// Returns whether this RMW is atomic between threads or only within a
  /// single thread.
  SynchronizationScope getSynchScope() const {
    return SynchronizationScope((getSubclassDataFromInstruction() & 2) >> 1);
  }

  // Operand 0: pointer; operand 1: value combined with *ptr.
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  Value *getValOperand() { return getOperand(1); }
  const Value *getValOperand() const { return getOperand(1); }

  /// \brief Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::AtomicRMW;
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  void Init(BinOp Operation, Value *Ptr, Value *Val,
            AtomicOrdering Ordering, SynchronizationScope SynchScope);

  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  void setInstructionSubclassData(unsigned short D) {
    Instruction::setInstructionSubclassData(D);
  }
};

template <>
struct OperandTraits<AtomicRMWInst>
    : public FixedNumOperandTraits<AtomicRMWInst,2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)

//===----------------------------------------------------------------------===//
//                             GetElementPtrInst Class
//===----------------------------------------------------------------------===//

// checkGEPType - Simple wrapper function to give a better assertion failure
// message on bad indexes for a gep instruction.
//
inline Type *checkGEPType(Type *Ty) {
  assert(Ty && "Invalid GetElementPtrInst indices for type!");
  return Ty;
}

/// GetElementPtrInst - an instruction for type-safe pointer arithmetic to
/// access elements of arrays and structs
///
class GetElementPtrInst : public Instruction {
  // The pointee type the GEP indexes into, and the element type the GEP
  // result points to (getResultElementType asserts they stay consistent).
  Type *SourceElementType;
  Type *ResultElementType;

  GetElementPtrInst(const GetElementPtrInst &GEPI);
  void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);

  /// Constructors - Create a getelementptr instruction with a base pointer and
  /// a list of indices. The first ctor can optionally insert before an
  /// existing instruction, the second appends the new instruction to the
  /// specified BasicBlock.
  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
                           ArrayRef<Value *> IdxList, unsigned Values,
                           const Twine &NameStr, Instruction *InsertBefore);
  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
                           ArrayRef<Value *> IdxList, unsigned Values,
                           const Twine &NameStr, BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;
  GetElementPtrInst *cloneImpl() const;

public:
  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
                                   ArrayRef<Value *> IdxList,
                                   const Twine &NameStr = "",
                                   Instruction *InsertBefore = nullptr) {
    // Operand count is the base pointer plus one operand per index.
    unsigned Values = 1 + unsigned(IdxList.size());
    if (!PointeeType)
      PointeeType =
          cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
    else
      assert(
          PointeeType ==
          cast<PointerType>(Ptr->getType()->getScalarType())->getElementType());
    return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
                                          NameStr, InsertBefore);
  }
  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
                                   ArrayRef<Value *> IdxList,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd) {
    unsigned Values = 1 + unsigned(IdxList.size());
    if (!PointeeType)
      PointeeType =
          cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
    else
      assert(
          PointeeType ==
          cast<PointerType>(Ptr->getType()->getScalarType())->getElementType());
    return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
                                          NameStr, InsertAtEnd);
  }

  /// Create an "inbounds" getelementptr. See the documentation for the
  /// "inbounds" flag in LangRef.html for details.
  static GetElementPtrInst *CreateInBounds(Value *Ptr,
                                           ArrayRef<Value *> IdxList,
                                           const Twine &NameStr = "",
                                           Instruction *InsertBefore = nullptr){
    return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore);
  }
  static GetElementPtrInst *
  CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
                 const Twine &NameStr = "",
                 Instruction *InsertBefore = nullptr) {
    GetElementPtrInst *GEP =
        Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
    GEP->setIsInBounds(true);
    return GEP;
  }
  static GetElementPtrInst *CreateInBounds(Value *Ptr,
                                           ArrayRef<Value *> IdxList,
                                           const Twine &NameStr,
                                           BasicBlock *InsertAtEnd) {
    return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd);
  }
  static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
                                           ArrayRef<Value *> IdxList,
                                           const Twine &NameStr,
                                           BasicBlock *InsertAtEnd) {
    GetElementPtrInst *GEP =
        Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
    GEP->setIsInBounds(true);
    return GEP;
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // getType - Overload to return most specific sequential type.
  SequentialType *getType() const {
    return cast<SequentialType>(Instruction::getType());
  }

  Type *getSourceElementType() const { return SourceElementType; }

  void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
  void setResultElementType(Type *Ty) { ResultElementType = Ty; }

  Type *getResultElementType() const {
    assert(ResultElementType ==
           cast<PointerType>(getType()->getScalarType())->getElementType());
    return ResultElementType;
  }

  /// \brief Returns the address space of this instruction's pointer type.
  unsigned getAddressSpace() const {
    // Note that this is always the same as the pointer operand's address space
    // and that is cheaper to compute, so cheat here.
    return getPointerAddressSpace();
  }

  /// getIndexedType - Returns the type of the element that would be loaded
  /// with a load instruction with the specified parameters.
  ///
  /// Null is returned if the indices are invalid for the specified
  /// pointer type.
  ///
  static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
  static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
  static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);

  // Operand 0 is the base pointer; operands 1..N are the indices.
  inline op_iterator       idx_begin()       { return op_begin()+1; }
  inline const_op_iterator idx_begin() const { return op_begin()+1; }
  inline op_iterator       idx_end()         { return op_end(); }
  inline const_op_iterator idx_end()   const { return op_end(); }

  Value *getPointerOperand() {
    return getOperand(0);
  }
  const Value *getPointerOperand() const {
    return getOperand(0);
  }
  static unsigned getPointerOperandIndex() {
    return 0U;    // get index for modifying correct operand.
  }

  /// getPointerOperandType - Method to return the pointer operand as a
  /// PointerType.
  Type *getPointerOperandType() const {
    return getPointerOperand()->getType();
  }

  /// \brief Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  /// GetGEPReturnType - Returns the pointer type returned by the GEP
  /// instruction, which may be a vector of pointers.
  static Type *getGEPReturnType(Value *Ptr, ArrayRef<Value *> IdxList) {
    return getGEPReturnType(
        cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(),
        Ptr, IdxList);
  }
  static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
                                ArrayRef<Value *> IdxList) {
    Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)),
                                   Ptr->getType()->getPointerAddressSpace());
    // Vector GEP: a vector base, or any vector index, makes the result a
    // vector of pointers with the same element count.
    if (Ptr->getType()->isVectorTy()) {
      unsigned NumElem = Ptr->getType()->getVectorNumElements();
      return VectorType::get(PtrTy, NumElem);
    }
    for (Value *Index : IdxList)
      if (Index->getType()->isVectorTy()) {
        unsigned NumElem = Index->getType()->getVectorNumElements();
        return VectorType::get(PtrTy, NumElem);
      }
    // Scalar GEP
    return PtrTy;
  }

  unsigned getNumIndices() const {  // Note: always non-negative
    return getNumOperands() - 1;
  }

  bool hasIndices() const {
    return getNumOperands() > 1;
  }

  /// hasAllZeroIndices - Return true if all of the indices of this GEP are
  /// zeros.  If so, the result pointer and the first operand have the same
  /// value, just potentially different types.
  bool hasAllZeroIndices() const;

  /// hasAllConstantIndices - Return true if all of the indices of this GEP are
  /// constant integers.  If so, the result pointer and the first operand have
  /// a constant offset between them.
  bool hasAllConstantIndices() const;

  /// setIsInBounds - Set or clear the inbounds flag on this GEP instruction.
  /// See LangRef.html for the meaning of inbounds on a getelementptr.
  void setIsInBounds(bool b = true);

  /// isInBounds - Determine whether the GEP has the inbounds flag.
  bool isInBounds() const;

  /// \brief Accumulate the constant address offset of this GEP if possible.
  ///
  /// This routine accepts an APInt into which it will accumulate the constant
  /// offset of this GEP if the GEP is in fact constant. If the GEP is not
  /// all-constant, it returns false and the value of the offset APInt is
  /// undefined (it is *not* preserved!). The APInt passed into this routine
  /// must be at least as wide as the IntPtr type for the address space of
  /// the base GEP pointer.
  bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::GetElementPtr);
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<GetElementPtrInst> :
  public VariadicOperandTraits<GetElementPtrInst, 1> {
};

GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList, unsigned Values,
                                     const Twine &NameStr,
                                     Instruction *InsertBefore)
    : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
                  Values, InsertBefore),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  assert(ResultElementType ==
         cast<PointerType>(getType()->getScalarType())->getElementType());
  init(Ptr, IdxList, NameStr);
}

GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList, unsigned Values,
                                     const Twine &NameStr,
                                     BasicBlock *InsertAtEnd)
    : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
                  Values, InsertAtEnd),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  assert(ResultElementType ==
         cast<PointerType>(getType()->getScalarType())->getElementType());
  init(Ptr, IdxList, NameStr);
}

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)

//===----------------------------------------------------------------------===//
//                               ICmpInst Class
//===----------------------------------------------------------------------===//

/// This instruction compares its operands according to the predicate given
/// to the
/// constructor. It only operates on integers or pointers. The operands
/// must be identical types.
/// \brief Represent an integer comparison operator.
class ICmpInst: public CmpInst {
  // Sanity checks run in debug builds after construction.
  void AssertOK() {
    assert(getPredicate() >= CmpInst::FIRST_ICMP_PREDICATE &&
           getPredicate() <= CmpInst::LAST_ICMP_PREDICATE &&
           "Invalid ICmp predicate value");
    assert(getOperand(0)->getType() == getOperand(1)->getType() &&
           "Both operands to ICmp instruction are not of the same type!");
    // Check that the operands are the right type
    assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
            getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
           "Invalid operand types for ICmp instruction");
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;
  /// \brief Clone an identical ICmpInst
  ICmpInst *cloneImpl() const;

public:
  /// \brief Constructor with insert-before-instruction semantics.
  ICmpInst(
    Instruction *InsertBefore,  ///< Where to insert
    Predicate pred,  ///< The predicate to use for the comparison
    Value *LHS,      ///< The left-hand-side of the expression
    Value *RHS,      ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::ICmp, pred, LHS, RHS, NameStr,
              InsertBefore) {
#ifndef NDEBUG
  AssertOK();
#endif
  }

  /// \brief Constructor with insert-at-end semantics.
  ICmpInst(
    BasicBlock &InsertAtEnd, ///< Block to insert into.
    Predicate pred,  ///< The predicate to use for the comparison
    Value *LHS,      ///< The left-hand-side of the expression
    Value *RHS,      ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::ICmp, pred, LHS, RHS, NameStr,
              &InsertAtEnd) {
#ifndef NDEBUG
  AssertOK();
#endif
  }

  /// \brief Constructor with no-insertion semantics
  ICmpInst(
    Predicate pred, ///< The predicate to use for the comparison
    Value *LHS,     ///< The left-hand-side of the expression
    Value *RHS,     ///< The right-hand-side of the expression
    const Twine &NameStr = "" ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::ICmp, pred, LHS, RHS, NameStr) {
#ifndef NDEBUG
  AssertOK();
#endif
  }

  /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
  /// @returns the predicate that would be the result if the operand were
  /// regarded as signed.
  /// \brief Return the signed version of the predicate
  Predicate getSignedPredicate() const {
    return getSignedPredicate(getPredicate());
  }

  /// This is a static version that you can use without an instruction.
  /// \brief Return the signed version of the predicate.
  static Predicate getSignedPredicate(Predicate pred);

  /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
  /// @returns the predicate that would be the result if the operand were
  /// regarded as unsigned.
  /// \brief Return the unsigned version of the predicate
  Predicate getUnsignedPredicate() const {
    return getUnsignedPredicate(getPredicate());
  }

  /// This is a static version that you can use without an instruction.
  /// \brief Return the unsigned version of the predicate.
  static Predicate getUnsignedPredicate(Predicate pred);

  /// isEquality - Return true if this predicate is either EQ or NE.  This also
  /// tests for commutativity.
  static bool isEquality(Predicate P) {
    return P == ICMP_EQ || P == ICMP_NE;
  }

  /// isEquality - Return true if this predicate is either EQ or NE.  This also
  /// tests for commutativity.
  bool isEquality() const {
    return isEquality(getPredicate());
  }

  /// @returns true if the predicate of this ICmpInst is commutative
  /// \brief Determine if this relation is commutative.
  bool isCommutative() const { return isEquality(); }

  /// isRelational - Return true if the predicate is relational (not EQ or NE).
  ///
  bool isRelational() const {
    return !isEquality();
  }

  /// isRelational - Return true if the predicate is relational (not EQ or NE).
  ///
  static bool isRelational(Predicate P) {
    return !isEquality(P);
  }

  /// Initialize a set of values that all satisfy the predicate with C.
  /// \brief Make a ConstantRange for a relation with a constant value.
  static ConstantRange makeConstantRange(Predicate pred, const APInt &C);

  /// Exchange the two operands to this instruction in such a way that it does
  /// not modify the semantics of the instruction. The predicate value may be
  /// changed to retain the same result if the predicate is order dependent
  /// (e.g. ult).
  /// \brief Swap operands and adjust predicate.
  void swapOperands() {
    setPredicate(getSwappedPredicate());
    Op<0>().swap(Op<1>());
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ICmp;
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                               FCmpInst Class
//===----------------------------------------------------------------------===//

/// This instruction compares its operands according to the predicate given
/// to the constructor. It only operates on floating point values or packed
/// vectors of floating point values. The operands must be identical types.
/// \brief Represents a floating point comparison operator.
class FCmpInst: public CmpInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;
  /// \brief Clone an identical FCmpInst
  FCmpInst *cloneImpl() const;

public:
  /// \brief Constructor with insert-before-instruction semantics.
  FCmpInst(
    Instruction *InsertBefore, ///< Where to insert
    Predicate pred,  ///< The predicate to use for the comparison
    Value *LHS,      ///< The left-hand-side of the expression
    Value *RHS,      ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::FCmp, pred, LHS, RHS, NameStr,
              InsertBefore) {
    assert(pred <= FCmpInst::LAST_FCMP_PREDICATE &&
           "Invalid FCmp predicate value");
    assert(getOperand(0)->getType() == getOperand(1)->getType() &&
           "Both operands to FCmp instruction are not of the same type!");
    // Check that the operands are the right type
    assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
           "Invalid operand types for FCmp instruction");
  }

  /// \brief Constructor with insert-at-end semantics.
  FCmpInst(
    BasicBlock &InsertAtEnd, ///< Block to insert into.
    Predicate pred,  ///< The predicate to use for the comparison
    Value *LHS,      ///< The left-hand-side of the expression
    Value *RHS,      ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::FCmp, pred, LHS, RHS, NameStr,
              &InsertAtEnd) {
    assert(pred <= FCmpInst::LAST_FCMP_PREDICATE &&
           "Invalid FCmp predicate value");
    assert(getOperand(0)->getType() == getOperand(1)->getType() &&
           "Both operands to FCmp instruction are not of the same type!");
    // Check that the operands are the right type
    assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
           "Invalid operand types for FCmp instruction");
  }

  /// \brief Constructor with no-insertion semantics
  FCmpInst(
    Predicate pred, ///< The predicate to use for the comparison
    Value *LHS,     ///< The left-hand-side of the expression
    Value *RHS,     ///< The right-hand-side of the expression
    const Twine &NameStr = "" ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::FCmp, pred, LHS, RHS, NameStr) {
    assert(pred <= FCmpInst::LAST_FCMP_PREDICATE &&
           "Invalid FCmp predicate value");
    assert(getOperand(0)->getType() == getOperand(1)->getType() &&
           "Both operands to FCmp instruction are not of the same type!");
    // Check that the operands are the right type
    assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
           "Invalid operand types for FCmp instruction");
  }

  /// @returns true if the predicate of this instruction is EQ or NE.
  /// \brief Determine if this is an equality predicate.
  static bool isEquality(Predicate Pred) {
    return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
           Pred == FCMP_UNE;
  }

  /// @returns true if the predicate of this instruction is EQ or NE.
  /// \brief Determine if this is an equality predicate.
  bool isEquality() const { return isEquality(getPredicate()); }

  /// @returns true if the predicate of this instruction is commutative.
  /// \brief Determine if this is a commutative predicate.
  bool isCommutative() const {
    return isEquality() ||
           getPredicate() == FCMP_FALSE ||
           getPredicate() == FCMP_TRUE ||
           getPredicate() == FCMP_ORD ||
           getPredicate() == FCMP_UNO;
  }

  /// @returns true if the predicate is relational (not EQ or NE).
  /// \brief Determine if this a relational predicate.
  bool isRelational() const { return !isEquality(); }

  /// Exchange the two operands to this instruction in such a way that it does
  /// not modify the semantics of the instruction. The predicate value may be
  /// changed to retain the same result if the predicate is order dependent
  /// (e.g. ult).
  /// \brief Swap operands and adjust predicate.
  void swapOperands() {
    setPredicate(getSwappedPredicate());
    Op<0>().swap(Op<1>());
  }

  /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::FCmp;
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
/// CallInst - This class represents a function call, abstracting a target
/// machine's calling convention.  This class uses low bit of the SubClassData
/// field to indicate whether or not this is a tail call.  The rest of the bits
/// hold the calling convention of the call.
///
class CallInst : public Instruction {
  AttributeSet AttributeList; ///< parameter attributes for call
  FunctionType *FTy;
  CallInst(const CallInst &CI);
  // Derive the callee's FunctionType from its pointer type, then forward.
  void init(Value *Func, ArrayRef<Value *> Args, const Twine &NameStr) {
    init(cast<FunctionType>(
             cast<PointerType>(Func->getType())->getElementType()),
         Func, Args, NameStr);
  }
  void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
            const Twine &NameStr);
  void init(Value *Func, const Twine &NameStr);

  /// Construct a CallInst given a range of arguments.
  /// \brief Construct a CallInst from a range of arguments
  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  const Twine &NameStr, Instruction *InsertBefore);
  inline CallInst(Value *Func, ArrayRef<Value *> Args, const Twine &NameStr,
                  Instruction *InsertBefore)
      : CallInst(cast<FunctionType>(
                     cast<PointerType>(Func->getType())->getElementType()),
                 Func, Args, NameStr, InsertBefore) {}

  /// Construct a CallInst given a range of arguments.
  /// \brief Construct a CallInst from a range of arguments
  inline CallInst(Value *Func, ArrayRef<Value *> Args, const Twine &NameStr,
                  BasicBlock *InsertAtEnd);

  explicit CallInst(Value *F, const Twine &NameStr,
                    Instruction *InsertBefore);
  CallInst(Value *F, const Twine &NameStr, BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;
  CallInst *cloneImpl() const;

public:
  static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
                          const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return Create(cast<FunctionType>(
                      cast<PointerType>(Func->getType())->getElementType()),
                  Func, Args, NameStr, InsertBefore);
  }
  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    // One operand per argument plus one for the callee.
    return new (unsigned(Args.size() + 1))
        CallInst(Ty, Func, Args, NameStr, InsertBefore);
  }
  static CallInst *Create(Value *Func,
                          ArrayRef<Value *> Args,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return new(unsigned(Args.size() + 1))
      CallInst(Func, Args, NameStr, InsertAtEnd);
  }
  static CallInst *Create(Value *F, const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return new(1) CallInst(F, NameStr, InsertBefore);
  }
  static CallInst *Create(Value *F, const Twine &NameStr,
                          BasicBlock *InsertAtEnd) {
    return new(1) CallInst(F, NameStr, InsertAtEnd);
  }

  /// CreateMalloc - Generate the IR for a call to malloc:
  /// 1. Compute the malloc call's argument as the specified type's size,
  ///    possibly multiplied by the array size if the array size is not
  ///    constant 1.
  /// 2. Call malloc with that argument.
  /// 3. Bitcast the result of the malloc call to the specified type.
  static Instruction *CreateMalloc(Instruction *InsertBefore,
                                   Type *IntPtrTy, Type *AllocTy,
                                   Value *AllocSize, Value *ArraySize = nullptr,
                                   Function* MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd,
                                   Type *IntPtrTy, Type *AllocTy,
                                   Value *AllocSize, Value *ArraySize = nullptr,
                                   Function* MallocF = nullptr,
                                   const Twine &Name = "");

  /// CreateFree - Generate the IR for a call to the builtin free function.
  static Instruction* CreateFree(Value* Source, Instruction *InsertBefore);
  static Instruction* CreateFree(Value* Source, BasicBlock *InsertAtEnd);

  ~CallInst() override;

  FunctionType *getFunctionType() const { return FTy; }

  void mutateFunctionType(FunctionType *FTy) {
    mutateType(FTy->getReturnType());
    this->FTy = FTy;
  }

  // Note that 'musttail' implies 'tail'.
  enum TailCallKind { TCK_None = 0, TCK_Tail = 1, TCK_MustTail = 2 };
  // Bits 0-1 of the instruction subclass data hold the TailCallKind.
  TailCallKind getTailCallKind() const {
    return TailCallKind(getSubclassDataFromInstruction() & 3);
  }
  bool isTailCall() const {
    return (getSubclassDataFromInstruction() & 3) != TCK_None;
  }
  bool isMustTailCall() const {
    return (getSubclassDataFromInstruction() & 3) == TCK_MustTail;
  }
  void setTailCall(bool isTC = true) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
                               unsigned(isTC ? TCK_Tail : TCK_None));
  }
  void setTailCallKind(TailCallKind TCK) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
                               unsigned(TCK));
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// getNumArgOperands - Return the number of call arguments.
  ///
  unsigned getNumArgOperands() const { return getNumOperands() - 1; }

  /// getArgOperand/setArgOperand - Return/set the i-th call argument.
  ///
  Value *getArgOperand(unsigned i) const { return getOperand(i); }
  void setArgOperand(unsigned i, Value *v) { setOperand(i, v); }

  /// arg_operands - iteration adapter for range-for loops.
  iterator_range<op_iterator> arg_operands() {
    // The last operand in the op list is the callee - it's not one of the args
    // so we don't want to iterate over it.
    return iterator_range<op_iterator>(op_begin(), op_end() - 1);
  }

  /// arg_operands - iteration adapter for range-for loops.
  iterator_range<const_op_iterator> arg_operands() const {
    return iterator_range<const_op_iterator>(op_begin(), op_end() - 1);
  }

  /// \brief Wrappers for getting the \c Use of a call argument.
  const Use &getArgOperandUse(unsigned i) const { return getOperandUse(i); }
  Use &getArgOperandUse(unsigned i) { return getOperandUse(i); }

  /// getCallingConv/setCallingConv - Get or set the calling convention of this
  /// function call.
  // Bits 2 and up of the subclass data hold the calling convention.
  CallingConv::ID getCallingConv() const {
    return static_cast<CallingConv::ID>(getSubclassDataFromInstruction() >> 2);
  }
  void setCallingConv(CallingConv::ID CC) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & 3) |
                               (static_cast<unsigned>(CC) << 2));
  }

  /// getAttributes - Return the parameter attributes for this call.
  ///
  const AttributeSet &getAttributes() const { return AttributeList; }

  /// setAttributes - Set the parameter attributes for this call.
  ///
  void setAttributes(const AttributeSet &Attrs) { AttributeList = Attrs; }

  /// addAttribute - adds the attribute to the list of attributes.
  void addAttribute(unsigned i, Attribute::AttrKind attr);

  /// addAttribute - adds the attribute to the list of attributes.
  void addAttribute(unsigned i, StringRef Kind, StringRef Value);

  /// removeAttribute - removes the attribute from the list of attributes.
  void removeAttribute(unsigned i, Attribute attr);

  /// \brief adds the dereferenceable attribute to the list of attributes.
  void addDereferenceableAttr(unsigned i, uint64_t Bytes);

  /// \brief adds the dereferenceable_or_null attribute to the list of
  /// attributes.
  void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes);

  /// \brief Determine whether this call has the given attribute.
  bool hasFnAttr(Attribute::AttrKind A) const {
    assert(A != Attribute::NoBuiltin &&
           "Use CallInst::isNoBuiltin() to check for Attribute::NoBuiltin");
    return hasFnAttrImpl(A);
  }

  /// \brief Determine whether this call has the given attribute.
  bool hasFnAttr(StringRef A) const {
    return hasFnAttrImpl(A);
  }

  /// \brief Determine whether the call or the callee has the given attributes.
  bool paramHasAttr(unsigned i, Attribute::AttrKind A) const;

  /// \brief Extract the alignment for a call or parameter (0=unknown).
  unsigned getParamAlignment(unsigned i) const {
    return AttributeList.getParamAlignment(i);
  }

  /// \brief Extract the number of dereferenceable bytes for a call or
  /// parameter (0=unknown).
  uint64_t getDereferenceableBytes(unsigned i) const {
    return AttributeList.getDereferenceableBytes(i);
  }

  /// \brief Extract the number of dereferenceable_or_null bytes for a call or
  /// parameter (0=unknown).
  uint64_t getDereferenceableOrNullBytes(unsigned i) const {
    return AttributeList.getDereferenceableOrNullBytes(i);
  }

  /// \brief Return true if the call should not be treated as a call to a
  /// builtin.
  bool isNoBuiltin() const {
    return hasFnAttrImpl(Attribute::NoBuiltin) &&
      !hasFnAttrImpl(Attribute::Builtin);
  }

  /// \brief Return true if the call should not be inlined.
  bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
  void setIsNoInline() {
    addAttribute(AttributeSet::FunctionIndex, Attribute::NoInline);
  }

  /// \brief Return true if the call can return twice
  bool canReturnTwice() const {
    return hasFnAttr(Attribute::ReturnsTwice);
  }
  void setCanReturnTwice() {
    addAttribute(AttributeSet::FunctionIndex, Attribute::ReturnsTwice);
  }

  /// \brief Determine if the call does not access memory.
  bool doesNotAccessMemory() const {
    return hasFnAttr(Attribute::ReadNone);
  }
  void setDoesNotAccessMemory() {
    addAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone);
  }

  /// \brief Determine if the call does not access or only reads memory.
  bool onlyReadsMemory() const {
    return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
  }
  void setOnlyReadsMemory() {
    addAttribute(AttributeSet::FunctionIndex, Attribute::ReadOnly);
  }

  /// @brief Determine if the call can access memory only using pointers based
  /// on its arguments.
  bool onlyAccessesArgMemory() const {
    return hasFnAttr(Attribute::ArgMemOnly);
  }
  void setOnlyAccessesArgMemory() {
    addAttribute(AttributeSet::FunctionIndex, Attribute::ArgMemOnly);
  }

  /// \brief Determine if the call cannot return.
  bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
  void setDoesNotReturn() {
    addAttribute(AttributeSet::FunctionIndex, Attribute::NoReturn);
  }

  /// \brief Determine if the call cannot unwind.
  bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
  void setDoesNotThrow() {
    addAttribute(AttributeSet::FunctionIndex, Attribute::NoUnwind);
  }

  /// \brief Determine if the call cannot be duplicated.
  bool cannotDuplicate() const {return hasFnAttr(Attribute::NoDuplicate); }
  void setCannotDuplicate() {
    addAttribute(AttributeSet::FunctionIndex, Attribute::NoDuplicate);
  }

  /// \brief Determine if the call returns a structure through first
  /// pointer argument.
  bool hasStructRetAttr() const {
    // Be friendly and also check the callee.
    return paramHasAttr(1, Attribute::StructRet);
  }

  /// \brief Determine if any call argument is an aggregate passed by value.
  bool hasByValArgument() const {
    return AttributeList.hasAttrSomewhere(Attribute::ByVal);
  }

  /// getCalledFunction - Return the function called, or null if this is an
  /// indirect function invocation.
  ///
  Function *getCalledFunction() const {
    // The callee is the last operand (Op<-1>).
    return dyn_cast<Function>(Op<-1>());
  }

  /// getCalledValue - Get a pointer to the function that is invoked by this
  /// instruction.
  const Value *getCalledValue() const { return Op<-1>(); }
        Value *getCalledValue()       { return Op<-1>(); }

  /// setCalledFunction - Set the function called.
  void setCalledFunction(Value* Fn) {
    setCalledFunction(
        cast<FunctionType>(cast<PointerType>(Fn->getType())->getElementType()),
        Fn);
  }
  void setCalledFunction(FunctionType *FTy, Value *Fn) {
    this->FTy = FTy;
    assert(FTy == cast<FunctionType>(
                      cast<PointerType>(Fn->getType())->getElementType()));
    Op<-1>() = Fn;
  }

  /// isInlineAsm - Check if this call is an inline asm statement.
  bool isInlineAsm() const {
    return isa<InlineAsm>(Op<-1>());
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Call;
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Check the function-level attribute on the call itself, then fall back to
  // the callee's attributes when the callee is a known Function.
  template<typename AttrKind>
  bool hasFnAttrImpl(AttrKind A) const {
    if (AttributeList.hasAttribute(AttributeSet::FunctionIndex, A))
      return true;
    if (const Function *F = getCalledFunction())
      return F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, A);
    return false;
  }

  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
void setInstructionSubclassData(unsigned short D) { Instruction::setInstructionSubclassData(D); } }; template <> struct OperandTraits<CallInst> : public VariadicOperandTraits<CallInst, 1> { }; CallInst::CallInst(Value *Func, ArrayRef<Value *> Args, const Twine &NameStr, BasicBlock *InsertAtEnd) : Instruction(cast<FunctionType>(cast<PointerType>(Func->getType()) ->getElementType())->getReturnType(), Instruction::Call, OperandTraits<CallInst>::op_end(this) - (Args.size() + 1), unsigned(Args.size() + 1), InsertAtEnd) { init(Func, Args, NameStr); } CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, const Twine &NameStr, Instruction *InsertBefore) : Instruction(Ty->getReturnType(), Instruction::Call, OperandTraits<CallInst>::op_end(this) - (Args.size() + 1), unsigned(Args.size() + 1), InsertBefore) { init(Ty, Func, Args, NameStr); } // Note: if you get compile errors about private methods then // please update your code to use the high-level operand // interfaces. See line 943 above. DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CallInst, Value) //===----------------------------------------------------------------------===// // SelectInst Class //===----------------------------------------------------------------------===// /// SelectInst - This class represents the LLVM 'select' instruction. 
///
class SelectInst : public Instruction {
  // Validate and store the three operands: condition, true value, false value.
  void init(Value *C, Value *S1, Value *S2) {
    assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
    Op<0>() = C;
    Op<1>() = S1;
    Op<2>() = S2;
  }

  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
             Instruction *InsertBefore)
    : Instruction(S1->getType(), Instruction::Select,
                  &Op<0>(), 3, InsertBefore) {
    init(C, S1, S2);
    setName(NameStr);
  }
  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
             BasicBlock *InsertAtEnd)
    : Instruction(S1->getType(), Instruction::Select,
                  &Op<0>(), 3, InsertAtEnd) {
    init(C, S1, S2);
    setName(NameStr);
  }
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;
  SelectInst *cloneImpl() const;

public:
  static SelectInst *Create(Value *C, Value *S1, Value *S2,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr) {
    return new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
  }
  static SelectInst *Create(Value *C, Value *S1, Value *S2,
                            const Twine &NameStr,
                            BasicBlock *InsertAtEnd) {
    return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
  }

  const Value *getCondition() const { return Op<0>(); }
  const Value *getTrueValue() const { return Op<1>(); }
  const Value *getFalseValue() const { return Op<2>(); }
  Value *getCondition() { return Op<0>(); }
  Value *getTrueValue() { return Op<1>(); }
  Value *getFalseValue() { return Op<2>(); }

  /// areInvalidOperands - Return a string if the specified operands are invalid
  /// for a select operation, otherwise return null.
  static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  OtherOps getOpcode() const {
    return static_cast<OtherOps>(Instruction::getOpcode());
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Select;
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)

//===----------------------------------------------------------------------===//
//                                VAArgInst Class
//===----------------------------------------------------------------------===//

/// VAArgInst - This class represents the va_arg llvm instruction, which returns
/// an argument of the specified type given a va_list and increments that list
///
class VAArgInst : public UnaryInstruction {
protected:
friend class Instruction; VAArgInst *cloneImpl() const; public: VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "", Instruction *InsertBefore = nullptr) : UnaryInstruction(Ty, VAArg, List, InsertBefore) { setName(NameStr); } VAArgInst(Value *List, Type *Ty, const Twine &NameStr, BasicBlock *InsertAtEnd) : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) { setName(NameStr); } Value *getPointerOperand() { return getOperand(0); } const Value *getPointerOperand() const { return getOperand(0); } static unsigned getPointerOperandIndex() { return 0U; } // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == VAArg; } static inline bool classof(const Value *V) { return isa<Instruction>(V) && classof(cast<Instruction>(V)); } }; //===----------------------------------------------------------------------===// // ExtractElementInst Class //===----------------------------------------------------------------------===// /// ExtractElementInst - This instruction extracts a single (scalar) /// element from a VectorType value /// class ExtractElementInst : public Instruction { ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "", Instruction *InsertBefore = nullptr); ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr, BasicBlock *InsertAtEnd); protected: // Note: Instruction needs to be a friend here to call cloneImpl. 
friend class Instruction; // Instruction must be a friend to call cloneImpl.
  ExtractElementInst *cloneImpl() const;

public:
  static ExtractElementInst *Create(Value *Vec, Value *Idx,
                                    const Twine &NameStr = "",
                                    Instruction *InsertBefore = nullptr) {
    return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
  }
  static ExtractElementInst *Create(Value *Vec, Value *Idx,
                                    const Twine &NameStr,
                                    BasicBlock *InsertAtEnd) {
    return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
  }

  /// isValidOperands - Return true if an extractelement instruction can be
  /// formed with the specified operands.
  static bool isValidOperands(const Value *Vec, const Value *Idx);

  // Operand 0 is the vector, operand 1 is the element index.
  Value *getVectorOperand() { return Op<0>(); }
  Value *getIndexOperand() { return Op<1>(); }
  const Value *getVectorOperand() const { return Op<0>(); }
  const Value *getIndexOperand() const { return Op<1>(); }

  VectorType *getVectorOperandType() const {
    return cast<VectorType>(getVectorOperand()->getType());
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ExtractElement;
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<ExtractElementInst> :
  public FixedNumOperandTraits<ExtractElementInst, 2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)

//===----------------------------------------------------------------------===//
//                           InsertElementInst Class
//===----------------------------------------------------------------------===//

/// InsertElementInst - This instruction inserts a single (scalar)
/// element into a VectorType value
///
class InsertElementInst : public Instruction {
  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
                    const Twine &NameStr = "",
                    Instruction *InsertBefore = nullptr);
  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
                    const Twine &NameStr, BasicBlock *InsertAtEnd);
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;
  InsertElementInst *cloneImpl() const;

public:
  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
                                   const Twine &NameStr = "",
                                   Instruction *InsertBefore = nullptr) {
    return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
  }
  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd) {
    return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
  }

  /// isValidOperands - Return true if an insertelement instruction can be
  /// formed with the specified operands.
  static bool isValidOperands(const Value *Vec, const Value *NewElt,
                              const Value *Idx);

  /// getType - Overload to return most specific vector type.
  ///
  VectorType *getType() const {
    return cast<VectorType>(Instruction::getType());
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::InsertElement;
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<InsertElementInst> :
  public FixedNumOperandTraits<InsertElementInst, 3> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)

//===----------------------------------------------------------------------===//
//                           ShuffleVectorInst Class
//===----------------------------------------------------------------------===//

/// ShuffleVectorInst - This instruction constructs a fixed permutation of two
/// input vectors.
///
class ShuffleVectorInst : public Instruction {
protected:
friend class Instruction; // Instruction must be a friend to call cloneImpl.
  ShuffleVectorInst *cloneImpl() const;

public:
  // allocate space for exactly three operands
  void *operator new(size_t s) {
    return User::operator new(s, 3);
  }
  // NOTE(review): 'InsertBefor' is a long-standing upstream parameter-name
  // typo; kept byte-identical here.
  ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                    const Twine &NameStr = "",
                    Instruction *InsertBefor = nullptr);
  ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                    const Twine &NameStr, BasicBlock *InsertAtEnd);

  /// isValidOperands - Return true if a shufflevector instruction can be
  /// formed with the specified operands.
  static bool isValidOperands(const Value *V1, const Value *V2,
                              const Value *Mask);

  /// getType - Overload to return most specific vector type.
  ///
  VectorType *getType() const {
    return cast<VectorType>(Instruction::getType());
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // The shuffle mask is the third operand and is always a Constant.
  Constant *getMask() const {
    return cast<Constant>(getOperand(2));
  }

  /// getMaskValue - Return the index from the shuffle mask for the specified
  /// output result.  This is either -1 if the element is undef or a number less
  /// than 2*numelements.
  static int getMaskValue(Constant *Mask, unsigned i);

  int getMaskValue(unsigned i) const {
    return getMaskValue(getMask(), i);
  }

  /// getShuffleMask - Return the full mask for this instruction, where each
  /// element is the element number and undef's are returned as -1.
  static void getShuffleMask(Constant *Mask, SmallVectorImpl<int> &Result);

  void getShuffleMask(SmallVectorImpl<int> &Result) const {
    return getShuffleMask(getMask(), Result);
  }

  SmallVector<int, 16> getShuffleMask() const {
    SmallVector<int, 16> Mask;
    getShuffleMask(Mask);
    return Mask;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ShuffleVector;
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<ShuffleVectorInst> :
  public FixedNumOperandTraits<ShuffleVectorInst, 3> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)

//===----------------------------------------------------------------------===//
//                            ExtractValueInst Class
//===----------------------------------------------------------------------===//

/// ExtractValueInst - This instruction extracts a struct member or array
/// element value from an aggregate value.
///
class ExtractValueInst : public UnaryInstruction {
  // The constant indices are stored out-of-line; the single operand is the
  // aggregate being indexed.
  SmallVector<unsigned, 4> Indices;

  ExtractValueInst(const ExtractValueInst &EVI);
  void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);

  /// Constructors - Create a extractvalue instruction with a base aggregate
  /// value and a list of indices.  The first ctor can optionally insert before
  /// an existing instruction, the second appends the new instruction to the
  /// specified BasicBlock.
  inline ExtractValueInst(Value *Agg,
                          ArrayRef<unsigned> Idxs,
                          const Twine &NameStr,
                          Instruction *InsertBefore);
  inline ExtractValueInst(Value *Agg,
                          ArrayRef<unsigned> Idxs,
                          const Twine &NameStr, BasicBlock *InsertAtEnd);

  // allocate space for exactly one operand
  void *operator new(size_t s) {
    return User::operator new(s, 1);
  }
protected:
friend class Instruction; // Instruction must be a friend to call cloneImpl.
  ExtractValueInst *cloneImpl() const;

public:
  static ExtractValueInst *Create(Value *Agg,
                                  ArrayRef<unsigned> Idxs,
                                  const Twine &NameStr = "",
                                  Instruction *InsertBefore = nullptr) {
    return new ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
  }
  static ExtractValueInst *Create(Value *Agg,
                                  ArrayRef<unsigned> Idxs,
                                  const Twine &NameStr,
                                  BasicBlock *InsertAtEnd) {
    return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
  }

  /// getIndexedType - Returns the type of the element that would be extracted
  /// with an extractvalue instruction with the specified parameters.
  ///
  /// Null is returned if the indices are invalid for the specified type.
  static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);

  typedef const unsigned* idx_iterator;
  inline idx_iterator idx_begin() const { return Indices.begin(); }
  inline idx_iterator idx_end()   const { return Indices.end(); }
  inline iterator_range<idx_iterator> indices() const {
    return iterator_range<idx_iterator>(idx_begin(), idx_end());
  }

  Value *getAggregateOperand() {
    return getOperand(0);
  }
  const Value *getAggregateOperand() const {
    return getOperand(0);
  }
  static unsigned getAggregateOperandIndex() {
    return 0U;                     // get index for modifying correct operand
  }

  ArrayRef<unsigned> getIndices() const {
    return Indices;
  }

  unsigned getNumIndices() const {
    return (unsigned)Indices.size();
  }

  // An extractvalue always has at least one index.
  bool hasIndices() const {
    return true;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ExtractValue;
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

// Inline constructors.  The result type is derived from the aggregate type and
// the index list; checkGEPType asserts the indices were valid.
ExtractValueInst::ExtractValueInst(Value *Agg,
                                   ArrayRef<unsigned> Idxs,
                                   const Twine &NameStr,
                                   Instruction *InsertBefore)
  : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
                     ExtractValue, Agg, InsertBefore) {
  init(Idxs, NameStr);
}
ExtractValueInst::ExtractValueInst(Value *Agg,
                                   ArrayRef<unsigned> Idxs,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd)
  : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
                     ExtractValue, Agg, InsertAtEnd) {
  init(Idxs, NameStr);
}

//===----------------------------------------------------------------------===//
//                            InsertValueInst Class
//===----------------------------------------------------------------------===//

/// InsertValueInst - This instruction inserts a struct field or array element
/// value into an aggregate value.
///
class InsertValueInst : public Instruction {
  // The constant indices are stored out-of-line; the two operands are the
  // aggregate and the value being inserted.
  SmallVector<unsigned, 4> Indices;

  void *operator new(size_t, unsigned) = delete;
  InsertValueInst(const InsertValueInst &IVI);
  void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
            const Twine &NameStr);

  /// Constructors - Create a insertvalue instruction with a base aggregate
  /// value, a value to insert, and a list of indices.  The first ctor can
  /// optionally insert before an existing instruction, the second appends
  /// the new instruction to the specified BasicBlock.
  inline InsertValueInst(Value *Agg, Value *Val,
                         ArrayRef<unsigned> Idxs,
                         const Twine &NameStr,
                         Instruction *InsertBefore);
  inline InsertValueInst(Value *Agg, Value *Val,
                         ArrayRef<unsigned> Idxs,
                         const Twine &NameStr, BasicBlock *InsertAtEnd);

  /// Constructors - These two constructors are convenience methods because one
  /// and two index insertvalue instructions are so common.
  InsertValueInst(Value *Agg, Value *Val,
                  unsigned Idx, const Twine &NameStr = "",
                  Instruction *InsertBefore = nullptr);
  InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
                  const Twine &NameStr, BasicBlock *InsertAtEnd);
protected:
friend class Instruction; // Instruction must be a friend to call cloneImpl.
  InsertValueInst *cloneImpl() const;

public:
  // allocate space for exactly two operands
  void *operator new(size_t s) {
    return User::operator new(s, 2);
  }

  static InsertValueInst *Create(Value *Agg, Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr = "",
                                 Instruction *InsertBefore = nullptr) {
    return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
  }
  static InsertValueInst *Create(Value *Agg, Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr,
                                 BasicBlock *InsertAtEnd) {
    return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  typedef const unsigned* idx_iterator;
  inline idx_iterator idx_begin() const { return Indices.begin(); }
  inline idx_iterator idx_end()   const { return Indices.end(); }
  inline iterator_range<idx_iterator> indices() const {
    return iterator_range<idx_iterator>(idx_begin(), idx_end());
  }

  Value *getAggregateOperand() {
    return getOperand(0);
  }
  const Value *getAggregateOperand() const {
    return getOperand(0);
  }
  static unsigned getAggregateOperandIndex() {
    return 0U;                      // get index for modifying correct operand
  }

  Value *getInsertedValueOperand() {
    return getOperand(1);
  }
  const Value *getInsertedValueOperand() const {
    return getOperand(1);
  }
  static unsigned getInsertedValueOperandIndex() {
    return 1U;                      // get index for modifying correct operand
  }

  ArrayRef<unsigned> getIndices() const {
    return Indices;
  }

  unsigned getNumIndices() const {
    return (unsigned)Indices.size();
  }

  // An insertvalue always has at least one index.
  bool hasIndices() const {
    return true;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::InsertValue;
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<InsertValueInst> :
  public FixedNumOperandTraits<InsertValueInst, 2> {
};

// Inline constructors.  The result type is the aggregate's own type.
InsertValueInst::InsertValueInst(Value *Agg,
                                 Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
  : Instruction(Agg->getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this),
                2, InsertBefore) {
  init(Agg, Val, Idxs, NameStr);
}
InsertValueInst::InsertValueInst(Value *Agg,
                                 Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr,
                                 BasicBlock *InsertAtEnd)
  : Instruction(Agg->getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this),
                2, InsertAtEnd) {
  init(Agg, Val, Idxs, NameStr);
}

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)

//===----------------------------------------------------------------------===//
//                               PHINode Class
//===----------------------------------------------------------------------===//

// PHINode - The PHINode class is used to represent the magical mystical PHI
// node, that can not exist in nature, but can be synthesized in a computer
// scientist's overactive imagination.
//
class PHINode : public Instruction {
  void *operator new(size_t, unsigned) = delete;
  /// ReservedSpace - The number of operands actually allocated.  NumOperands is
  /// the number actually in use.
unsigned ReservedSpace;
  PHINode(const PHINode &PN);
  // allocate space for exactly zero operands; operands are hung off.
  void *operator new(size_t s) {
    return User::operator new(s);
  }
  explicit PHINode(Type *Ty, unsigned NumReservedValues,
                   const Twine &NameStr = "",
                   Instruction *InsertBefore = nullptr)
    : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
      ReservedSpace(NumReservedValues) {
    setName(NameStr);
    allocHungoffUses(ReservedSpace);
  }

  PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
          BasicBlock *InsertAtEnd)
    : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
      ReservedSpace(NumReservedValues) {
    setName(NameStr);
    allocHungoffUses(ReservedSpace);
  }
protected:
  // allocHungoffUses - this is more complicated than the generic
  // User::allocHungoffUses, because we have to allocate Uses for the incoming
  // values and pointers to the incoming blocks, all in one allocation.
  void allocHungoffUses(unsigned N) {
    User::allocHungoffUses(N, /* IsPhi */ true);
  }

  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;
  PHINode *cloneImpl() const;

public:
  /// Constructors - NumReservedValues is a hint for the number of incoming
  /// edges that this phi node will have (use 0 if you really have no idea).
  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
                         const Twine &NameStr = "",
                         Instruction *InsertBefore = nullptr) {
    return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
  }
  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
                         const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Block iterator interface. This provides access to the list of incoming
  // basic blocks, which parallels the list of incoming values.
  // The block pointers are stored in the same hung-off allocation, after the
  // reserved Use slots.

  typedef BasicBlock **block_iterator;
  typedef BasicBlock * const *const_block_iterator;

  block_iterator block_begin() {
    Use::UserRef *ref =
      reinterpret_cast<Use::UserRef*>(op_begin() + ReservedSpace);
    return reinterpret_cast<block_iterator>(ref + 1);
  }

  const_block_iterator block_begin() const {
    const Use::UserRef *ref =
      reinterpret_cast<const Use::UserRef*>(op_begin() + ReservedSpace);
    return reinterpret_cast<const_block_iterator>(ref + 1);
  }

  block_iterator block_end() {
    return block_begin() + getNumOperands();
  }

  const_block_iterator block_end() const {
    return block_begin() + getNumOperands();
  }

  op_range incoming_values() { return operands(); }

  const_op_range incoming_values() const { return operands(); }

  /// getNumIncomingValues - Return the number of incoming edges
  ///
  unsigned getNumIncomingValues() const { return getNumOperands(); }

  /// getIncomingValue - Return incoming value number x
  ///
  Value *getIncomingValue(unsigned i) const {
    return getOperand(i);
  }
  void setIncomingValue(unsigned i, Value *V) {
    setOperand(i, V);
  }
  static unsigned getOperandNumForIncomingValue(unsigned i) {
    return i;
  }
  static unsigned getIncomingValueNumForOperand(unsigned i) {
    return i;
  }

  /// getIncomingBlock - Return incoming basic block number @p i.
  ///
  BasicBlock *getIncomingBlock(unsigned i) const {
    return block_begin()[i];
  }

  /// getIncomingBlock - Return incoming basic block corresponding
  /// to an operand of the PHI.
  ///
  BasicBlock *getIncomingBlock(const Use &U) const {
    assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
    return getIncomingBlock(unsigned(&U - op_begin()));
  }

  /// getIncomingBlock - Return incoming basic block corresponding
  /// to value use iterator.
  ///
  BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
    return getIncomingBlock(I.getUse());
  }

  void setIncomingBlock(unsigned i, BasicBlock *BB) {
    block_begin()[i] = BB;
  }

  /// addIncoming - Add an incoming value to the end of the PHI list
  ///
  void addIncoming(Value *V, BasicBlock *BB) {
    assert(V && "PHI node got a null value!");
    assert(BB && "PHI node got a null basic block!");
    assert(getType() == V->getType() &&
           "All operands to PHI node must be the same type as the PHI node!");
    if (getNumOperands() == ReservedSpace)
      growOperands();  // Get more space!
    // Initialize some new operands.
    setNumHungOffUseOperands(getNumOperands() + 1);
    setIncomingValue(getNumOperands() - 1, V);
    setIncomingBlock(getNumOperands() - 1, BB);
  }

  /// removeIncomingValue - Remove an incoming value.  This is useful if a
  /// predecessor basic block is deleted.  The value removed is returned.
  ///
  /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
  /// is true), the PHI node is destroyed and any uses of it are replaced with
  /// dummy values.  The only time there should be zero incoming values to a PHI
  /// node is when the block is dead, so this strategy is sound.
  ///
  Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);

  Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
    int Idx = getBasicBlockIndex(BB);
    assert(Idx >= 0 && "Invalid basic block argument to remove!");
    return removeIncomingValue(Idx, DeletePHIIfEmpty);
  }

  /// getBasicBlockIndex - Return the first index of the specified basic
  /// block in the value list for this PHI.  Returns -1 if no instance.
  ///
  int getBasicBlockIndex(const BasicBlock *BB) const {
    // Linear scan over the parallel block array.
    for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
      if (block_begin()[i] == BB)
        return i;
    return -1;
  }

  Value *getIncomingValueForBlock(const BasicBlock *BB) const {
    int Idx = getBasicBlockIndex(BB);
    assert(Idx >= 0 && "Invalid basic block argument!");
    return getIncomingValue(Idx);
  }

  /// hasConstantValue - If the specified PHI node always merges together the
  /// same value, return the value, otherwise return null.
  Value *hasConstantValue() const;

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::PHI;
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
private:
  void growOperands();
};

template <>
struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)

//===----------------------------------------------------------------------===//
//                           LandingPadInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// LandingPadInst - The landingpad instruction holds all of the information
/// necessary to generate correct exception handling. The landingpad instruction
/// cannot be moved from the top of a landing pad block, which itself is
/// accessible only from the 'unwind' edge of an invoke. This uses the
/// SubclassData field in Value to store whether or not the landingpad is a
/// cleanup.
///
class LandingPadInst : public Instruction {
  /// ReservedSpace - The number of operands actually allocated.  NumOperands is
  /// the number actually in use.
  unsigned ReservedSpace;
  LandingPadInst(const LandingPadInst &LP);

public:
  enum ClauseType { Catch, Filter };

private:
  void *operator new(size_t, unsigned) = delete;
  // Allocate space for exactly zero operands.
void *operator new(size_t s) {
    return User::operator new(s);
  }
  void growOperands(unsigned Size);
  void init(unsigned NumReservedValues, const Twine &NameStr);

  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                          const Twine &NameStr, Instruction *InsertBefore);
  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                          const Twine &NameStr, BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;
  LandingPadInst *cloneImpl() const;

public:
  /// Constructors - NumReservedClauses is a hint for the number of incoming
  /// clauses that this landingpad will have (use 0 if you really have no idea).
  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
                                const Twine &NameStr = "",
                                Instruction *InsertBefore = nullptr);
  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
                                const Twine &NameStr, BasicBlock *InsertAtEnd);

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// isCleanup - Return 'true' if this landingpad instruction is a
  /// cleanup. I.e., it should be run when unwinding even if its landing pad
  /// doesn't catch the exception.  Stored in bit 0 of the subclass data.
  bool isCleanup() const { return getSubclassDataFromInstruction() & 1; }

  /// setCleanup - Indicate that this landingpad instruction is a cleanup.
  void setCleanup(bool V) {
    setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
                               (V ? 1 : 0));
  }

  /// Add a catch or filter clause to the landing pad.
  void addClause(Constant *ClauseVal);

  /// Get the value of the clause at index Idx. Use isCatch/isFilter to
  /// determine what type of clause this is.
  Constant *getClause(unsigned Idx) const {
    return cast<Constant>(getOperandList()[Idx]);
  }

  /// isCatch - Return 'true' if the clause and index Idx is a catch clause.
  // Filter clauses are arrays; anything else is a catch clause.
  bool isCatch(unsigned Idx) const {
    return !isa<ArrayType>(getOperandList()[Idx]->getType());
  }

  /// isFilter - Return 'true' if the clause and index Idx is a filter clause.
  bool isFilter(unsigned Idx) const {
    return isa<ArrayType>(getOperandList()[Idx]->getType());
  }

  /// getNumClauses - Get the number of clauses for this landing pad.
  unsigned getNumClauses() const { return getNumOperands(); }

  /// reserveClauses - Grow the size of the operand list to accommodate the new
  /// number of clauses.
  void reserveClauses(unsigned Size) { growOperands(Size); }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::LandingPad;
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)

//===----------------------------------------------------------------------===//
//                               ReturnInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// ReturnInst - Return a value (possibly void), from a function.  Execution
/// does not continue in this function any longer.
///
class ReturnInst : public TerminatorInst {
  ReturnInst(const ReturnInst &RI);

private:
  // ReturnInst constructors:
  // ReturnInst()                  - 'ret void' instruction
  // ReturnInst(    null)          - 'ret void' instruction
  // ReturnInst(Value* X)          - 'ret X' instruction
  // ReturnInst(    null, Inst *I) - 'ret void' instruction, insert before I
  // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
  // ReturnInst(    null, BB *B)   - 'ret void' instruction, insert @ end of B
  // ReturnInst(Value* X, BB *B)   - 'ret X' instruction, insert @ end of B
  //
  // NOTE: If the Value* passed is of type void then the constructor behaves as
  // if it was passed NULL.
  explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
                      Instruction *InsertBefore = nullptr);
  ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
  explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;
  ReturnInst *cloneImpl() const;

public:
  // !!retVal yields 1 operand for 'ret X' and 0 operands for 'ret void'.
  static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
                            Instruction *InsertBefore = nullptr) {
    return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
  }
  static ReturnInst* Create(LLVMContext &C, Value *retVal,
                            BasicBlock *InsertAtEnd) {
    return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
  }
  static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
    return new(0) ReturnInst(C, InsertAtEnd);
  }
  ~ReturnInst() override;

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Convenience accessor. Returns null if there is no return value.
  Value *getReturnValue() const {
    return getNumOperands() != 0 ? getOperand(0) : nullptr;
  }

  // A return has no successors; control leaves the function.
  unsigned getNumSuccessors() const { return 0; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Ret);
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  BasicBlock *getSuccessorV(unsigned idx) const override;
  unsigned getNumSuccessorsV() const override;
  void setSuccessorV(unsigned idx, BasicBlock *B) override;
};

template <>
struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)

//===----------------------------------------------------------------------===//
//                               BranchInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// BranchInst - Conditional or Unconditional Branch instruction.
///
class BranchInst : public TerminatorInst {
  /// Ops list - Branches are strange.  The operands are ordered:
  ///  [Cond, FalseDest,] TrueDest.  This makes some accessors faster because
  ///  they don't have to check for cond/uncond branchness. These are mostly
  ///  accessed relative from op_end().
  BranchInst(const BranchInst &BI);
  void AssertOK();
  // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
  // BranchInst(BB *B)                           - 'br B'
  // BranchInst(BB* T, BB *F, Value *C)          - 'br C, T, F'
  // BranchInst(BB* B, Inst *I)                  - 'br B'        insert before I
  // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
  // BranchInst(BB* B, BB *I)                    - 'br B'        insert at end
  // BranchInst(BB* T, BB *F, Value *C, BB *I)   - 'br C, T, F', insert at end
  explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
             Instruction *InsertBefore = nullptr);
  BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
             BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;
  BranchInst *cloneImpl() const;

public:
  // new(1): one operand for an unconditional branch (the target block).
  static BranchInst *Create(BasicBlock *IfTrue,
                            Instruction *InsertBefore = nullptr) {
    return new(1) BranchInst(IfTrue, InsertBefore);
  }
  // new(3): three operands for a conditional branch (Cond, FalseDest, TrueDest).
  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
                            Value *Cond, Instruction *InsertBefore = nullptr) {
    return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
  }
  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
    return new(1) BranchInst(IfTrue, InsertAtEnd);
  }
  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
                            Value *Cond, BasicBlock *InsertAtEnd) {
    return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // The operand count distinguishes the two branch forms (1 vs 3 operands).
  bool isUnconditional() const { return getNumOperands() == 1; }
  bool isConditional()   const { return getNumOperands() == 3; }

  Value *getCondition() const {
    assert(isConditional() && "Cannot get condition of an uncond branch!");
    return Op<-3>();
  }

  void setCondition(Value *V) {
    assert(isConditional() && "Cannot set condition of unconditional branch!");
    Op<-3>() = V;
  }

  unsigned getNumSuccessors() const { return 1+isConditional(); }

  // Successors are stored at the end of the operand list; successor 0 is the
  // last operand, successor 1 (if any) the one before it.
  BasicBlock *getSuccessor(unsigned i) const {
    assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
    return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
  }

  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
    *(&Op<-1>() - idx) = (Value*)NewSucc;
  }

  /// \brief Swap the successors of this branch instruction.
  ///
  /// Swaps the successors of the branch instruction. This also swaps any
  /// branch weight metadata associated with the instruction so that it
  /// continues to map correctly to each operand.
  void swapSuccessors();

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Br);
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  BasicBlock *getSuccessorV(unsigned idx) const override;
  unsigned getNumSuccessorsV() const override;
  void setSuccessorV(unsigned idx, BasicBlock *B) override;
};

template <>
struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)

//===----------------------------------------------------------------------===//
//                               SwitchInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// SwitchInst - Multiway switch
///
class SwitchInst : public TerminatorInst {
  void *operator new(size_t, unsigned) = delete;
  unsigned ReservedSpace;
  // Operand[0]    = Value to switch on
  // Operand[1]    = Default basic block destination
  // Operand[2n  ] = Value to match
  // Operand[2n+1] = BasicBlock to go to on match
  SwitchInst(const SwitchInst &SI);
  void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
  void growOperands();
  // allocate space for exactly zero operands
  void *operator new(size_t s) { return User::operator new(s); }
  /// SwitchInst ctor - Create a new switch instruction, specifying a value to
  /// switch on and a default destination.  The number of additional cases can
  /// be specified here to make memory allocation more efficient.  This
  /// constructor can also autoinsert before another instruction.
  SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
             Instruction *InsertBefore);

  /// SwitchInst ctor - Create a new switch instruction, specifying a value to
  /// switch on and a default destination.
  /// The number of additional cases can
  /// be specified here to make memory allocation more efficient.  This
  /// constructor also autoinserts at the end of the specified BasicBlock.
  SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
             BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;
  SwitchInst *cloneImpl() const;

public:
  // Sentinel case index representing the default destination; equals ~0U - 1
  // (i.e. -2 as unsigned), so it can never collide with a real case index.
  static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);

  /// Iterator over the (value, successor) case pairs of a SwitchInst.
  /// Instantiated as CaseIt (mutable) and ConstCaseIt (read-only) below.
  template <class SwitchInstTy, class ConstantIntTy, class BasicBlockTy>
  class CaseIteratorT {
  protected:
    SwitchInstTy *SI;   // The switch being iterated.
    unsigned Index;     // Current case number, or DefaultPseudoIndex.

  public:
    typedef CaseIteratorT<SwitchInstTy, ConstantIntTy, BasicBlockTy> Self;

    /// Initializes case iterator for given SwitchInst and for given
    /// case number.
    CaseIteratorT(SwitchInstTy *SI, unsigned CaseNum) {
      this->SI = SI;
      Index = CaseNum;
    }

    /// Initializes case iterator for given SwitchInst and for given
    /// TerminatorInst's successor index.
    // Successor 0 is the default destination; successor i+1 is case i.
    static Self fromSuccessorIndex(SwitchInstTy *SI, unsigned SuccessorIndex) {
      assert(SuccessorIndex < SI->getNumSuccessors() &&
             "Successor index # out of range!");
      return SuccessorIndex != 0 ?
             Self(SI, SuccessorIndex - 1) :
             Self(SI, DefaultPseudoIndex);
    }

    /// Resolves case value for current case.
    ConstantIntTy *getCaseValue() {
      assert(Index < SI->getNumCases() && "Index out the number of cases.");
      return reinterpret_cast<ConstantIntTy*>(SI->getOperand(2 + Index*2));
    }

    /// Resolves successor for current case.
    BasicBlockTy *getCaseSuccessor() {
      assert((Index < SI->getNumCases() ||
              Index == DefaultPseudoIndex) &&
             "Index out the number of cases.");
      return SI->getSuccessor(getSuccessorIndex());
    }

    /// Returns number of current case.
    unsigned getCaseIndex() const { return Index; }

    /// Returns TerminatorInst's successor index for current case successor.
    unsigned getSuccessorIndex() const {
      assert((Index == DefaultPseudoIndex || Index < SI->getNumCases()) &&
             "Index out the number of cases.");
      return Index != DefaultPseudoIndex ? Index + 1 : 0;
    }

    Self operator++() {
      // Check index correctness after increment.
      // Note: Index == getNumCases() means end().
      assert(Index+1 <= SI->getNumCases() && "Index out the number of cases.");
      ++Index;
      return *this;
    }
    Self operator++(int) {
      Self tmp = *this;
      ++(*this);
      return tmp;
    }
    Self operator--() {
      // Check index correctness after decrement.
      // Note: Index == getNumCases() means end().
      // Also allow "-1" iterator here. That will become valid after ++.
      assert((Index == 0 || Index-1 <= SI->getNumCases()) &&
             "Index out the number of cases.");
      --Index;
      return *this;
    }
    Self operator--(int) {
      Self tmp = *this;
      --(*this);
      return tmp;
    }
    bool operator==(const Self& RHS) const {
      assert(RHS.SI == SI && "Incompatible operators.");
      return RHS.Index == Index;
    }
    bool operator!=(const Self& RHS) const {
      assert(RHS.SI == SI && "Incompatible operators.");
      return RHS.Index != Index;
    }
    // Dereference yields the iterator itself so callers can use
    // i->getCaseValue() / (*i).getCaseSuccessor() uniformly.
    Self &operator*() {
      return *this;
    }
  };

  typedef CaseIteratorT<const SwitchInst, const ConstantInt, const BasicBlock>
    ConstCaseIt;

  /// Mutable case iterator; adds setters on top of CaseIteratorT.
  class CaseIt : public CaseIteratorT<SwitchInst, ConstantInt, BasicBlock> {
    typedef CaseIteratorT<SwitchInst, ConstantInt, BasicBlock> ParentTy;

  public:
    CaseIt(const ParentTy& Src) : ParentTy(Src) {}
    CaseIt(SwitchInst *SI, unsigned CaseNum) : ParentTy(SI, CaseNum) {}

    /// Sets the new value for current case.
    void setValue(ConstantInt *V) {
      assert(Index < SI->getNumCases() && "Index out the number of cases.");
      SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
    }

    /// Sets the new successor for current case.
    void setSuccessor(BasicBlock *S) {
      SI->setSuccessor(getSuccessorIndex(), S);
    }
  };

  static SwitchInst *Create(Value *Value, BasicBlock *Default,
                            unsigned NumCases,
                            Instruction *InsertBefore = nullptr) {
    return new SwitchInst(Value, Default, NumCases, InsertBefore);
  }
  static SwitchInst *Create(Value *Value, BasicBlock *Default,
                            unsigned NumCases, BasicBlock *InsertAtEnd) {
    return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Accessor Methods for Switch stmt
  Value *getCondition() const { return getOperand(0); }
  void setCondition(Value *V) { setOperand(0, V); }

  BasicBlock *getDefaultDest() const {
    return cast<BasicBlock>(getOperand(1));
  }

  void setDefaultDest(BasicBlock *DefaultCase) {
    setOperand(1, reinterpret_cast<Value*>(DefaultCase));
  }

  /// getNumCases - return the number of 'cases' in this switch instruction,
  /// except the default case
  // Operands come in (value, successor) pairs after the two leading operands
  // (condition, default destination), hence operands/2 - 1.
  unsigned getNumCases() const {
    return getNumOperands()/2 - 1;
  }

  /// Returns a read/write iterator that points to the first
  /// case in SwitchInst.
  CaseIt case_begin() {
    return CaseIt(this, 0);
  }
  /// Returns a read-only iterator that points to the first
  /// case in the SwitchInst.
  ConstCaseIt case_begin() const {
    return ConstCaseIt(this, 0);
  }

  /// Returns a read/write iterator that points one past the last
  /// in the SwitchInst.
  CaseIt case_end() {
    return CaseIt(this, getNumCases());
  }
  /// Returns a read-only iterator that points one past the last
  /// in the SwitchInst.
  ConstCaseIt case_end() const {
    return ConstCaseIt(this, getNumCases());
  }

  /// cases - iteration adapter for range-for loops.
  iterator_range<CaseIt> cases() {
    return iterator_range<CaseIt>(case_begin(), case_end());
  }

  /// cases - iteration adapter for range-for loops.
  iterator_range<ConstCaseIt> cases() const {
    return iterator_range<ConstCaseIt>(case_begin(), case_end());
  }

  /// Returns an iterator that points to the default case.
  /// Note: this iterator allows to resolve successor only. Attempt
  /// to resolve case value causes an assertion.
  /// Also note, that increment and decrement also causes an assertion and
  /// makes iterator invalid.
  CaseIt case_default() {
    return CaseIt(this, DefaultPseudoIndex);
  }
  ConstCaseIt case_default() const {
    return ConstCaseIt(this, DefaultPseudoIndex);
  }

  /// findCaseValue - Search all of the case values for the specified constant.
  /// If it is explicitly handled, return the case iterator of it, otherwise
  /// return default case iterator to indicate
  /// that it is handled by the default handler.
  // Linear scan over the cases; pointer equality on the ConstantInt.
  CaseIt findCaseValue(const ConstantInt *C) {
    for (CaseIt i = case_begin(), e = case_end(); i != e; ++i)
      if (i.getCaseValue() == C)
        return i;
    return case_default();
  }
  ConstCaseIt findCaseValue(const ConstantInt *C) const {
    for (ConstCaseIt i = case_begin(), e = case_end(); i != e; ++i)
      if (i.getCaseValue() == C)
        return i;
    return case_default();
  }

  /// findCaseDest - Finds the unique case value for a given successor. Returns
  /// null if the successor is not found, not unique, or is the default case.
  ConstantInt *findCaseDest(BasicBlock *BB) {
    if (BB == getDefaultDest()) return nullptr;

    ConstantInt *CI = nullptr;
    for (CaseIt i = case_begin(), e = case_end(); i != e; ++i) {
      if (i.getCaseSuccessor() == BB) {
        if (CI) return nullptr;   // Multiple cases lead to BB.
        else CI = i.getCaseValue();
      }
    }
    return CI;
  }

  /// addCase - Add an entry to the switch instruction...
  /// Note:
  /// This action invalidates case_end(). Old case_end() iterator will
  /// point to the added case.
  void addCase(ConstantInt *OnVal, BasicBlock *Dest);

  /// removeCase - This method removes the specified case and its successor
  /// from the switch instruction. Note that this operation may reorder the
  /// remaining cases at index idx and above.
  /// Note:
  /// This action invalidates iterators for all cases following the one removed,
  /// including the case_end() iterator.
  void removeCase(CaseIt i);

  // Successor 0 is the default destination (operand 1); case i's successor is
  // operand 2i+1.
  unsigned getNumSuccessors() const { return getNumOperands()/2; }
  BasicBlock *getSuccessor(unsigned idx) const {
    assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
    return cast<BasicBlock>(getOperand(idx*2+1));
  }
  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
    setOperand(idx*2+1, (Value*)NewSucc);
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Switch;
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  BasicBlock *getSuccessorV(unsigned idx) const override;
  unsigned getNumSuccessorsV() const override;
  void setSuccessorV(unsigned idx, BasicBlock *B) override;
};

template <>
struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)

//===----------------------------------------------------------------------===//
//                             IndirectBrInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// IndirectBrInst - Indirect Branch Instruction.
///
class IndirectBrInst : public TerminatorInst {
  void *operator new(size_t, unsigned) = delete;
  unsigned ReservedSpace;
  // NOTE(review): the operand layout here is *not* the SwitchInst layout the
  // comment was copied from; per getAddress()/getSuccessor() below it is:
  // Operand[0]   = Address to jump to
  // Operand[n+1] = n-th possible destination BasicBlock
  IndirectBrInst(const IndirectBrInst &IBI);
  void init(Value *Address, unsigned NumDests);
  void growOperands();
  // allocate space for exactly zero operands
  void *operator new(size_t s) { return User::operator new(s); }
  /// IndirectBrInst ctor - Create a new indirectbr instruction, specifying an
  /// Address to jump to.
  /// The number of expected destinations can be specified
  /// here to make memory allocation more efficient.  This constructor can also
  /// autoinsert before another instruction.
  IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);

  /// IndirectBrInst ctor - Create a new indirectbr instruction, specifying an
  /// Address to jump to.  The number of expected destinations can be specified
  /// here to make memory allocation more efficient.  This constructor also
  /// autoinserts at the end of the specified BasicBlock.
  IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;
  IndirectBrInst *cloneImpl() const;

public:
  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
                                Instruction *InsertBefore = nullptr) {
    return new IndirectBrInst(Address, NumDests, InsertBefore);
  }
  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
                                BasicBlock *InsertAtEnd) {
    return new IndirectBrInst(Address, NumDests, InsertAtEnd);
  }

  /// Provide fast operand accessors.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Accessor Methods for IndirectBrInst instruction.
  // The jump address is operand 0.
  Value *getAddress() { return getOperand(0); }
  const Value *getAddress() const { return getOperand(0); }
  void setAddress(Value *V) { setOperand(0, V); }

  /// getNumDestinations - return the number of possible destinations in this
  /// indirectbr instruction.
  unsigned getNumDestinations() const { return getNumOperands()-1; }

  /// getDestination - Return the specified destination.
  BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
  const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }

  /// addDestination - Add a destination.
  ///
  void addDestination(BasicBlock *Dest);

  /// removeDestination - This method removes the specified successor from the
  /// indirectbr instruction.
  void removeDestination(unsigned i);

  // Every operand after the address is a possible successor.
  unsigned getNumSuccessors() const { return getNumOperands()-1; }
  BasicBlock *getSuccessor(unsigned i) const {
    return cast<BasicBlock>(getOperand(i+1));
  }
  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
    setOperand(i+1, (Value*)NewSucc);
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::IndirectBr;
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  BasicBlock *getSuccessorV(unsigned idx) const override;
  unsigned getNumSuccessorsV() const override;
  void setSuccessorV(unsigned idx, BasicBlock *B) override;
};

template <>
struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)

//===----------------------------------------------------------------------===//
//                               InvokeInst Class
//===----------------------------------------------------------------------===//

/// InvokeInst - Invoke instruction.  The SubclassData field is used to hold the
/// calling convention of the call.
///
class InvokeInst : public TerminatorInst {
  AttributeSet AttributeList;
  FunctionType *FTy;
  InvokeInst(const InvokeInst &BI);
  // Convenience overload: derives the FunctionType from the callee's pointer
  // type, then forwards to the primary init below.
  void init(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
            ArrayRef<Value *> Args, const Twine &NameStr) {
    init(cast<FunctionType>(
             cast<PointerType>(Func->getType())->getElementType()),
         Func, IfNormal, IfException, Args, NameStr);
  }
  void init(FunctionType *FTy, Value *Func, BasicBlock *IfNormal,
            BasicBlock *IfException, ArrayRef<Value *> Args,
            const Twine &NameStr);

  /// Construct an InvokeInst given a range of arguments.
  ///
  /// \brief Construct an InvokeInst from a range of arguments
  inline InvokeInst(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
                    ArrayRef<Value *> Args, unsigned Values,
                    const Twine &NameStr, Instruction *InsertBefore)
      : InvokeInst(cast<FunctionType>(
                       cast<PointerType>(Func->getType())->getElementType()),
                   Func, IfNormal, IfException, Args, Values, NameStr,
                   InsertBefore) {}

  inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                    BasicBlock *IfException, ArrayRef<Value *> Args,
                    unsigned Values, const Twine &NameStr,
                    Instruction *InsertBefore);
  /// Construct an InvokeInst given a range of arguments.
  ///
  /// \brief Construct an InvokeInst from a range of arguments
  inline InvokeInst(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
                    ArrayRef<Value *> Args, unsigned Values,
                    const Twine &NameStr, BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;
  InvokeInst *cloneImpl() const;

public:
  static InvokeInst *Create(Value *Func,
                            BasicBlock *IfNormal, BasicBlock *IfException,
                            ArrayRef<Value *> Args, const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr) {
    return Create(cast<FunctionType>(
                      cast<PointerType>(Func->getType())->getElementType()),
                  Func, IfNormal, IfException, Args, NameStr, InsertBefore);
  }
  // Operand count is args + 3: the callee and the two destination blocks.
  static InvokeInst *Create(FunctionType *Ty, Value *Func,
                            BasicBlock *IfNormal, BasicBlock *IfException,
                            ArrayRef<Value *> Args, const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr) {
    unsigned Values = unsigned(Args.size()) + 3;
    return new (Values) InvokeInst(Ty, Func, IfNormal, IfException, Args,
                                   Values, NameStr, InsertBefore);
  }
  static InvokeInst *Create(Value *Func,
                            BasicBlock *IfNormal, BasicBlock *IfException,
                            ArrayRef<Value *> Args, const Twine &NameStr,
                            BasicBlock *InsertAtEnd) {
    unsigned Values = unsigned(Args.size()) + 3;
    return new(Values) InvokeInst(Func, IfNormal, IfException, Args,
                                  Values, NameStr, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  FunctionType *getFunctionType() const { return FTy; }

  void mutateFunctionType(FunctionType *FTy) {
    mutateType(FTy->getReturnType());
    this->FTy = FTy;
  }

  /// getNumArgOperands - Return the number of invoke arguments.
  ///
  // The last three operands are the callee and the two destinations.
  unsigned getNumArgOperands() const { return getNumOperands() - 3; }

  /// getArgOperand/setArgOperand - Return/set the i-th invoke argument.
  ///
  Value *getArgOperand(unsigned i) const { return getOperand(i); }
  void setArgOperand(unsigned i, Value *v) { setOperand(i, v); }

  /// arg_operands - iteration adapter for range-for loops.
  iterator_range<op_iterator> arg_operands() {
    return iterator_range<op_iterator>(op_begin(), op_end() - 3);
  }

  /// arg_operands - iteration adapter for range-for loops.
  iterator_range<const_op_iterator> arg_operands() const {
    return iterator_range<const_op_iterator>(op_begin(), op_end() - 3);
  }

  /// \brief Wrappers for getting the \c Use of a invoke argument.
  const Use &getArgOperandUse(unsigned i) const { return getOperandUse(i); }
  Use &getArgOperandUse(unsigned i) { return getOperandUse(i); }

  /// getCallingConv/setCallingConv - Get or set the calling convention of this
  /// function call.
  // The calling convention is stored in the instruction's subclass data.
  CallingConv::ID getCallingConv() const {
    return static_cast<CallingConv::ID>(getSubclassDataFromInstruction());
  }
  void setCallingConv(CallingConv::ID CC) {
    setInstructionSubclassData(static_cast<unsigned>(CC));
  }

  /// getAttributes - Return the parameter attributes for this invoke.
  ///
  const AttributeSet &getAttributes() const { return AttributeList; }

  /// setAttributes - Set the parameter attributes for this invoke.
  ///
  void setAttributes(const AttributeSet &Attrs) { AttributeList = Attrs; }

  /// addAttribute - adds the attribute to the list of attributes.
  void addAttribute(unsigned i, Attribute::AttrKind attr);

  /// removeAttribute - removes the attribute from the list of attributes.
  void removeAttribute(unsigned i, Attribute attr);

  /// \brief adds the dereferenceable attribute to the list of attributes.
  void addDereferenceableAttr(unsigned i, uint64_t Bytes);

  /// \brief adds the dereferenceable_or_null attribute to the list of
  /// attributes.
  void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes);

  /// \brief Determine whether this call has the given attribute.
  bool hasFnAttr(Attribute::AttrKind A) const {
    assert(A != Attribute::NoBuiltin &&
           "Use CallInst::isNoBuiltin() to check for Attribute::NoBuiltin");
    return hasFnAttrImpl(A);
  }

  /// \brief Determine whether the call or the callee has the given attributes.
  bool paramHasAttr(unsigned i, Attribute::AttrKind A) const;

  /// \brief Extract the alignment for a call or parameter (0=unknown).
  unsigned getParamAlignment(unsigned i) const {
    return AttributeList.getParamAlignment(i);
  }

  /// \brief Extract the number of dereferenceable bytes for a call or
  /// parameter (0=unknown).
  uint64_t getDereferenceableBytes(unsigned i) const {
    return AttributeList.getDereferenceableBytes(i);
  }

  /// \brief Extract the number of dereferenceable_or_null bytes for a call or
  /// parameter (0=unknown).
  uint64_t getDereferenceableOrNullBytes(unsigned i) const {
    return AttributeList.getDereferenceableOrNullBytes(i);
  }

  /// \brief Return true if the call should not be treated as a call to a
  /// builtin.
  bool isNoBuiltin() const {
    // We assert in hasFnAttr if one passes in Attribute::NoBuiltin, so we have
    // to check it by hand.
    return hasFnAttrImpl(Attribute::NoBuiltin) &&
      !hasFnAttrImpl(Attribute::Builtin);
  }

  /// \brief Return true if the call should not be inlined.
  bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
  void setIsNoInline() {
    addAttribute(AttributeSet::FunctionIndex, Attribute::NoInline);
  }

  /// \brief Determine if the call does not access memory.
  bool doesNotAccessMemory() const {
    return hasFnAttr(Attribute::ReadNone);
  }
  void setDoesNotAccessMemory() {
    addAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone);
  }

  /// \brief Determine if the call does not access or only reads memory.
  bool onlyReadsMemory() const {
    return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
  }
  void setOnlyReadsMemory() {
    addAttribute(AttributeSet::FunctionIndex, Attribute::ReadOnly);
  }

  /// @brief Determine if the call accesses memory only using its pointer
  /// arguments.
  bool onlyAccessesArgMemory() const {
    return hasFnAttr(Attribute::ArgMemOnly);
  }
  void setOnlyAccessesArgMemory() {
    addAttribute(AttributeSet::FunctionIndex, Attribute::ArgMemOnly);
  }

  /// \brief Determine if the call cannot return.
  bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
  void setDoesNotReturn() {
    addAttribute(AttributeSet::FunctionIndex, Attribute::NoReturn);
  }

  /// \brief Determine if the call cannot unwind.
  bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
  void setDoesNotThrow() {
    addAttribute(AttributeSet::FunctionIndex, Attribute::NoUnwind);
  }

  /// \brief Determine if the invoke cannot be duplicated.
  bool cannotDuplicate() const {return hasFnAttr(Attribute::NoDuplicate); }
  void setCannotDuplicate() {
    addAttribute(AttributeSet::FunctionIndex, Attribute::NoDuplicate);
  }

  /// \brief Determine if the call returns a structure through first
  /// pointer argument.
  bool hasStructRetAttr() const {
    // Be friendly and also check the callee.
    return paramHasAttr(1, Attribute::StructRet);
  }

  /// \brief Determine if any call argument is an aggregate passed by value.
  bool hasByValArgument() const {
    return AttributeList.hasAttrSomewhere(Attribute::ByVal);
  }

  /// getCalledFunction - Return the function called, or null if this is an
  /// indirect function invocation.
  ///
  // The callee is the third-from-last operand (see the +3 in Create above).
  Function *getCalledFunction() const {
    return dyn_cast<Function>(Op<-3>());
  }

  /// getCalledValue - Get a pointer to the function that is invoked by this
  /// instruction
  const Value *getCalledValue() const { return Op<-3>(); }
        Value *getCalledValue()       { return Op<-3>(); }

  /// setCalledFunction - Set the function called.
  void setCalledFunction(Value* Fn) {
    setCalledFunction(
        cast<FunctionType>(cast<PointerType>(Fn->getType())->getElementType()),
        Fn);
  }
  void setCalledFunction(FunctionType *FTy, Value *Fn) {
    this->FTy = FTy;
    assert(FTy == cast<FunctionType>(
                      cast<PointerType>(Fn->getType())->getElementType()));
    Op<-3>() = Fn;
  }

  // get*Dest - Return the destination basic blocks...
  BasicBlock *getNormalDest() const {
    return cast<BasicBlock>(Op<-2>());
  }
  BasicBlock *getUnwindDest() const {
    return cast<BasicBlock>(Op<-1>());
  }
  void setNormalDest(BasicBlock *B) {
    Op<-2>() = reinterpret_cast<Value*>(B);
  }
  void setUnwindDest(BasicBlock *B) {
    Op<-1>() = reinterpret_cast<Value*>(B);
  }

  /// getLandingPadInst - Get the landingpad instruction from the landing pad
  /// block (the unwind destination).
  LandingPadInst *getLandingPadInst() const;

  // Successor 0 is the normal destination, successor 1 the unwind destination.
  BasicBlock *getSuccessor(unsigned i) const {
    assert(i < 2 && "Successor # out of range for invoke!");
    return i == 0 ? getNormalDest() : getUnwindDest();
  }

  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    assert(idx < 2 && "Successor # out of range for invoke!");
    *(&Op<-2>() + idx) = reinterpret_cast<Value*>(NewSucc);
  }

  unsigned getNumSuccessors() const { return 2; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Invoke);
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  BasicBlock *getSuccessorV(unsigned idx) const override;
  unsigned getNumSuccessorsV() const override;
  void setSuccessorV(unsigned idx, BasicBlock *B) override;

  bool hasFnAttrImpl(Attribute::AttrKind A) const;

  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  void setInstructionSubclassData(unsigned short D) {
    Instruction::setInstructionSubclassData(D);
  }
};

template <>
struct OperandTraits<InvokeInst> : public VariadicOperandTraits<InvokeInst, 3> {
};

InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                       BasicBlock *IfException, ArrayRef<Value *> Args,
                       unsigned Values, const Twine &NameStr,
                       Instruction *InsertBefore)
    : TerminatorInst(Ty->getReturnType(), Instruction::Invoke,
                     OperandTraits<InvokeInst>::op_end(this) - Values, Values,
                     InsertBefore) {
  init(Ty, Func, IfNormal, IfException, Args, NameStr);
}
InvokeInst::InvokeInst(Value *Func, BasicBlock *IfNormal,
                       BasicBlock *IfException, ArrayRef<Value *> Args,
                       unsigned Values, const Twine &NameStr,
                       BasicBlock *InsertAtEnd)
    : TerminatorInst(cast<FunctionType>(cast<PointerType>(Func->getType())
                                            ->getElementType())
                         ->getReturnType(),
                     Instruction::Invoke,
                     OperandTraits<InvokeInst>::op_end(this) - Values, Values,
                     InsertAtEnd) {
  init(Func, IfNormal, IfException, Args, NameStr);
}

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InvokeInst, Value)
//===----------------------------------------------------------------------===// // ResumeInst Class //===----------------------------------------------------------------------===// //===--------------------------------------------------------------------------- /// ResumeInst - Resume the propagation of an exception. /// class ResumeInst : public TerminatorInst { ResumeInst(const ResumeInst &RI); explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr); ResumeInst(Value *Exn, BasicBlock *InsertAtEnd); protected: // Note: Instruction needs to be a friend here to call cloneImpl. friend class Instruction; ResumeInst *cloneImpl() const; public: static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) { return new(1) ResumeInst(Exn, InsertBefore); } static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) { return new(1) ResumeInst(Exn, InsertAtEnd); } /// Provide fast operand accessors DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); /// Convenience accessor. 
Value *getValue() const { return Op<0>(); } unsigned getNumSuccessors() const { return 0; } // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::Resume; } static inline bool classof(const Value *V) { return isa<Instruction>(V) && classof(cast<Instruction>(V)); } private: BasicBlock *getSuccessorV(unsigned idx) const override; unsigned getNumSuccessorsV() const override; void setSuccessorV(unsigned idx, BasicBlock *B) override; }; template <> struct OperandTraits<ResumeInst> : public FixedNumOperandTraits<ResumeInst, 1> { }; DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value) //===----------------------------------------------------------------------===// // UnreachableInst Class //===----------------------------------------------------------------------===// //===--------------------------------------------------------------------------- /// UnreachableInst - This function has undefined behavior. In particular, the /// presence of this instruction indicates some higher level knowledge that the /// end of the block cannot be reached. /// class UnreachableInst : public TerminatorInst { void *operator new(size_t, unsigned) = delete; protected: // Note: Instruction needs to be a friend here to call cloneImpl. 
friend class Instruction; UnreachableInst *cloneImpl() const; public: // allocate space for exactly zero operands void *operator new(size_t s) { return User::operator new(s, 0); } explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr); explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd); unsigned getNumSuccessors() const { return 0; } // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::Unreachable; } static inline bool classof(const Value *V) { return isa<Instruction>(V) && classof(cast<Instruction>(V)); } private: BasicBlock *getSuccessorV(unsigned idx) const override; unsigned getNumSuccessorsV() const override; void setSuccessorV(unsigned idx, BasicBlock *B) override; }; //===----------------------------------------------------------------------===// // TruncInst Class //===----------------------------------------------------------------------===// /// \brief This class represents a truncation of integer types. class TruncInst : public CastInst { protected: // Note: Instruction needs to be a friend here to call cloneImpl. 
friend class Instruction; /// \brief Clone an identical TruncInst TruncInst *cloneImpl() const; public: /// \brief Constructor with insert-before-instruction semantics TruncInst( Value *S, ///< The value to be truncated Type *Ty, ///< The (smaller) type to truncate to const Twine &NameStr = "", ///< A name for the new instruction Instruction *InsertBefore = nullptr ///< Where to insert the new instruction ); /// \brief Constructor with insert-at-end-of-block semantics TruncInst( Value *S, ///< The value to be truncated Type *Ty, ///< The (smaller) type to truncate to const Twine &NameStr, ///< A name for the new instruction BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); /// \brief Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == Trunc; } static inline bool classof(const Value *V) { return isa<Instruction>(V) && classof(cast<Instruction>(V)); } }; //===----------------------------------------------------------------------===// // ZExtInst Class //===----------------------------------------------------------------------===// /// \brief This class represents zero extension of integer types. class ZExtInst : public CastInst { protected: // Note: Instruction needs to be a friend here to call cloneImpl. friend class Instruction; /// \brief Clone an identical ZExtInst ZExtInst *cloneImpl() const; public: /// \brief Constructor with insert-before-instruction semantics ZExtInst( Value *S, ///< The value to be zero extended Type *Ty, ///< The type to zero extend to const Twine &NameStr = "", ///< A name for the new instruction Instruction *InsertBefore = nullptr ///< Where to insert the new instruction ); /// \brief Constructor with insert-at-end semantics. 
ZExtInst( Value *S, ///< The value to be zero extended Type *Ty, ///< The type to zero extend to const Twine &NameStr, ///< A name for the new instruction BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); /// \brief Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == ZExt; } static inline bool classof(const Value *V) { return isa<Instruction>(V) && classof(cast<Instruction>(V)); } }; //===----------------------------------------------------------------------===// // SExtInst Class //===----------------------------------------------------------------------===// /// \brief This class represents a sign extension of integer types. class SExtInst : public CastInst { protected: // Note: Instruction needs to be a friend here to call cloneImpl. friend class Instruction; /// \brief Clone an identical SExtInst SExtInst *cloneImpl() const; public: /// \brief Constructor with insert-before-instruction semantics SExtInst( Value *S, ///< The value to be sign extended Type *Ty, ///< The type to sign extend to const Twine &NameStr = "", ///< A name for the new instruction Instruction *InsertBefore = nullptr ///< Where to insert the new instruction ); /// \brief Constructor with insert-at-end-of-block semantics SExtInst( Value *S, ///< The value to be sign extended Type *Ty, ///< The type to sign extend to const Twine &NameStr, ///< A name for the new instruction BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); /// \brief Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == SExt; } static inline bool classof(const Value *V) { return isa<Instruction>(V) && classof(cast<Instruction>(V)); } }; //===----------------------------------------------------------------------===// // FPTruncInst Class 
//===----------------------------------------------------------------------===// /// \brief This class represents a truncation of floating point types. class FPTruncInst : public CastInst { protected: // Note: Instruction needs to be a friend here to call cloneImpl. friend class Instruction; /// \brief Clone an identical FPTruncInst FPTruncInst *cloneImpl() const; public: /// \brief Constructor with insert-before-instruction semantics FPTruncInst( Value *S, ///< The value to be truncated Type *Ty, ///< The type to truncate to const Twine &NameStr = "", ///< A name for the new instruction Instruction *InsertBefore = nullptr ///< Where to insert the new instruction ); /// \brief Constructor with insert-before-instruction semantics FPTruncInst( Value *S, ///< The value to be truncated Type *Ty, ///< The type to truncate to const Twine &NameStr, ///< A name for the new instruction BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); /// \brief Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == FPTrunc; } static inline bool classof(const Value *V) { return isa<Instruction>(V) && classof(cast<Instruction>(V)); } }; //===----------------------------------------------------------------------===// // FPExtInst Class //===----------------------------------------------------------------------===// /// \brief This class represents an extension of floating point types. class FPExtInst : public CastInst { protected: // Note: Instruction needs to be a friend here to call cloneImpl. 
friend class Instruction; /// \brief Clone an identical FPExtInst FPExtInst *cloneImpl() const; public: /// \brief Constructor with insert-before-instruction semantics FPExtInst( Value *S, ///< The value to be extended Type *Ty, ///< The type to extend to const Twine &NameStr = "", ///< A name for the new instruction Instruction *InsertBefore = nullptr ///< Where to insert the new instruction ); /// \brief Constructor with insert-at-end-of-block semantics FPExtInst( Value *S, ///< The value to be extended Type *Ty, ///< The type to extend to const Twine &NameStr, ///< A name for the new instruction BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); /// \brief Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == FPExt; } static inline bool classof(const Value *V) { return isa<Instruction>(V) && classof(cast<Instruction>(V)); } }; //===----------------------------------------------------------------------===// // UIToFPInst Class //===----------------------------------------------------------------------===// /// \brief This class represents a cast unsigned integer to floating point. class UIToFPInst : public CastInst { protected: // Note: Instruction needs to be a friend here to call cloneImpl. 
friend class Instruction; /// \brief Clone an identical UIToFPInst UIToFPInst *cloneImpl() const; public: /// \brief Constructor with insert-before-instruction semantics UIToFPInst( Value *S, ///< The value to be converted Type *Ty, ///< The type to convert to const Twine &NameStr = "", ///< A name for the new instruction Instruction *InsertBefore = nullptr ///< Where to insert the new instruction ); /// \brief Constructor with insert-at-end-of-block semantics UIToFPInst( Value *S, ///< The value to be converted Type *Ty, ///< The type to convert to const Twine &NameStr, ///< A name for the new instruction BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); /// \brief Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == UIToFP; } static inline bool classof(const Value *V) { return isa<Instruction>(V) && classof(cast<Instruction>(V)); } }; //===----------------------------------------------------------------------===// // SIToFPInst Class //===----------------------------------------------------------------------===// /// \brief This class represents a cast from signed integer to floating point. class SIToFPInst : public CastInst { protected: // Note: Instruction needs to be a friend here to call cloneImpl. 
friend class Instruction; /// \brief Clone an identical SIToFPInst SIToFPInst *cloneImpl() const; public: /// \brief Constructor with insert-before-instruction semantics SIToFPInst( Value *S, ///< The value to be converted Type *Ty, ///< The type to convert to const Twine &NameStr = "", ///< A name for the new instruction Instruction *InsertBefore = nullptr ///< Where to insert the new instruction ); /// \brief Constructor with insert-at-end-of-block semantics SIToFPInst( Value *S, ///< The value to be converted Type *Ty, ///< The type to convert to const Twine &NameStr, ///< A name for the new instruction BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); /// \brief Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == SIToFP; } static inline bool classof(const Value *V) { return isa<Instruction>(V) && classof(cast<Instruction>(V)); } }; //===----------------------------------------------------------------------===// // FPToUIInst Class //===----------------------------------------------------------------------===// /// \brief This class represents a cast from floating point to unsigned integer class FPToUIInst : public CastInst { protected: // Note: Instruction needs to be a friend here to call cloneImpl. 
friend class Instruction; /// \brief Clone an identical FPToUIInst FPToUIInst *cloneImpl() const; public: /// \brief Constructor with insert-before-instruction semantics FPToUIInst( Value *S, ///< The value to be converted Type *Ty, ///< The type to convert to const Twine &NameStr = "", ///< A name for the new instruction Instruction *InsertBefore = nullptr ///< Where to insert the new instruction ); /// \brief Constructor with insert-at-end-of-block semantics FPToUIInst( Value *S, ///< The value to be converted Type *Ty, ///< The type to convert to const Twine &NameStr, ///< A name for the new instruction BasicBlock *InsertAtEnd ///< Where to insert the new instruction ); /// \brief Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == FPToUI; } static inline bool classof(const Value *V) { return isa<Instruction>(V) && classof(cast<Instruction>(V)); } }; //===----------------------------------------------------------------------===// // FPToSIInst Class //===----------------------------------------------------------------------===// /// \brief This class represents a cast from floating point to signed integer. class FPToSIInst : public CastInst { protected: // Note: Instruction needs to be a friend here to call cloneImpl. 
friend class Instruction; /// \brief Clone an identical FPToSIInst FPToSIInst *cloneImpl() const; public: /// \brief Constructor with insert-before-instruction semantics FPToSIInst( Value *S, ///< The value to be converted Type *Ty, ///< The type to convert to const Twine &NameStr = "", ///< A name for the new instruction Instruction *InsertBefore = nullptr ///< Where to insert the new instruction ); /// \brief Constructor with insert-at-end-of-block semantics FPToSIInst( Value *S, ///< The value to be converted Type *Ty, ///< The type to convert to const Twine &NameStr, ///< A name for the new instruction BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); /// \brief Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == FPToSI; } static inline bool classof(const Value *V) { return isa<Instruction>(V) && classof(cast<Instruction>(V)); } }; //===----------------------------------------------------------------------===// // IntToPtrInst Class //===----------------------------------------------------------------------===// /// \brief This class represents a cast from an integer to a pointer. class IntToPtrInst : public CastInst { public: /// \brief Constructor with insert-before-instruction semantics IntToPtrInst( Value *S, ///< The value to be converted Type *Ty, ///< The type to convert to const Twine &NameStr = "", ///< A name for the new instruction Instruction *InsertBefore = nullptr ///< Where to insert the new instruction ); /// \brief Constructor with insert-at-end-of-block semantics IntToPtrInst( Value *S, ///< The value to be converted Type *Ty, ///< The type to convert to const Twine &NameStr, ///< A name for the new instruction BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); // Note: Instruction needs to be a friend here to call cloneImpl. 
friend class Instruction; /// \brief Clone an identical IntToPtrInst IntToPtrInst *cloneImpl() const; /// \brief Returns the address space of this instruction's pointer type. unsigned getAddressSpace() const { return getType()->getPointerAddressSpace(); } // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == IntToPtr; } static inline bool classof(const Value *V) { return isa<Instruction>(V) && classof(cast<Instruction>(V)); } }; //===----------------------------------------------------------------------===// // PtrToIntInst Class //===----------------------------------------------------------------------===// /// \brief This class represents a cast from a pointer to an integer class PtrToIntInst : public CastInst { protected: // Note: Instruction needs to be a friend here to call cloneImpl. friend class Instruction; /// \brief Clone an identical PtrToIntInst PtrToIntInst *cloneImpl() const; public: /// \brief Constructor with insert-before-instruction semantics PtrToIntInst( Value *S, ///< The value to be converted Type *Ty, ///< The type to convert to const Twine &NameStr = "", ///< A name for the new instruction Instruction *InsertBefore = nullptr ///< Where to insert the new instruction ); /// \brief Constructor with insert-at-end-of-block semantics PtrToIntInst( Value *S, ///< The value to be converted Type *Ty, ///< The type to convert to const Twine &NameStr, ///< A name for the new instruction BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); /// \brief Gets the pointer operand. Value *getPointerOperand() { return getOperand(0); } /// \brief Gets the pointer operand. const Value *getPointerOperand() const { return getOperand(0); } /// \brief Gets the operand index of the pointer operand. static unsigned getPointerOperandIndex() { return 0U; } /// \brief Returns the address space of the pointer operand. 
unsigned getPointerAddressSpace() const { return getPointerOperand()->getType()->getPointerAddressSpace(); } // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == PtrToInt; } static inline bool classof(const Value *V) { return isa<Instruction>(V) && classof(cast<Instruction>(V)); } }; //===----------------------------------------------------------------------===// // BitCastInst Class //===----------------------------------------------------------------------===// /// \brief This class represents a no-op cast from one type to another. class BitCastInst : public CastInst { protected: // Note: Instruction needs to be a friend here to call cloneImpl. friend class Instruction; /// \brief Clone an identical BitCastInst BitCastInst *cloneImpl() const; public: /// \brief Constructor with insert-before-instruction semantics BitCastInst( Value *S, ///< The value to be casted Type *Ty, ///< The type to casted to const Twine &NameStr = "", ///< A name for the new instruction Instruction *InsertBefore = nullptr ///< Where to insert the new instruction ); /// \brief Constructor with insert-at-end-of-block semantics BitCastInst( Value *S, ///< The value to be casted Type *Ty, ///< The type to casted to const Twine &NameStr, ///< A name for the new instruction BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == BitCast; } static inline bool classof(const Value *V) { return isa<Instruction>(V) && classof(cast<Instruction>(V)); } }; //===----------------------------------------------------------------------===// // AddrSpaceCastInst Class // // /////////////////////////////////////////////////////////////////////////////// /// \brief This class represents a conversion between pointers from /// one address space to another. 
class AddrSpaceCastInst : public CastInst { protected: // Note: Instruction needs to be a friend here to call cloneImpl. friend class Instruction; /// \brief Clone an identical AddrSpaceCastInst AddrSpaceCastInst *cloneImpl() const; public: /// \brief Constructor with insert-before-instruction semantics AddrSpaceCastInst( Value *S, ///< The value to be casted Type *Ty, ///< The type to casted to const Twine &NameStr = "", ///< A name for the new instruction Instruction *InsertBefore = nullptr ///< Where to insert the new instruction ); /// \brief Constructor with insert-at-end-of-block semantics AddrSpaceCastInst( Value *S, ///< The value to be casted Type *Ty, ///< The type to casted to const Twine &NameStr, ///< A name for the new instruction BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == AddrSpaceCast; } static inline bool classof(const Value *V) { return isa<Instruction>(V) && classof(cast<Instruction>(V)); } }; } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/GlobalVariable.h
//===-- llvm/GlobalVariable.h - GlobalVariable class ------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains the declaration of the GlobalVariable class, which // represents a single global variable (or constant) in the VM. // // Global variables are constant pointers that refer to hunks of space that are // allocated by either the VM, or by the linker in a static compiler. A global // variable may have an initial value, which is copied into the executables .data // area. Global Constants are required to have initializers. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_GLOBALVARIABLE_H #define LLVM_IR_GLOBALVARIABLE_H #include "llvm/ADT/Twine.h" #include "llvm/ADT/ilist_node.h" #include "llvm/IR/GlobalObject.h" #include "llvm/IR/OperandTraits.h" namespace llvm { class Module; class Constant; template<typename ValueSubClass, typename ItemParentClass> class SymbolTableListTraits; class GlobalVariable : public GlobalObject, public ilist_node<GlobalVariable> { friend class SymbolTableListTraits<GlobalVariable, Module>; void *operator new(size_t, unsigned) = delete; void operator=(const GlobalVariable &) = delete; GlobalVariable(const GlobalVariable &) = delete; void setParent(Module *parent); bool isConstantGlobal : 1; // Is this a global constant? bool isExternallyInitializedConstant : 1; // Is this a global whose value // can change from its initial // value before global // initializers are run? 
public: // allocate space for exactly one operand void *operator new(size_t s) { return User::operator new(s, 1); } // HLSL Change Begin: Match operator new/delete void operator delete(void* Ptr) { User::operator delete(Ptr, 1); } // HLSL Change End /// GlobalVariable ctor - If a parent module is specified, the global is /// automatically inserted into the end of the specified modules global list. GlobalVariable(Type *Ty, bool isConstant, LinkageTypes Linkage, Constant *Initializer = nullptr, const Twine &Name = "", ThreadLocalMode = NotThreadLocal, unsigned AddressSpace = 0, bool isExternallyInitialized = false); /// GlobalVariable ctor - This creates a global and inserts it before the /// specified other global. GlobalVariable(Module &M, Type *Ty, bool isConstant, LinkageTypes Linkage, Constant *Initializer, const Twine &Name = "", GlobalVariable *InsertBefore = nullptr, ThreadLocalMode = NotThreadLocal, unsigned AddressSpace = 0, bool isExternallyInitialized = false); ~GlobalVariable() override { // FIXME: needed by operator delete setGlobalVariableNumOperands(1); } /// Provide fast operand accessors DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); /// Definitions have initializers, declarations don't. /// inline bool hasInitializer() const { return !isDeclaration(); } /// hasDefinitiveInitializer - Whether the global variable has an initializer, /// and any other instances of the global (this can happen due to weak /// linkage) are guaranteed to have the same initializer. /// /// Note that if you want to transform a global, you must use /// hasUniqueInitializer() instead, because of the *_odr linkage type. /// /// Example: /// /// @a = global SomeType* null - Initializer is both definitive and unique. /// /// @b = global weak SomeType* null - Initializer is neither definitive nor /// unique. /// /// @c = global weak_odr SomeType* null - Initializer is definitive, but not /// unique. 
inline bool hasDefinitiveInitializer() const { return hasInitializer() && // The initializer of a global variable with weak linkage may change at // link time. !mayBeOverridden() && // The initializer of a global variable with the externally_initialized // marker may change at runtime before C++ initializers are evaluated. !isExternallyInitialized(); } /// hasUniqueInitializer - Whether the global variable has an initializer, and /// any changes made to the initializer will turn up in the final executable. inline bool hasUniqueInitializer() const { return hasInitializer() && // It's not safe to modify initializers of global variables with weak // linkage, because the linker might choose to discard the initializer and // use the initializer from another instance of the global variable // instead. It is wrong to modify the initializer of a global variable // with *_odr linkage because then different instances of the global may // have different initializers, breaking the One Definition Rule. !isWeakForLinker() && // It is not safe to modify initializers of global variables with the // external_initializer marker since the value may be changed at runtime // before C++ initializers are evaluated. !isExternallyInitialized(); } /// getInitializer - Return the initializer for this global variable. It is /// illegal to call this method if the global is external, because we cannot /// tell what the value is initialized to! /// inline const Constant *getInitializer() const { assert(hasInitializer() && "GV doesn't have initializer!"); return static_cast<Constant*>(Op<0>().get()); } inline Constant *getInitializer() { assert(hasInitializer() && "GV doesn't have initializer!"); return static_cast<Constant*>(Op<0>().get()); } /// setInitializer - Sets the initializer for this global variable, removing /// any existing initializer if InitVal==NULL. If this GV has type T*, the /// initializer must have type T. 
void setInitializer(Constant *InitVal); /// If the value is a global constant, its value is immutable throughout the /// runtime execution of the program. Assigning a value into the constant /// leads to undefined behavior. /// bool isConstant() const { return isConstantGlobal; } void setConstant(bool Val) { isConstantGlobal = Val; } bool isExternallyInitialized() const { return isExternallyInitializedConstant; } void setExternallyInitialized(bool Val) { isExternallyInitializedConstant = Val; } /// copyAttributesFrom - copy all additional attributes (those not needed to /// create a GlobalVariable) from the GlobalVariable Src to this one. void copyAttributesFrom(const GlobalValue *Src) override; /// removeFromParent - This method unlinks 'this' from the containing module, /// but does not delete it. /// void removeFromParent() override; /// eraseFromParent - This method unlinks 'this' from the containing module /// and deletes it. /// void eraseFromParent() override; // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Value *V) { return V->getValueID() == Value::GlobalVariableVal; } }; template <> struct OperandTraits<GlobalVariable> : public OptionalOperandTraits<GlobalVariable> { }; DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GlobalVariable, Value) } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/Argument.h
//===-- llvm/Argument.h - Definition of the Argument class ------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file declares the Argument class. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_ARGUMENT_H #define LLVM_IR_ARGUMENT_H #include "llvm/ADT/Twine.h" #include "llvm/ADT/ilist_node.h" #include "llvm/IR/Attributes.h" #include "llvm/IR/Value.h" namespace llvm { template<typename ValueSubClass, typename ItemParentClass> class SymbolTableListTraits; /// \brief LLVM Argument representation /// /// This class represents an incoming formal argument to a Function. A formal /// argument, since it is ``formal'', does not contain an actual value but /// instead represents the type, argument number, and attributes of an argument /// for a specific function. When used in the body of said function, the /// argument of course represents the value of the actual argument that the /// function was called with. class Argument : public Value, public ilist_node<Argument> { virtual void anchor(); Function *Parent; friend class SymbolTableListTraits<Argument, Function>; void setParent(Function *parent); public: /// \brief Constructor. /// /// If \p F is specified, the argument is inserted at the end of the argument /// list for \p F. explicit Argument(Type *Ty, const Twine &Name = "", Function *F = nullptr); inline const Function *getParent() const { return Parent; } inline Function *getParent() { return Parent; } /// \brief Return the index of this formal argument in its containing /// function. /// /// For example in "void foo(int a, float b)" a is 0 and b is 1. unsigned getArgNo() const; /// \brief Return true if this argument has the nonnull attribute on it in /// its containing function. 
  /// Also returns true if at least one byte is known
  /// to be dereferenceable and the pointer is in addrspace(0).
  bool hasNonNullAttr() const;

  /// \brief If this argument has the dereferenceable attribute on it in its
  /// containing function, return the number of bytes known to be
  /// dereferenceable. Otherwise, zero is returned.
  uint64_t getDereferenceableBytes() const;

  /// \brief If this argument has the dereferenceable_or_null attribute on
  /// it in its containing function, return the number of bytes known to be
  /// dereferenceable. Otherwise, zero is returned.
  uint64_t getDereferenceableOrNullBytes() const;

  /// \brief Return true if this argument has the byval attribute on it in its
  /// containing function.
  bool hasByValAttr() const;

  /// \brief Return true if this argument has the byval attribute or inalloca
  /// attribute on it in its containing function. These attributes both
  /// represent arguments being passed by value.
  bool hasByValOrInAllocaAttr() const;

  /// \brief If this is a byval or inalloca argument, return its alignment.
  unsigned getParamAlignment() const;

  /// \brief Return true if this argument has the nest attribute on it in its
  /// containing function.
  bool hasNestAttr() const;

  /// \brief Return true if this argument has the noalias attribute on it in
  /// its containing function.
  bool hasNoAliasAttr() const;

  /// \brief Return true if this argument has the nocapture attribute on it in
  /// its containing function.
  bool hasNoCaptureAttr() const;

  /// \brief Return true if this argument has the sret attribute on it in its
  /// containing function.
  bool hasStructRetAttr() const;

  /// \brief Return true if this argument has the returned attribute on it in
  /// its containing function.
  bool hasReturnedAttr() const;

  /// \brief Return true if this argument has the readonly or readnone
  /// attribute on it in its containing function.
  bool onlyReadsMemory() const;

  /// \brief Return true if this argument has the inalloca attribute on it in
  /// its containing function.
  bool hasInAllocaAttr() const;

  /// \brief Return true if this argument has the zext attribute on it in its
  /// containing function.
  bool hasZExtAttr() const;

  /// \brief Return true if this argument has the sext attribute on it in its
  /// containing function.
  bool hasSExtAttr() const;

  /// \brief Add an Attribute to an argument.
  void addAttr(AttributeSet AS);

  /// \brief Remove an Attribute from an argument.
  void removeAttr(AttributeSet AS);

  /// \brief Method for support type inquiry through isa, cast, and
  /// dyn_cast.
  static inline bool classof(const Value *V) {
    return V->getValueID() == ArgumentVal;
  }
};

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/Statepoint.h
//===-- llvm/IR/Statepoint.h - gc.statepoint utilities ------ --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains utility functions and a wrapper class analogous to
// CallSite for accessing the fields of gc.statepoint, gc.relocate, and
// gc.result intrinsics
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_STATEPOINT_H
#define LLVM_IR_STATEPOINT_H

#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Compiler.h"

namespace llvm {
/// The statepoint intrinsic accepts a set of flags as its third argument.
/// Valid values come out of this set.
enum class StatepointFlags {
  None = 0,
  GCTransition = 1, ///< Indicates that this statepoint is a transition from
                    ///< GC-aware code to code that is not GC-aware.

  MaskAll = GCTransition ///< A bitmask that includes all valid flags.
};

class GCRelocateOperands;
class ImmutableStatepoint;

// Predicates recognizing the gc.statepoint / gc.relocate / gc.result
// intrinsic calls behind the various handle types.
bool isStatepoint(const ImmutableCallSite &CS);
bool isStatepoint(const Value *V);
bool isStatepoint(const Value &V);

bool isGCRelocate(const Value *V);
bool isGCRelocate(const ImmutableCallSite &CS);

bool isGCResult(const Value *V);
bool isGCResult(const ImmutableCallSite &CS);

/// Analogous to CallSiteBase, this provides most of the actual
/// functionality for Statepoint and ImmutableStatepoint.  It is
/// templatized to allow easily specializing of const and non-const
/// concrete subtypes.  This is structured analogous to CallSite
/// rather than the IntrinsicInst.h helpers since we want to support
/// invokable statepoints in the near future.
template <typename FunTy, typename InstructionTy, typename ValueTy,
          typename CallSiteTy>
class StatepointBase {
  CallSiteTy StatepointCS;

  // Statepoint handles are value-like views; they are never heap-allocated.
  void *operator new(size_t, unsigned) = delete;
  void *operator new(size_t s) = delete;

protected:
  explicit StatepointBase(InstructionTy *I) {
    if (isStatepoint(I)) {
      StatepointCS = CallSiteTy(I);
      assert(StatepointCS && "isStatepoint implies CallSite");
    }
  }
  explicit StatepointBase(CallSiteTy CS) {
    if (isStatepoint(CS))
      StatepointCS = CS;
  }

public:
  typedef typename CallSiteTy::arg_iterator arg_iterator;

  // Fixed operand positions within the gc.statepoint call.
  enum {
    IDPos = 0,
    NumPatchBytesPos = 1,
    CalledFunctionPos = 2,
    NumCallArgsPos = 3,
    FlagsPos = 4,
    CallArgsBeginPos = 5,
  };

  explicit operator bool() const {
    // We do not assign non-statepoint CallSites to StatepointCS.
    return (bool)StatepointCS;
  }

  /// Return the underlying CallSite.
  CallSiteTy getCallSite() const {
    assert(*this && "check validity first!");
    return StatepointCS;
  }

  /// Return the StatepointFlags bitmask stored on the statepoint.
  uint64_t getFlags() const {
    return cast<ConstantInt>(getCallSite().getArgument(FlagsPos))
        ->getZExtValue();
  }

  /// Return the ID associated with this statepoint.
  uint64_t getID() const {
    const Value *IDVal = getCallSite().getArgument(IDPos);
    return cast<ConstantInt>(IDVal)->getZExtValue();
  }

  /// Return the number of patchable bytes associated with this statepoint.
  uint32_t getNumPatchBytes() const {
    const Value *NumPatchBytesVal = getCallSite().getArgument(NumPatchBytesPos);
    uint64_t NumPatchBytes =
        cast<ConstantInt>(NumPatchBytesVal)->getZExtValue();
    assert(isInt<32>(NumPatchBytes) && "should fit in 32 bits!");
    return NumPatchBytes;
  }

  /// Return the value actually being called or invoked.
  ValueTy *getCalledValue() const {
    return getCallSite().getArgument(CalledFunctionPos);
  }

  InstructionTy *getInstruction() const {
    return getCallSite().getInstruction();
  }

  /// Return the function being called if this is a direct call, otherwise
  /// return null (if it's an indirect call).
  FunTy *getCalledFunction() const {
    return dyn_cast<Function>(getCalledValue());
  }

  /// Return the caller function for this statepoint.
  FunTy *getCaller() const { return getCallSite().getCaller(); }

  /// Determine if the statepoint cannot unwind.
  bool doesNotThrow() const {
    Function *F = getCalledFunction();
    return getCallSite().doesNotThrow() || (F ? F->doesNotThrow() : false);
  }

  /// Return the type of the value returned by the call underlying the
  /// statepoint.
  Type *getActualReturnType() const {
    auto *FTy = cast<FunctionType>(
        cast<PointerType>(getCalledValue()->getType())->getElementType());
    return FTy->getReturnType();
  }

  /// Number of arguments to be passed to the actual callee.
  int getNumCallArgs() const {
    const Value *NumCallArgsVal = getCallSite().getArgument(NumCallArgsPos);
    return cast<ConstantInt>(NumCallArgsVal)->getZExtValue();
  }

  size_t arg_size() const { return getNumCallArgs(); }
  typename CallSiteTy::arg_iterator arg_begin() const {
    assert(CallArgsBeginPos <= (int)getCallSite().arg_size());
    return getCallSite().arg_begin() + CallArgsBeginPos;
  }
  typename CallSiteTy::arg_iterator arg_end() const {
    auto I = arg_begin() + arg_size();
    assert((getCallSite().arg_end() - I) >= 0);
    return I;
  }

  ValueTy *getArgument(unsigned Index) {
    assert(Index < arg_size() && "out of bounds!");
    return *(arg_begin() + Index);
  }

  /// range adapter for call arguments
  iterator_range<arg_iterator> call_args() const {
    return iterator_range<arg_iterator>(arg_begin(), arg_end());
  }

  /// \brief Return true if the call or the callee has the given attribute.
  bool paramHasAttr(unsigned i, Attribute::AttrKind A) const {
    Function *F = getCalledFunction();
    return getCallSite().paramHasAttr(i + CallArgsBeginPos, A) ||
           (F ? F->getAttributes().hasAttribute(i, A) : false);
  }

  /// Number of GC transition args.
  int getNumTotalGCTransitionArgs() const {
    // The length prefix for this section sits immediately after the call
    // arguments.
    const Value *NumGCTransitionArgs = *arg_end();
    return cast<ConstantInt>(NumGCTransitionArgs)->getZExtValue();
  }
  typename CallSiteTy::arg_iterator gc_transition_args_begin() const {
    auto I = arg_end() + 1;
    assert((getCallSite().arg_end() - I) >= 0);
    return I;
  }
  typename CallSiteTy::arg_iterator gc_transition_args_end() const {
    auto I = gc_transition_args_begin() + getNumTotalGCTransitionArgs();
    assert((getCallSite().arg_end() - I) >= 0);
    return I;
  }

  /// range adapter for GC transition arguments
  iterator_range<arg_iterator> gc_transition_args() const {
    return iterator_range<arg_iterator>(gc_transition_args_begin(),
                                        gc_transition_args_end());
  }

  /// Number of additional arguments excluding those intended
  /// for garbage collection.
  int getNumTotalVMSArgs() const {
    const Value *NumVMSArgs = *gc_transition_args_end();
    return cast<ConstantInt>(NumVMSArgs)->getZExtValue();
  }

  typename CallSiteTy::arg_iterator vm_state_begin() const {
    auto I = gc_transition_args_end() + 1;
    assert((getCallSite().arg_end() - I) >= 0);
    return I;
  }
  typename CallSiteTy::arg_iterator vm_state_end() const {
    auto I = vm_state_begin() + getNumTotalVMSArgs();
    assert((getCallSite().arg_end() - I) >= 0);
    return I;
  }

  /// range adapter for vm state arguments
  iterator_range<arg_iterator> vm_state_args() const {
    return iterator_range<arg_iterator>(vm_state_begin(), vm_state_end());
  }

  // The GC pointer section runs from the end of the vm state section to the
  // end of the statepoint's operand list.
  typename CallSiteTy::arg_iterator gc_args_begin() const {
    return vm_state_end();
  }
  typename CallSiteTy::arg_iterator gc_args_end() const {
    return getCallSite().arg_end();
  }

  /// range adapter for gc arguments
  iterator_range<arg_iterator> gc_args() const {
    return iterator_range<arg_iterator>(gc_args_begin(), gc_args_end());
  }

  /// Get list of all gc relocates linked to this statepoint
  /// May contain several relocations for the same base/derived pair.
  /// For example this could happen due to relocations on unwinding
  /// path of invoke.
  std::vector<GCRelocateOperands> getRelocates() const;

  /// Get the experimental_gc_result call tied to this statepoint.  Can be
  /// nullptr if there isn't a gc_result tied to this statepoint.  Guaranteed
  /// to be a CallInst if non-null.
  InstructionTy *getGCResult() const {
    for (auto *U : getInstruction()->users())
      if (isGCResult(U))
        return cast<CallInst>(U);

    return nullptr;
  }

#ifndef NDEBUG
  /// Asserts if this statepoint is malformed.  Common cases for failure
  /// include incorrect length prefixes for variable length sections or
  /// illegal values for parameters.
  void verify() {
    assert(getNumCallArgs() >= 0 &&
           "number of arguments to actually callee can't be negative");

    // The internal asserts in the iterator accessors do the rest.
    (void)arg_begin();
    (void)arg_end();
    (void)gc_transition_args_begin();
    (void)gc_transition_args_end();
    (void)vm_state_begin();
    (void)vm_state_end();
    (void)gc_args_begin();
    (void)gc_args_end();
  }
#endif
};

/// A specialization of its base class for read only access
/// to a gc.statepoint.
class ImmutableStatepoint
    : public StatepointBase<const Function, const Instruction, const Value,
                            ImmutableCallSite> {
  typedef StatepointBase<const Function, const Instruction, const Value,
                         ImmutableCallSite> Base;

public:
  explicit ImmutableStatepoint(const Instruction *I) : Base(I) {}
  explicit ImmutableStatepoint(ImmutableCallSite CS) : Base(CS) {}
};

/// A specialization of its base class for read-write access
/// to a gc.statepoint.
class Statepoint
    : public StatepointBase<Function, Instruction, Value, CallSite> {
  typedef StatepointBase<Function, Instruction, Value, CallSite> Base;

public:
  explicit Statepoint(Instruction *I) : Base(I) {}
  explicit Statepoint(CallSite CS) : Base(CS) {}
};

/// Wraps a call to a gc.relocate and provides access to its operands.
/// TODO: This should likely be refactored to resemble the wrappers in
/// IntrinsicInst.h.
class GCRelocateOperands {
  ImmutableCallSite RelocateCS;

public:
  GCRelocateOperands(const User *U) : RelocateCS(U) { assert(isGCRelocate(U)); }
  GCRelocateOperands(const Instruction *inst) : RelocateCS(inst) {
    assert(isGCRelocate(inst));
  }
  GCRelocateOperands(CallSite CS) : RelocateCS(CS) { assert(isGCRelocate(CS)); }

  /// Return true if this relocate is tied to the invoke statepoint.
  /// This includes relocates which are on the unwinding path.
  bool isTiedToInvoke() const {
    const Value *Token = RelocateCS.getArgument(0);

    return isa<ExtractValueInst>(Token) || isa<InvokeInst>(Token);
  }

  /// Get enclosed relocate intrinsic
  ImmutableCallSite getUnderlyingCallSite() { return RelocateCS; }

  /// The statepoint with which this gc.relocate is associated.
  const Instruction *getStatepoint() {
    const Value *Token = RelocateCS.getArgument(0);

    // This takes care both of relocates for call statepoints and relocates
    // on normal path of invoke statepoint.
    if (!isa<ExtractValueInst>(Token)) {
      return cast<Instruction>(Token);
    }

    // This relocate is on exceptional path of an invoke statepoint
    const BasicBlock *InvokeBB =
        cast<Instruction>(Token)->getParent()->getUniquePredecessor();

    assert(InvokeBB && "safepoints should have unique landingpads");
    assert(InvokeBB->getTerminator() &&
           "safepoint block should be well formed");
    assert(isStatepoint(InvokeBB->getTerminator()));

    return InvokeBB->getTerminator();
  }

  /// The index into the associate statepoint's argument list
  /// which contains the base pointer of the pointer whose
  /// relocation this gc.relocate describes.
  unsigned getBasePtrIndex() {
    return cast<ConstantInt>(RelocateCS.getArgument(1))->getZExtValue();
  }

  /// The index into the associate statepoint's argument list which
  /// contains the pointer whose relocation this gc.relocate describes.
  unsigned getDerivedPtrIndex() {
    return cast<ConstantInt>(RelocateCS.getArgument(2))->getZExtValue();
  }

  /// Return the base pointer operand of the associated statepoint.
  Value *getBasePtr() {
    ImmutableCallSite CS(getStatepoint());
    return *(CS.arg_begin() + getBasePtrIndex());
  }

  /// Return the derived pointer operand of the associated statepoint.
  Value *getDerivedPtr() {
    ImmutableCallSite CS(getStatepoint());
    return *(CS.arg_begin() + getDerivedPtrIndex());
  }
};

template <typename FunTy, typename InstructionTy, typename ValueTy,
          typename CallSiteTy>
std::vector<GCRelocateOperands>
StatepointBase<FunTy, InstructionTy, ValueTy, CallSiteTy>::getRelocates()
    const {

  std::vector<GCRelocateOperands> Result;

  // NOTE: intentionally shadows the member of the same name with a local
  // copy of the underlying call site.
  CallSiteTy StatepointCS = getCallSite();

  // Search for relocated pointers.  Note that working backwards from the
  // gc_relocates ensures that we only get pairs which are actually relocated
  // and used after the statepoint.
  for (const User *U : getInstruction()->users())
    if (isGCRelocate(U))
      Result.push_back(GCRelocateOperands(U));

  if (!StatepointCS.isInvoke())
    return Result;

  // We need to scan through exceptional relocations if it is invoke statepoint
  LandingPadInst *LandingPad =
      cast<InvokeInst>(getInstruction())->getLandingPadInst();

  // Search for extract value from landingpad instruction to which
  // gc relocates will be attached
  for (const User *LandingPadUser : LandingPad->users()) {
    if (!isa<ExtractValueInst>(LandingPadUser))
      continue;

    // gc relocates should be attached to this extract value
    for (const User *U : LandingPadUser->users())
      if (isGCRelocate(U))
        Result.push_back(GCRelocateOperands(U));
  }
  return Result;
}
}

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/PassManagerInternal.h
//===- PassManager internal APIs and implementation details -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This header provides internal APIs and implementation details used by the
/// pass management interfaces exposed in PassManager.h. To understand more
/// context of why these particular interfaces are needed, see that header
/// file. None of these APIs should be used elsewhere.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_PASSMANAGERINTERNAL_H
#define LLVM_IR_PASSMANAGERINTERNAL_H

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/STLExtras.h"

namespace llvm {

template <typename IRUnitT> class AnalysisManager;
class PreservedAnalyses;

/// \brief Implementation details of the pass manager interfaces.
namespace detail {

/// \brief Template for the abstract base class used to dispatch
/// polymorphically over pass objects.
template <typename IRUnitT> struct PassConcept {
  // Boiler plate necessary for the container of derived classes.
  virtual ~PassConcept() {}

  /// \brief The polymorphic API which runs the pass over a given IR entity.
  ///
  /// Note that actual pass object can omit the analysis manager argument if
  /// desired. Also that the analysis manager may be null if there is no
  /// analysis manager in the pass pipeline.
  virtual PreservedAnalyses run(IRUnitT &IR, AnalysisManager<IRUnitT> *AM) = 0;

  /// \brief Polymorphic method to access the name of a pass.
  virtual StringRef name() = 0;
};

/// \brief SFINAE metafunction for computing whether \c PassT has a run method
/// accepting an \c AnalysisManager<IRUnitT>.
template <typename IRUnitT, typename PassT, typename ResultT>
class PassRunAcceptsAnalysisManager {
  typedef char SmallType;
  struct BigType {
    char a, b;
  };

  // Checker<T, &T::run> is only well-formed when T has a `run` member of
  // exactly this signature; the sizeof trick below exploits that.
  template <typename T, ResultT (T::*)(IRUnitT &, AnalysisManager<IRUnitT> *)>
  struct Checker;

  template <typename T> static SmallType f(Checker<T, &T::run> *);
  template <typename T> static BigType f(...);

public:
  enum { Value = sizeof(f<PassT>(nullptr)) == sizeof(SmallType) };
};

/// \brief A template wrapper used to implement the polymorphic API.
///
/// Can be instantiated for any object which provides a \c run method accepting
/// an \c IRUnitT. It requires the pass to be a copyable object. When the
/// \c run method also accepts an \c AnalysisManager<IRUnitT>*, we pass it
/// along.
template <typename IRUnitT, typename PassT,
          typename PreservedAnalysesT = PreservedAnalyses,
          bool AcceptsAnalysisManager = PassRunAcceptsAnalysisManager<
              IRUnitT, PassT, PreservedAnalysesT>::Value>
struct PassModel;

/// \brief Specialization of \c PassModel for passes that accept an analysis
/// manager.
template <typename IRUnitT, typename PassT, typename PreservedAnalysesT>
struct PassModel<IRUnitT, PassT, PreservedAnalysesT, true>
    : PassConcept<IRUnitT> {
  explicit PassModel(PassT Pass) : Pass(std::move(Pass)) {}
  // We have to explicitly define all the special member functions because MSVC
  // refuses to generate them.
  PassModel(const PassModel &Arg) : Pass(Arg.Pass) {}
  PassModel(PassModel &&Arg) : Pass(std::move(Arg.Pass)) {}
  friend void swap(PassModel &LHS, PassModel &RHS) {
    using std::swap;
    swap(LHS.Pass, RHS.Pass);
  }
  PassModel &operator=(PassModel RHS) {
    swap(*this, RHS);
    return *this;
  }

  PreservedAnalysesT run(IRUnitT &IR, AnalysisManager<IRUnitT> *AM) override {
    return Pass.run(IR, AM);
  }
  StringRef name() override { return PassT::name(); }
  PassT Pass;
};

/// \brief Specialization of \c PassModel for passes that do not accept an
/// analysis manager.
template <typename IRUnitT, typename PassT, typename PreservedAnalysesT>
struct PassModel<IRUnitT, PassT, PreservedAnalysesT, false>
    : PassConcept<IRUnitT> {
  explicit PassModel(PassT Pass) : Pass(std::move(Pass)) {}
  // We have to explicitly define all the special member functions because MSVC
  // refuses to generate them.
  PassModel(const PassModel &Arg) : Pass(Arg.Pass) {}
  PassModel(PassModel &&Arg) : Pass(std::move(Arg.Pass)) {}
  friend void swap(PassModel &LHS, PassModel &RHS) {
    using std::swap;
    swap(LHS.Pass, RHS.Pass);
  }
  PassModel &operator=(PassModel RHS) {
    swap(*this, RHS);
    return *this;
  }

  // The analysis manager argument is deliberately dropped when forwarding to
  // a pass whose run method does not accept one.
  PreservedAnalysesT run(IRUnitT &IR, AnalysisManager<IRUnitT> *AM) override {
    return Pass.run(IR);
  }
  StringRef name() override { return PassT::name(); }
  PassT Pass;
};

/// \brief Abstract concept of an analysis result.
///
/// This concept is parameterized over the IR unit that this result pertains
/// to.
template <typename IRUnitT> struct AnalysisResultConcept {
  virtual ~AnalysisResultConcept() {}

  /// \brief Method to try and mark a result as invalid.
  ///
  /// When the outer analysis manager detects a change in some underlying
  /// unit of the IR, it will call this method on all of the results cached.
  ///
  /// This method also receives a set of preserved analyses which can be used
  /// to avoid invalidation because the pass which changed the underlying IR
  /// took care to update or preserve the analysis result in some way.
  ///
  /// \returns true if the result is indeed invalid (the default).
  virtual bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA) = 0;
};

/// \brief SFINAE metafunction for computing whether \c ResultT provides an
/// \c invalidate member function.
template <typename IRUnitT, typename ResultT> class ResultHasInvalidateMethod {
  typedef char SmallType;
  struct BigType {
    char a, b;
  };

  // Checker<T, &T::invalidate> is only well-formed when T has an
  // `invalidate` member of exactly this signature; the sizeof trick below
  // exploits that.
  template <typename T, bool (T::*)(IRUnitT &, const PreservedAnalyses &)>
  struct Checker;

  template <typename T> static SmallType f(Checker<T, &T::invalidate> *);
  template <typename T> static BigType f(...);

public:
  enum { Value = sizeof(f<ResultT>(nullptr)) == sizeof(SmallType) };
};

/// \brief Wrapper to model the analysis result concept.
///
/// By default, this will implement the invalidate method with a trivial
/// implementation so that the actual analysis result doesn't need to provide
/// an invalidation handler. It is only selected when the invalidation handler
/// is not part of the ResultT's interface.
template <typename IRUnitT, typename PassT, typename ResultT,
          typename PreservedAnalysesT = PreservedAnalyses,
          bool HasInvalidateHandler =
              ResultHasInvalidateMethod<IRUnitT, ResultT>::Value>
struct AnalysisResultModel;

/// \brief Specialization of \c AnalysisResultModel which provides the default
/// invalidate functionality.
template <typename IRUnitT, typename PassT, typename ResultT,
          typename PreservedAnalysesT>
struct AnalysisResultModel<IRUnitT, PassT, ResultT, PreservedAnalysesT, false>
    : AnalysisResultConcept<IRUnitT> {
  explicit AnalysisResultModel(ResultT Result) : Result(std::move(Result)) {}
  // We have to explicitly define all the special member functions because MSVC
  // refuses to generate them.
  AnalysisResultModel(const AnalysisResultModel &Arg) : Result(Arg.Result) {}
  AnalysisResultModel(AnalysisResultModel &&Arg)
      : Result(std::move(Arg.Result)) {}
  friend void swap(AnalysisResultModel &LHS, AnalysisResultModel &RHS) {
    using std::swap;
    swap(LHS.Result, RHS.Result);
  }
  AnalysisResultModel &operator=(AnalysisResultModel RHS) {
    swap(*this, RHS);
    return *this;
  }

  /// \brief The model bases invalidation solely on being in the preserved
  /// set.
  //
  // FIXME: We should actually use two different concepts for analysis results
  // rather than two different models, and avoid the indirect function call
  // for ones that use the trivial behavior.
  bool invalidate(IRUnitT &, const PreservedAnalysesT &PA) override {
    return !PA.preserved(PassT::ID());
  }

  ResultT Result;
};

/// \brief Specialization of \c AnalysisResultModel which delegates invalidate
/// handling to \c ResultT.
template <typename IRUnitT, typename PassT, typename ResultT,
          typename PreservedAnalysesT>
struct AnalysisResultModel<IRUnitT, PassT, ResultT, PreservedAnalysesT, true>
    : AnalysisResultConcept<IRUnitT> {
  explicit AnalysisResultModel(ResultT Result) : Result(std::move(Result)) {}
  // We have to explicitly define all the special member functions because MSVC
  // refuses to generate them.
  AnalysisResultModel(const AnalysisResultModel &Arg) : Result(Arg.Result) {}
  AnalysisResultModel(AnalysisResultModel &&Arg)
      : Result(std::move(Arg.Result)) {}
  friend void swap(AnalysisResultModel &LHS, AnalysisResultModel &RHS) {
    using std::swap;
    swap(LHS.Result, RHS.Result);
  }
  AnalysisResultModel &operator=(AnalysisResultModel RHS) {
    swap(*this, RHS);
    return *this;
  }

  /// \brief The model delegates to the \c ResultT method.
  bool invalidate(IRUnitT &IR, const PreservedAnalysesT &PA) override {
    return Result.invalidate(IR, PA);
  }

  ResultT Result;
};

/// \brief Abstract concept of an analysis pass.
///
/// This concept is parameterized over the IR unit that it can run over and
/// produce an analysis result.
template <typename IRUnitT> struct AnalysisPassConcept {
  virtual ~AnalysisPassConcept() {}

  /// \brief Method to run this analysis over a unit of IR.
  /// \returns A unique_ptr to the analysis result object to be queried by
  /// users.
  virtual std::unique_ptr<AnalysisResultConcept<IRUnitT>>
  run(IRUnitT &IR, AnalysisManager<IRUnitT> *AM) = 0;

  /// \brief Polymorphic method to access the name of a pass.
  virtual StringRef name() = 0;
};

/// \brief Wrapper to model the analysis pass concept.
///
/// Can wrap any type which implements a suitable \c run method. The method
/// must accept the IRUnitT as an argument and produce an object which can be
/// wrapped in a \c AnalysisResultModel.
template <typename IRUnitT, typename PassT,
          bool AcceptsAnalysisManager = PassRunAcceptsAnalysisManager<
              IRUnitT, PassT, typename PassT::Result>::Value>
struct AnalysisPassModel;

/// \brief Specialization of \c AnalysisPassModel which passes an
/// \c AnalysisManager to PassT's run method.
template <typename IRUnitT, typename PassT>
struct AnalysisPassModel<IRUnitT, PassT, true> : AnalysisPassConcept<IRUnitT> {
  explicit AnalysisPassModel(PassT Pass) : Pass(std::move(Pass)) {}
  // We have to explicitly define all the special member functions because MSVC
  // refuses to generate them.
  AnalysisPassModel(const AnalysisPassModel &Arg) : Pass(Arg.Pass) {}
  AnalysisPassModel(AnalysisPassModel &&Arg) : Pass(std::move(Arg.Pass)) {}
  friend void swap(AnalysisPassModel &LHS, AnalysisPassModel &RHS) {
    using std::swap;
    swap(LHS.Pass, RHS.Pass);
  }
  AnalysisPassModel &operator=(AnalysisPassModel RHS) {
    swap(*this, RHS);
    return *this;
  }

  // FIXME: Replace PassT::Result with type traits when we use C++11.
  typedef AnalysisResultModel<IRUnitT, PassT, typename PassT::Result>
      ResultModelT;

  /// \brief The model delegates to the \c PassT::run method.
  ///
  /// The return is wrapped in an \c AnalysisResultModel.
  std::unique_ptr<AnalysisResultConcept<IRUnitT>>
  run(IRUnitT &IR, AnalysisManager<IRUnitT> *AM) override {
    return make_unique<ResultModelT>(Pass.run(IR, AM));
  }

  /// \brief The model delegates to a static \c PassT::name method.
  ///
  /// The returned string ref must point to constant immutable data!
  StringRef name() override { return PassT::name(); }

  PassT Pass;
};

/// \brief Specialization of \c AnalysisPassModel which does not pass an
/// \c AnalysisManager to PassT's run method.
template <typename IRUnitT, typename PassT>
struct AnalysisPassModel<IRUnitT, PassT, false> : AnalysisPassConcept<IRUnitT> {
  explicit AnalysisPassModel(PassT Pass) : Pass(std::move(Pass)) {}
  // We have to explicitly define all the special member functions because MSVC
  // refuses to generate them.
  AnalysisPassModel(const AnalysisPassModel &Arg) : Pass(Arg.Pass) {}
  AnalysisPassModel(AnalysisPassModel &&Arg) : Pass(std::move(Arg.Pass)) {}
  friend void swap(AnalysisPassModel &LHS, AnalysisPassModel &RHS) {
    using std::swap;
    swap(LHS.Pass, RHS.Pass);
  }
  AnalysisPassModel &operator=(AnalysisPassModel RHS) {
    swap(*this, RHS);
    return *this;
  }

  // FIXME: Replace PassT::Result with type traits when we use C++11.
  typedef AnalysisResultModel<IRUnitT, PassT, typename PassT::Result>
      ResultModelT;

  /// \brief The model delegates to the \c PassT::run method.
  ///
  /// The return is wrapped in an \c AnalysisResultModel.
  std::unique_ptr<AnalysisResultConcept<IRUnitT>>
  run(IRUnitT &IR, AnalysisManager<IRUnitT> *) override {
    return make_unique<ResultModelT>(Pass.run(IR));
  }

  /// \brief The model delegates to a static \c PassT::name method.
  ///
  /// The returned string ref must point to constant immutable data!
  StringRef name() override { return PassT::name(); }

  PassT Pass;
};

} // End namespace detail
}

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/IRBuilder.h
//===---- llvm/IRBuilder.h - Builder for LLVM Instructions ------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the IRBuilder class, which is used as a convenient way // to create LLVM instructions with a consistent and simplified interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_IRBUILDER_H #define LLVM_IR_IRBUILDER_H #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/Twine.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/ConstantFolder.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Function.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Operator.h" #include "llvm/IR/ValueHandle.h" #include "llvm/Support/CBindingWrapping.h" namespace llvm { class MDNode; /// \brief This provides the default implementation of the IRBuilder /// 'InsertHelper' method that is called whenever an instruction is created by /// IRBuilder and needs to be inserted. /// /// By default, this inserts the instruction at the insertion point. template <bool preserveNames = true> class IRBuilderDefaultInserter { protected: void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB, BasicBlock::iterator InsertPt) const { if (BB) BB->getInstList().insert(InsertPt, I); if (preserveNames) I->setName(Name); } }; /// \brief Common base class shared among various IRBuilders. 
class IRBuilderBase { DebugLoc CurDbgLocation; protected: BasicBlock *BB; BasicBlock::iterator InsertPt; LLVMContext &Context; MDNode *DefaultFPMathTag; FastMathFlags FMF; public: IRBuilderBase(LLVMContext &context, MDNode *FPMathTag = nullptr) : Context(context), DefaultFPMathTag(FPMathTag), FMF() { ClearInsertionPoint(); } //===--------------------------------------------------------------------===// // Builder configuration methods //===--------------------------------------------------------------------===// /// \brief Clear the insertion point: created instructions will not be /// inserted into a block. void ClearInsertionPoint() { BB = nullptr; InsertPt = nullptr; } BasicBlock *GetInsertBlock() const { return BB; } BasicBlock::iterator GetInsertPoint() const { return InsertPt; } LLVMContext &getContext() const { return Context; } /// \brief This specifies that created instructions should be appended to the /// end of the specified block. void SetInsertPoint(BasicBlock *TheBB) { BB = TheBB; InsertPt = BB->end(); } /// \brief This specifies that created instructions should be inserted before /// the specified instruction. void SetInsertPoint(Instruction *I) { BB = I->getParent(); InsertPt = I; assert(I != BB->end() && "Can't read debug loc from end()"); SetCurrentDebugLocation(I->getDebugLoc()); } /// \brief This specifies that created instructions should be inserted at the /// specified point. void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) { BB = TheBB; InsertPt = IP; if (IP != TheBB->end()) SetCurrentDebugLocation(IP->getDebugLoc()); } /// \brief Set location information used by debugging information. void SetCurrentDebugLocation(DebugLoc L) { CurDbgLocation = std::move(L); // HLSL Change - begin // Don't propagate debug locations at line 0 if (CurDbgLocation && CurDbgLocation.getLine() == 0) CurDbgLocation = nullptr; // HLSL Change - end } /// \brief Get location information used by debugging information. 
const DebugLoc &getCurrentDebugLocation() const { return CurDbgLocation; } /// \brief If this builder has a current debug location, set it on the /// specified instruction. void SetInstDebugLocation(Instruction *I) const { if (CurDbgLocation) I->setDebugLoc(CurDbgLocation); } /// \brief Get the return type of the current function that we're emitting /// into. Type *getCurrentFunctionReturnType() const; /// InsertPoint - A saved insertion point. class InsertPoint { BasicBlock *Block; BasicBlock::iterator Point; public: /// \brief Creates a new insertion point which doesn't point to anything. InsertPoint() : Block(nullptr) {} /// \brief Creates a new insertion point at the given location. InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint) : Block(InsertBlock), Point(InsertPoint) {} /// \brief Returns true if this insert point is set. bool isSet() const { return (Block != nullptr); } llvm::BasicBlock *getBlock() const { return Block; } llvm::BasicBlock::iterator getPoint() const { return Point; } }; /// \brief Returns the current insert point. InsertPoint saveIP() const { return InsertPoint(GetInsertBlock(), GetInsertPoint()); } /// \brief Returns the current insert point, clearing it in the process. InsertPoint saveAndClearIP() { InsertPoint IP(GetInsertBlock(), GetInsertPoint()); ClearInsertionPoint(); return IP; } /// \brief Sets the current insert point to a previously-saved location. void restoreIP(InsertPoint IP) { if (IP.isSet()) SetInsertPoint(IP.getBlock(), IP.getPoint()); else ClearInsertionPoint(); } /// \brief Get the floating point math metadata being used. MDNode *getDefaultFPMathTag() const { return DefaultFPMathTag; } /// \brief Get the flags to be applied to created floating point ops FastMathFlags getFastMathFlags() const { return FMF; } /// \brief Clear the fast-math flags. void clearFastMathFlags() { FMF.clear(); } /// \brief Set the floating point math metadata to be used. 
  void SetDefaultFPMathTag(MDNode *FPMathTag) { DefaultFPMathTag = FPMathTag; }

  /// \brief Set the fast-math flags to be used with generated fp-math operators
  void SetFastMathFlags(FastMathFlags NewFMF) { FMF = NewFMF; }

  //===--------------------------------------------------------------------===//
  // RAII helpers.
  //===--------------------------------------------------------------------===//

  // \brief RAII object that stores the current insertion point and restores it
  // when the object is destroyed. This includes the debug location.
  class InsertPointGuard {
    IRBuilderBase &Builder;
    // AssertingVH fires if the saved block is deleted while the guard is live.
    AssertingVH<BasicBlock> Block;
    BasicBlock::iterator Point;
    DebugLoc DbgLoc;

    InsertPointGuard(const InsertPointGuard &) = delete;
    InsertPointGuard &operator=(const InsertPointGuard &) = delete;

  public:
    InsertPointGuard(IRBuilderBase &B)
        : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()),
          DbgLoc(B.getCurrentDebugLocation()) {}

    ~InsertPointGuard() {
      Builder.restoreIP(InsertPoint(Block, Point));
      Builder.SetCurrentDebugLocation(DbgLoc);
    }
  };

  // \brief RAII object that stores the current fast math settings and restores
  // them when the object is destroyed.
  class FastMathFlagGuard {
    IRBuilderBase &Builder;
    FastMathFlags FMF;
    MDNode *FPMathTag;

    FastMathFlagGuard(const FastMathFlagGuard &) = delete;
    FastMathFlagGuard &operator=(const FastMathFlagGuard &) = delete;

  public:
    FastMathFlagGuard(IRBuilderBase &B)
        : Builder(B), FMF(B.FMF), FPMathTag(B.DefaultFPMathTag) {}

    ~FastMathFlagGuard() {
      Builder.FMF = FMF;
      Builder.DefaultFPMathTag = FPMathTag;
    }
  };

  //===--------------------------------------------------------------------===//
  // Miscellaneous creation methods.
  //===--------------------------------------------------------------------===//

  /// \brief Make a new global variable with initializer type i8*
  ///
  /// Make a new global variable with an initializer that has array of i8 type
  /// filled in with the null terminated string value specified. The new global
  /// variable will be marked mergable with any others of the same contents. If
  /// Name is specified, it is the name of the global variable created.
  GlobalVariable *CreateGlobalString(StringRef Str, const Twine &Name = "",
                                     unsigned AddressSpace = 0);

  /// \brief Get a constant value representing either true or false.
  ConstantInt *getInt1(bool V) {
    return ConstantInt::get(getInt1Ty(), V);
  }

  /// \brief Get the constant value for i1 true.
  ConstantInt *getTrue() {
    return ConstantInt::getTrue(Context);
  }

  /// \brief Get the constant value for i1 false.
  ConstantInt *getFalse() {
    return ConstantInt::getFalse(Context);
  }

  /// \brief Get a constant 8-bit value.
  ConstantInt *getInt8(uint8_t C) {
    return ConstantInt::get(getInt8Ty(), C);
  }

  /// \brief Get a constant 16-bit value.
  ConstantInt *getInt16(uint16_t C) {
    return ConstantInt::get(getInt16Ty(), C);
  }

  /// \brief Get a constant 32-bit value.
  ConstantInt *getInt32(uint32_t C) {
    return ConstantInt::get(getInt32Ty(), C);
  }

  /// \brief Get a constant 64-bit value.
  ConstantInt *getInt64(uint64_t C) {
    return ConstantInt::get(getInt64Ty(), C);
  }

  /// \brief Get a constant N-bit value, zero extended or truncated from
  /// a 64-bit value.
  ConstantInt *getIntN(unsigned N, uint64_t C) {
    return ConstantInt::get(getIntNTy(N), C);
  }

  /// \brief Get a constant integer value.
  ConstantInt *getInt(const APInt &AI) {
    return ConstantInt::get(Context, AI);
  }

  //===--------------------------------------------------------------------===//
  // Type creation methods
  //===--------------------------------------------------------------------===//

  /// \brief Fetch the type representing a single bit
  IntegerType *getInt1Ty() {
    return Type::getInt1Ty(Context);
  }

  /// \brief Fetch the type representing an 8-bit integer.
  IntegerType *getInt8Ty() {
    return Type::getInt8Ty(Context);
  }

  /// \brief Fetch the type representing a 16-bit integer.
  IntegerType *getInt16Ty() {
    return Type::getInt16Ty(Context);
  }

  /// \brief Fetch the type representing a 32-bit integer.
  IntegerType *getInt32Ty() {
    return Type::getInt32Ty(Context);
  }

  /// \brief Fetch the type representing a 64-bit integer.
  IntegerType *getInt64Ty() {
    return Type::getInt64Ty(Context);
  }

  /// \brief Fetch the type representing a 128-bit integer.
  IntegerType *getInt128Ty() {
    return Type::getInt128Ty(Context);
  }

  /// \brief Fetch the type representing an N-bit integer.
  IntegerType *getIntNTy(unsigned N) {
    return Type::getIntNTy(Context, N);
  }

  /// \brief Fetch the type representing a 16-bit floating point value.
  Type *getHalfTy() {
    return Type::getHalfTy(Context);
  }

  /// \brief Fetch the type representing a 32-bit floating point value.
  Type *getFloatTy() {
    return Type::getFloatTy(Context);
  }

  /// \brief Fetch the type representing a 64-bit floating point value.
  Type *getDoubleTy() {
    return Type::getDoubleTy(Context);
  }

  /// \brief Fetch the type representing void.
  Type *getVoidTy() {
    return Type::getVoidTy(Context);
  }

  /// \brief Fetch the type representing a pointer to an 8-bit integer value.
  PointerType *getInt8PtrTy(unsigned AddrSpace = 0) {
    return Type::getInt8PtrTy(Context, AddrSpace);
  }

  /// \brief Fetch the type representing a pointer to an integer value.
  ///
  /// The width is taken from the given DataLayout's pointer size.
  IntegerType *getIntPtrTy(const DataLayout &DL, unsigned AddrSpace = 0) {
    return DL.getIntPtrType(Context, AddrSpace);
  }

  //===--------------------------------------------------------------------===//
  // Intrinsic creation methods
  //===--------------------------------------------------------------------===//

  /// \brief Create and insert a memset to the specified pointer and the
  /// specified value.
  ///
  /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
  /// specified, it will be added to the instruction. Likewise with alias.scope
  /// and noalias tags.
  // Convenience overload: wraps the constant Size in an i64 and forwards to
  // the Value* overload below.
  CallInst *CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, unsigned Align,
                         bool isVolatile = false, MDNode *TBAATag = nullptr,
                         MDNode *ScopeTag = nullptr,
                         MDNode *NoAliasTag = nullptr) {
    return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile,
                        TBAATag, ScopeTag, NoAliasTag);
  }

  CallInst *CreateMemSet(Value *Ptr, Value *Val, Value *Size, unsigned Align,
                         bool isVolatile = false, MDNode *TBAATag = nullptr,
                         MDNode *ScopeTag = nullptr,
                         MDNode *NoAliasTag = nullptr);

  /// \brief Create and insert a memcpy between the specified pointers.
  ///
  /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
  /// specified, it will be added to the instruction. Likewise with alias.scope
  /// and noalias tags.
  CallInst *CreateMemCpy(Value *Dst, Value *Src, uint64_t Size, unsigned Align,
                         bool isVolatile = false, MDNode *TBAATag = nullptr,
                         MDNode *TBAAStructTag = nullptr,
                         MDNode *ScopeTag = nullptr,
                         MDNode *NoAliasTag = nullptr) {
    return CreateMemCpy(Dst, Src, getInt64(Size), Align, isVolatile, TBAATag,
                        TBAAStructTag, ScopeTag, NoAliasTag);
  }

  CallInst *CreateMemCpy(Value *Dst, Value *Src, Value *Size, unsigned Align,
                         bool isVolatile = false, MDNode *TBAATag = nullptr,
                         MDNode *TBAAStructTag = nullptr,
                         MDNode *ScopeTag = nullptr,
                         MDNode *NoAliasTag = nullptr);

  /// \brief Create and insert a memmove between the specified
  /// pointers.
  ///
  /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
  /// specified, it will be added to the instruction. Likewise with alias.scope
  /// and noalias tags.
CallInst *CreateMemMove(Value *Dst, Value *Src, uint64_t Size, unsigned Align, bool isVolatile = false, MDNode *TBAATag = nullptr, MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr) { return CreateMemMove(Dst, Src, getInt64(Size), Align, isVolatile, TBAATag, ScopeTag, NoAliasTag); } CallInst *CreateMemMove(Value *Dst, Value *Src, Value *Size, unsigned Align, bool isVolatile = false, MDNode *TBAATag = nullptr, MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr); /// \brief Create a lifetime.start intrinsic. /// /// If the pointer isn't i8* it will be converted. CallInst *CreateLifetimeStart(Value *Ptr, ConstantInt *Size = nullptr); /// \brief Create a lifetime.end intrinsic. /// /// If the pointer isn't i8* it will be converted. CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = nullptr); /// \brief Create a call to Masked Load intrinsic CallInst *CreateMaskedLoad(Value *Ptr, unsigned Align, Value *Mask, Value *PassThru = 0, const Twine &Name = ""); /// \brief Create a call to Masked Store intrinsic CallInst *CreateMaskedStore(Value *Val, Value *Ptr, unsigned Align, Value *Mask); /// \brief Create an assume intrinsic call that allows the optimizer to /// assume that the provided condition will be true. CallInst *CreateAssumption(Value *Cond); /// \brief Create a call to the experimental.gc.statepoint intrinsic to /// start a new statepoint sequence. CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee, ArrayRef<Value *> CallArgs, ArrayRef<Value *> DeoptArgs, ArrayRef<Value *> GCArgs, const Twine &Name = ""); // \brief Conveninence function for the common case when CallArgs are filled // in using makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be // .get()'ed to get the Value pointer. 
  CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
                                   Value *ActualCallee, ArrayRef<Use> CallArgs,
                                   ArrayRef<Value *> DeoptArgs,
                                   ArrayRef<Value *> GCArgs,
                                   const Twine &Name = "");

  /// \brief Create an invoke to the experimental.gc.statepoint intrinsic to
  /// start a new statepoint sequence.
  InvokeInst *
  CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
                           Value *ActualInvokee, BasicBlock *NormalDest,
                           BasicBlock *UnwindDest, ArrayRef<Value *> InvokeArgs,
                           ArrayRef<Value *> DeoptArgs,
                           ArrayRef<Value *> GCArgs, const Twine &Name = "");

  // Convenience function for the common case when CallArgs are filled in using
  // makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be .get()'ed to
  // get the Value *.
  InvokeInst *
  CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
                           Value *ActualInvokee, BasicBlock *NormalDest,
                           BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
                           ArrayRef<Value *> DeoptArgs,
                           ArrayRef<Value *> GCArgs, const Twine &Name = "");

  /// \brief Create a call to the experimental.gc.result intrinsic to extract
  /// the result from a call wrapped in a statepoint.
  CallInst *CreateGCResult(Instruction *Statepoint,
                           Type *ResultType,
                           const Twine &Name = "");

  /// \brief Create a call to the experimental.gc.relocate intrinsics to
  /// project the relocated value of one pointer from the statepoint.
  CallInst *CreateGCRelocate(Instruction *Statepoint,
                             int BaseOffset,
                             int DerivedOffset,
                             Type *ResultType,
                             const Twine &Name = "");

private:
  /// \brief Create a call to a masked intrinsic with given Id.
  /// Masked intrinsic has only one overloaded type - data type.
  CallInst *CreateMaskedIntrinsic(Intrinsic::ID Id, ArrayRef<Value *> Ops,
                                  Type *DataTy, const Twine &Name = "");

  // Bitcast Ptr to i8* (in its address space) if needed by the mem intrinsics.
  Value *getCastedInt8PtrValue(Value *Ptr);
};

/// \brief This provides a uniform API for creating instructions and inserting
/// them into a basic block: either at the end of a BasicBlock, or at a specific
/// iterator location in a block.
///
/// Note that the builder does not expose the full generality of LLVM
/// instructions. For access to extra instruction properties, use the mutators
/// (e.g. setVolatile) on the instructions after they have been
/// created. Convenience state exists to specify fast-math flags and fp-math
/// tags.
///
/// The first template argument handles whether or not to preserve names in the
/// final instruction output. This defaults to on. The second template argument
/// specifies a class to use for creating constants. This defaults to creating
/// minimally folded constants. The third template argument allows clients to
/// specify custom insertion hooks that are called on every newly created
/// insertion.
template<bool preserveNames = true, typename T = ConstantFolder,
         typename Inserter = IRBuilderDefaultInserter<preserveNames> >
class IRBuilder : public IRBuilderBase, public Inserter {
  T Folder; // Constant folder consulted before emitting an instruction.
public:
  bool AllowFolding = true; // HLSL Change - Runtime flag on whether to do folding
  IRBuilder(LLVMContext &C, const T &F, const Inserter &I = Inserter(),
            MDNode *FPMathTag = nullptr)
    : IRBuilderBase(C, FPMathTag), Inserter(I), Folder(F) {
  }

  explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr)
    : IRBuilderBase(C, FPMathTag), Folder() {
  }

  explicit IRBuilder(BasicBlock *TheBB, const T &F, MDNode *FPMathTag = nullptr)
    : IRBuilderBase(TheBB->getContext(), FPMathTag), Folder(F) {
    SetInsertPoint(TheBB);
  }

  explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr)
    : IRBuilderBase(TheBB->getContext(), FPMathTag), Folder() {
    SetInsertPoint(TheBB);
  }

  explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr)
    : IRBuilderBase(IP->getContext(), FPMathTag), Folder() {
    SetInsertPoint(IP);
  }

  IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, const T& F,
            MDNode *FPMathTag = nullptr)
    : IRBuilderBase(TheBB->getContext(), FPMathTag), Folder(F) {
    SetInsertPoint(TheBB, IP);
  }

  IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
            MDNode *FPMathTag = nullptr)
    : IRBuilderBase(TheBB->getContext(), FPMathTag), Folder() {
    SetInsertPoint(TheBB, IP);
  }

  /// \brief Get the constant folder being used.
  const T &getFolder() { return Folder; }

  /// \brief Return true if this builder is configured to actually add the
  /// requested names to IR created through it.
  bool isNamePreserving() const { return preserveNames; }

  /// \brief Insert and return the specified instruction.
  ///
  /// Runs the Inserter hook and stamps the current debug location onto I.
  template<typename InstTy>
  InstTy *Insert(InstTy *I, const Twine &Name = "") const {
    this->InsertHelper(I, Name, BB, InsertPt);
    this->SetInstDebugLocation(I);
    return I;
  }

  /// \brief No-op overload to handle constants.
  Constant *Insert(Constant *C, const Twine& = "") const {
    return C;
  }

  //===--------------------------------------------------------------------===//
  // Instruction creation methods: Terminators
  //===--------------------------------------------------------------------===//

private:
  /// \brief Helper to add branch weight metadata onto an instruction.
  /// \returns The annotated instruction.
  template <typename InstTy>
  InstTy *addBranchWeights(InstTy *I, MDNode *Weights) {
    if (Weights)
      I->setMetadata(LLVMContext::MD_prof, Weights);
    return I;
  }

public:
  /// \brief Create a 'ret void' instruction.
  ReturnInst *CreateRetVoid() {
    return Insert(ReturnInst::Create(Context));
  }

  /// \brief Create a 'ret <val>' instruction.
  ReturnInst *CreateRet(Value *V) {
    return Insert(ReturnInst::Create(Context, V));
  }

  /// \brief Create a sequence of N insertvalue instructions,
  /// with one Value from the retVals array each, that build a aggregate
  /// return value one value at a time, and a ret instruction to return
  /// the resulting aggregate value.
  ///
  /// This is a convenience function for code that uses aggregate return values
  /// as a vehicle for having multiple return values.
  ReturnInst *CreateAggregateRet(Value *const *retVals, unsigned N) {
    // Build up the aggregate from undef, one insertvalue per element.
    Value *V = UndefValue::get(getCurrentFunctionReturnType());
    for (unsigned i = 0; i != N; ++i)
      V = CreateInsertValue(V, retVals[i], i, "mrv");
    return Insert(ReturnInst::Create(Context, V));
  }

  /// \brief Create an unconditional 'br label X' instruction.
  BranchInst *CreateBr(BasicBlock *Dest) {
    return Insert(BranchInst::Create(Dest));
  }

  /// \brief Create a conditional 'br Cond, TrueDest, FalseDest'
  /// instruction.
  BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
                           MDNode *BranchWeights = nullptr) {
    return Insert(addBranchWeights(BranchInst::Create(True, False, Cond),
                                   BranchWeights));
  }

  /// \brief Create a switch instruction with the specified value, default dest,
  /// and with a hint for the number of cases that will be added (for efficient
  /// allocation).
  SwitchInst *CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases = 10,
                           MDNode *BranchWeights = nullptr) {
    return Insert(addBranchWeights(SwitchInst::Create(V, Dest, NumCases),
                                   BranchWeights));
  }

  /// \brief Create an indirect branch instruction with the specified address
  /// operand, with an optional hint for the number of destinations that will be
  /// added (for efficient allocation).
  IndirectBrInst *CreateIndirectBr(Value *Addr, unsigned NumDests = 10) {
    return Insert(IndirectBrInst::Create(Addr, NumDests));
  }

  /// \brief Create an invoke instruction with no call arguments.
  InvokeInst *CreateInvoke(Value *Callee, BasicBlock *NormalDest,
                           BasicBlock *UnwindDest, const Twine &Name = "") {
    return Insert(InvokeInst::Create(Callee, NormalDest, UnwindDest, None),
                  Name);
  }
  /// \brief Create an invoke instruction with one call argument.
  InvokeInst *CreateInvoke(Value *Callee, BasicBlock *NormalDest,
                           BasicBlock *UnwindDest, Value *Arg1,
                           const Twine &Name = "") {
    return Insert(InvokeInst::Create(Callee, NormalDest, UnwindDest, Arg1),
                  Name);
  }
  /// \brief Create an invoke instruction with three call arguments.
  InvokeInst *CreateInvoke3(Value *Callee, BasicBlock *NormalDest,
                            BasicBlock *UnwindDest, Value *Arg1,
                            Value *Arg2, Value *Arg3,
                            const Twine &Name = "") {
    Value *Args[] = { Arg1, Arg2, Arg3 };
    return Insert(InvokeInst::Create(Callee, NormalDest, UnwindDest, Args),
                  Name);
  }
  /// \brief Create an invoke instruction.
  InvokeInst *CreateInvoke(Value *Callee, BasicBlock *NormalDest,
                           BasicBlock *UnwindDest, ArrayRef<Value *> Args,
                           const Twine &Name = "") {
    return Insert(InvokeInst::Create(Callee, NormalDest, UnwindDest, Args),
                  Name);
  }

  ResumeInst *CreateResume(Value *Exn) {
    return Insert(ResumeInst::Create(Exn));
  }

  UnreachableInst *CreateUnreachable() {
    return Insert(new UnreachableInst(Context));
  }

  //===--------------------------------------------------------------------===//
  // Instruction creation methods: Binary Operators
  //===--------------------------------------------------------------------===//
private:
  /// \brief Create a binary operator and apply the requested no-wrap flags.
  BinaryOperator *CreateInsertNUWNSWBinOp(BinaryOperator::BinaryOps Opc,
                                          Value *LHS, Value *RHS,
                                          const Twine &Name,
                                          bool HasNUW, bool HasNSW) {
    BinaryOperator *BO = Insert(BinaryOperator::Create(Opc, LHS, RHS), Name);
    if (HasNUW) BO->setHasNoUnsignedWrap();
    if (HasNSW) BO->setHasNoSignedWrap();
    return BO;
  }

  /// \brief Attach !fpmath metadata (explicit tag or the builder default) and
  /// the builder's fast-math flags to a floating-point instruction.
  Instruction *AddFPMathAttributes(Instruction *I, MDNode *FPMathTag,
                                   FastMathFlags FMF) const {
    if (!FPMathTag)
      FPMathTag = DefaultFPMathTag;
    if (FPMathTag)
      I->setMetadata(LLVMContext::MD_fpmath, FPMathTag);
    I->setFastMathFlags(FMF);
    return I;
  }
public:
  // Note on the pattern below: when both operands are constants (and the HLSL
  // AllowFolding flag is on) the Folder produces a constant instead of
  // emitting an instruction.
  Value *CreateAdd(Value *LHS, Value *RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    if (AllowFolding)
    if (Constant *LC = dyn_cast<Constant>(LHS))
      if (Constant *RC = dyn_cast<Constant>(RHS))
        return Insert(Folder.CreateAdd(LC, RC, HasNUW, HasNSW), Name);
    return CreateInsertNUWNSWBinOp(Instruction::Add, LHS, RHS, Name,
                                   HasNUW, HasNSW);
  }
  Value *CreateNSWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateAdd(LHS, RHS, Name, false, true);
  }
  Value *CreateNUWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateAdd(LHS, RHS, Name, true, false);
  }
  Value *CreateFAdd(Value *LHS, Value *RHS, const Twine &Name = "",
                    MDNode *FPMathTag = nullptr) {
    if (AllowFolding)
    if (Constant *LC = dyn_cast<Constant>(LHS))
      if (Constant *RC = dyn_cast<Constant>(RHS))
        return Insert(Folder.CreateFAdd(LC, RC), Name);
    return Insert(AddFPMathAttributes(BinaryOperator::CreateFAdd(LHS, RHS),
                                      FPMathTag, FMF), Name);
  }
  Value *CreateSub(Value *LHS, Value *RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    if (AllowFolding)
    if (Constant *LC = dyn_cast<Constant>(LHS))
      if (Constant *RC = dyn_cast<Constant>(RHS))
        return Insert(Folder.CreateSub(LC, RC, HasNUW, HasNSW), Name);
    return CreateInsertNUWNSWBinOp(Instruction::Sub, LHS, RHS, Name,
                                   HasNUW, HasNSW);
  }
  Value *CreateNSWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateSub(LHS, RHS, Name, false, true);
  }
  Value *CreateNUWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateSub(LHS, RHS, Name, true, false);
  }
  Value *CreateFSub(Value *LHS, Value *RHS, const Twine &Name = "",
                    MDNode *FPMathTag = nullptr) {
    if (AllowFolding)
    if (Constant *LC = dyn_cast<Constant>(LHS))
      if (Constant *RC = dyn_cast<Constant>(RHS))
        return Insert(Folder.CreateFSub(LC, RC), Name);
    return Insert(AddFPMathAttributes(BinaryOperator::CreateFSub(LHS, RHS),
                                      FPMathTag, FMF), Name);
  }
  Value *CreateMul(Value *LHS, Value *RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    if (AllowFolding)
    if (Constant *LC = dyn_cast<Constant>(LHS))
      if (Constant *RC = dyn_cast<Constant>(RHS))
        return Insert(Folder.CreateMul(LC, RC, HasNUW, HasNSW), Name);
    return CreateInsertNUWNSWBinOp(Instruction::Mul, LHS, RHS, Name,
                                   HasNUW, HasNSW);
  }
  Value *CreateNSWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateMul(LHS, RHS, Name, false, true);
  }
  Value *CreateNUWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateMul(LHS, RHS, Name, true, false);
  }
  Value *CreateFMul(Value *LHS, Value *RHS, const Twine &Name = "",
                    MDNode *FPMathTag = nullptr) {
    if (AllowFolding)
    if (Constant *LC = dyn_cast<Constant>(LHS))
      if (Constant *RC = dyn_cast<Constant>(RHS))
        return Insert(Folder.CreateFMul(LC, RC), Name);
    return Insert(AddFPMathAttributes(BinaryOperator::CreateFMul(LHS, RHS),
                                      FPMathTag, FMF), Name);
  }
  Value *CreateUDiv(Value *LHS, Value *RHS, const Twine &Name = "",
                    bool isExact = false) {
    if (AllowFolding)
    if (Constant *LC = dyn_cast<Constant>(LHS))
      if (Constant *RC = dyn_cast<Constant>(RHS))
        return Insert(Folder.CreateUDiv(LC, RC, isExact), Name);
    if (!isExact)
      return Insert(BinaryOperator::CreateUDiv(LHS, RHS), Name);
    return Insert(BinaryOperator::CreateExactUDiv(LHS, RHS), Name);
  }
  Value *CreateExactUDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateUDiv(LHS, RHS, Name, true);
  }
  Value *CreateSDiv(Value *LHS, Value *RHS, const Twine &Name = "",
                    bool isExact = false) {
    if (AllowFolding)
    if (Constant *LC = dyn_cast<Constant>(LHS))
      if (Constant *RC = dyn_cast<Constant>(RHS))
        return Insert(Folder.CreateSDiv(LC, RC, isExact), Name);
    if (!isExact)
      return Insert(BinaryOperator::CreateSDiv(LHS, RHS), Name);
    return Insert(BinaryOperator::CreateExactSDiv(LHS, RHS), Name);
  }
  Value *CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateSDiv(LHS, RHS, Name, true);
  }
  Value *CreateFDiv(Value *LHS, Value *RHS, const Twine &Name = "",
                    MDNode *FPMathTag = nullptr) {
    if (AllowFolding)
    if (Constant *LC = dyn_cast<Constant>(LHS))
      if (Constant *RC = dyn_cast<Constant>(RHS))
        return Insert(Folder.CreateFDiv(LC, RC), Name);
    return Insert(AddFPMathAttributes(BinaryOperator::CreateFDiv(LHS, RHS),
                                      FPMathTag, FMF), Name);
  }
  Value *CreateURem(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (AllowFolding)
    if (Constant *LC = dyn_cast<Constant>(LHS))
      if (Constant *RC = dyn_cast<Constant>(RHS))
        return Insert(Folder.CreateURem(LC, RC), Name);
    return Insert(BinaryOperator::CreateURem(LHS, RHS), Name);
  }
  Value *CreateSRem(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (AllowFolding)
    if (Constant *LC = dyn_cast<Constant>(LHS))
      if (Constant *RC = dyn_cast<Constant>(RHS))
        return Insert(Folder.CreateSRem(LC, RC), Name);
    return Insert(BinaryOperator::CreateSRem(LHS, RHS), Name);
  }
  Value *CreateFRem(Value *LHS, Value *RHS, const Twine &Name = "",
                    MDNode *FPMathTag = nullptr) {
    if (AllowFolding)
    if (Constant *LC = dyn_cast<Constant>(LHS))
      if (Constant *RC = dyn_cast<Constant>(RHS))
        return Insert(Folder.CreateFRem(LC, RC), Name);
    return Insert(AddFPMathAttributes(BinaryOperator::CreateFRem(LHS, RHS),
                                      FPMathTag, FMF), Name);
  }
  Value *CreateShl(Value *LHS, Value *RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    if (AllowFolding)
    if (Constant *LC = dyn_cast<Constant>(LHS))
      if (Constant *RC = dyn_cast<Constant>(RHS))
        return Insert(Folder.CreateShl(LC, RC, HasNUW, HasNSW), Name);
    return CreateInsertNUWNSWBinOp(Instruction::Shl, LHS, RHS, Name,
                                   HasNUW, HasNSW);
  }
  Value *CreateShl(Value *LHS, const APInt &RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
                     HasNUW, HasNSW);
  }
  Value *CreateShl(Value *LHS, uint64_t RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
                     HasNUW, HasNSW);
  }
  Value *CreateLShr(Value *LHS, Value *RHS, const Twine &Name = "",
                    bool isExact = false) {
    if (AllowFolding)
    if (Constant *LC = dyn_cast<Constant>(LHS))
      if (Constant *RC = dyn_cast<Constant>(RHS))
        return Insert(Folder.CreateLShr(LC, RC, isExact), Name);
    if (!isExact)
      return Insert(BinaryOperator::CreateLShr(LHS, RHS), Name);
    return Insert(BinaryOperator::CreateExactLShr(LHS, RHS), Name);
  }
  Value *CreateLShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
                    bool isExact = false) {
    return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
  }
  Value *CreateLShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
                    bool isExact = false) {
    return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
  }
  Value *CreateAShr(Value *LHS, Value *RHS, const Twine &Name = "",
                    bool isExact = false) {
    if (AllowFolding)
    if (Constant *LC = dyn_cast<Constant>(LHS))
      if (Constant *RC = dyn_cast<Constant>(RHS))
        return Insert(Folder.CreateAShr(LC, RC, isExact), Name);
    if (!isExact)
      return Insert(BinaryOperator::CreateAShr(LHS, RHS), Name);
    return Insert(BinaryOperator::CreateExactAShr(LHS, RHS), Name);
  }
  Value *CreateAShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
                    bool isExact = false) {
    return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
  }
  Value *CreateAShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
                    bool isExact = false) {
    return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
  }
  Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (AllowFolding)
    if (Constant *RC = dyn_cast<Constant>(RHS)) {
      if (isa<ConstantInt>(RC) && cast<ConstantInt>(RC)->isAllOnesValue())
        return LHS;  // LHS & -1 -> LHS
      if (Constant *LC = dyn_cast<Constant>(LHS))
        return Insert(Folder.CreateAnd(LC, RC), Name);
    }
    return Insert(BinaryOperator::CreateAnd(LHS, RHS), Name);
  }
  Value *CreateAnd(Value *LHS, const APInt &RHS, const Twine &Name = "") {
    return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }
  Value *CreateAnd(Value *LHS, uint64_t RHS, const Twine &Name = "") {
    return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }
  Value *CreateOr(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (AllowFolding)
    if (Constant *RC = dyn_cast<Constant>(RHS)) {
      if (RC->isNullValue())
        return LHS;  // LHS | 0 -> LHS
      if (Constant *LC = dyn_cast<Constant>(LHS))
        return Insert(Folder.CreateOr(LC, RC), Name);
    }
    return Insert(BinaryOperator::CreateOr(LHS, RHS), Name);
  }
  Value *CreateOr(Value *LHS, const APInt &RHS, const Twine &Name = "") {
    return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }
  Value *CreateOr(Value *LHS, uint64_t RHS, const Twine &Name = "") {
    return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }
  Value *CreateXor(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (AllowFolding)
    if (Constant *LC = dyn_cast<Constant>(LHS))
      if (Constant *RC = dyn_cast<Constant>(RHS))
        return Insert(Folder.CreateXor(LC, RC), Name);
    return Insert(BinaryOperator::CreateXor(LHS, RHS), Name);
  }
  Value *CreateXor(Value *LHS, const APInt &RHS, const Twine &Name = "") {
    return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }
  Value *CreateXor(Value *LHS, uint64_t RHS, const Twine &Name = "") {
    return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }
  /// \brief Create a binary operation with the given opcode; FP opcodes also
  /// receive fpmath metadata and fast-math flags.
  Value *CreateBinOp(Instruction::BinaryOps Opc,
                     Value *LHS, Value *RHS, const Twine &Name = "",
                     MDNode *FPMathTag = nullptr) {
    if (AllowFolding)
    if (Constant *LC = dyn_cast<Constant>(LHS))
      if (Constant *RC = dyn_cast<Constant>(RHS))
        return Insert(Folder.CreateBinOp(Opc, LC, RC), Name);
    llvm::Instruction *BinOp = BinaryOperator::Create(Opc, LHS, RHS);
    if (isa<FPMathOperator>(BinOp))
      BinOp = AddFPMathAttributes(BinOp, FPMathTag, FMF);
    return Insert(BinOp, Name);
  }
  Value *CreateNeg(Value *V, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    if (AllowFolding)
    if (Constant *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateNeg(VC, HasNUW, HasNSW), Name);
    BinaryOperator *BO = Insert(BinaryOperator::CreateNeg(V), Name);
    if (HasNUW) BO->setHasNoUnsignedWrap();
    if (HasNSW) BO->setHasNoSignedWrap();
    return BO;
  }
  Value *CreateNSWNeg(Value *V, const Twine &Name = "") {
    return CreateNeg(V, Name, false, true);
  }
  Value *CreateNUWNeg(Value *V, const Twine &Name = "") {
    return CreateNeg(V, Name, true, false);
  }
  Value *CreateFNeg(Value *V, const Twine &Name = "",
                    MDNode *FPMathTag = nullptr) {
    if (AllowFolding)
    if (Constant *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateFNeg(VC), Name);
    return Insert(AddFPMathAttributes(BinaryOperator::CreateFNeg(V),
                                      FPMathTag, FMF), Name);
  }
  Value *CreateNot(Value *V, const Twine &Name = "") {
    if (AllowFolding)
    if (Constant *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateNot(VC), Name);
    return Insert(BinaryOperator::CreateNot(V), Name);
  }

  //===--------------------------------------------------------------------===//
  // Instruction creation methods: Memory Instructions
  //===--------------------------------------------------------------------===//

  AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = nullptr,
                           const Twine &Name = "") {
    return Insert(new AllocaInst(Ty, ArraySize), Name);
  }
  // \brief Provided to resolve 'CreateLoad(Ptr, "...")' correctly, instead of
  // converting the string to 'bool' for the isVolatile parameter.
  LoadInst *CreateLoad(Value *Ptr, const char *Name) {
    return Insert(new LoadInst(Ptr), Name);
  }
  LoadInst *CreateLoad(Value *Ptr, const Twine &Name = "") {
    return Insert(new LoadInst(Ptr), Name);
  }
  LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") {
    return Insert(new LoadInst(Ty, Ptr), Name);
  }
  LoadInst *CreateLoad(Value *Ptr, bool isVolatile, const Twine &Name = "") {
    return Insert(new LoadInst(Ptr, nullptr, isVolatile), Name);
  }
  StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
    return Insert(new StoreInst(Val, Ptr, isVolatile));
  }
  // \brief Provided to resolve 'CreateAlignedLoad(Ptr, Align, "...")'
  // correctly, instead of converting the string to 'bool' for the isVolatile
  // parameter.
  LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, const char *Name) {
    LoadInst *LI = CreateLoad(Ptr, Name);
    LI->setAlignment(Align);
    return LI;
  }
  LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align,
                              const Twine &Name = "") {
    LoadInst *LI = CreateLoad(Ptr, Name);
    LI->setAlignment(Align);
    return LI;
  }
  LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, bool isVolatile,
                              const Twine &Name = "") {
    LoadInst *LI = CreateLoad(Ptr, isVolatile, Name);
    LI->setAlignment(Align);
    return LI;
  }
  StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, unsigned Align,
                                bool isVolatile = false) {
    StoreInst *SI = CreateStore(Val, Ptr, isVolatile);
    SI->setAlignment(Align);
    return SI;
  }
  FenceInst *CreateFence(AtomicOrdering Ordering,
                         SynchronizationScope SynchScope = CrossThread,
                         const Twine &Name = "") {
    return Insert(new FenceInst(Context, Ordering, SynchScope), Name);
  }
  AtomicCmpXchgInst *
  CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New,
                      AtomicOrdering SuccessOrdering,
                      AtomicOrdering FailureOrdering,
                      SynchronizationScope SynchScope = CrossThread) {
    return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering,
                                        FailureOrdering, SynchScope));
  }
  AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr,
                                 Value *Val, AtomicOrdering Ordering,
                                 SynchronizationScope SynchScope = CrossThread) {
    return Insert(new AtomicRMWInst(Op, Ptr, Val, Ordering, SynchScope));
  }
  // GEP overloads without an explicit pointee type forward to the typed form
  // with a null type (the type is then derived from the pointer operand).
  Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList,
                   const Twine &Name = "") {
    return CreateGEP(nullptr, Ptr, IdxList, Name);
  }
  Value *CreateGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
                   const Twine &Name = "") {
    if (AllowFolding)
    if (Constant *PC = dyn_cast<Constant>(Ptr)) {
      // Every index must be constant.
      size_t i, e;
      for (i = 0, e = IdxList.size(); i != e; ++i)
        if (!isa<Constant>(IdxList[i]))
          break;
      if (i == e)
        return Insert(Folder.CreateGetElementPtr(Ty, PC, IdxList), Name);
    }
    return Insert(GetElementPtrInst::Create(Ty, Ptr, IdxList), Name);
  }
  Value *CreateInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
                           const Twine &Name = "") {
    return CreateInBoundsGEP(nullptr, Ptr, IdxList, Name);
  }
  Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
                           const Twine &Name = "") {
    if (AllowFolding)
    if (Constant *PC = dyn_cast<Constant>(Ptr)) {
      // Every index must be constant.
      size_t i, e;
      for (i = 0, e = IdxList.size(); i != e; ++i)
        if (!isa<Constant>(IdxList[i]))
          break;
      if (i == e)
        return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IdxList),
                      Name);
    }
    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, IdxList), Name);
  }
  Value *CreateGEP(Value *Ptr, Value *Idx, const Twine &Name = "") {
    return CreateGEP(nullptr, Ptr, Idx, Name);
  }
  Value *CreateGEP(Type *Ty, Value *Ptr, Value *Idx, const Twine &Name = "") {
    if (AllowFolding)
    if (Constant *PC = dyn_cast<Constant>(Ptr))
      if (Constant *IC = dyn_cast<Constant>(Idx))
        return Insert(Folder.CreateGetElementPtr(Ty, PC, IC), Name);
    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
  }
  Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, Value *Idx,
                           const Twine &Name = "") {
    if (AllowFolding)
    if (Constant *PC = dyn_cast<Constant>(Ptr))
      if (Constant *IC = dyn_cast<Constant>(Idx))
        return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IC), Name);
    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
  }
  Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const Twine &Name = "") {
    return CreateConstGEP1_32(nullptr, Ptr, Idx0, Name);
  }
  Value *CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
                            const Twine &Name = "") {
    Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
    if (AllowFolding)
    if (Constant *PC = dyn_cast<Constant>(Ptr))
      return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
  }
  Value *CreateConstInBoundsGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
                                    const Twine &Name = "") {
    Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
    if (AllowFolding)
    if (Constant *PC = dyn_cast<Constant>(Ptr))
      return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
  }
  Value *CreateConstGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1,
                            const Twine &Name = "") {
    Value *Idxs[] = {
      ConstantInt::get(Type::getInt32Ty(Context), Idx0),
      ConstantInt::get(Type::getInt32Ty(Context), Idx1)
    };
    if (AllowFolding)
    if (Constant *PC = dyn_cast<Constant>(Ptr))
      return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
  }
  Value *CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0,
                                    unsigned Idx1, const Twine &Name = "") {
    Value *Idxs[] = {
      ConstantInt::get(Type::getInt32Ty(Context), Idx0),
      ConstantInt::get(Type::getInt32Ty(Context), Idx1)
    };
    if (AllowFolding)
    if (Constant *PC = dyn_cast<Constant>(Ptr))
      return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
  }
  Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0, const Twine &Name = "") {
    Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
    if (AllowFolding)
    if (Constant *PC = dyn_cast<Constant>(Ptr))
      return Insert(Folder.CreateGetElementPtr(nullptr, PC, Idx), Name);
    return Insert(GetElementPtrInst::Create(nullptr, Ptr, Idx), Name);
  }
  Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0,
                                    const Twine &Name = "") {
    Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
    if (AllowFolding)
    if (Constant *PC = dyn_cast<Constant>(Ptr))
      return Insert(Folder.CreateInBoundsGetElementPtr(nullptr, PC, Idx), Name);
    return Insert(GetElementPtrInst::CreateInBounds(nullptr, Ptr, Idx), Name);
  }
  Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
                            const Twine &Name = "") {
    Value *Idxs[] = {
      ConstantInt::get(Type::getInt64Ty(Context), Idx0),
      ConstantInt::get(Type::getInt64Ty(Context), Idx1)
    };
    if (AllowFolding)
    if (Constant *PC = dyn_cast<Constant>(Ptr))
      return Insert(Folder.CreateGetElementPtr(nullptr, PC, Idxs), Name);
    return Insert(GetElementPtrInst::Create(nullptr, Ptr, Idxs), Name);
  }
  Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
                                    const Twine &Name = "") {
    Value *Idxs[] = {
      ConstantInt::get(Type::getInt64Ty(Context), Idx0),
      ConstantInt::get(Type::getInt64Ty(Context), Idx1)
    };
    if (AllowFolding)
    if (Constant *PC = dyn_cast<Constant>(Ptr))
      return Insert(Folder.CreateInBoundsGetElementPtr(nullptr, PC, Idxs),
                    Name);
    return Insert(GetElementPtrInst::CreateInBounds(nullptr, Ptr, Idxs), Name);
  }
  Value *CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx,
                         const Twine &Name = "") {
    return CreateConstInBoundsGEP2_32(Ty, Ptr, 0, Idx, Name);
  }

  /// \brief Same as CreateGlobalString, but return a pointer with "i8*" type
  /// instead of a pointer to array of i8.
Value *CreateGlobalStringPtr(StringRef Str, const Twine &Name = "", unsigned AddressSpace = 0) { GlobalVariable *gv = CreateGlobalString(Str, Name, AddressSpace); Value *zero = ConstantInt::get(Type::getInt32Ty(Context), 0); Value *Args[] = { zero, zero }; return CreateInBoundsGEP(gv->getValueType(), gv, Args, Name); } //===--------------------------------------------------------------------===// // Instruction creation methods: Cast/Conversion Operators //===--------------------------------------------------------------------===// Value *CreateTrunc(Value *V, Type *DestTy, const Twine &Name = "") { return CreateCast(Instruction::Trunc, V, DestTy, Name); } Value *CreateZExt(Value *V, Type *DestTy, const Twine &Name = "") { return CreateCast(Instruction::ZExt, V, DestTy, Name); } Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") { return CreateCast(Instruction::SExt, V, DestTy, Name); } /// \brief Create a ZExt or Trunc from the integer value V to DestTy. Return /// the value untouched if the type of V is already DestTy. Value *CreateZExtOrTrunc(Value *V, Type *DestTy, const Twine &Name = "") { assert(V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && "Can only zero extend/truncate integers!"); Type *VTy = V->getType(); if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits()) return CreateZExt(V, DestTy, Name); if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits()) return CreateTrunc(V, DestTy, Name); return V; } /// \brief Create a SExt or Trunc from the integer value V to DestTy. Return /// the value untouched if the type of V is already DestTy. 
Value *CreateSExtOrTrunc(Value *V, Type *DestTy, const Twine &Name = "") { assert(V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && "Can only sign extend/truncate integers!"); Type *VTy = V->getType(); if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits()) return CreateSExt(V, DestTy, Name); if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits()) return CreateTrunc(V, DestTy, Name); return V; } Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = ""){ return CreateCast(Instruction::FPToUI, V, DestTy, Name); } Value *CreateFPToSI(Value *V, Type *DestTy, const Twine &Name = ""){ return CreateCast(Instruction::FPToSI, V, DestTy, Name); } Value *CreateUIToFP(Value *V, Type *DestTy, const Twine &Name = ""){ return CreateCast(Instruction::UIToFP, V, DestTy, Name); } Value *CreateSIToFP(Value *V, Type *DestTy, const Twine &Name = ""){ return CreateCast(Instruction::SIToFP, V, DestTy, Name); } Value *CreateFPTrunc(Value *V, Type *DestTy, const Twine &Name = "") { return CreateCast(Instruction::FPTrunc, V, DestTy, Name); } Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "") { return CreateCast(Instruction::FPExt, V, DestTy, Name); } Value *CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name = "") { return CreateCast(Instruction::PtrToInt, V, DestTy, Name); } Value *CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name = "") { return CreateCast(Instruction::IntToPtr, V, DestTy, Name); } Value *CreateBitCast(Value *V, Type *DestTy, const Twine &Name = "") { return CreateCast(Instruction::BitCast, V, DestTy, Name); } Value *CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name = "") { return CreateCast(Instruction::AddrSpaceCast, V, DestTy, Name); } Value *CreateZExtOrBitCast(Value *V, Type *DestTy, const Twine &Name = "") { if (V->getType() == DestTy) return V; if (AllowFolding) if (Constant *VC = dyn_cast<Constant>(V)) return Insert(Folder.CreateZExtOrBitCast(VC, DestTy), Name); return 
Insert(CastInst::CreateZExtOrBitCast(V, DestTy), Name); } Value *CreateSExtOrBitCast(Value *V, Type *DestTy, const Twine &Name = "") { if (V->getType() == DestTy) return V; if (AllowFolding) if (Constant *VC = dyn_cast<Constant>(V)) return Insert(Folder.CreateSExtOrBitCast(VC, DestTy), Name); return Insert(CastInst::CreateSExtOrBitCast(V, DestTy), Name); } Value *CreateTruncOrBitCast(Value *V, Type *DestTy, const Twine &Name = "") { if (V->getType() == DestTy) return V; if (AllowFolding) if (Constant *VC = dyn_cast<Constant>(V)) return Insert(Folder.CreateTruncOrBitCast(VC, DestTy), Name); return Insert(CastInst::CreateTruncOrBitCast(V, DestTy), Name); } Value *CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy, const Twine &Name = "") { if (V->getType() == DestTy) return V; if (AllowFolding) if (Constant *VC = dyn_cast<Constant>(V)) return Insert(Folder.CreateCast(Op, VC, DestTy), Name); return Insert(CastInst::Create(Op, V, DestTy), Name); } Value *CreatePointerCast(Value *V, Type *DestTy, const Twine &Name = "") { if (V->getType() == DestTy) return V; if (AllowFolding) if (Constant *VC = dyn_cast<Constant>(V)) return Insert(Folder.CreatePointerCast(VC, DestTy), Name); return Insert(CastInst::CreatePointerCast(V, DestTy), Name); } Value *CreatePointerBitCastOrAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name = "") { if (V->getType() == DestTy) return V; if (AllowFolding) if (Constant *VC = dyn_cast<Constant>(V)) { return Insert(Folder.CreatePointerBitCastOrAddrSpaceCast(VC, DestTy), Name); } return Insert(CastInst::CreatePointerBitCastOrAddrSpaceCast(V, DestTy), Name); } Value *CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name = "") { if (V->getType() == DestTy) return V; if (AllowFolding) if (Constant *VC = dyn_cast<Constant>(V)) return Insert(Folder.CreateIntCast(VC, DestTy, isSigned), Name); return Insert(CastInst::CreateIntegerCast(V, DestTy, isSigned), Name); } Value *CreateBitOrPointerCast(Value *V, Type *DestTy, const 
Twine &Name = "") { if (V->getType() == DestTy) return V; if (V->getType()->isPointerTy() && DestTy->isIntegerTy()) return CreatePtrToInt(V, DestTy, Name); if (V->getType()->isIntegerTy() && DestTy->isPointerTy()) return CreateIntToPtr(V, DestTy, Name); return CreateBitCast(V, DestTy, Name); } private: // \brief Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a // compile time error, instead of converting the string to bool for the // isSigned parameter. Value *CreateIntCast(Value *, Type *, const char *) = delete; public: Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") { if (V->getType() == DestTy) return V; if (AllowFolding) if (Constant *VC = dyn_cast<Constant>(V)) return Insert(Folder.CreateFPCast(VC, DestTy), Name); return Insert(CastInst::CreateFPCast(V, DestTy), Name); } //===--------------------------------------------------------------------===// // Instruction creation methods: Compare Instructions //===--------------------------------------------------------------------===// Value *CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name = "") { return CreateICmp(ICmpInst::ICMP_EQ, LHS, RHS, Name); } Value *CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name = "") { return CreateICmp(ICmpInst::ICMP_NE, LHS, RHS, Name); } Value *CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name = "") { return CreateICmp(ICmpInst::ICMP_UGT, LHS, RHS, Name); } Value *CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name = "") { return CreateICmp(ICmpInst::ICMP_UGE, LHS, RHS, Name); } Value *CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name = "") { return CreateICmp(ICmpInst::ICMP_ULT, LHS, RHS, Name); } Value *CreateICmpULE(Value *LHS, Value *RHS, const Twine &Name = "") { return CreateICmp(ICmpInst::ICMP_ULE, LHS, RHS, Name); } Value *CreateICmpSGT(Value *LHS, Value *RHS, const Twine &Name = "") { return CreateICmp(ICmpInst::ICMP_SGT, LHS, RHS, Name); } Value *CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name = "") { 
return CreateICmp(ICmpInst::ICMP_SGE, LHS, RHS, Name); } Value *CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name = "") { return CreateICmp(ICmpInst::ICMP_SLT, LHS, RHS, Name); } Value *CreateICmpSLE(Value *LHS, Value *RHS, const Twine &Name = "") { return CreateICmp(ICmpInst::ICMP_SLE, LHS, RHS, Name); } Value *CreateFCmpOEQ(Value *LHS, Value *RHS, const Twine &Name = "", MDNode *FPMathTag = nullptr) { return CreateFCmp(FCmpInst::FCMP_OEQ, LHS, RHS, Name, FPMathTag); } Value *CreateFCmpOGT(Value *LHS, Value *RHS, const Twine &Name = "", MDNode *FPMathTag = nullptr) { return CreateFCmp(FCmpInst::FCMP_OGT, LHS, RHS, Name, FPMathTag); } Value *CreateFCmpOGE(Value *LHS, Value *RHS, const Twine &Name = "", MDNode *FPMathTag = nullptr) { return CreateFCmp(FCmpInst::FCMP_OGE, LHS, RHS, Name, FPMathTag); } Value *CreateFCmpOLT(Value *LHS, Value *RHS, const Twine &Name = "", MDNode *FPMathTag = nullptr) { return CreateFCmp(FCmpInst::FCMP_OLT, LHS, RHS, Name, FPMathTag); } Value *CreateFCmpOLE(Value *LHS, Value *RHS, const Twine &Name = "", MDNode *FPMathTag = nullptr) { return CreateFCmp(FCmpInst::FCMP_OLE, LHS, RHS, Name, FPMathTag); } Value *CreateFCmpONE(Value *LHS, Value *RHS, const Twine &Name = "", MDNode *FPMathTag = nullptr) { return CreateFCmp(FCmpInst::FCMP_ONE, LHS, RHS, Name, FPMathTag); } Value *CreateFCmpORD(Value *LHS, Value *RHS, const Twine &Name = "", MDNode *FPMathTag = nullptr) { return CreateFCmp(FCmpInst::FCMP_ORD, LHS, RHS, Name, FPMathTag); } Value *CreateFCmpUNO(Value *LHS, Value *RHS, const Twine &Name = "", MDNode *FPMathTag = nullptr) { return CreateFCmp(FCmpInst::FCMP_UNO, LHS, RHS, Name, FPMathTag); } Value *CreateFCmpUEQ(Value *LHS, Value *RHS, const Twine &Name = "", MDNode *FPMathTag = nullptr) { return CreateFCmp(FCmpInst::FCMP_UEQ, LHS, RHS, Name, FPMathTag); } Value *CreateFCmpUGT(Value *LHS, Value *RHS, const Twine &Name = "", MDNode *FPMathTag = nullptr) { return CreateFCmp(FCmpInst::FCMP_UGT, LHS, RHS, Name, FPMathTag); } Value 
*CreateFCmpUGE(Value *LHS, Value *RHS, const Twine &Name = "", MDNode *FPMathTag = nullptr) { return CreateFCmp(FCmpInst::FCMP_UGE, LHS, RHS, Name, FPMathTag); } Value *CreateFCmpULT(Value *LHS, Value *RHS, const Twine &Name = "", MDNode *FPMathTag = nullptr) { return CreateFCmp(FCmpInst::FCMP_ULT, LHS, RHS, Name, FPMathTag); } Value *CreateFCmpULE(Value *LHS, Value *RHS, const Twine &Name = "", MDNode *FPMathTag = nullptr) { return CreateFCmp(FCmpInst::FCMP_ULE, LHS, RHS, Name, FPMathTag); } Value *CreateFCmpUNE(Value *LHS, Value *RHS, const Twine &Name = "", MDNode *FPMathTag = nullptr) { return CreateFCmp(FCmpInst::FCMP_UNE, LHS, RHS, Name, FPMathTag); } Value *CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name = "") { if (AllowFolding) if (Constant *LC = dyn_cast<Constant>(LHS)) if (Constant *RC = dyn_cast<Constant>(RHS)) return Insert(Folder.CreateICmp(P, LC, RC), Name); return Insert(new ICmpInst(P, LHS, RHS), Name); } Value *CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name = "", MDNode *FPMathTag = nullptr) { if (AllowFolding) if (Constant *LC = dyn_cast<Constant>(LHS)) if (Constant *RC = dyn_cast<Constant>(RHS)) return Insert(Folder.CreateFCmp(P, LC, RC), Name); return Insert(AddFPMathAttributes(new FCmpInst(P, LHS, RHS), FPMathTag, FMF), Name); } //===--------------------------------------------------------------------===// // Instruction creation methods: Other Instructions //===--------------------------------------------------------------------===// PHINode *CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name = "") { return Insert(PHINode::Create(Ty, NumReservedValues), Name); } CallInst *CreateCall(Value *Callee, ArrayRef<Value *> Args = None, const Twine &Name = "") { return Insert(CallInst::Create(Callee, Args), Name); } CallInst *CreateCall(llvm::FunctionType *FTy, Value *Callee, ArrayRef<Value *> Args, const Twine &Name = "") { return Insert(CallInst::Create(FTy, Callee, Args), Name); 
} CallInst *CreateCall(Function *Callee, ArrayRef<Value *> Args, const Twine &Name = "") { return CreateCall(Callee->getFunctionType(), Callee, Args, Name); } Value *CreateSelect(Value *C, Value *True, Value *False, const Twine &Name = "") { if (AllowFolding) if (Constant *CC = dyn_cast<Constant>(C)) if (Constant *TC = dyn_cast<Constant>(True)) if (Constant *FC = dyn_cast<Constant>(False)) return Insert(Folder.CreateSelect(CC, TC, FC), Name); return Insert(SelectInst::Create(C, True, False), Name); } VAArgInst *CreateVAArg(Value *List, Type *Ty, const Twine &Name = "") { return Insert(new VAArgInst(List, Ty), Name); } Value *CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name = "") { if (AllowFolding) if (Constant *VC = dyn_cast<Constant>(Vec)) if (Constant *IC = dyn_cast<Constant>(Idx)) return Insert(Folder.CreateExtractElement(VC, IC), Name); return Insert(ExtractElementInst::Create(Vec, Idx), Name); } Value *CreateExtractElement(Value *Vec, uint64_t Idx, const Twine &Name = "") { return CreateExtractElement(Vec, getInt64(Idx), Name); } Value *CreateInsertElement(Value *Vec, Value *NewElt, Value *Idx, const Twine &Name = "") { if (AllowFolding) if (Constant *VC = dyn_cast<Constant>(Vec)) if (Constant *NC = dyn_cast<Constant>(NewElt)) if (Constant *IC = dyn_cast<Constant>(Idx)) return Insert(Folder.CreateInsertElement(VC, NC, IC), Name); return Insert(InsertElementInst::Create(Vec, NewElt, Idx), Name); } Value *CreateInsertElement(Value *Vec, Value *NewElt, uint64_t Idx, const Twine &Name = "") { return CreateInsertElement(Vec, NewElt, getInt64(Idx), Name); } Value *CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name = "") { if (AllowFolding) if (Constant *V1C = dyn_cast<Constant>(V1)) if (Constant *V2C = dyn_cast<Constant>(V2)) if (Constant *MC = dyn_cast<Constant>(Mask)) return Insert(Folder.CreateShuffleVector(V1C, V2C, MC), Name); return Insert(new ShuffleVectorInst(V1, V2, Mask), Name); } Value *CreateShuffleVector(Value *V1, 
Value *V2, ArrayRef<int> IntMask, const Twine &Name = "") { size_t MaskSize = IntMask.size(); SmallVector<Constant*, 8> MaskVec(MaskSize); for (size_t i = 0; i != MaskSize; ++i) MaskVec[i] = getInt32(IntMask[i]); Value *Mask = ConstantVector::get(MaskVec); return CreateShuffleVector(V1, V2, Mask, Name); } Value *CreateExtractValue(Value *Agg, ArrayRef<unsigned> Idxs, const Twine &Name = "") { if (AllowFolding) if (Constant *AggC = dyn_cast<Constant>(Agg)) return Insert(Folder.CreateExtractValue(AggC, Idxs), Name); return Insert(ExtractValueInst::Create(Agg, Idxs), Name); } Value *CreateInsertValue(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, const Twine &Name = "") { if (AllowFolding) if (Constant *AggC = dyn_cast<Constant>(Agg)) if (Constant *ValC = dyn_cast<Constant>(Val)) return Insert(Folder.CreateInsertValue(AggC, ValC, Idxs), Name); return Insert(InsertValueInst::Create(Agg, Val, Idxs), Name); } LandingPadInst *CreateLandingPad(Type *Ty, unsigned NumClauses, const Twine &Name = "") { return Insert(LandingPadInst::Create(Ty, NumClauses), Name); } //===--------------------------------------------------------------------===// // Utility creation methods //===--------------------------------------------------------------------===// /// \brief Return an i1 value testing if \p Arg is null. Value *CreateIsNull(Value *Arg, const Twine &Name = "") { return CreateICmpEQ(Arg, Constant::getNullValue(Arg->getType()), Name); } /// \brief Return an i1 value testing if \p Arg is not null. Value *CreateIsNotNull(Value *Arg, const Twine &Name = "") { return CreateICmpNE(Arg, Constant::getNullValue(Arg->getType()), Name); } /// \brief Return the i64 difference between two pointer values, dividing out /// the size of the pointed-to objects. /// /// This is intended to implement C-style pointer subtraction. As such, the /// pointers must be appropriately aligned for their element types and /// pointing into the same object. 
Value *CreatePtrDiff(Value *LHS, Value *RHS, const Twine &Name = "") { assert(LHS->getType() == RHS->getType() && "Pointer subtraction operand types must match!"); PointerType *ArgType = cast<PointerType>(LHS->getType()); Value *LHS_int = CreatePtrToInt(LHS, Type::getInt64Ty(Context)); Value *RHS_int = CreatePtrToInt(RHS, Type::getInt64Ty(Context)); Value *Difference = CreateSub(LHS_int, RHS_int); return CreateExactSDiv(Difference, ConstantExpr::getSizeOf(ArgType->getElementType()), Name); } /// \brief Return a vector value that contains \arg V broadcasted to \p /// NumElts elements. Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = "") { assert(NumElts > 0 && "Cannot splat to an empty vector!"); // First insert it into an undef vector so we can shuffle it. Type *I32Ty = getInt32Ty(); Value *Undef = UndefValue::get(VectorType::get(V->getType(), NumElts)); V = CreateInsertElement(Undef, V, ConstantInt::get(I32Ty, 0), Name + ".splatinsert"); // Shuffle the value across the desired number of elements. Value *Zeros = ConstantAggregateZero::get(VectorType::get(I32Ty, NumElts)); return CreateShuffleVector(V, Undef, Zeros, Name + ".splat"); } /// \brief Return a value that has been extracted from a larger integer type. 
Value *CreateExtractInteger(const DataLayout &DL, Value *From, IntegerType *ExtractedTy, uint64_t Offset, const Twine &Name) { IntegerType *IntTy = cast<IntegerType>(From->getType()); assert(DL.getTypeStoreSize(ExtractedTy) + Offset <= DL.getTypeStoreSize(IntTy) && "Element extends past full value"); uint64_t ShAmt = 8 * Offset; Value *V = From; if (DL.isBigEndian()) ShAmt = 8 * (DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(ExtractedTy) - Offset); if (ShAmt) { V = CreateLShr(V, ShAmt, Name + ".shift"); } assert(ExtractedTy->getBitWidth() <= IntTy->getBitWidth() && "Cannot extract to a larger integer!"); if (ExtractedTy != IntTy) { V = CreateTrunc(V, ExtractedTy, Name + ".trunc"); } return V; } /// \brief Create an assume intrinsic call that represents an alignment /// assumption on the provided pointer. /// /// An optional offset can be provided, and if it is provided, the offset /// must be subtracted from the provided pointer to get the pointer with the /// specified alignment. CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue, unsigned Alignment, Value *OffsetValue = nullptr) { assert(isa<PointerType>(PtrValue->getType()) && "trying to create an alignment assumption on a non-pointer?"); PointerType *PtrTy = cast<PointerType>(PtrValue->getType()); Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace()); Value *PtrIntValue = CreatePtrToInt(PtrValue, IntPtrTy, "ptrint"); Value *Mask = ConstantInt::get(IntPtrTy, Alignment > 0 ? 
Alignment - 1 : 0); if (OffsetValue) { bool IsOffsetZero = false; if (ConstantInt *CI = dyn_cast<ConstantInt>(OffsetValue)) IsOffsetZero = CI->isZero(); if (!IsOffsetZero) { if (OffsetValue->getType() != IntPtrTy) OffsetValue = CreateIntCast(OffsetValue, IntPtrTy, /*isSigned*/ true, "offsetcast"); PtrIntValue = CreateSub(PtrIntValue, OffsetValue, "offsetptr"); } } Value *Zero = ConstantInt::get(IntPtrTy, 0); Value *MaskedPtr = CreateAnd(PtrIntValue, Mask, "maskedptr"); Value *InvCond = CreateICmpEQ(MaskedPtr, Zero, "maskcond"); return CreateAssumption(InvCond); } }; // Create wrappers for C Binding types (see CBindingWrapping.h). DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IRBuilder<>, LLVMBuilderRef) } #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/GVMaterializer.h
//===- GVMaterializer.h - Interface for GV materializers --------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file provides an abstract interface for loading a module from some // place. This interface allows incremental or random access loading of // functions from the file. This is useful for applications like JIT compilers // or interprocedural optimizers that do not need the entire program in memory // at the same time. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_GVMATERIALIZER_H #define LLVM_IR_GVMATERIALIZER_H #include <system_error> #include <vector> #include "llvm/ADT/ArrayRef.h" // HLSL Change #include "llvm/ADT/StringRef.h" // HLSL Change namespace llvm { class Function; class GlobalValue; class Module; class StructType; class GVMaterializer { protected: GVMaterializer() {} public: virtual ~GVMaterializer(); /// True if GV has been materialized and can be dematerialized back to /// whatever backing store this GVMaterializer uses. virtual bool isDematerializable(const GlobalValue *GV) const = 0; /// Make sure the given GlobalValue is fully read. /// virtual std::error_code materialize(GlobalValue *GV) = 0; /// If the given GlobalValue is read in, and if the GVMaterializer supports /// it, release the memory for the GV, and set it up to be materialized /// lazily. If the Materializer doesn't support this capability, this method /// is a noop. /// virtual void dematerialize(GlobalValue *) {} /// Make sure the entire Module has been completely read. 
/// virtual std::error_code materializeModule(Module *M) = 0; virtual std::error_code materializeMetadata() = 0; virtual std::error_code materializeSelectNamedMetadata(llvm::ArrayRef<llvm::StringRef>) = 0; // HLSL Change virtual void setStripDebugInfo() = 0; virtual std::vector<StructType *> getIdentifiedStructTypes() const = 0; }; } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/ConstantRange.h
//===- ConstantRange.h - Represent a range ----------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Represent a range of possible values that may occur when the program is run // for an integral value. This keeps track of a lower and upper bound for the // constant, which MAY wrap around the end of the numeric range. To do this, it // keeps track of a [lower, upper) bound, which specifies an interval just like // STL iterators. When used with boolean values, the following are important // ranges: : // // [F, F) = {} = Empty set // [T, F) = {T} // [F, T) = {F} // [T, T) = {F, T} = Full set // // The other integral ranges use min/max values for special range values. For // example, for 8-bit types, it uses: // [0, 0) = {} = Empty set // [255, 255) = {0..255} = Full Set // // Note that ConstantRange can be used to represent either signed or // unsigned ranges. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_CONSTANTRANGE_H #define LLVM_IR_CONSTANTRANGE_H #include "llvm/ADT/APInt.h" #include "llvm/IR/InstrTypes.h" #include "llvm/Support/DataTypes.h" namespace llvm { /// This class represents a range of values. /// class ConstantRange { APInt Lower, Upper; // If we have move semantics, pass APInts by value and move them into place. typedef APInt APIntMoveTy; public: /// Initialize a full (the default) or empty set for the specified bit width. /// explicit ConstantRange(uint32_t BitWidth, bool isFullSet = true); /// Initialize a range to hold the single specified value. /// ConstantRange(APIntMoveTy Value); /// @brief Initialize a range of values explicitly. This will assert out if /// Lower==Upper and Lower != Min or Max value for its type. 
It will also /// assert out if the two APInt's are not the same bit width. ConstantRange(APIntMoveTy Lower, APIntMoveTy Upper); /// Produce the smallest range such that all values that may satisfy the given /// predicate with any value contained within Other is contained in the /// returned range. Formally, this returns a superset of /// 'union over all y in Other . { x : icmp op x y is true }'. If the exact /// answer is not representable as a ConstantRange, the return value will be a /// proper superset of the above. /// /// Example: Pred = ult and Other = i8 [2, 5) returns Result = [0, 4) static ConstantRange makeAllowedICmpRegion(CmpInst::Predicate Pred, const ConstantRange &Other); /// Produce the largest range such that all values in the returned range /// satisfy the given predicate with all values contained within Other. /// Formally, this returns a subset of /// 'intersection over all y in Other . { x : icmp op x y is true }'. If the /// exact answer is not representable as a ConstantRange, the return value /// will be a proper subset of the above. /// /// Example: Pred = ult and Other = i8 [2, 5) returns [0, 2) static ConstantRange makeSatisfyingICmpRegion(CmpInst::Predicate Pred, const ConstantRange &Other); /// Return the lower value for this range. /// const APInt &getLower() const { return Lower; } /// Return the upper value for this range. /// const APInt &getUpper() const { return Upper; } /// Get the bit width of this ConstantRange. /// uint32_t getBitWidth() const { return Lower.getBitWidth(); } /// Return true if this set contains all of the elements possible /// for this data-type. /// bool isFullSet() const; /// Return true if this set contains no members. /// bool isEmptySet() const; /// Return true if this set wraps around the top of the range. /// For example: [100, 8). /// bool isWrappedSet() const; /// Return true if this set wraps around the INT_MIN of /// its bitwidth. For example: i8 [120, 140). 
/// bool isSignWrappedSet() const; /// Return true if the specified value is in the set. /// bool contains(const APInt &Val) const; /// Return true if the other range is a subset of this one. /// bool contains(const ConstantRange &CR) const; /// If this set contains a single element, return it, otherwise return null. /// const APInt *getSingleElement() const { if (Upper == Lower + 1) return &Lower; return nullptr; } /// Return true if this set contains exactly one member. /// bool isSingleElement() const { return getSingleElement() != nullptr; } /// Return the number of elements in this set. /// APInt getSetSize() const; /// Return the largest unsigned value contained in the ConstantRange. /// APInt getUnsignedMax() const; /// Return the smallest unsigned value contained in the ConstantRange. /// APInt getUnsignedMin() const; /// Return the largest signed value contained in the ConstantRange. /// APInt getSignedMax() const; /// Return the smallest signed value contained in the ConstantRange. /// APInt getSignedMin() const; /// Return true if this range is equal to another range. /// bool operator==(const ConstantRange &CR) const { return Lower == CR.Lower && Upper == CR.Upper; } bool operator!=(const ConstantRange &CR) const { return !operator==(CR); } /// Subtract the specified constant from the endpoints of this constant range. ConstantRange subtract(const APInt &CI) const; /// \brief Subtract the specified range from this range (aka relative /// complement of the sets). ConstantRange difference(const ConstantRange &CR) const; /// Return the range that results from the intersection of /// this range with another range. The resultant range is guaranteed to /// include all elements contained in both input ranges, and to have the /// smallest possible set size that does so. Because there may be two /// intersections with the same set size, A.intersectWith(B) might not /// be equal to B.intersectWith(A). 
/// ConstantRange intersectWith(const ConstantRange &CR) const; /// Return the range that results from the union of this range /// with another range. The resultant range is guaranteed to include the /// elements of both sets, but may contain more. For example, [3, 9) union /// [12,15) is [3, 15), which includes 9, 10, and 11, which were not included /// in either set before. /// ConstantRange unionWith(const ConstantRange &CR) const; /// Return a new range in the specified integer type, which must /// be strictly larger than the current type. The returned range will /// correspond to the possible range of values if the source range had been /// zero extended to BitWidth. ConstantRange zeroExtend(uint32_t BitWidth) const; /// Return a new range in the specified integer type, which must /// be strictly larger than the current type. The returned range will /// correspond to the possible range of values if the source range had been /// sign extended to BitWidth. ConstantRange signExtend(uint32_t BitWidth) const; /// Return a new range in the specified integer type, which must be /// strictly smaller than the current type. The returned range will /// correspond to the possible range of values if the source range had been /// truncated to the specified type. ConstantRange truncate(uint32_t BitWidth) const; /// Make this range have the bit width given by \p BitWidth. The /// value is zero extended, truncated, or left alone to make it that width. ConstantRange zextOrTrunc(uint32_t BitWidth) const; /// Make this range have the bit width given by \p BitWidth. The /// value is sign extended, truncated, or left alone to make it that width. ConstantRange sextOrTrunc(uint32_t BitWidth) const; /// Return a new range representing the possible values resulting /// from an addition of a value in this range and a value in \p Other. 
ConstantRange add(const ConstantRange &Other) const; /// Return a new range representing the possible values resulting /// from a subtraction of a value in this range and a value in \p Other. ConstantRange sub(const ConstantRange &Other) const; /// Return a new range representing the possible values resulting /// from a multiplication of a value in this range and a value in \p Other, /// treating both this and \p Other as unsigned ranges. ConstantRange multiply(const ConstantRange &Other) const; /// Return a new range representing the possible values resulting /// from a signed maximum of a value in this range and a value in \p Other. ConstantRange smax(const ConstantRange &Other) const; /// Return a new range representing the possible values resulting /// from an unsigned maximum of a value in this range and a value in \p Other. ConstantRange umax(const ConstantRange &Other) const; /// Return a new range representing the possible values resulting /// from an unsigned division of a value in this range and a value in /// \p Other. ConstantRange udiv(const ConstantRange &Other) const; /// Return a new range representing the possible values resulting /// from a binary-and of a value in this range by a value in \p Other. ConstantRange binaryAnd(const ConstantRange &Other) const; /// Return a new range representing the possible values resulting /// from a binary-or of a value in this range by a value in \p Other. ConstantRange binaryOr(const ConstantRange &Other) const; /// Return a new range representing the possible values resulting /// from a left shift of a value in this range by a value in \p Other. /// TODO: This isn't fully implemented yet. ConstantRange shl(const ConstantRange &Other) const; /// Return a new range representing the possible values resulting from a /// logical right shift of a value in this range and a value in \p Other. ConstantRange lshr(const ConstantRange &Other) const; /// Return a new range that is the logical not of the current set. 
/// ConstantRange inverse() const; /// Print out the bounds to a stream. /// void print(raw_ostream &OS) const; /// Allow printing from a debugger easily. /// void dump() const; }; inline raw_ostream &operator<<(raw_ostream &OS, const ConstantRange &CR) { CR.print(OS); return OS; } } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/Dominators.h
//===- Dominators.h - Dominator Info Calculation ----------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the DominatorTree class, which provides fast and efficient // dominance queries. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_DOMINATORS_H #define LLVM_IR_DOMINATORS_H #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DepthFirstIterator.h" #include "llvm/ADT/GraphTraits.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/CFG.h" #include "llvm/IR/Function.h" #include "llvm/Pass.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/GenericDomTree.h" #include "llvm/Support/raw_ostream.h" #include <algorithm> namespace llvm { // FIXME: Replace this brittle forward declaration with the include of the new // PassManager.h when doing so doesn't break the PassManagerBuilder. 
template <typename IRUnitT> class AnalysisManager; class PreservedAnalyses; extern template class DomTreeNodeBase<BasicBlock>; extern template class DominatorTreeBase<BasicBlock>; extern template void Calculate<Function, BasicBlock *>( DominatorTreeBase<GraphTraits<BasicBlock *>::NodeType> &DT, Function &F); extern template void Calculate<Function, Inverse<BasicBlock *>>( DominatorTreeBase<GraphTraits<Inverse<BasicBlock *>>::NodeType> &DT, Function &F); typedef DomTreeNodeBase<BasicBlock> DomTreeNode; class BasicBlockEdge { const BasicBlock *Start; const BasicBlock *End; public: BasicBlockEdge(const BasicBlock *Start_, const BasicBlock *End_) : Start(Start_), End(End_) { } const BasicBlock *getStart() const { return Start; } const BasicBlock *getEnd() const { return End; } bool isSingleEdge() const; }; /// \brief Concrete subclass of DominatorTreeBase that is used to compute a /// normal dominator tree. class DominatorTree : public DominatorTreeBase<BasicBlock> { public: typedef DominatorTreeBase<BasicBlock> Base; DominatorTree() : DominatorTreeBase<BasicBlock>(false) {} DominatorTree(DominatorTree &&Arg) : Base(std::move(static_cast<Base &>(Arg))) {} DominatorTree &operator=(DominatorTree &&RHS) { Base::operator=(std::move(static_cast<Base &>(RHS))); return *this; } /// \brief Returns *false* if the other dominator tree matches this dominator /// tree. inline bool compare(const DominatorTree &Other) const { const DomTreeNode *R = getRootNode(); const DomTreeNode *OtherR = Other.getRootNode(); if (!R || !OtherR || R->getBlock() != OtherR->getBlock()) return true; if (Base::compare(Other)) return true; return false; } // Ensure base-class overloads are visible. using Base::dominates; /// \brief Return true if Def dominates a use in User. /// /// This performs the special checks necessary if Def and User are in the same /// basic block. Note that Def doesn't dominate a use in Def itself! 
bool dominates(const Instruction *Def, const Use &U) const; bool dominates(const Instruction *Def, const Instruction *User) const; bool dominates(const Instruction *Def, const BasicBlock *BB) const; bool dominates(const BasicBlockEdge &BBE, const Use &U) const; bool dominates(const BasicBlockEdge &BBE, const BasicBlock *BB) const; // Ensure base class overloads are visible. using Base::isReachableFromEntry; /// \brief Provide an overload for a Use. bool isReachableFromEntry(const Use &U) const; /// \brief Verify the correctness of the domtree by re-computing it. /// /// This should only be used for debugging as it aborts the program if the /// verification fails. void verifyDomTree() const; }; //===------------------------------------- // DominatorTree GraphTraits specializations so the DominatorTree can be // iterable by generic graph iterators. template <> struct GraphTraits<DomTreeNode*> { typedef DomTreeNode NodeType; typedef NodeType::iterator ChildIteratorType; static NodeType *getEntryNode(NodeType *N) { return N; } static inline ChildIteratorType child_begin(NodeType *N) { return N->begin(); } static inline ChildIteratorType child_end(NodeType *N) { return N->end(); } typedef df_iterator<DomTreeNode*> nodes_iterator; static nodes_iterator nodes_begin(DomTreeNode *N) { return df_begin(getEntryNode(N)); } static nodes_iterator nodes_end(DomTreeNode *N) { return df_end(getEntryNode(N)); } }; template <> struct GraphTraits<DominatorTree*> : public GraphTraits<DomTreeNode*> { static NodeType *getEntryNode(DominatorTree *DT) { return DT->getRootNode(); } static nodes_iterator nodes_begin(DominatorTree *N) { return df_begin(getEntryNode(N)); } static nodes_iterator nodes_end(DominatorTree *N) { return df_end(getEntryNode(N)); } }; /// \brief Analysis pass which computes a \c DominatorTree. class DominatorTreeAnalysis { public: /// \brief Provide the result typedef for this analysis pass. 
typedef DominatorTree Result; /// \brief Opaque, unique identifier for this analysis pass. static void *ID() { return (void *)&PassID; } /// \brief Run the analysis pass over a function and produce a dominator tree. DominatorTree run(Function &F); /// \brief Provide access to a name for this pass for debugging purposes. static StringRef name() { return "DominatorTreeAnalysis"; } private: static char PassID; }; /// \brief Printer pass for the \c DominatorTree. class DominatorTreePrinterPass { raw_ostream &OS; public: explicit DominatorTreePrinterPass(raw_ostream &OS); PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM); static StringRef name() { return "DominatorTreePrinterPass"; } }; /// \brief Verifier pass for the \c DominatorTree. struct DominatorTreeVerifierPass { PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM); static StringRef name() { return "DominatorTreeVerifierPass"; } }; /// \brief Legacy analysis pass which computes a \c DominatorTree. class DominatorTreeWrapperPass : public FunctionPass { DominatorTree DT; public: static char ID; DominatorTreeWrapperPass() : FunctionPass(ID) { initializeDominatorTreeWrapperPassPass(*PassRegistry::getPassRegistry()); } DominatorTree &getDomTree() { return DT; } const DominatorTree &getDomTree() const { return DT; } bool runOnFunction(Function &F) override; void verifyAnalysis() const override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesAll(); } void releaseMemory() override { DT.releaseMemory(); } void print(raw_ostream &OS, const Module *M = nullptr) const override; }; } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/User.h
//===-- llvm/User.h - User class definition ---------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This class defines the interface that one who uses a Value must implement. // Each instance of the Value class keeps track of what User's have handles // to it. // // * Instructions are the largest class of Users. // * Constants may be users of other constants (think arrays and stuff) // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_USER_H #define LLVM_IR_USER_H #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/IR/Value.h" #include "llvm/Support/AlignOf.h" #include "llvm/Support/ErrorHandling.h" namespace llvm { /// \brief Compile-time customization of User operands. /// /// Customizes operand-related allocators and accessors. template <class> struct OperandTraits; class User : public Value { User(const User &) = delete; template <unsigned> friend struct HungoffOperandTraits; virtual void anchor(); protected: /// Allocate a User with an operand pointer co-allocated. /// /// This is used for subclasses which need to allocate a variable number /// of operands, ie, 'hung off uses'. void *operator new(size_t Size); /// Allocate a User with the operands co-allocated. /// /// This is used for subclasses which have a fixed number of operands. void *operator new(size_t Size, unsigned Us); User(Type *ty, unsigned vty, Use *OpList, unsigned NumOps) : Value(ty, vty) { assert(NumOps < (1u << NumUserOperandsBits) && "Too many operands"); NumUserOperands = NumOps; // If we have hung off uses, then the operand list should initially be // null. 
assert((!HasHungOffUses || !getOperandList()) && "Error in initializing hung off uses for User"); } /// \brief Allocate the array of Uses, followed by a pointer /// (with bottom bit set) to the User. /// \param IsPhi identifies callers which are phi nodes and which need /// N BasicBlock* allocated along with N void allocHungoffUses(unsigned N, bool IsPhi = false); /// \brief Grow the number of hung off uses. Note that allocHungoffUses /// should be called if there are no uses. void growHungoffUses(unsigned N, bool IsPhi = false); public: ~User() override { } /// \brief Free memory allocated for User and Use objects. void operator delete(void *Usr); /// \brief Placement delete - required by std, but never called. void operator delete(void*, unsigned); // llvm_unreachable("Constructor throws?"); - HLSL Change: it does on OOM /// \brief Placement delete - required by std, but never called. void operator delete(void*, unsigned, bool) { llvm_unreachable("Constructor throws?"); } protected: template <int Idx, typename U> static Use &OpFrom(const U *that) { return Idx < 0 ? OperandTraits<U>::op_end(const_cast<U*>(that))[Idx] : OperandTraits<U>::op_begin(const_cast<U*>(that))[Idx]; } template <int Idx> Use &Op() { return OpFrom<Idx>(this); } template <int Idx> const Use &Op() const { return OpFrom<Idx>(this); } private: Use *&getHungOffOperands() { return *(reinterpret_cast<Use **>(this) - 1); } Use *getIntrusiveOperands() { return reinterpret_cast<Use *>(this) - NumUserOperands; } void setOperandList(Use *NewList) { assert(HasHungOffUses && "Setting operand list only required for hung off uses"); getHungOffOperands() = NewList; } public: Use *getOperandList() { return HasHungOffUses ? 
getHungOffOperands() : getIntrusiveOperands(); } const Use *getOperandList() const { return const_cast<User *>(this)->getOperandList(); } Value *getOperand(unsigned i) const { assert(i < NumUserOperands && "getOperand() out of range!"); return getOperandList()[i]; } void setOperand(unsigned i, Value *Val) { assert(i < NumUserOperands && "setOperand() out of range!"); assert((!isa<Constant>((const Value*)this) || isa<GlobalValue>((const Value*)this)) && "Cannot mutate a constant with setOperand!"); getOperandList()[i] = Val; } const Use &getOperandUse(unsigned i) const { assert(i < NumUserOperands && "getOperandUse() out of range!"); return getOperandList()[i]; } Use &getOperandUse(unsigned i) { assert(i < NumUserOperands && "getOperandUse() out of range!"); return getOperandList()[i]; } unsigned getNumOperands() const { return NumUserOperands; } /// Set the number of operands on a GlobalVariable. /// /// GlobalVariable always allocates space for a single operands, but /// doesn't always use it. /// /// FIXME: As that the number of operands is used to find the start of /// the allocated memory in operator delete, we need to always think we have /// 1 operand before delete. void setGlobalVariableNumOperands(unsigned NumOps) { assert(NumOps <= 1 && "GlobalVariable can only have 0 or 1 operands"); NumUserOperands = NumOps; } /// Set the number of operands on a Function. /// /// Function always allocates space for a single operands, but /// doesn't always use it. /// /// FIXME: As that the number of operands is used to find the start of /// the allocated memory in operator delete, we need to always think we have /// 1 operand before delete. void setFunctionNumOperands(unsigned NumOps) { assert(NumOps <= 1 && "Function can only have 0 or 1 operands"); NumUserOperands = NumOps; } /// \brief Subclasses with hung off uses need to manage the operand count /// themselves. 
In these instances, the operand count isn't used to find the /// OperandList, so there's no issue in having the operand count change. void setNumHungOffUseOperands(unsigned NumOps) { assert(HasHungOffUses && "Must have hung off uses to use this method"); assert(NumOps < (1u << NumUserOperandsBits) && "Too many operands"); NumUserOperands = NumOps; } // --------------------------------------------------------------------------- // Operand Iterator interface... // typedef Use* op_iterator; typedef const Use* const_op_iterator; typedef iterator_range<op_iterator> op_range; typedef iterator_range<const_op_iterator> const_op_range; op_iterator op_begin() { return getOperandList(); } const_op_iterator op_begin() const { return getOperandList(); } op_iterator op_end() { return getOperandList() + NumUserOperands; } const_op_iterator op_end() const { return getOperandList() + NumUserOperands; } op_range operands() { return op_range(op_begin(), op_end()); } const_op_range operands() const { return const_op_range(op_begin(), op_end()); } /// \brief Iterator for directly iterating over the operand Values. struct value_op_iterator : iterator_adaptor_base<value_op_iterator, op_iterator, std::random_access_iterator_tag, Value *, ptrdiff_t, Value *, Value *> { explicit value_op_iterator(Use *U = nullptr) : iterator_adaptor_base(U) {} Value *operator*() const { return *I; } Value *operator->() const { return operator*(); } }; value_op_iterator value_op_begin() { return value_op_iterator(op_begin()); } value_op_iterator value_op_end() { return value_op_iterator(op_end()); } iterator_range<value_op_iterator> operand_values() { return iterator_range<value_op_iterator>(value_op_begin(), value_op_end()); } /// \brief Drop all references to operands. /// /// This function is in charge of "letting go" of all objects that this User /// refers to. This allows one to 'delete' a whole class at a time, even /// though there may be circular references... 
First all references are /// dropped, and all use counts go to zero. Then everything is deleted for /// real. Note that no operations are valid on an object that has "dropped /// all references", except operator delete. void dropAllReferences() { for (Use &U : operands()) U.set(nullptr); } /// \brief Replace uses of one Value with another. /// /// Replaces all references to the "From" definition with references to the /// "To" definition. void replaceUsesOfWith(Value *From, Value *To); // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Value *V) { return isa<Instruction>(V) || isa<Constant>(V); } }; // Either Use objects, or a Use pointer can be prepended to User. // HLSL Change Starts - comment out static asserts, as they are causing errors //static_assert(AlignOf<Use>::Alignment >= AlignOf<User>::Alignment, // "Alignment is insufficient after objects prepended to User"); //static_assert(AlignOf<Use *>::Alignment >= AlignOf<User>::Alignment, // "Alignment is insufficient after objects prepended to User"); // HLSL Change Ends template<> struct simplify_type<User::op_iterator> { typedef Value* SimpleType; static SimpleType getSimplifiedValue(User::op_iterator &Val) { return Val->get(); } }; template<> struct simplify_type<User::const_op_iterator> { typedef /*const*/ Value* SimpleType; static SimpleType getSimplifiedValue(User::const_op_iterator &Val) { return Val->get(); } }; } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/ValueHandle.h
//===- ValueHandle.h - Value Smart Pointer classes --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file declares the ValueHandle class and its sub-classes. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_VALUEHANDLE_H #define LLVM_IR_VALUEHANDLE_H #include "llvm/ADT/DenseMapInfo.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/IR/Value.h" namespace llvm { class ValueHandleBase; template<typename From> struct simplify_type; // ValueHandleBase** is only 4-byte aligned. template<> class PointerLikeTypeTraits<ValueHandleBase**> { public: static inline void *getAsVoidPointer(ValueHandleBase** P) { return P; } static inline ValueHandleBase **getFromVoidPointer(void *P) { return static_cast<ValueHandleBase**>(P); } enum { NumLowBitsAvailable = 2 }; }; /// \brief This is the common base class of value handles. /// /// ValueHandle's are smart pointers to Value's that have special behavior when /// the value is deleted or ReplaceAllUsesWith'd. See the specific handles /// below for details. class ValueHandleBase { friend class Value; protected: /// \brief This indicates what sub class the handle actually is. /// /// This is to avoid having a vtable for the light-weight handle pointers. The /// fully general Callback version does have a vtable. 
enum HandleBaseKind { Assert, Callback, Weak, WeakTracking }; private: PointerIntPair<ValueHandleBase**, 2, HandleBaseKind> PrevPair; ValueHandleBase *Next; Value *Val; void setValPtr(Value *V) { Val = V; } ValueHandleBase(const ValueHandleBase&) = delete; public: explicit ValueHandleBase(HandleBaseKind Kind) : PrevPair(nullptr, Kind), Next(nullptr), Val(nullptr) {} ValueHandleBase(HandleBaseKind Kind, Value *V) : PrevPair(nullptr, Kind), Next(nullptr), Val(V) { if (isValid(getValPtr())) AddToUseList(); } ValueHandleBase(HandleBaseKind Kind, const ValueHandleBase &RHS) : PrevPair(nullptr, Kind), Next(nullptr), Val(RHS.getValPtr()) { if (isValid(getValPtr())) AddToExistingUseList(RHS.getPrevPtr()); } ~ValueHandleBase() { if (isValid(getValPtr())) RemoveFromUseList(); } Value *operator=(Value *RHS) { if (getValPtr() == RHS) return RHS; if (isValid(getValPtr())) RemoveFromUseList(); setValPtr(RHS); if (isValid(getValPtr())) AddToUseList(); return RHS; } Value *operator=(const ValueHandleBase &RHS) { if (getValPtr() == RHS.getValPtr()) return RHS.getValPtr(); if (isValid(getValPtr())) RemoveFromUseList(); setValPtr(RHS.getValPtr()); if (isValid(getValPtr())) AddToExistingUseList(RHS.getPrevPtr()); return getValPtr(); } Value *operator->() const { return getValPtr(); } Value &operator*() const { return *getValPtr(); } protected: Value *getValPtr() const { return Val; } static bool isValid(Value *V) { return V && V != DenseMapInfo<Value *>::getEmptyKey() && V != DenseMapInfo<Value *>::getTombstoneKey(); } public: // Callbacks made from Value. static void ValueIsDeleted(Value *V); static void ValueIsRAUWd(Value *Old, Value *New); private: // Internal implementation details. ValueHandleBase **getPrevPtr() const { return PrevPair.getPointer(); } HandleBaseKind getKind() const { return PrevPair.getInt(); } void setPrevPtr(ValueHandleBase **Ptr) { PrevPair.setPointer(Ptr); } /// \brief Add this ValueHandle to the use list for V. 
/// /// List is the address of either the head of the list or a Next node within /// the existing use list. void AddToExistingUseList(ValueHandleBase **List); /// \brief Add this ValueHandle to the use list after Node. void AddToExistingUseListAfter(ValueHandleBase *Node); /// \brief Add this ValueHandle to the use list for V. void AddToUseList(); /// \brief Remove this ValueHandle from its current use list. void RemoveFromUseList(); }; /// \brief A nullable Value handle that is nullable. /// /// This is a value handle that points to a value, and nulls itself /// out if that value is deleted. class WeakVH : public ValueHandleBase { public: WeakVH() : ValueHandleBase(Weak) {} WeakVH(Value *P) : ValueHandleBase(Weak, P) {} WeakVH(const WeakVH &RHS) : ValueHandleBase(Weak, RHS) {} WeakVH &operator=(const WeakVH &RHS) = default; Value *operator=(Value *RHS) { return ValueHandleBase::operator=(RHS); } Value *operator=(const ValueHandleBase &RHS) { return ValueHandleBase::operator=(RHS); } operator Value *() const { return getValPtr(); } }; // Specialize simplify_type to allow WeakVH to participate in // dyn_cast, isa, etc. template <> struct simplify_type<WeakVH> { typedef Value *SimpleType; static SimpleType getSimplifiedValue(WeakVH &WVH) { return WVH; } }; template <> struct simplify_type<const WeakVH> { typedef Value *SimpleType; static SimpleType getSimplifiedValue(const WeakVH &WVH) { return WVH; } }; /// \brief Value handle that is nullable, but tries to track the Value. /// /// This is a value handle that tries hard to point to a Value, even across /// RAUW operations, but will null itself out if the value is destroyed. this /// is useful for advisory sorts of information, but should not be used as the /// key of a map (since the map would have to rearrange itself when the pointer /// changes). 
class WeakTrackingVH : public ValueHandleBase { public: WeakTrackingVH() : ValueHandleBase(WeakTracking) {} WeakTrackingVH(Value *P) : ValueHandleBase(WeakTracking, P) {} WeakTrackingVH(const WeakTrackingVH &RHS) : ValueHandleBase(WeakTracking, RHS) {} WeakTrackingVH &operator=(const WeakTrackingVH &RHS) = default; Value *operator=(Value *RHS) { return ValueHandleBase::operator=(RHS); } Value *operator=(const ValueHandleBase &RHS) { return ValueHandleBase::operator=(RHS); } operator Value*() const { return getValPtr(); } bool pointsToAliveValue() const { return ValueHandleBase::isValid(getValPtr()); } }; // Specialize simplify_type to allow WeakTrackingVH to participate in // dyn_cast, isa, etc. template <> struct simplify_type<WeakTrackingVH> { typedef Value *SimpleType; static SimpleType getSimplifiedValue(WeakTrackingVH &WVH) { return WVH; } }; template <> struct simplify_type<const WeakTrackingVH> { typedef Value *SimpleType; static SimpleType getSimplifiedValue(const WeakTrackingVH &WVH) { return WVH; } }; /// \brief Value handle that asserts if the Value is deleted. /// /// This is a Value Handle that points to a value and asserts out if the value /// is destroyed while the handle is still live. This is very useful for /// catching dangling pointer bugs and other things which can be non-obvious. /// One particularly useful place to use this is as the Key of a map. Dangling /// pointer bugs often lead to really subtle bugs that only occur if another /// object happens to get allocated to the same address as the old one. Using /// an AssertingVH ensures that an assert is triggered as soon as the bad /// delete occurs. /// /// Note that an AssertingVH handle does *not* follow values across RAUW /// operations. This means that RAUW's need to explicitly update the /// AssertingVH's as it moves. This is required because in non-assert mode this /// class turns into a trivial wrapper around a pointer. 
template <typename ValueTy> class AssertingVH #ifndef NDEBUG : public ValueHandleBase #endif { friend struct DenseMapInfo<AssertingVH<ValueTy> >; #ifndef NDEBUG Value *getRawValPtr() const { return ValueHandleBase::getValPtr(); } void setRawValPtr(Value *P) { ValueHandleBase::operator=(P); } #else Value *ThePtr; Value *getRawValPtr() const { return ThePtr; } void setRawValPtr(Value *P) { ThePtr = P; } #endif // Convert a ValueTy*, which may be const, to the raw Value*. static Value *GetAsValue(Value *V) { return V; } static Value *GetAsValue(const Value *V) { return const_cast<Value*>(V); } ValueTy *getValPtr() const { return static_cast<ValueTy *>(getRawValPtr()); } void setValPtr(ValueTy *P) { setRawValPtr(GetAsValue(P)); } public: #ifndef NDEBUG AssertingVH() : ValueHandleBase(Assert) {} AssertingVH(ValueTy *P) : ValueHandleBase(Assert, GetAsValue(P)) {} AssertingVH(const AssertingVH &RHS) : ValueHandleBase(Assert, RHS) {} #else AssertingVH() : ThePtr(nullptr) {} AssertingVH(ValueTy *P) : ThePtr(GetAsValue(P)) {} AssertingVH(const AssertingVH<ValueTy> &) = default; #endif operator ValueTy*() const { return getValPtr(); } ValueTy *operator=(ValueTy *RHS) { setValPtr(RHS); return getValPtr(); } ValueTy *operator=(const AssertingVH<ValueTy> &RHS) { setValPtr(RHS.getValPtr()); return getValPtr(); } ValueTy *operator->() const { return getValPtr(); } ValueTy &operator*() const { return *getValPtr(); } }; // Specialize DenseMapInfo to allow AssertingVH to participate in DenseMap. 
// Specialize DenseMapInfo so AssertingVH<T> can be used as a DenseMap/DenseSet
// key.  All four operations delegate to the Value* specialization, going
// through the raw-pointer accessors so the empty/tombstone sentinel values
// never trip AssertingVH's validity checking.
template<typename T>
struct DenseMapInfo<AssertingVH<T> > {
  static inline AssertingVH<T> getEmptyKey() {
    AssertingVH<T> Res;
    Res.setRawValPtr(DenseMapInfo<Value *>::getEmptyKey());
    return Res;
  }
  static inline AssertingVH<T> getTombstoneKey() {
    AssertingVH<T> Res;
    Res.setRawValPtr(DenseMapInfo<Value *>::getTombstoneKey());
    return Res;
  }
  static unsigned getHashValue(const AssertingVH<T> &Val) {
    return DenseMapInfo<Value *>::getHashValue(Val.getRawValPtr());
  }
  static bool isEqual(const AssertingVH<T> &LHS, const AssertingVH<T> &RHS) {
    return DenseMapInfo<Value *>::isEqual(LHS.getRawValPtr(),
                                          RHS.getRawValPtr());
  }
};

// AssertingVH<T> may be treated as POD (bitwise-copyable) only in NDEBUG
// builds; in asserts builds its special members presumably do real checking
// work and must run.
template <typename T>
struct isPodLike<AssertingVH<T> > {
#ifdef NDEBUG
  static const bool value = true;
#else
  static const bool value = false;
#endif
};

/// \brief Value handle that tracks a Value across RAUW.
///
/// TrackingVH is designed for situations where a client needs to hold a handle
/// to a Value (or subclass) across some operations which may move that value,
/// but should never destroy it or replace it with some unacceptable type.
///
/// It is an error to attempt to replace a value with one of a type which is
/// incompatible with any of its outstanding TrackingVHs.
///
/// It is an error to read from a TrackingVH that does not point to a valid
/// value.  A TrackingVH is said to not point to a valid value if either it
/// hasn't been assigned a value yet or because the value it was tracking has
/// since been deleted.
///
/// Assigning a value to a TrackingVH is always allowed, even if said TrackingVH
/// no longer points to a valid value.
template <typename ValueTy> class TrackingVH {
  WeakTrackingVH InnerHandle;

public:
  ValueTy *getValPtr() const {
    // HLSL Change begin
    // The original upstream change asserts here when a TrackingVH whose value
    // has been deleted is accessed.
    //
    // However, the llvm code that DXC forked contains implicit code like:
    //    TrackingVH V = nullptr;
    //
    // which invokes setValPtr(nullptr) and then getValPtr().  Pulling in the
    // original upstream change unmodified would therefore always assert here
    // in debug builds even though such code is valid.
    //
    // The original upstream change works because another upstream change
    // https://github.com/llvm/llvm-project/commit/70a6051ddfd5f04777f2bc42503bb11bc8f1723a
    // already cleaned up the problematic code.
    //
    // Until we decide to pull that upstream change into DXC, DXC should follow
    // the original TrackingVH implementation: returning null is always ok here
    // instead of asserting.
    if (InnerHandle.operator llvm::Value *() == nullptr)
      return nullptr;
    // HLSL Change end.
    assert(InnerHandle.pointsToAliveValue() &&
           "TrackingVH must be non-null and valid on dereference!");

    // Check that the value is a member of the correct subclass. We would like
    // to check this property on assignment for better debugging, but we don't
    // want to require a virtual interface on this VH. Instead we allow RAUW to
    // replace this value with a value of an invalid type, and check it here.
    assert(isa<ValueTy>(InnerHandle) &&
           "Tracked Value was replaced by one with an invalid type!");
    return cast<ValueTy>(InnerHandle);
  }

  void setValPtr(ValueTy *P) {
    // Assigning to non-valid TrackingVH's are fine so we just unconditionally
    // assign here.
    InnerHandle = GetAsValue(P);
  }

  // Convert a ValueTy*, which may be const, to the type the base
  // class expects.
  static Value *GetAsValue(Value *V) { return V; }
  static Value *GetAsValue(const Value *V) { return const_cast<Value*>(V); }

public:
  TrackingVH() {}
  TrackingVH(ValueTy *P) { setValPtr(P); }
  TrackingVH(const TrackingVH &RHS) { setValPtr(RHS.getValPtr()); } // HLSL Change

  operator ValueTy*() const { return getValPtr(); }

  ValueTy *operator=(ValueTy *RHS) {
    setValPtr(RHS);
    return getValPtr();
  }
  ValueTy *operator=(const TrackingVH<ValueTy> &RHS) {
    setValPtr(RHS.getValPtr());
    return getValPtr();
  }

  ValueTy *operator->() const { return getValPtr(); }
  ValueTy &operator*() const { return *getValPtr(); }
};

/// \brief Value handle with callbacks on RAUW and destruction.
///
/// This is a value handle that allows subclasses to define callbacks that run
/// when the underlying Value has RAUW called on it or is destroyed.  This
/// class can be used as the key of a map, as long as the user takes it out of
/// the map before calling setValPtr() (since the map has to rearrange itself
/// when the pointer changes).  Unlike ValueHandleBase, this class has a vtable
/// and a virtual destructor.
class CallbackVH : public ValueHandleBase {
  virtual void anchor();
protected:
  CallbackVH(const CallbackVH &RHS)
    : ValueHandleBase(Callback, RHS) {}

  virtual ~CallbackVH() {}

  void setValPtr(Value *P) {
    ValueHandleBase::operator=(P);
  }

public:
  CallbackVH() : ValueHandleBase(Callback) {}
  CallbackVH(Value *P) : ValueHandleBase(Callback, P) {}

  operator Value*() const {
    return getValPtr();
  }

  /// \brief Callback for Value destruction.
  ///
  /// Called when this->getValPtr() is destroyed, inside ~Value(), so you
  /// may call any non-virtual Value method on getValPtr(), but no subclass
  /// methods.  If WeakTrackingVH were implemented as a CallbackVH, it would
  /// use this method to call setValPtr(NULL).  AssertingVH would use this
  /// method to cause an assertion failure.
  ///
  /// All implementations must remove the reference from this object to the
  /// Value that's being destroyed.
  virtual void deleted() { setValPtr(nullptr); }

  /// \brief Callback for Value RAUW.
  ///
  /// Called when this->getValPtr()->replaceAllUsesWith(new_value) is called,
  /// _before_ any of the uses have actually been replaced.  If WeakTrackingVH
  /// were implemented as a CallbackVH, it would use this method to call
  /// setValPtr(new_value).  AssertingVH would do nothing in this method.
  virtual void allUsesReplacedWith(Value *) {}
};

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/Function.h
//===-- llvm/Function.h - Class to represent a single function --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the Function class, which represents a
// single function/procedure in LLVM.
//
// A function basically consists of a list of basic blocks, a list of arguments,
// and a symbol table.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_FUNCTION_H
#define LLVM_IR_FUNCTION_H

#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/Optional.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/GlobalObject.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/Support/Compiler.h"

namespace llvm {

class FunctionType;
class LLVMContext;

// Traits for the intrusive list of Arguments owned by a Function.  The
// sentinel node lives inside the traits object itself, so an empty argument
// list requires no allocation.
template<>
struct ilist_traits<Argument>
  : public SymbolTableListTraits<Argument, Function> {

  // HLSL Change Starts
  // Temporarily disable "downcast of address" UBSAN runtime error
  // https://github.com/microsoft/DirectXShaderCompiler/issues/6446
#ifdef __has_feature
#if __has_feature(undefined_behavior_sanitizer)
  __attribute__((no_sanitize("undefined")))
#endif // __has_feature(address_sanitizer)
#endif // defined(__has_feature)
  // HLSL Change Ends
  Argument *createSentinel() const {
    return static_cast<Argument*>(&Sentinel);
  }
  static void destroySentinel(Argument*) {}

  Argument *provideInitialHead() const { return createSentinel(); }
  Argument *ensureHead(Argument*) const { return createSentinel(); }
  static void noteHead(Argument*, Argument*) {}

  static ValueSymbolTable *getSymTab(Function *ItemParent);
private:
  mutable ilist_half_node<Argument> Sentinel;
};

class Function : public GlobalObject, public ilist_node<Function> {
public:
  typedef iplist<Argument> ArgumentListType;
  typedef iplist<BasicBlock> BasicBlockListType;

  // BasicBlock iterators...
  typedef BasicBlockListType::iterator iterator;
  typedef BasicBlockListType::const_iterator const_iterator;

  typedef ArgumentListType::iterator arg_iterator;
  typedef ArgumentListType::const_iterator const_arg_iterator;

private:
  // Important things that make up a function!
  BasicBlockListType  BasicBlocks;        ///< The basic blocks
  mutable ArgumentListType ArgumentList;  ///< The formal arguments
  std::unique_ptr<ValueSymbolTable>
      SymTab;                             ///< Symbol table of args/instructions  // HLSL Change: use unique_ptr
  AttributeSet AttributeSets;             ///< Parameter attributes
  FunctionType *Ty;

  /*
   * Value::SubclassData
   *
   * bit 0  : HasLazyArguments
   * bit 1  : HasPrefixData
   * bit 2  : HasPrologueData
   * bit 3-6: CallingConvention
   */

  /// Bits from GlobalObject::GlobalObjectSubclassData.
  enum {
    /// Whether this function is materializable.
    IsMaterializableBit = 1 << 0,
    HasMetadataHashEntryBit = 1 << 1
  };
  // Set or clear the bits selected by Mask in the GlobalObject subclass data.
  void setGlobalObjectBit(unsigned Mask, bool Value) {
    setGlobalObjectSubClassData((~Mask & getGlobalObjectSubClassData()) |
                                (Value ? Mask : 0u));
  }

  friend class SymbolTableListTraits<Function, Module>;

  void setParent(Module *parent);

  /// hasLazyArguments/CheckLazyArguments - The argument list of a function is
  /// built on demand, so that the list isn't allocated until the first client
  /// needs it.  The hasLazyArguments predicate returns true if the arg list
  /// hasn't been set up yet.
  bool hasLazyArguments() const {
    return getSubclassDataFromValue() & (1<<0);
  }
  void CheckLazyArguments() const {
    if (hasLazyArguments())
      BuildLazyArguments();
  }
  void BuildLazyArguments() const;

  Function(const Function&) = delete;
  void operator=(const Function&) = delete;

  /// Function ctor - If the (optional) Module argument is specified, the
  /// function is automatically inserted into the end of the function list for
  /// the module.
  ///
  Function(FunctionType *Ty, LinkageTypes Linkage,
           const Twine &N = "", Module *M = nullptr);

public:
  static Function *Create(FunctionType *Ty, LinkageTypes Linkage,
                          const Twine &N = "", Module *M = nullptr) {
    // The '1' reserves space for a single optional operand, which holds the
    // personality function when one is set (see setPersonalityFn /
    // getPersonalityFn below).
    return new(1) Function(Ty, Linkage, N, M);
  }

  ~Function() override;

  /// \brief Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// \brief Get the personality function associated with this function.
  // The personality function is the single optional operand; its presence is
  // signalled by a nonzero operand count.
  bool hasPersonalityFn() const { return getNumOperands() != 0; }
  Constant *getPersonalityFn() const {
    assert(hasPersonalityFn());
    return cast<Constant>(Op<0>());
  }
  void setPersonalityFn(Constant *C);

  Type *getReturnType() const;           // Return the type of the ret val
  FunctionType *getFunctionType() const; // Return the FunctionType for me

  /// getContext - Return a reference to the LLVMContext associated with this
  /// function.
  LLVMContext &getContext() const;

  /// isVarArg - Return true if this function takes a variable number of
  /// arguments.
  bool isVarArg() const;

  bool isMaterializable() const;
  void setIsMaterializable(bool V);

  /// getIntrinsicID - This method returns the ID number of the specified
  /// function, or Intrinsic::not_intrinsic if the function is not an
  /// intrinsic, or if the pointer is null.  This value is always defined to be
  /// zero to allow easy checking for whether a function is intrinsic or not.
  /// The particular intrinsic functions which correspond to this value are
  /// defined in llvm/Intrinsics.h.
  Intrinsic::ID getIntrinsicID() const LLVM_READONLY { return IntID; }
  bool isIntrinsic() const { return getName().startswith("llvm."); }

  /// \brief Recalculate the ID for this function if it is an Intrinsic defined
  /// in llvm/Intrinsics.h.  Sets the intrinsic ID to Intrinsic::not_intrinsic
  /// if the name of this function does not match an intrinsic in that header.
  /// Note, this method does not need to be called directly, as it is called
  /// from Value::setName() whenever the name of this function changes.
  void recalculateIntrinsicID();

  /// getCallingConv()/setCallingConv(CC) - These method get and set the
  /// calling convention of this function.  The enum values for the known
  /// calling conventions are defined in CallingConv.h.
  // The calling convention occupies SubclassData bits 3-6 (see the bit layout
  // comment above), hence the shift by 3 and the low-bit mask of 7.
  CallingConv::ID getCallingConv() const {
    return static_cast<CallingConv::ID>(getSubclassDataFromValue() >> 3);
  }
  void setCallingConv(CallingConv::ID CC) {
    setValueSubclassData((getSubclassDataFromValue() & 7) |
                         (static_cast<unsigned>(CC) << 3));
  }

  /// @brief Return the attribute list for this Function.
  AttributeSet getAttributes() const { return AttributeSets; }

  /// @brief Set the attribute list for this Function.
  void setAttributes(AttributeSet attrs) { AttributeSets = attrs; }

  /// @brief Add function attributes to this function.
  void addFnAttr(Attribute::AttrKind N) {
    setAttributes(AttributeSets.addAttribute(getContext(),
                                             AttributeSet::FunctionIndex, N));
  }

  /// @brief Remove function attributes from this function.
  void removeFnAttr(Attribute::AttrKind N) {
    setAttributes(AttributeSets.removeAttribute(
        getContext(), AttributeSet::FunctionIndex, N));
  }

  /// @brief Add function attributes to this function.
  void addFnAttr(StringRef Kind) {
    setAttributes(
        AttributeSets.addAttribute(getContext(),
                                   AttributeSet::FunctionIndex, Kind));
  }
  void addFnAttr(StringRef Kind, StringRef Value) {
    setAttributes(
        AttributeSets.addAttribute(getContext(),
                                   AttributeSet::FunctionIndex, Kind, Value));
  }

  /// Set the entry count for this function.
  void setEntryCount(uint64_t Count);

  /// Get the entry count for this function.
  Optional<uint64_t> getEntryCount() const;

  /// @brief Return true if the function has the attribute.
  bool hasFnAttribute(Attribute::AttrKind Kind) const {
    return AttributeSets.hasAttribute(AttributeSet::FunctionIndex, Kind);
  }
  bool hasFnAttribute(StringRef Kind) const {
    return AttributeSets.hasAttribute(AttributeSet::FunctionIndex, Kind);
  }

  /// @brief Return the attribute for the given attribute kind.
  Attribute getFnAttribute(Attribute::AttrKind Kind) const {
    return AttributeSets.getAttribute(AttributeSet::FunctionIndex, Kind);
  }
  Attribute getFnAttribute(StringRef Kind) const {
    return AttributeSets.getAttribute(AttributeSet::FunctionIndex, Kind);
  }

  /// \brief Return the stack alignment for the function.
  unsigned getFnStackAlignment() const {
    return AttributeSets.getStackAlignment(AttributeSet::FunctionIndex);
  }

  /// hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm
  /// to use during code generation.
  bool hasGC() const;
  const char *getGC() const;
  void setGC(const char *Str);
  void clearGC();

  /// @brief adds the attribute to the list of attributes.
  void addAttribute(unsigned i, Attribute::AttrKind attr);

  /// @brief adds the attributes to the list of attributes.
  void addAttributes(unsigned i, AttributeSet attrs);

  /// @brief removes the attributes from the list of attributes.
  void removeAttributes(unsigned i, AttributeSet attr);

  /// @brief adds the dereferenceable attribute to the list of attributes.
  void addDereferenceableAttr(unsigned i, uint64_t Bytes);

  /// @brief adds the dereferenceable_or_null attribute to the list of
  /// attributes.
  void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes);

  /// @brief Extract the alignment for a call or parameter (0=unknown).
  unsigned getParamAlignment(unsigned i) const {
    return AttributeSets.getParamAlignment(i);
  }

  /// @brief Extract the number of dereferenceable bytes for a call or
  /// parameter (0=unknown).
  uint64_t getDereferenceableBytes(unsigned i) const {
    return AttributeSets.getDereferenceableBytes(i);
  }

  /// @brief Extract the number of dereferenceable_or_null bytes for a call or
  /// parameter (0=unknown).
  uint64_t getDereferenceableOrNullBytes(unsigned i) const {
    return AttributeSets.getDereferenceableOrNullBytes(i);
  }

  /// @brief Determine if the function does not access memory.
  bool doesNotAccessMemory() const {
    return AttributeSets.hasAttribute(AttributeSet::FunctionIndex,
                                      Attribute::ReadNone);
  }
  void setDoesNotAccessMemory() {
    addFnAttr(Attribute::ReadNone);
  }

  /// @brief Determine if the function does not access or only reads memory.
  bool onlyReadsMemory() const {
    return doesNotAccessMemory() ||
      AttributeSets.hasAttribute(AttributeSet::FunctionIndex,
                                 Attribute::ReadOnly);
  }
  void setOnlyReadsMemory() {
    addFnAttr(Attribute::ReadOnly);
  }

  /// @brief Determine if the call can access memory only using pointers based
  /// on its arguments.
  bool onlyAccessesArgMemory() const {
    return AttributeSets.hasAttribute(AttributeSet::FunctionIndex,
                                      Attribute::ArgMemOnly);
  }
  void setOnlyAccessesArgMemory() { addFnAttr(Attribute::ArgMemOnly); }

  /// @brief Determine if the function cannot return.
  bool doesNotReturn() const {
    return AttributeSets.hasAttribute(AttributeSet::FunctionIndex,
                                      Attribute::NoReturn);
  }
  void setDoesNotReturn() {
    addFnAttr(Attribute::NoReturn);
  }

  /// @brief Determine if the function cannot unwind.
  bool doesNotThrow() const {
    return AttributeSets.hasAttribute(AttributeSet::FunctionIndex,
                                      Attribute::NoUnwind);
  }
  void setDoesNotThrow() {
    addFnAttr(Attribute::NoUnwind);
  }

  /// @brief Determine if the call cannot be duplicated.
  bool cannotDuplicate() const {
    return AttributeSets.hasAttribute(AttributeSet::FunctionIndex,
                                      Attribute::NoDuplicate);
  }
  void setCannotDuplicate() {
    addFnAttr(Attribute::NoDuplicate);
  }

  /// @brief Determine if the call is convergent.
  bool isConvergent() const {
    return AttributeSets.hasAttribute(AttributeSet::FunctionIndex,
                                      Attribute::Convergent);
  }
  void setConvergent() {
    addFnAttr(Attribute::Convergent);
  }

  /// @brief True if the ABI mandates (or the user requested) that this
  /// function be in an unwind table.
  bool hasUWTable() const {
    return AttributeSets.hasAttribute(AttributeSet::FunctionIndex,
                                      Attribute::UWTable);
  }
  void setHasUWTable() {
    addFnAttr(Attribute::UWTable);
  }

  /// @brief True if this function needs an unwind table.
  bool needsUnwindTableEntry() const {
    return hasUWTable() || !doesNotThrow();
  }

  /// @brief Determine if the function returns a structure through first
  /// pointer argument.
  bool hasStructRetAttr() const {
    // The sret parameter is usually the first argument (attribute index 1),
    // but may presumably be the second in some ABIs -- both indices are
    // checked here.
    return AttributeSets.hasAttribute(1, Attribute::StructRet) ||
           AttributeSets.hasAttribute(2, Attribute::StructRet);
  }

  /// @brief Determine if the parameter does not alias other parameters.
  /// @param n The parameter to check. 1 is the first parameter, 0 is the return
  bool doesNotAlias(unsigned n) const {
    return AttributeSets.hasAttribute(n, Attribute::NoAlias);
  }
  void setDoesNotAlias(unsigned n) {
    addAttribute(n, Attribute::NoAlias);
  }

  /// @brief Determine if the parameter can be captured.
  /// @param n The parameter to check. 1 is the first parameter, 0 is the return
  bool doesNotCapture(unsigned n) const {
    return AttributeSets.hasAttribute(n, Attribute::NoCapture);
  }
  void setDoesNotCapture(unsigned n) {
    addAttribute(n, Attribute::NoCapture);
  }

  /// @brief Determine if memory is not accessed through the given parameter.
  /// @param n The parameter to check. 1 is the first parameter, 0 is the return
  bool doesNotAccessMemory(unsigned n) const {
    return AttributeSets.hasAttribute(n, Attribute::ReadNone);
  }
  void setDoesNotAccessMemory(unsigned n) {
    addAttribute(n, Attribute::ReadNone);
  }

  /// @brief Determine if memory is only read through the given parameter.
  /// @param n The parameter to check. 1 is the first parameter, 0 is the return
  bool onlyReadsMemory(unsigned n) const {
    return doesNotAccessMemory(n) ||
      AttributeSets.hasAttribute(n, Attribute::ReadOnly);
  }
  void setOnlyReadsMemory(unsigned n) {
    addAttribute(n, Attribute::ReadOnly);
  }

  /// copyAttributesFrom - copy all additional attributes (those not needed to
  /// create a Function) from the Function Src to this one.
  void copyAttributesFrom(const GlobalValue *Src) override;

  /// deleteBody - This method deletes the body of the function, and converts
  /// the linkage to external.
  ///
  void deleteBody() {
    dropAllReferences();
    setLinkage(ExternalLinkage);
  }

  /// removeFromParent - This method unlinks 'this' from the containing module,
  /// but does not delete it.
  ///
  void removeFromParent() override;

  /// eraseFromParent - This method unlinks 'this' from the containing module
  /// and deletes it.
  ///
  void eraseFromParent() override;

  /// Get the underlying elements of the Function... the basic block list is
  /// empty for external functions.
  ///
  const ArgumentListType &getArgumentList() const {
    CheckLazyArguments();
    return ArgumentList;
  }
  ArgumentListType &getArgumentList() {
    CheckLazyArguments();
    return ArgumentList;
  }
  static iplist<Argument> Function::*getSublistAccess(Argument*) {
    return &Function::ArgumentList;
  }

  const BasicBlockListType &getBasicBlockList() const { return BasicBlocks; }
        BasicBlockListType &getBasicBlockList()       { return BasicBlocks; }
  static iplist<BasicBlock> Function::*getSublistAccess(BasicBlock*) {
    return &Function::BasicBlocks;
  }

  const BasicBlock &getEntryBlock() const { return front(); }
        BasicBlock &getEntryBlock()       { return front(); }

  //===--------------------------------------------------------------------===//
  // Symbol Table Accessing functions...

  /// getSymbolTable() - Return the symbol table...
  ///
  inline       ValueSymbolTable &getValueSymbolTable()       { return *SymTab; }
  inline const ValueSymbolTable &getValueSymbolTable() const { return *SymTab; }

  //===--------------------------------------------------------------------===//
  // BasicBlock iterator forwarding functions
  //
  iterator       begin()       { return BasicBlocks.begin(); }
  const_iterator begin() const { return BasicBlocks.begin(); }
  iterator       end  ()       { return BasicBlocks.end();   }
  const_iterator end  () const { return BasicBlocks.end();   }

  size_t            size() const  { return BasicBlocks.size();  }
  bool             empty() const  { return BasicBlocks.empty(); }
  const BasicBlock &front() const { return BasicBlocks.front(); }
        BasicBlock &front()       { return BasicBlocks.front(); }
  const BasicBlock  &back() const { return BasicBlocks.back();  }
        BasicBlock  &back()       { return BasicBlocks.back();  }

/// @name Function Argument Iteration
/// @{

  arg_iterator arg_begin() {
    CheckLazyArguments();
    return ArgumentList.begin();
  }
  const_arg_iterator arg_begin() const {
    CheckLazyArguments();
    return ArgumentList.begin();
  }
  arg_iterator arg_end() {
    CheckLazyArguments();
    return ArgumentList.end();
  }
  const_arg_iterator arg_end() const {
    CheckLazyArguments();
    return ArgumentList.end();
  }

  iterator_range<arg_iterator> args() {
    return iterator_range<arg_iterator>(arg_begin(), arg_end());
  }

  iterator_range<const_arg_iterator> args() const {
    return iterator_range<const_arg_iterator>(arg_begin(), arg_end());
  }

/// @}

  size_t arg_size() const;
  bool arg_empty() const;

  // See the SubclassData bit layout comment above for the bit assignments
  // tested by hasPrefixData/hasPrologueData.
  bool hasPrefixData() const {
    return getSubclassDataFromValue() & (1<<1);
  }

  Constant *getPrefixData() const;
  void setPrefixData(Constant *PrefixData);

  bool hasPrologueData() const {
    return getSubclassDataFromValue() & (1<<2);
  }

  Constant *getPrologueData() const;
  void setPrologueData(Constant *PrologueData);

  /// Print the function to an output stream with an optional
  /// AssemblyAnnotationWriter.
  void print(raw_ostream &OS, AssemblyAnnotationWriter *AAW = nullptr) const;

  /// viewCFG - This function is meant for use from the debugger.  You can just
  /// say 'call F->viewCFG()' and a ghostview window should pop up from the
  /// program, displaying the CFG of the current function with the code for each
  /// basic block inside.  This depends on there being a 'dot' and 'gv' program
  /// in your path.
  ///
  LLVM_DUMP_METHOD void viewCFG() const; // HLSL Change - Add LLVM_DUMP_METHOD

  /// viewCFGOnly - This function is meant for use from the debugger. It works
  /// just like viewCFG, but it does not include the contents of basic blocks
  /// into the nodes, just the label. If you are only interested in the CFG
  /// this can make the graph smaller.
  ///
  // HLSL Change - Add LLVM_DUMP_METHOD
  LLVM_DUMP_METHOD void viewCFGOnly() const;

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Value *V) {
    return V->getValueID() == Value::FunctionVal;
  }

  /// dropAllReferences() - This method causes all the subinstructions to "let
  /// go" of all references that they are maintaining.  This allows one to
  /// 'delete' a whole module at a time, even though there may be circular
  /// references... first all references are dropped, and all use counts go to
  /// zero.  Then everything is deleted for real.  Note that no operations are
  /// valid on an object that has "dropped all references", except operator
  /// delete.
  ///
  /// Since no other object in the module can have references into the body of a
  /// function, dropping all references deletes the entire body of the function,
  /// including any contained basic blocks.
  ///
  void dropAllReferences();

  /// hasAddressTaken - returns true if there are any uses of this function
  /// other than direct calls or invokes to it, or blockaddress expressions.
  /// Optionally passes back an offending user for diagnostic purposes.
  ///
  bool hasAddressTaken(const User** = nullptr) const;

  /// isDefTriviallyDead - Return true if it is trivially safe to remove
  /// this function definition from the module (because it isn't externally
  /// visible, does not have its address taken, and has no callers).  To make
  /// this more accurate, call removeDeadConstantUsers first.
  bool isDefTriviallyDead() const;

  /// callsFunctionThatReturnsTwice - Return true if the function has a call to
  /// setjmp or other function that gcc recognizes as "returning twice".
  bool callsFunctionThatReturnsTwice() const;

  /// \brief Check if this has any metadata.
  bool hasMetadata() const { return hasMetadataHashEntry(); }

  /// \brief Get the current metadata attachment, if any.
  ///
  /// Returns \c nullptr if such an attachment is missing.
  /// @{
  MDNode *getMetadata(unsigned KindID) const;
  MDNode *getMetadata(StringRef Kind) const;
  /// @}

  /// \brief Set a particular kind of metadata attachment.
  ///
  /// Sets the given attachment to \c MD, erasing it if \c MD is \c nullptr or
  /// replacing it if it already exists.
  /// @{
  void setMetadata(unsigned KindID, MDNode *MD);
  void setMetadata(StringRef Kind, MDNode *MD);
  /// @}

  /// \brief Get all current metadata attachments.
  void
  getAllMetadata(SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const;

  /// \brief Drop metadata not in the given list.
  ///
  /// Drop all metadata from \c this not included in \c KnownIDs.
  void dropUnknownMetadata(ArrayRef<unsigned> KnownIDs);

private:
  // Shadow Value::setValueSubclassData with a private forwarding method so that
  // subclasses cannot accidentally use it.
  void setValueSubclassData(unsigned short D) {
    Value::setValueSubclassData(D);
  }

  bool hasMetadataHashEntry() const {
    return getGlobalObjectSubClassData() & HasMetadataHashEntryBit;
  }
  void setHasMetadataHashEntry(bool HasEntry) {
    setGlobalObjectBit(HasMetadataHashEntryBit, HasEntry);
  }

  void clearMetadata();
};

inline ValueSymbolTable *
ilist_traits<BasicBlock>::getSymTab(Function *F) {
  return F ? &F->getValueSymbolTable() : nullptr;
}

inline ValueSymbolTable *
ilist_traits<Argument>::getSymTab(Function *F) {
  return F ? &F->getValueSymbolTable() : nullptr;
}

template <>
struct OperandTraits<Function> : public OptionalOperandTraits<Function> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(Function, Value)

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/LegacyPassManagers.h
//===- LegacyPassManagers.h - Legacy Pass Infrastructure --------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file declares the LLVM Pass Manager infrastructure. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_LEGACYPASSMANAGERS_H #define LLVM_IR_LEGACYPASSMANAGERS_H #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Pass.h" #include <map> #include <vector> #include <set> // HLSL change //===----------------------------------------------------------------------===// // Overview: // The Pass Manager Infrastructure manages passes. It's responsibilities are: // // o Manage optimization pass execution order // o Make required Analysis information available before pass P is run // o Release memory occupied by dead passes // o If Analysis information is dirtied by a pass then regenerate Analysis // information before it is consumed by another pass. // // Pass Manager Infrastructure uses multiple pass managers. They are // PassManager, FunctionPassManager, MPPassManager, FPPassManager, BBPassManager. // This class hierarchy uses multiple inheritance but pass managers do not // derive from another pass manager. // // PassManager and FunctionPassManager are two top-level pass manager that // represents the external interface of this entire pass manager infrastucture. // // Important classes : // // [o] class PMTopLevelManager; // // Two top level managers, PassManager and FunctionPassManager, derive from // PMTopLevelManager. PMTopLevelManager manages information used by top level // managers such as last user info. // // [o] class PMDataManager; // // PMDataManager manages information, e.g. 
list of available analysis info, // used by a pass manager to manage execution order of passes. It also provides // a place to implement common pass manager APIs. All pass managers derive from // PMDataManager. // // [o] class BBPassManager : public FunctionPass, public PMDataManager; // // BBPassManager manages BasicBlockPasses. // // [o] class FunctionPassManager; // // This is a external interface used to manage FunctionPasses. This // interface relies on FunctionPassManagerImpl to do all the tasks. // // [o] class FunctionPassManagerImpl : public ModulePass, PMDataManager, // public PMTopLevelManager; // // FunctionPassManagerImpl is a top level manager. It manages FPPassManagers // // [o] class FPPassManager : public ModulePass, public PMDataManager; // // FPPassManager manages FunctionPasses and BBPassManagers // // [o] class MPPassManager : public Pass, public PMDataManager; // // MPPassManager manages ModulePasses and FPPassManagers // // [o] class PassManager; // // This is a external interface used by various tools to manages passes. It // relies on PassManagerImpl to do all the tasks. // // [o] class PassManagerImpl : public Pass, public PMDataManager, // public PMTopLevelManager // // PassManagerImpl is a top level pass manager responsible for managing // MPPassManagers. 
//===----------------------------------------------------------------------===// #include "llvm/Support/PrettyStackTrace.h" namespace llvm { class Module; class Pass; class StringRef; class Value; class Timer; class PMDataManager; // enums for debugging strings enum PassDebuggingString { EXECUTION_MSG, // "Executing Pass '" + PassName MODIFICATION_MSG, // "Made Modification '" + PassName FREEING_MSG, // " Freeing Pass '" + PassName ON_BASICBLOCK_MSG, // "' on BasicBlock '" + InstructionName + "'...\n" ON_FUNCTION_MSG, // "' on Function '" + FunctionName + "'...\n" ON_MODULE_MSG, // "' on Module '" + ModuleName + "'...\n" ON_REGION_MSG, // "' on Region '" + Msg + "'...\n'" ON_LOOP_MSG, // "' on Loop '" + Msg + "'...\n'" ON_CG_MSG // "' on Call Graph Nodes '" + Msg + "'...\n'" }; /// PassManagerPrettyStackEntry - This is used to print informative information /// about what pass is running when/if a stack trace is generated. class PassManagerPrettyStackEntry : public PrettyStackTraceEntry { Pass *P; Value *V; Module *M; public: explicit PassManagerPrettyStackEntry(Pass *p) : P(p), V(nullptr), M(nullptr) {} // When P is releaseMemory'd. PassManagerPrettyStackEntry(Pass *p, Value &v) : P(p), V(&v), M(nullptr) {} // When P is run on V PassManagerPrettyStackEntry(Pass *p, Module &m) : P(p), V(nullptr), M(&m) {} // When P is run on M /// print - Emit information about this stack frame to OS. void print(raw_ostream &OS) const override; }; //===----------------------------------------------------------------------===// // PMStack // /// PMStack - This class implements a stack data structure of PMDataManager /// pointers. /// /// Top level pass managers (see PassManager.cpp) maintain active Pass Managers /// using PMStack. Each Pass implements assignPassManager() to connect itself /// with appropriate manager. assignPassManager() walks PMStack to find /// suitable manager. 
class PMStack {
public:
  // Note: iteration order is top-of-stack first (reverse of insertion order).
  typedef llvm::SmallVector<PMDataManager *, 2>::const_reverse_iterator
      iterator; // HLSL Change - SmallVector rather than vector
  iterator begin() const { return S.rbegin(); }
  iterator end() const { return S.rend(); }

  void pop();
  PMDataManager *top() const { return S.back(); }
  void push(PMDataManager *PM);
  bool empty() const { return S.empty(); }

  void dump() const;

private:
  llvm::SmallVector<PMDataManager *, 2>
      S; // HLSL Change - SmallVector rather than vector
};

//===----------------------------------------------------------------------===//
// PMTopLevelManager
//
/// PMTopLevelManager manages LastUser info and collects common APIs used by
/// top level pass managers.
class PMTopLevelManager {
protected:
  explicit PMTopLevelManager(PMDataManager *PMDM);

  unsigned getNumContainedManagers() const {
    return (unsigned)PassManagers.size();
  }

  void initializeAllAnalysisInfo();

private:
  virtual PMDataManager *getAsPMDataManager() = 0;
  virtual PassManagerType getTopLevelPassManagerType() = 0;

public:
  // HLSL-specific knobs controlling IR dumps around passes; *All flags dump
  // around every pass, the string sets select passes by name.
  bool HLSLPrintBeforeAll = false;        // HLSL Change
  std::set<std::string> HLSLPrintBefore;  // HLSL Change
  bool HLSLPrintAfterAll = false;         // HLSL Change
  std::set<std::string> HLSLPrintAfter;   // HLSL Change

  /// Schedule pass P for execution. Make sure that passes required by
  /// P are run before P is run. Update analysis info maintained by
  /// the manager. Remove dead passes. This is a recursive function.
  void schedulePass(Pass *P);

  /// Set pass P as the last user of the given analysis passes.
  void setLastUser(ArrayRef<Pass*> AnalysisPasses, Pass *P);

  /// Collect passes whose last user is P
  void collectLastUses(SmallVectorImpl<Pass *> &LastUses, Pass *P);

  /// Find the pass that implements Analysis AID. Search immutable
  /// passes and all pass managers. If desired pass is not found
  /// then return NULL.
  Pass *findAnalysisPass(AnalysisID AID);

  /// Retrieve the PassInfo for an analysis.
  const PassInfo *findAnalysisPassInfo(AnalysisID AID) const;

  /// Find analysis usage information for the pass P.
  AnalysisUsage *findAnalysisUsage(Pass *P);

  virtual ~PMTopLevelManager();

  /// Add immutable pass and initialize it.
  inline void addImmutablePass(ImmutablePass *P) {
    P->initializePass();
    ImmutablePasses.push_back(P);
  }

  inline SmallVectorImpl<ImmutablePass *>& getImmutablePasses() {
    return ImmutablePasses;
  }

  void addPassManager(PMDataManager *Manager) {
    PassManagers.push_back(Manager);
  }

  // Add Manager into the list of managers that are not directly
  // maintained by this top level pass manager
  inline void addIndirectPassManager(PMDataManager *Manager) {
    IndirectPassManagers.push_back(Manager);
  }

  // Print passes managed by this top level manager.
  void dumpPasses() const;
  void dumpArguments() const;

  // Active Pass Managers
  PMStack activeStack;

protected:
  /// Collection of pass managers
  SmallVector<PMDataManager *, 8> PassManagers;

private:
  /// Collection of pass managers that are not directly maintained
  /// by this pass manager
  SmallVector<PMDataManager *, 8> IndirectPassManagers;

  // Map to keep track of last user of the analysis pass.
  // LastUser->second is the last user of LastUser->first.
  DenseMap<Pass *, Pass *> LastUser;

  // Map to keep track of passes that are last used by a pass.
  // This inverse map is initialized at PM->run() based on
  // LastUser map.
  DenseMap<Pass *, SmallPtrSet<Pass *, 8> > InversedLastUser;

  /// Immutable passes are managed by top level manager.
  SmallVector<ImmutablePass *, 16> ImmutablePasses;

  // Cached analysis-usage info per pass (see findAnalysisUsage).
  DenseMap<Pass *, AnalysisUsage *> AnUsageMap;

  /// Collection of PassInfo objects found via analysis IDs and in this top
  /// level manager. This is used to memoize queries to the pass registry.
  /// FIXME: This is an egregious hack because querying the pass registry is
  /// either slow or racy.
  mutable DenseMap<AnalysisID, const PassInfo *> AnalysisPassInfos;
};

//===----------------------------------------------------------------------===//
// PMDataManager

/// PMDataManager provides the common place to manage the analysis data
/// used by pass managers.
class PMDataManager {
public:
  explicit PMDataManager() : TPM(nullptr), Depth(0) {
    initializeAnalysisInfo();
  }

  virtual ~PMDataManager();

  virtual Pass *getAsPass() = 0;

  /// Augment AvailableAnalysis by adding analysis made available by pass P.
  void recordAvailableAnalysis(Pass *P);

  /// verifyPreservedAnalysis -- Verify analysis preserved by pass P.
  void verifyPreservedAnalysis(Pass *P);

  /// Remove Analysis that is not preserved by the pass
  void removeNotPreservedAnalysis(Pass *P);

  /// Remove dead passes used by P.
  void removeDeadPasses(Pass *P, StringRef Msg, enum PassDebuggingString);

  /// Remove P.
  void freePass(Pass *P, StringRef Msg, enum PassDebuggingString);

  /// Add pass P into the PassVector. Update
  /// AvailableAnalysis appropriately if ProcessAnalysis is true.
  void add(Pass *P, bool ProcessAnalysis = true);

  /// Add RequiredPass into list of lower level passes required by pass P.
  /// RequiredPass is run on the fly by Pass Manager when P requests it
  /// through getAnalysis interface.
  virtual void addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass);

  virtual Pass *getOnTheFlyPass(Pass *P, AnalysisID PI, Function &F);

  /// Initialize available analysis information.
  void initializeAnalysisInfo() {
    AvailableAnalysis.clear();
    for (unsigned i = 0; i < PMT_Last; ++i)
      InheritedAnalysis[i] = nullptr;
  }

  // Return true if P preserves high level analysis used by other
  // passes that are managed by this manager.
  bool preserveHigherLevelAnalysis(Pass *P);

  /// Populate RequiredPasses with analysis pass that are required by
  /// pass P and are available. Populate ReqPassNotAvailable with analysis
  /// pass that are required by pass P but are not available.
  void collectRequiredAnalysis(SmallVectorImpl<Pass *> &RequiredPasses,
                               SmallVectorImpl<AnalysisID> &ReqPassNotAvailable,
                               Pass *P);

  /// All Required analyses should be available to the pass as it runs!  Here
  /// we fill in the AnalysisImpls member of the pass so that it can
  /// successfully use the getAnalysis() method to retrieve the
  /// implementations it needs.
  void initializeAnalysisImpl(Pass *P);

  /// Find the pass that implements Analysis AID. If desired pass is not found
  /// then return NULL.
  /// NOTE(review): the meaning of \p Direction is not evident from this
  /// header -- confirm against the implementation before relying on it.
  Pass *findAnalysisPass(AnalysisID AID, bool Direction);

  // Access toplevel manager
  PMTopLevelManager *getTopLevelManager() { return TPM; }
  void setTopLevelManager(PMTopLevelManager *T) { TPM = T; }

  // Depth of this manager in the nested pass-manager hierarchy.
  unsigned getDepth() const { return Depth; }
  void setDepth(unsigned newDepth) { Depth = newDepth; }

  // Print routines used by debug-pass
  void dumpLastUses(Pass *P, unsigned Offset) const;
  void dumpPassArguments() const;
  void dumpPassInfo(Pass *P, enum PassDebuggingString S1,
                    enum PassDebuggingString S2, StringRef Msg);
  void dumpRequiredSet(const Pass *P) const;
  void dumpPreservedSet(const Pass *P) const;

  unsigned getNumContainedPasses() const {
    return (unsigned)PassVector.size();
  }

  virtual PassManagerType getPassManagerType() const {
    assert ( 0 && "Invalid use of getPassManagerType");
    return PMT_Unknown;
  }

  DenseMap<AnalysisID, Pass*> *getAvailableAnalysis() {
    return &AvailableAnalysis;
  }

  // Collect AvailableAnalysis from all the active Pass Managers.
  void populateInheritedAnalysis(PMStack &PMS) {
    unsigned Index = 0;
    for (PMStack::iterator I = PMS.begin(), E = PMS.end();
         I != E; ++I)
      InheritedAnalysis[Index++] = (*I)->getAvailableAnalysis();
  }

protected:
  // Top level manager.
  PMTopLevelManager *TPM;

  // Collection of passes that are managed by this manager
  SmallVector<Pass *, 16> PassVector;

  // Collection of Analysis provided by Parent pass manager and
  // used by current pass manager. At any time there can not be more
  // than PMT_Last active pass managers.
  DenseMap<AnalysisID, Pass *> *InheritedAnalysis[PMT_Last];

  /// isPassDebuggingExecutionsOrMore - Return true if -debug-pass=Executions
  /// or higher is specified.
  bool isPassDebuggingExecutionsOrMore() const;

private:
  void dumpAnalysisUsage(StringRef Msg, const Pass *P,
                         const AnalysisUsage::VectorType &Set) const;

  // Set of available Analysis. This information is used while scheduling
  // pass. If a pass requires an analysis which is not available then
  // the required analysis pass is scheduled to run before the pass itself is
  // scheduled to run.
  DenseMap<AnalysisID, Pass*> AvailableAnalysis;

  // Collection of higher level analysis used by the pass managed by
  // this manager.
  SmallVector<Pass *, 16> HigherLevelAnalysis;

  unsigned Depth;
};

//===----------------------------------------------------------------------===//
// FPPassManager
//
/// FPPassManager manages BBPassManagers and FunctionPasses.
/// It batches all function passes and basic block pass managers together and
/// sequence them to process one function at a time before processing next
/// function.
class FPPassManager : public ModulePass, public PMDataManager {
public:
  static char ID;
  explicit FPPassManager()
  : ModulePass(ID), PMDataManager() { }

  /// run - Execute all of the passes scheduled for execution.  Keep track of
  /// whether any of the passes modifies the module, and if so, return true.
  bool runOnFunction(Function &F);
  bool runOnModule(Module &M) override;

  /// cleanup - After running all passes, clean up pass manager cache.
  void cleanup();

  /// doInitialization - Overrides ModulePass doInitialization for global
  /// initialization tasks
  ///
  using ModulePass::doInitialization;

  /// doInitialization - Run all of the initializers for the function passes.
  ///
  bool doInitialization(Module &M) override;

  /// doFinalization - Overrides ModulePass doFinalization for global
  /// finalization tasks
  ///
  using ModulePass::doFinalization;

  /// doFinalization - Run all of the finalizers for the function passes.
  ///
  bool doFinalization(Module &M) override;

  PMDataManager *getAsPMDataManager() override { return this; }
  Pass *getAsPass() override { return this; }

  /// Pass Manager itself does not invalidate any analysis info.
  void getAnalysisUsage(AnalysisUsage &Info) const override {
    Info.setPreservesAll();
  }

  // Print passes managed by this manager
  void dumpPassStructure(unsigned Offset) override;

  StringRef getPassName() const override {
    return "Function Pass Manager";
  }

  FunctionPass *getContainedPass(unsigned N) {
    assert ( N < PassVector.size() && "Pass number out of range!");
    FunctionPass *FP = static_cast<FunctionPass *>(PassVector[N]);
    return FP;
  }

  PassManagerType getPassManagerType() const override {
    return PMT_FunctionPassManager;
  }
};

Timer *getPassTimer(Pass *);

}

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/ConstantFolder.h
//===- ConstantFolder.h - Constant folding helper ---------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ConstantFolder class, a helper for IRBuilder.
// It provides IRBuilder with a set of methods for creating constants
// with minimal folding. For general constant creation and folding,
// use ConstantExpr and the routines in llvm/Analysis/ConstantFolding.h.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_CONSTANTFOLDER_H
#define LLVM_IR_CONSTANTFOLDER_H

#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"

namespace llvm {

/// ConstantFolder - Create constants with minimum, target independent, folding.
///
/// Every method is a stateless one-line forwarder to the corresponding
/// ConstantExpr::get* factory; this class exists solely so IRBuilder can be
/// parameterized on a folder policy.
class ConstantFolder {
public:
  explicit ConstantFolder() {}

  //===--------------------------------------------------------------------===//
  // Binary Operators
  //===--------------------------------------------------------------------===//

  Constant *CreateAdd(Constant *LHS, Constant *RHS,
                      bool HasNUW = false, bool HasNSW = false) const {
    return ConstantExpr::getAdd(LHS, RHS, HasNUW, HasNSW);
  }
  Constant *CreateFAdd(Constant *LHS, Constant *RHS) const {
    return ConstantExpr::getFAdd(LHS, RHS);
  }
  Constant *CreateSub(Constant *LHS, Constant *RHS,
                      bool HasNUW = false, bool HasNSW = false) const {
    return ConstantExpr::getSub(LHS, RHS, HasNUW, HasNSW);
  }
  Constant *CreateFSub(Constant *LHS, Constant *RHS) const {
    return ConstantExpr::getFSub(LHS, RHS);
  }
  Constant *CreateMul(Constant *LHS, Constant *RHS,
                      bool HasNUW = false, bool HasNSW = false) const {
    return ConstantExpr::getMul(LHS, RHS, HasNUW, HasNSW);
  }
  Constant *CreateFMul(Constant *LHS, Constant *RHS) const {
    return ConstantExpr::getFMul(LHS, RHS);
  }
  Constant *CreateUDiv(Constant *LHS, Constant *RHS,
                       bool isExact = false) const {
    return ConstantExpr::getUDiv(LHS, RHS, isExact);
  }
  Constant *CreateSDiv(Constant *LHS, Constant *RHS,
                       bool isExact = false) const {
    return ConstantExpr::getSDiv(LHS, RHS, isExact);
  }
  Constant *CreateFDiv(Constant *LHS, Constant *RHS) const {
    return ConstantExpr::getFDiv(LHS, RHS);
  }
  Constant *CreateURem(Constant *LHS, Constant *RHS) const {
    return ConstantExpr::getURem(LHS, RHS);
  }
  Constant *CreateSRem(Constant *LHS, Constant *RHS) const {
    return ConstantExpr::getSRem(LHS, RHS);
  }
  Constant *CreateFRem(Constant *LHS, Constant *RHS) const {
    return ConstantExpr::getFRem(LHS, RHS);
  }
  Constant *CreateShl(Constant *LHS, Constant *RHS,
                      bool HasNUW = false, bool HasNSW = false) const {
    return ConstantExpr::getShl(LHS, RHS, HasNUW, HasNSW);
  }
  Constant *CreateLShr(Constant *LHS, Constant *RHS,
                       bool isExact = false) const {
    return ConstantExpr::getLShr(LHS, RHS, isExact);
  }
  Constant *CreateAShr(Constant *LHS, Constant *RHS,
                       bool isExact = false) const {
    return ConstantExpr::getAShr(LHS, RHS, isExact);
  }
  Constant *CreateAnd(Constant *LHS, Constant *RHS) const {
    return ConstantExpr::getAnd(LHS, RHS);
  }
  Constant *CreateOr(Constant *LHS, Constant *RHS) const {
    return ConstantExpr::getOr(LHS, RHS);
  }
  Constant *CreateXor(Constant *LHS, Constant *RHS) const {
    return ConstantExpr::getXor(LHS, RHS);
  }
  Constant *CreateBinOp(Instruction::BinaryOps Opc,
                        Constant *LHS, Constant *RHS) const {
    return ConstantExpr::get(Opc, LHS, RHS);
  }

  //===--------------------------------------------------------------------===//
  // Unary Operators
  //===--------------------------------------------------------------------===//

  Constant *CreateNeg(Constant *C,
                      bool HasNUW = false, bool HasNSW = false) const {
    return ConstantExpr::getNeg(C, HasNUW, HasNSW);
  }
  Constant *CreateFNeg(Constant *C) const {
    return ConstantExpr::getFNeg(C);
  }
  Constant *CreateNot(Constant *C) const {
    return ConstantExpr::getNot(C);
  }

  //===--------------------------------------------------------------------===//
  // Memory Instructions
  //===--------------------------------------------------------------------===//

  Constant *CreateGetElementPtr(Type *Ty, Constant *C,
                                ArrayRef<Constant *> IdxList) const {
    return ConstantExpr::getGetElementPtr(Ty, C, IdxList);
  }
  Constant *CreateGetElementPtr(Type *Ty, Constant *C, Constant *Idx) const {
    // This form of the function only exists to avoid ambiguous overload
    // warnings about whether to convert Idx to ArrayRef<Constant *> or
    // ArrayRef<Value *>.
    return ConstantExpr::getGetElementPtr(Ty, C, Idx);
  }
  Constant *CreateGetElementPtr(Type *Ty, Constant *C,
                                ArrayRef<Value *> IdxList) const {
    return ConstantExpr::getGetElementPtr(Ty, C, IdxList);
  }

  Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
                                        ArrayRef<Constant *> IdxList) const {
    return ConstantExpr::getInBoundsGetElementPtr(Ty, C, IdxList);
  }
  Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
                                        Constant *Idx) const {
    // This form of the function only exists to avoid ambiguous overload
    // warnings about whether to convert Idx to ArrayRef<Constant *> or
    // ArrayRef<Value *>.
    return ConstantExpr::getInBoundsGetElementPtr(Ty, C, Idx);
  }
  Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
                                        ArrayRef<Value *> IdxList) const {
    return ConstantExpr::getInBoundsGetElementPtr(Ty, C, IdxList);
  }

  //===--------------------------------------------------------------------===//
  // Cast/Conversion Operators
  //===--------------------------------------------------------------------===//

  Constant *CreateCast(Instruction::CastOps Op, Constant *C,
                       Type *DestTy) const {
    return ConstantExpr::getCast(Op, C, DestTy);
  }
  Constant *CreatePointerCast(Constant *C, Type *DestTy) const {
    return ConstantExpr::getPointerCast(C, DestTy);
  }
  Constant *CreatePointerBitCastOrAddrSpaceCast(Constant *C,
                                                Type *DestTy) const {
    return ConstantExpr::getPointerBitCastOrAddrSpaceCast(C, DestTy);
  }
  Constant *CreateIntCast(Constant *C, Type *DestTy, bool isSigned) const {
    return ConstantExpr::getIntegerCast(C, DestTy, isSigned);
  }
  Constant *CreateFPCast(Constant *C, Type *DestTy) const {
    return ConstantExpr::getFPCast(C, DestTy);
  }
  Constant *CreateBitCast(Constant *C, Type *DestTy) const {
    return CreateCast(Instruction::BitCast, C, DestTy);
  }
  Constant *CreateIntToPtr(Constant *C, Type *DestTy) const {
    return CreateCast(Instruction::IntToPtr, C, DestTy);
  }
  Constant *CreatePtrToInt(Constant *C, Type *DestTy) const {
    return CreateCast(Instruction::PtrToInt, C, DestTy);
  }
  Constant *CreateZExtOrBitCast(Constant *C, Type *DestTy) const {
    return ConstantExpr::getZExtOrBitCast(C, DestTy);
  }
  Constant *CreateSExtOrBitCast(Constant *C, Type *DestTy) const {
    return ConstantExpr::getSExtOrBitCast(C, DestTy);
  }
  Constant *CreateTruncOrBitCast(Constant *C, Type *DestTy) const {
    return ConstantExpr::getTruncOrBitCast(C, DestTy);
  }

  //===--------------------------------------------------------------------===//
  // Compare Instructions
  //===--------------------------------------------------------------------===//

  Constant *CreateICmp(CmpInst::Predicate P, Constant *LHS,
                       Constant *RHS) const {
    return ConstantExpr::getCompare(P, LHS, RHS);
  }
  Constant *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
                       Constant *RHS) const {
    return ConstantExpr::getCompare(P, LHS, RHS);
  }

  //===--------------------------------------------------------------------===//
  // Other Instructions
  //===--------------------------------------------------------------------===//

  Constant *CreateSelect(Constant *C, Constant *True, Constant *False) const {
    return ConstantExpr::getSelect(C, True, False);
  }
  Constant *CreateExtractElement(Constant *Vec, Constant *Idx) const {
    return ConstantExpr::getExtractElement(Vec, Idx);
  }
  Constant *CreateInsertElement(Constant *Vec, Constant *NewElt,
                                Constant *Idx) const {
    return ConstantExpr::getInsertElement(Vec, NewElt, Idx);
  }
  Constant *CreateShuffleVector(Constant *V1, Constant *V2,
                                Constant *Mask) const {
    return ConstantExpr::getShuffleVector(V1, V2, Mask);
  }
  Constant *CreateExtractValue(Constant *Agg,
                               ArrayRef<unsigned> IdxList) const {
    return ConstantExpr::getExtractValue(Agg, IdxList);
  }
  Constant *CreateInsertValue(Constant *Agg, Constant *Val,
                              ArrayRef<unsigned> IdxList) const {
    return ConstantExpr::getInsertValue(Agg, Val, IdxList);
  }
};

}

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/ValueSymbolTable.h
//===-- llvm/ValueSymbolTable.h - Implement a Value Symtab ------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the name/Value symbol table for LLVM.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_VALUESYMBOLTABLE_H
#define LLVM_IR_VALUESYMBOLTABLE_H

#include "llvm/ADT/StringMap.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/DataTypes.h"

namespace llvm {
  template<typename ValueSubClass, typename ItemParentClass>
        class SymbolTableListTraits;
  class BasicBlock;
  class Function;
  class NamedMDNode;
  class Module;
  class StringRef;

/// This class provides a symbol table of name/value pairs. It is essentially
/// a std::map<std::string,Value*> but has a controlled interface provided by
/// LLVM as well as ensuring uniqueness of names.
///
/// Mutation is private: only Value and the SymbolTableListTraits friends may
/// insert, rename, or remove entries, keeping the table consistent with the
/// names stored on the Values themselves.
class ValueSymbolTable {
  friend class Value;
  friend class SymbolTableListTraits<Argument, Function>;
  friend class SymbolTableListTraits<BasicBlock, Function>;
  friend class SymbolTableListTraits<Instruction, BasicBlock>;
  friend class SymbolTableListTraits<Function, Module>;
  friend class SymbolTableListTraits<GlobalVariable, Module>;
  friend class SymbolTableListTraits<GlobalAlias, Module>;
/// @name Types
/// @{
public:
  /// @brief A mapping of names to values.
  typedef StringMap<Value*> ValueMap;

  /// @brief An iterator over a ValueMap.
  typedef ValueMap::iterator iterator;

  /// @brief A const_iterator over a ValueMap.
  typedef ValueMap::const_iterator const_iterator;

/// @}
/// @name Constructors
/// @{
public:

  ValueSymbolTable() : vmap(0), LastUnique(0) {}
  ~ValueSymbolTable();

/// @}
/// @name Accessors
/// @{
public:

  /// This method finds the value with the given \p Name in the
  /// symbol table.
  /// @returns the value associated with the \p Name
  /// @brief Lookup a named Value.
  Value *lookup(StringRef Name) const { return vmap.lookup(Name); }

  /// @returns true iff the symbol table is empty
  /// @brief Determine if the symbol table is empty
  inline bool empty() const { return vmap.empty(); }

  /// @brief The number of name/type pairs is returned.
  inline unsigned size() const { return unsigned(vmap.size()); }

  /// This function can be used from the debugger to display the
  /// content of the symbol table while debugging.
  /// @brief Print out symbol table on stderr
  void dump() const;

/// @}
/// @name Iteration
/// @{
public:
  /// @brief Get an iterator to the beginning of the symbol table.
  inline iterator begin() { return vmap.begin(); }

  /// @brief Get a const_iterator to the beginning of the symbol table.
  inline const_iterator begin() const { return vmap.begin(); }

  /// @brief Get an iterator to the end of the symbol table.
  inline iterator end() { return vmap.end(); }

  /// @brief Get a const_iterator to the end of the symbol table.
  inline const_iterator end() const { return vmap.end(); }

/// @}
/// @name Mutators
/// @{
private:
  /// This method adds the provided value \p N to the symbol table.  The Value
  /// must have a name which is used to place the value in the symbol table.
  /// If the inserted name conflicts, this renames the value.
  /// @brief Add a named value to the symbol table
  void reinsertValue(Value *V);

  /// createValueName - This method attempts to create a value name and insert
  /// it into the symbol table with the specified name.  If it conflicts, it
  /// auto-renames the name and returns that instead.
  ValueName *createValueName(StringRef Name, Value *V);

  /// This method removes a value from the symbol table.  It leaves the
  /// ValueName attached to the value, but it is no longer inserted in the
  /// symtab.
  void removeValueName(ValueName *V);

/// @}
/// @name Internal Data
/// @{
private:
  ValueMap vmap;               ///< The map that holds the symbol table.
  mutable uint32_t LastUnique; ///< Counter for tracking unique names

/// @}
};

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/CallingConv.h
//===-- llvm/CallingConv.h - LLVM Calling Conventions -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines LLVM's set of calling conventions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_CALLINGCONV_H
#define LLVM_IR_CALLINGCONV_H

namespace llvm {

/// CallingConv Namespace - This namespace contains an enum with a value for
/// the well-known calling conventions.
///
namespace CallingConv {
  /// LLVM IR allows to use arbitrary numbers as calling convention
  /// identifiers.
  typedef unsigned ID;

  /// A set of enums which specify the assigned numeric values for known llvm
  /// calling conventions.
  ///
  /// NOTE: these numeric values are part of the bitcode/IR format; existing
  /// enumerators must never be renumbered.  Values below FirstTargetCC are
  /// target-independent; values from FirstTargetCC up are target-specific.
  /// @brief LLVM Calling Convention Representation
  enum {
    /// C - The default llvm calling convention, compatible with C.  This
    /// convention is the only calling convention that supports varargs calls.
    /// As with typical C calling conventions, the callee/caller have to
    /// tolerate certain amounts of prototype mismatch.
    C = 0,

    // Generic LLVM calling conventions.  None of these calling conventions
    // support varargs calls, and all assume that the caller and callee
    // prototype exactly match.

    /// Fast - This calling convention attempts to make calls as fast as
    /// possible (e.g. by passing things in registers).
    Fast = 8,

    // Cold - This calling convention attempts to make code in the caller as
    // efficient as possible under the assumption that the call is not commonly
    // executed.  As such, these calls often preserve all registers so that the
    // call does not break any live ranges in the caller side.
    Cold = 9,

    // GHC - Calling convention used by the Glasgow Haskell Compiler (GHC).
    GHC = 10,

    // HiPE - Calling convention used by the High-Performance Erlang Compiler
    // (HiPE).
    HiPE = 11,

    // WebKit JS - Calling convention for stack based JavaScript calls
    WebKit_JS = 12,

    // AnyReg - Calling convention for dynamic register based calls (e.g.
    // stackmap and patchpoint intrinsics).
    AnyReg = 13,

    // PreserveMost - Calling convention for runtime calls that preserves most
    // registers.
    PreserveMost = 14,

    // PreserveAll - Calling convention for runtime calls that preserves
    // (almost) all registers.
    PreserveAll = 15,

    // Target - This is the start of the target-specific calling conventions,
    // e.g. fastcall and thiscall on X86.
    FirstTargetCC = 64,

    /// X86_StdCall - stdcall is the calling conventions mostly used by the
    /// Win32 API. It is basically the same as the C convention with the
    /// difference in that the callee is responsible for popping the arguments
    /// from the stack.
    X86_StdCall = 64,

    /// X86_FastCall - 'fast' analog of X86_StdCall. Passes first two arguments
    /// in ECX:EDX registers, others - via stack. Callee is responsible for
    /// stack cleaning.
    X86_FastCall = 65,

    /// ARM_APCS - ARM Procedure Calling Standard calling convention (obsolete,
    /// but still used on some targets).
    ARM_APCS = 66,

    /// ARM_AAPCS - ARM Architecture Procedure Calling Standard calling
    /// convention (aka EABI). Soft float variant.
    ARM_AAPCS = 67,

    /// ARM_AAPCS_VFP - Same as ARM_AAPCS, but uses hard floating point ABI.
    ARM_AAPCS_VFP = 68,

    /// MSP430_INTR - Calling convention used for MSP430 interrupt routines.
    MSP430_INTR = 69,

    /// X86_ThisCall - Similar to X86_StdCall. Passes first argument in ECX,
    /// others via stack. Callee is responsible for stack cleaning. MSVC uses
    /// this by default for methods in its ABI.
    X86_ThisCall = 70,

    /// PTX_Kernel - Call to a PTX kernel.
    /// Passes all arguments in parameter space.
    PTX_Kernel = 71,

    /// PTX_Device - Call to a PTX device function.
    /// Passes all arguments in register or parameter space.
    PTX_Device = 72,

    /// SPIR_FUNC - Calling convention for SPIR non-kernel device functions.
    /// No lowering or expansion of arguments.
    /// Structures are passed as a pointer to a struct with the byval attribute.
    /// Functions can only call SPIR_FUNC and SPIR_KERNEL functions.
    /// Functions can only have zero or one return values.
    /// Variable arguments are not allowed, except for printf.
    /// How arguments/return values are lowered are not specified.
    /// Functions are only visible to the devices.
    SPIR_FUNC = 75,

    /// SPIR_KERNEL - Calling convention for SPIR kernel functions.
    /// Inherits the restrictions of SPIR_FUNC, except
    /// Cannot have non-void return values.
    /// Cannot have variable arguments.
    /// Can also be called by the host.
    /// Is externally visible.
    SPIR_KERNEL = 76,

    /// Intel_OCL_BI - Calling conventions for Intel OpenCL built-ins
    Intel_OCL_BI = 77,

    /// \brief The C convention as specified in the x86-64 supplement to the
    /// System V ABI, used on most non-Windows systems.
    X86_64_SysV = 78,

    /// \brief The C convention as implemented on Windows/x86-64. This
    /// convention differs from the more common \c X86_64_SysV convention
    /// in a number of ways, most notably in that XMM registers used to pass
    /// arguments are shadowed by GPRs, and vice versa.
    X86_64_Win64 = 79,

    /// \brief MSVC calling convention that passes vectors and vector aggregates
    /// in SSE registers.
    X86_VectorCall = 80
  };
} // End CallingConv namespace

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/ValueMap.h
//===- ValueMap.h - Safe map from Values to data ----------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the ValueMap class. ValueMap maps Value* or any subclass // to an arbitrary other type. It provides the DenseMap interface but updates // itself to remain safe when keys are RAUWed or deleted. By default, when a // key is RAUWed from V1 to V2, the old mapping V1->target is removed, and a new // mapping V2->target is added. If V2 already existed, its old target is // overwritten. When a key is deleted, its mapping is removed. // // You can override a ValueMap's Config parameter to control exactly what // happens on RAUW and destruction and to get called back on each event. It's // legal to call back into the ValueMap from a Config's callbacks. Config // parameters should inherit from ValueMapConfig<KeyT> to get default // implementations of all the methods ValueMap uses. See ValueMapConfig for // documentation of the functions you can override. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_VALUEMAP_H #define LLVM_IR_VALUEMAP_H #include "llvm/ADT/DenseMap.h" #include "llvm/IR/TrackingMDRef.h" #include "llvm/IR/ValueHandle.h" #include "llvm/Support/Mutex.h" #include "llvm/Support/UniqueLock.h" #include "llvm/Support/type_traits.h" #include <iterator> #include <memory> namespace llvm { template<typename KeyT, typename ValueT, typename Config> class ValueMapCallbackVH; template<typename DenseMapT, typename KeyT> class ValueMapIterator; template<typename DenseMapT, typename KeyT> class ValueMapConstIterator; /// This class defines the default behavior for configurable aspects of /// ValueMap<>. 
User Configs should inherit from this class to be as compatible /// as possible with future versions of ValueMap. template<typename KeyT, typename MutexT = sys::Mutex> struct ValueMapConfig { typedef MutexT mutex_type; /// If FollowRAUW is true, the ValueMap will update mappings on RAUW. If it's /// false, the ValueMap will leave the original mapping in place. enum { FollowRAUW = true }; // All methods will be called with a first argument of type ExtraData. The // default implementations in this class take a templated first argument so // that users' subclasses can use any type they want without having to // override all the defaults. struct ExtraData {}; template<typename ExtraDataT> static void onRAUW(const ExtraDataT & /*Data*/, KeyT /*Old*/, KeyT /*New*/) {} template<typename ExtraDataT> static void onDelete(const ExtraDataT &/*Data*/, KeyT /*Old*/) {} /// Returns a mutex that should be acquired around any changes to the map. /// This is only acquired from the CallbackVH (and held around calls to onRAUW /// and onDelete) and not inside other ValueMap methods. NULL means that no /// mutex is necessary. template<typename ExtraDataT> static mutex_type *getMutex(const ExtraDataT &/*Data*/) { return nullptr; } }; /// See the file comment. 
/// ValueMap - A map whose keys are LLVM Values.  Each key is held through a
/// ValueMapCallbackVH (defined below), so entries follow the Value through
/// RAUW and are dropped/updated on deletion according to the Config policy
/// (ValueMapConfig by default).
template<typename KeyT, typename ValueT, typename Config = ValueMapConfig<KeyT> >
class ValueMap {
  friend class ValueMapCallbackVH<KeyT, ValueT, Config>;
  typedef ValueMapCallbackVH<KeyT, ValueT, Config> ValueMapCVH;
  // The underlying storage: keys are wrapped in callback value handles.
  typedef DenseMap<ValueMapCVH, ValueT, DenseMapInfo<ValueMapCVH> > MapT;
  typedef DenseMap<const Metadata *, TrackingMDRef> MDMapT;
  typedef typename Config::ExtraData ExtraData;
  MapT Map;
  // Lazily-created side table for Metadata mappings (see hasMD()/MD()).
  std::unique_ptr<MDMapT> MDMap;
  // User data forwarded to the Config callbacks (onDelete/onRAUW/getMutex).
  ExtraData Data;
  ValueMap(const ValueMap&) = delete;
  ValueMap& operator=(const ValueMap&) = delete;
public:
  typedef KeyT key_type;
  typedef ValueT mapped_type;
  typedef std::pair<KeyT, ValueT> value_type;
  typedef unsigned size_type;

  explicit ValueMap(unsigned NumInitBuckets = 64)
      : Map(NumInitBuckets), Data() {}
  explicit ValueMap(const ExtraData &Data, unsigned NumInitBuckets = 64)
      : Map(NumInitBuckets), Data(Data) {}

  /// Return true if the Metadata side table has been created.
  bool hasMD() const { return bool(MDMap); }
  /// Get (creating on first use) the Metadata side table.
  MDMapT &MD() {
    if (!MDMap)
      MDMap.reset(new MDMapT);
    return *MDMap;
  }

  typedef ValueMapIterator<MapT, KeyT> iterator;
  typedef ValueMapConstIterator<MapT, KeyT> const_iterator;
  inline iterator begin() { return iterator(Map.begin()); }
  inline iterator end() { return iterator(Map.end()); }
  inline const_iterator begin() const { return const_iterator(Map.begin()); }
  inline const_iterator end() const { return const_iterator(Map.end()); }

  bool empty() const { return Map.empty(); }
  size_type size() const { return Map.size(); }

  /// Grow the map so that it has at least Size buckets. Does not shrink
  void resize(size_t Size) { Map.resize(Size); }

  // Clears both the value map and the Metadata side table.
  void clear() {
    Map.clear();
    MDMap.reset();
  }

  /// Return 1 if the specified key is in the map, 0 otherwise.
  size_type count(const KeyT &Val) const {
    // find_as lets us look up with a plain KeyT without building a
    // ValueMapCVH wrapper.
    return Map.find_as(Val) == Map.end() ? 0 : 1;
  }

  iterator find(const KeyT &Val) { return iterator(Map.find_as(Val)); }
  const_iterator find(const KeyT &Val) const {
    return const_iterator(Map.find_as(Val));
  }

  /// lookup - Return the entry for the specified key, or a default
  /// constructed value if no such entry exists.
  ValueT lookup(const KeyT &Val) const {
    typename MapT::const_iterator I = Map.find_as(Val);
    return I != Map.end() ? I->second : ValueT();
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
    auto MapResult = Map.insert(std::make_pair(Wrap(KV.first), KV.second));
    return std::make_pair(iterator(MapResult.first), MapResult.second);
  }

  // Rvalue overload: moves the mapped value into the map.
  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
    auto MapResult =
        Map.insert(std::make_pair(Wrap(KV.first), std::move(KV.second)));
    return std::make_pair(iterator(MapResult.first), MapResult.second);
  }

  /// insert - Range insertion of pairs.
  template<typename InputIt>
  void insert(InputIt I, InputIt E) {
    for (; I != E; ++I)
      insert(*I);
  }

  /// Erase the entry for Val.  Returns true if an entry was removed.
  bool erase(const KeyT &Val) {
    typename MapT::iterator I = Map.find_as(Val);
    if (I == Map.end())
      return false;
    Map.erase(I);
    return true;
  }
  void erase(iterator I) {
    return Map.erase(I.base());
  }

  value_type& FindAndConstruct(const KeyT &Key) {
    return Map.FindAndConstruct(Wrap(Key));
  }

  // Default-constructs the mapped value if Key is absent (DenseMap semantics).
  ValueT &operator[](const KeyT &Key) {
    return Map[Wrap(Key)];
  }

  /// isPointerIntoBucketsArray - Return true if the specified pointer points
  /// somewhere into the ValueMap's array of buckets (i.e. either to a key or
  /// value in the ValueMap).
  bool isPointerIntoBucketsArray(const void *Ptr) const {
    return Map.isPointerIntoBucketsArray(Ptr);
  }

  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
  /// array.  In conjunction with the previous method, this can be used to
  /// determine whether an insertion caused the ValueMap to reallocate.
  const void *getPointerIntoBucketsArray() const {
    return Map.getPointerIntoBucketsArray();
  }

private:
  // Takes a key being looked up in the map and wraps it into a
  // ValueMapCallbackVH, the actual key type of the map.  We use a helper
  // function because ValueMapCVH is constructed with a second parameter.
  ValueMapCVH Wrap(KeyT key) const {
    // The only way the resulting CallbackVH could try to modify *this (making
    // the const_cast incorrect) is if it gets inserted into the map.  But then
    // this function must have been called from a non-const method, making the
    // const_cast ok.
    return ValueMapCVH(key, const_cast<ValueMap*>(this));
  }
};

// This CallbackVH updates its ValueMap when the contained Value changes,
// according to the user's preferences expressed through the Config object.
template<typename KeyT, typename ValueT, typename Config>
class ValueMapCallbackVH : public CallbackVH {
  friend class ValueMap<KeyT, ValueT, Config>;
  friend struct DenseMapInfo<ValueMapCallbackVH>;
  typedef ValueMap<KeyT, ValueT, Config> ValueMapT;
  typedef typename std::remove_pointer<KeyT>::type KeySansPointerT;

  // The map that owns this handle; used by the callbacks below to update the
  // entry when the Value is deleted or RAUW'd.
  ValueMapT *Map;

  ValueMapCallbackVH(KeyT Key, ValueMapT *Map)
      : CallbackVH(const_cast<Value*>(static_cast<const Value*>(Key))),
        Map(Map) {}

  // Private constructor used to create empty/tombstone DenseMap keys.
  ValueMapCallbackVH(Value *V) : CallbackVH(V), Map(nullptr) {}

public:
  KeyT Unwrap() const { return cast_or_null<KeySansPointerT>(getValPtr()); }

  // Called when the underlying Value is destroyed: notify the Config and
  // remove the entry.  NOTE: *this lives inside the map bucket, so the
  // statement ordering below is load-bearing.
  void deleted() override {
    // Make a copy that won't get changed even when *this is destroyed.
    ValueMapCallbackVH Copy(*this);
    typename Config::mutex_type *M = Config::getMutex(Copy.Map->Data);
    unique_lock<typename Config::mutex_type> Guard;
    if (M)
      Guard = unique_lock<typename Config::mutex_type>(*M);
    Config::onDelete(Copy.Map->Data, Copy.Unwrap());  // May destroy *this.
    Copy.Map->Map.erase(Copy);                        // Definitely destroys *this.
  }
  // Called on replaceAllUsesWith: optionally re-key the entry to the new
  // Value (when Config::FollowRAUW), after notifying the Config.
  void allUsesReplacedWith(Value *new_key) override {
    assert(isa<KeySansPointerT>(new_key) &&
           "Invalid RAUW on key of ValueMap<>");
    // Make a copy that won't get changed even when *this is destroyed.
    ValueMapCallbackVH Copy(*this);
    typename Config::mutex_type *M = Config::getMutex(Copy.Map->Data);
    unique_lock<typename Config::mutex_type> Guard;
    if (M)
      Guard = unique_lock<typename Config::mutex_type>(*M);

    KeyT typed_new_key = cast<KeySansPointerT>(new_key);
    // Can destroy *this:
    Config::onRAUW(Copy.Map->Data, Copy.Unwrap(), typed_new_key);
    if (Config::FollowRAUW) {
      typename ValueMapT::MapT::iterator I = Copy.Map->Map.find(Copy);
      // I could == Copy.Map->Map.end() if the onRAUW callback already
      // removed the old mapping.
      if (I != Copy.Map->Map.end()) {
        ValueT Target(std::move(I->second));
        Copy.Map->Map.erase(I);  // Definitely destroys *this.
        Copy.Map->insert(std::make_pair(typed_new_key, std::move(Target)));
      }
    }
  }
};

// DenseMapInfo for the wrapped key type.  Hashing and equality forward to
// the underlying KeyT so the map supports find_as() lookups with a bare KeyT.
template<typename KeyT, typename ValueT, typename Config>
struct DenseMapInfo<ValueMapCallbackVH<KeyT, ValueT, Config> > {
  typedef ValueMapCallbackVH<KeyT, ValueT, Config> VH;

  static inline VH getEmptyKey() {
    return VH(DenseMapInfo<Value *>::getEmptyKey());
  }
  static inline VH getTombstoneKey() {
    return VH(DenseMapInfo<Value *>::getTombstoneKey());
  }
  static unsigned getHashValue(const VH &Val) {
    return DenseMapInfo<KeyT>::getHashValue(Val.Unwrap());
  }
  static unsigned getHashValue(const KeyT &Val) {
    return DenseMapInfo<KeyT>::getHashValue(Val);
  }
  static bool isEqual(const VH &LHS, const VH &RHS) {
    return LHS == RHS;
  }
  static bool isEqual(const KeyT &LHS, const VH &RHS) {
    return LHS == RHS.getValPtr();
  }
};

// Forward iterator over a ValueMap.  Dereferencing yields a proxy pair so
// that the key is presented as the unwrapped KeyT rather than the CallbackVH.
template<typename DenseMapT, typename KeyT>
class ValueMapIterator {
  typedef typename DenseMapT::iterator BaseT;
  typedef typename DenseMapT::mapped_type ValueT;
  BaseT I;
public:
  using iterator_category = std::forward_iterator_tag;
  using value_type = std::pair<KeyT, typename DenseMapT::mapped_type>;
  using difference_type = std::ptrdiff_t;
  using pointer = value_type *;
  using reference = value_type &;

  ValueMapIterator() : I() {}
  ValueMapIterator(BaseT I) : I(I) {}

  BaseT base() const { return I; }

  // Proxy returned by operator*/operator->; second is a live reference into
  // the map, and the conversion operator produces a by-value pair.
  struct ValueTypeProxy {
    const KeyT first;
    ValueT& second;
    ValueTypeProxy *operator->() { return this; }
    operator std::pair<KeyT, ValueT>() const {
      return std::make_pair(first, second);
    }
  };

  ValueTypeProxy operator*() const {
    ValueTypeProxy Result = {I->first.Unwrap(), I->second};
    return Result;
  }

  ValueTypeProxy operator->() const {
    return operator*();
  }

  bool operator==(const ValueMapIterator &RHS) const {
    return I == RHS.I;
  }
  bool operator!=(const ValueMapIterator &RHS) const {
    return I != RHS.I;
  }

  inline ValueMapIterator& operator++() {  // Preincrement
    ++I;
    return *this;
  }
  ValueMapIterator operator++(int) {  // Postincrement
    ValueMapIterator tmp = *this;
    ++*this;
    return tmp;
  }
};

// Const counterpart of ValueMapIterator; also implicitly convertible from the
// mutable iterator.
template<typename DenseMapT, typename KeyT>
class ValueMapConstIterator {
  typedef typename DenseMapT::const_iterator BaseT;
  typedef typename DenseMapT::mapped_type ValueT;
  BaseT I;
public:
  using iterator_category = std::forward_iterator_tag;
  using value_type = std::pair<KeyT, typename DenseMapT::mapped_type>;
  using difference_type = std::ptrdiff_t;
  using pointer = value_type *;
  using reference = value_type &;

  ValueMapConstIterator() : I() {}
  ValueMapConstIterator(BaseT I) : I(I) {}
  ValueMapConstIterator(ValueMapIterator<DenseMapT, KeyT> Other)
      : I(Other.base()) {}

  BaseT base() const { return I; }

  // Same proxy scheme as ValueMapIterator, but with a const reference.
  struct ValueTypeProxy {
    const KeyT first;
    const ValueT& second;
    ValueTypeProxy *operator->() { return this; }
    operator std::pair<KeyT, ValueT>() const {
      return std::make_pair(first, second);
    }
  };

  ValueTypeProxy operator*() const {
    ValueTypeProxy Result = {I->first.Unwrap(), I->second};
    return Result;
  }

  ValueTypeProxy operator->() const {
    return operator*();
  }

  bool operator==(const ValueMapConstIterator &RHS) const {
    return I == RHS.I;
  }
  bool operator!=(const ValueMapConstIterator &RHS) const {
    return I != RHS.I;
  }

  inline ValueMapConstIterator& operator++() {  // Preincrement
    ++I;
    return *this;
  }
  ValueMapConstIterator operator++(int) {  // Postincrement
    ValueMapConstIterator tmp = *this;
    ++*this;
    return tmp;
  }
};

} // end namespace llvm

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/DiagnosticPrinter.h
//===- llvm/IR/DiagnosticPrinter.h - Diagnostic Printer ---------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the main interface for printer backend diagnostic.
//
// Clients of the backend diagnostics should overload this interface based
// on their needs.
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_DIAGNOSTICPRINTER_H
#define LLVM_IR_DIAGNOSTICPRINTER_H

#include <ios>    // std::ios_base, needed by the manipulator overload below
#include <string>

namespace llvm {

// Forward declarations.
class Module;
class raw_ostream;
class SMDiagnostic;
class StringRef;
class Twine;
class Value;

/// \brief Interface for custom diagnostic printing.
///
/// Pure streaming interface: every overload returns *this so calls can be
/// chained like an ostream.  Subclasses decide where the text goes.
class DiagnosticPrinter {
public:
  virtual ~DiagnosticPrinter() = default;

  // Simple types.
  virtual DiagnosticPrinter &operator<<(char C) = 0;
  virtual DiagnosticPrinter &operator<<(unsigned char C) = 0;
  virtual DiagnosticPrinter &operator<<(signed char C) = 0;
  virtual DiagnosticPrinter &operator<<(StringRef Str) = 0;
  virtual DiagnosticPrinter &operator<<(const char *Str) = 0;
  virtual DiagnosticPrinter &operator<<(const std::string &Str) = 0;
  virtual DiagnosticPrinter &operator<<(unsigned long N) = 0;
  virtual DiagnosticPrinter &operator<<(long N) = 0;
  virtual DiagnosticPrinter &operator<<(unsigned long long N) = 0;
  virtual DiagnosticPrinter &operator<<(long long N) = 0;
  virtual DiagnosticPrinter &operator<<(const void *P) = 0;
  virtual DiagnosticPrinter &operator<<(unsigned int N) = 0;
  virtual DiagnosticPrinter &operator<<(int N) = 0;
  virtual DiagnosticPrinter &operator<<(double N) = 0;
  virtual DiagnosticPrinter &operator<<(const Twine &Str) = 0;

  // IR related types.
  virtual DiagnosticPrinter &operator<<(const Value &V) = 0;
  virtual DiagnosticPrinter &operator<<(const Module &M) = 0;

  // Other types.
  virtual DiagnosticPrinter &operator<<(const SMDiagnostic &Diag) = 0;
  virtual DiagnosticPrinter &
  operator<<(std::ios_base &(*iomanip)(std::ios_base &)) = 0; // HLSL Change
};

/// \brief Basic diagnostic printer that uses an underlying raw_ostream.
///
/// Forwards every overload to the wrapped stream (definitions live in the
/// corresponding .cpp file).  The stream reference must outlive the printer.
class DiagnosticPrinterRawOStream : public DiagnosticPrinter {
protected:
  raw_ostream &Stream;

public:
  DiagnosticPrinterRawOStream(raw_ostream &Stream) : Stream(Stream) {}

  // Simple types.
  DiagnosticPrinter &operator<<(char C) override;
  DiagnosticPrinter &operator<<(unsigned char C) override;
  DiagnosticPrinter &operator<<(signed char C) override;
  DiagnosticPrinter &operator<<(StringRef Str) override;
  DiagnosticPrinter &operator<<(const char *Str) override;
  DiagnosticPrinter &operator<<(const std::string &Str) override;
  DiagnosticPrinter &operator<<(unsigned long N) override;
  DiagnosticPrinter &operator<<(long N) override;
  DiagnosticPrinter &operator<<(unsigned long long N) override;
  DiagnosticPrinter &operator<<(long long N) override;
  DiagnosticPrinter &operator<<(const void *P) override;
  DiagnosticPrinter &operator<<(unsigned int N) override;
  DiagnosticPrinter &operator<<(int N) override;
  DiagnosticPrinter &operator<<(double N) override;
  DiagnosticPrinter &operator<<(const Twine &Str) override;

  // IR related types.
  DiagnosticPrinter &operator<<(const Value &V) override;
  DiagnosticPrinter &operator<<(const Module &M) override;

  // Other types.
  DiagnosticPrinter &operator<<(const SMDiagnostic &Diag) override;
  DiagnosticPrinter &operator<<(
      std::ios_base &(*iomanip)(std::ios_base &)) override; // HLSL Change
};
} // End namespace llvm

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/SymbolTableListTraits.h
//===-- llvm/SymbolTableListTraits.h - Traits for iplist --------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a generic class that is used to implement the automatic
// symbol table manipulation that occurs when you put (for example) a named
// instruction into a basic block.
//
// The way that this is implemented is by using a special traits class with the
// intrusive list that makes up the list of instructions in a basic block.  When
// a new element is added to the list of instructions, the traits class is
// notified, allowing the symbol table to be updated.
//
// This generic class implements the traits class.  It must be generic so that
// it can work for all uses it, which include lists of instructions, basic
// blocks, arguments, functions, global variables, etc...
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_SYMBOLTABLELISTTRAITS_H
#define LLVM_IR_SYMBOLTABLELISTTRAITS_H

#include "llvm/ADT/ilist.h"

namespace llvm {
class ValueSymbolTable;

template<typename NodeTy> class ilist_iterator;
template<typename NodeTy, typename Traits> class iplist;
template<typename Ty> struct ilist_traits;

// ValueSubClass   - The type of objects that I hold, e.g. Instruction.
// ItemParentClass - The type of object that owns the list, e.g. BasicBlock.
//
template<typename ValueSubClass, typename ItemParentClass>
class SymbolTableListTraits : public ilist_default_traits<ValueSubClass> {
  typedef ilist_traits<ValueSubClass> TraitsClass;
public:
  SymbolTableListTraits() {}

  /// getListOwner - Return the object that owns this list.  If this is a list
  /// of instructions, it returns the BasicBlock that owns them.
  ItemParentClass *getListOwner() {
    // Compute the byte offset of the sublist member within ItemParentClass
    // (via the member pointer returned by getSublistAccess), then back up
    // from the list anchor (this) to the owning object.
    // NOTE(review): this is the classic offsetof-through-null trick; it
    // relies on behavior outside the strict standard but is what this
    // codebase uses throughout — do not "simplify" it.
    size_t Offset(size_t(&((ItemParentClass*)nullptr->*ItemParentClass::
                           getSublistAccess(static_cast<ValueSubClass*>(nullptr)))));
    iplist<ValueSubClass>* Anchor(static_cast<iplist<ValueSubClass>*>(this));
    return reinterpret_cast<ItemParentClass*>(reinterpret_cast<char*>(Anchor)-
                                              Offset);
  }

  // Return the intrusive sublist of Par that holds the ValueSubClass nodes.
  static iplist<ValueSubClass> &getList(ItemParentClass *Par) {
    return Par->*(Par->getSublistAccess((ValueSubClass*)nullptr));
  }

  // Return Par's symbol table, or null when Par itself is null.
  static ValueSymbolTable *getSymTab(ItemParentClass *Par) {
    return Par ? toPtr(Par->getValueSymbolTable()) : nullptr;
  }

  // ilist callbacks (defined out of line): keep the owner's symbol table in
  // sync as nodes are inserted, removed, or spliced between lists.
  void addNodeToList(ValueSubClass *V);
  void removeNodeFromList(ValueSubClass *V);
  void transferNodesFromList(ilist_traits<ValueSubClass> &L2,
                             ilist_iterator<ValueSubClass> first,
                             ilist_iterator<ValueSubClass> last);
//private:
  template<typename TPtr>
  void setSymTabObject(TPtr *, TPtr);
  // Normalize getValueSymbolTable()'s return (pointer or reference) to a
  // pointer for getSymTab above.
  static ValueSymbolTable *toPtr(ValueSymbolTable *P) { return P; }
  static ValueSymbolTable *toPtr(ValueSymbolTable &R) { return &R; }
};

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/DerivedTypes.h
//===-- llvm/DerivedTypes.h - Classes for handling data types ---*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the declarations of classes that represent "derived
// types".  These are things like "arrays of x" or "structure of x, y, z" or
// "function returning x taking (y,z) as parameters", etc...
//
// The implementations of these classes live in the Type.cpp file.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_DERIVEDTYPES_H
#define LLVM_IR_DERIVEDTYPES_H

#include "llvm/IR/Type.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"

namespace llvm {

class Value;
class APInt;
class LLVMContext;
template<typename T> class ArrayRef;
class StringRef;

/// Class to represent integer types. Note that this class is also used to
/// represent the built-in integer types: Int1Ty, Int8Ty, Int16Ty, Int32Ty and
/// Int64Ty.
/// @brief Integer representation type
class IntegerType : public Type {
  friend class LLVMContextImpl;
protected:
  // Bit width lives in the Type subclass-data field; instances are created
  // only through get() below.
  explicit IntegerType(LLVMContext &C, unsigned NumBits) : Type(C, IntegerTyID){
    setSubclassData(NumBits);
  }
public:
  /// This enum is just used to hold constants we need for IntegerType.
  enum {
    MIN_INT_BITS = 1,        ///< Minimum number of bits that can be specified
    MAX_INT_BITS = (1<<23)-1 ///< Maximum number of bits that can be specified
      ///< Note that bit width is stored in the Type classes SubclassData field
      ///< which has 23 bits. This yields a maximum bit width of 8,388,607 bits.
  };

  /// This static method is the primary way of constructing an IntegerType.
  /// If an IntegerType with the same NumBits value was previously instantiated,
  /// that instance will be returned. Otherwise a new one will be created. Only
  /// one instance with a given NumBits value is ever created.
  /// @brief Get or create an IntegerType instance.
  static IntegerType *get(LLVMContext &C, unsigned NumBits);

  /// @brief Get the number of bits in this IntegerType
  unsigned getBitWidth() const { return getSubclassData(); }

  /// getBitMask - Return a bitmask with ones set for all of the bits
  /// that can be set by an unsigned version of this type.  This is 0xFF for
  /// i8, 0xFFFF for i16, etc.
  /// NOTE(review): only meaningful for widths <= 64 (the shift count would
  /// otherwise be out of range).
  uint64_t getBitMask() const {
    return ~uint64_t(0UL) >> (64-getBitWidth());
  }

  /// getSignBit - Return a uint64_t with just the most significant bit set (the
  /// sign bit, if the value is treated as a signed number).
  uint64_t getSignBit() const {
    return 1ULL << (getBitWidth()-1);
  }

  /// For example, this is 0xFF for an 8 bit integer, 0xFFFF for i16, etc.
  /// @returns a bit mask with ones set for all the bits of this type.
  /// @brief Get a bit mask for this type.
  APInt getMask() const;

  /// This method determines if the width of this IntegerType is a power-of-2
  /// in terms of 8 bit bytes.
  /// @returns true if this is a power-of-2 byte width.
  /// @brief Is this a power-of-2 byte-width IntegerType ?
  bool isPowerOf2ByteWidth() const;

  /// Methods for support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const Type *T) {
    return T->getTypeID() == IntegerTyID;
  }
};

/// FunctionType - Class to represent function types
///
class FunctionType : public Type {
  FunctionType(const FunctionType &) = delete;
  const FunctionType &operator=(const FunctionType &) = delete;
  FunctionType(Type *Result, ArrayRef<Type*> Params, bool IsVarArgs);

public:
  /// FunctionType::get - This static method is the primary way of constructing
  /// a FunctionType.
  ///
  static FunctionType *get(Type *Result,
                           ArrayRef<Type*> Params, bool isVarArg);

  /// FunctionType::get - Create a FunctionType taking no parameters.
  ///
  static FunctionType *get(Type *Result, bool isVarArg);

  /// isValidReturnType - Return true if the specified type is valid as a return
  /// type.
  static bool isValidReturnType(Type *RetTy);

  /// isValidArgumentType - Return true if the specified type is valid as an
  /// argument type.
  static bool isValidArgumentType(Type *ArgTy);

  bool isVarArg() const { return getSubclassData()!=0; }
  // ContainedTys[0] is the return type; parameters follow at [1..].
  Type *getReturnType() const { return ContainedTys[0]; }

  typedef Type::subtype_iterator param_iterator;
  param_iterator param_begin() const { return ContainedTys + 1; }
  param_iterator param_end() const { return &ContainedTys[NumContainedTys]; }
  ArrayRef<Type *> params() const {
    return makeArrayRef(param_begin(), param_end());
  }

  /// Parameter type accessors.
  Type *getParamType(unsigned i) const { return ContainedTys[i+1]; }

  /// getNumParams - Return the number of fixed parameters this function type
  /// requires.  This does not consider varargs.
  ///
  unsigned getNumParams() const { return NumContainedTys - 1; }

  /// Methods for support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const Type *T) {
    return T->getTypeID() == FunctionTyID;
  }
};
// Parameter types are allocated immediately after the FunctionType object.
static_assert(AlignOf<FunctionType>::Alignment >= AlignOf<Type *>::Alignment,
              "Alignment sufficient for objects appended to FunctionType");

/// CompositeType - Common super class of ArrayType, StructType, PointerType
/// and VectorType.
class CompositeType : public Type {
protected:
  explicit CompositeType(LLVMContext &C, TypeID tid) : Type(C, tid) { }
public:
  /// getTypeAtIndex - Given an index value into the type, return the type of
  /// the element.
  ///
  Type *getTypeAtIndex(const Value *V);
  Type *getTypeAtIndex(unsigned Idx);
  bool indexValid(const Value *V) const;
  bool indexValid(unsigned Idx) const;

  /// Methods for support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const Type *T) {
    return T->getTypeID() == ArrayTyID ||
           T->getTypeID() == StructTyID ||
           T->getTypeID() == PointerTyID ||
           T->getTypeID() == VectorTyID;
  }
};

/// StructType - Class to represent struct types. There are two different kinds
/// of struct types: Literal structs and Identified structs.
///
/// Literal struct types (e.g. { i32, i32 }) are uniqued structurally, and must
/// always have a body when created.  You can get one of these by using one of
/// the StructType::get() forms.
///
/// Identified structs (e.g. %foo or %42) may optionally have a name and are not
/// uniqued.  The names for identified structs are managed at the LLVMContext
/// level, so there can only be a single identified struct with a given name in
/// a particular LLVMContext.  Identified structs may also optionally be opaque
/// (have no body specified).  You get one of these by using one of the
/// StructType::create() forms.
///
/// Independent of what kind of struct you have, the body of a struct type are
/// laid out in memory consecutively with the elements directly one after the
/// other (if the struct is packed) or (if not packed) with padding between the
/// elements as defined by DataLayout (which is required to match what the code
/// generator for a target expects).
///
class StructType : public CompositeType {
  StructType(const StructType &) = delete;
  const StructType &operator=(const StructType &) = delete;
  StructType(LLVMContext &C)
    : CompositeType(C, StructTyID), SymbolTableEntry(nullptr) {}
  enum {
    /// This is the contents of the SubClassData field.
    SCDB_HasBody = 1,
    SCDB_Packed = 2,
    SCDB_IsLiteral = 4,
    SCDB_IsSized = 8
  };

  /// SymbolTableEntry - For a named struct that actually has a name, this is a
  /// pointer to the symbol table entry (maintained by LLVMContext) for the
  /// struct.  This is null if the type is a literal struct or if it is
  /// an identified type that has an empty name.
  ///
  void *SymbolTableEntry;
public:

  /// StructType::create - This creates an identified struct.
  static StructType *create(LLVMContext &Context, StringRef Name);
  static StructType *create(LLVMContext &Context);
  static StructType *create(ArrayRef<Type*> Elements,
                            StringRef Name,
                            bool isPacked = false);
  static StructType *create(ArrayRef<Type*> Elements);
  static StructType *create(LLVMContext &Context,
                            ArrayRef<Type*> Elements,
                            StringRef Name,
                            bool isPacked = false);
  static StructType *create(LLVMContext &Context, ArrayRef<Type*> Elements);
  static StructType *create(StringRef Name, Type *elt1, ...) LLVM_END_WITH_NULL;

  /// StructType::get - This static method is the primary way to create a
  /// literal StructType.
  static StructType *get(LLVMContext &Context, ArrayRef<Type*> Elements,
                         bool isPacked = false);

  /// StructType::get - Create an empty structure type.
  ///
  static StructType *get(LLVMContext &Context, bool isPacked = false);

  /// StructType::get - This static method is a convenience method for creating
  /// structure types by specifying the elements as arguments.  Note that this
  /// method always returns a non-packed struct, and requires at least one
  /// element type.
  static StructType *get(Type *elt1, ...) LLVM_END_WITH_NULL;

  bool isPacked() const { return (getSubclassData() & SCDB_Packed) != 0; }

  /// isLiteral - Return true if this type is uniqued by structural
  /// equivalence, false if it is a struct definition.
  bool isLiteral() const { return (getSubclassData() & SCDB_IsLiteral) != 0; }

  /// isOpaque - Return true if this is a type with an identity that has no body
  /// specified yet.  These print as 'opaque' in .ll files.
  bool isOpaque() const { return (getSubclassData() & SCDB_HasBody) == 0; }

  /// isSized - Return true if this is a sized type.
  bool isSized(SmallPtrSetImpl<const Type*> *Visited = nullptr) const;

  /// hasName - Return true if this is a named struct that has a non-empty name.
  bool hasName() const { return SymbolTableEntry != nullptr; }

  /// getName - Return the name for this struct type if it has an identity.
  /// This may return an empty string for an unnamed struct type.  Do not call
  /// this on a literal type.
  StringRef getName() const;

  /// setName - Change the name of this type to the specified name, or to a name
  /// with a suffix if there is a collision.  Do not call this on a literal
  /// type.
  void setName(StringRef Name);

  /// setBody - Specify a body for an opaque identified type.
  void setBody(ArrayRef<Type*> Elements, bool isPacked = false);
  void setBody(Type *elt1, ...) LLVM_END_WITH_NULL;

  /// isValidElementType - Return true if the specified type is valid as a
  /// element type.
  static bool isValidElementType(Type *ElemTy);

  // Iterator access to the elements.
  typedef Type::subtype_iterator element_iterator;
  element_iterator element_begin() const { return ContainedTys; }
  element_iterator element_end() const { return &ContainedTys[NumContainedTys];}
  ArrayRef<Type *> const elements() const {
    return makeArrayRef(element_begin(), element_end());
  }

  /// isLayoutIdentical - Return true if this is layout identical to the
  /// specified struct.
  bool isLayoutIdentical(StructType *Other) const;

  /// Random access to the elements
  unsigned getNumElements() const { return NumContainedTys; }
  Type *getElementType(unsigned N) const {
    assert(N < NumContainedTys && "Element number out of range!");
    return ContainedTys[N];
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const Type *T) {
    return T->getTypeID() == StructTyID;
  }
};

/// SequentialType - This is the superclass of the array, pointer and vector
/// type classes.  All of these represent "arrays" in memory.  The array type
/// represents a specifically sized array, pointer types are unsized/unknown
/// size arrays, vector types represent specifically sized arrays that
/// allow for use of SIMD instructions.  SequentialType holds the common
/// features of all, which stem from the fact that all three lay their
/// components out in memory identically.
///
class SequentialType : public CompositeType {
  Type *ContainedType;               ///< Storage for the single contained type.
  SequentialType(const SequentialType &) = delete;
  const SequentialType &operator=(const SequentialType &) = delete;

protected:
  SequentialType(TypeID TID, Type *ElType)
    : CompositeType(ElType->getContext(), TID), ContainedType(ElType) {
    // Point the base-class ContainedTys at our single in-object slot.
    ContainedTys = &ContainedType;
    NumContainedTys = 1;
  }

public:
  Type *getElementType() const { return ContainedTys[0]; }

  /// Methods for support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const Type *T) {
    return T->getTypeID() == ArrayTyID ||
           T->getTypeID() == PointerTyID ||
           T->getTypeID() == VectorTyID;
  }
};

/// ArrayType - Class to represent array types.
///
class ArrayType : public SequentialType {
  uint64_t NumElements;

  ArrayType(const ArrayType &) = delete;
  const ArrayType &operator=(const ArrayType &) = delete;
  ArrayType(Type *ElType, uint64_t NumEl);
public:
  /// ArrayType::get - This static method is the primary way to construct an
  /// ArrayType
  ///
  static ArrayType *get(Type *ElementType, uint64_t NumElements);

  /// isValidElementType - Return true if the specified type is valid as a
  /// element type.
  static bool isValidElementType(Type *ElemTy);

  uint64_t getNumElements() const { return NumElements; }

  /// Methods for support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const Type *T) {
    return T->getTypeID() == ArrayTyID;
  }
};

/// VectorType - Class to represent vector types.
///
class VectorType : public SequentialType {
  unsigned NumElements;

  VectorType(const VectorType &) = delete;
  const VectorType &operator=(const VectorType &) = delete;
  VectorType(Type *ElType, unsigned NumEl);
public:
  /// VectorType::get - This static method is the primary way to construct an
  /// VectorType.
  ///
  static VectorType *get(Type *ElementType, unsigned NumElements);

  /// VectorType::getInteger - This static method gets a VectorType with the
  /// same number of elements as the input type, and the element type is an
  /// integer type of the same width as the input element type.
  ///
  static VectorType *getInteger(VectorType *VTy) {
    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
    assert(EltBits && "Element size must be of a non-zero size");
    Type *EltTy = IntegerType::get(VTy->getContext(), EltBits);
    return VectorType::get(EltTy, VTy->getNumElements());
  }

  /// VectorType::getExtendedElementVectorType - This static method is like
  /// getInteger except that the element types are twice as wide as the
  /// elements in the input type.
  ///
  static VectorType *getExtendedElementVectorType(VectorType *VTy) {
    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
    Type *EltTy = IntegerType::get(VTy->getContext(), EltBits * 2);
    return VectorType::get(EltTy, VTy->getNumElements());
  }

  /// VectorType::getTruncatedElementVectorType - This static method is like
  /// getInteger except that the element types are half as wide as the
  /// elements in the input type.
  ///
  static VectorType *getTruncatedElementVectorType(VectorType *VTy) {
    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
    assert((EltBits & 1) == 0 &&
           "Cannot truncate vector element with odd bit-width");
    Type *EltTy = IntegerType::get(VTy->getContext(), EltBits / 2);
    return VectorType::get(EltTy, VTy->getNumElements());
  }

  /// VectorType::getHalfElementsVectorType - This static method returns
  /// a VectorType with half as many elements as the input type and the
  /// same element type.
  ///
  static VectorType *getHalfElementsVectorType(VectorType *VTy) {
    unsigned NumElts = VTy->getNumElements();
    assert ((NumElts & 1) == 0 &&
            "Cannot halve vector with odd number of elements.");
    return VectorType::get(VTy->getElementType(), NumElts/2);
  }

  /// VectorType::getDoubleElementsVectorType - This static method returns
  /// a VectorType with twice as many elements as the input type and the
  /// same element type.
  ///
  static VectorType *getDoubleElementsVectorType(VectorType *VTy) {
    unsigned NumElts = VTy->getNumElements();
    return VectorType::get(VTy->getElementType(), NumElts*2);
  }

  /// isValidElementType - Return true if the specified type is valid as a
  /// element type.
  static bool isValidElementType(Type *ElemTy);

  /// @brief Return the number of elements in the Vector type.
  unsigned getNumElements() const { return NumElements; }

  /// @brief Return the number of bits in the Vector type.
  /// Returns zero when the vector is a vector of pointers.
  unsigned getBitWidth() const {
    return NumElements * getElementType()->getPrimitiveSizeInBits();
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const Type *T) {
    return T->getTypeID() == VectorTyID;
  }
};

// HLSL change - add FixedVectorType to help move to newer llvm.
using FixedVectorType = VectorType;

/// PointerType - Class to represent pointers.
///
class PointerType : public SequentialType {
  PointerType(const PointerType &) = delete;
  const PointerType &operator=(const PointerType &) = delete;
  explicit PointerType(Type *ElType, unsigned AddrSpace);
public:
  /// PointerType::get - This constructs a pointer to an object of the specified
  /// type in a numbered address space.
  static PointerType *get(Type *ElementType, unsigned AddressSpace);

  /// PointerType::getUnqual - This constructs a pointer to an object of the
  /// specified type in the generic address space (address space zero).
  static PointerType *getUnqual(Type *ElementType) {
    return PointerType::get(ElementType, 0);
  }

  /// isValidElementType - Return true if the specified type is valid as a
  /// element type.
  static bool isValidElementType(Type *ElemTy);

  /// Return true if we can load or store from a pointer to this type.
  static bool isLoadableOrStorableType(Type *ElemTy);

  /// @brief Return the address space of the Pointer type.
  inline unsigned getAddressSpace() const { return getSubclassData(); }

  /// Implement support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const Type *T) {
    return T->getTypeID() == PointerTyID;
  }
};

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/Attributes.h
//===-- llvm/Attributes.h - Container for Attributes ------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// /// \file /// \brief This file contains the simple types necessary to represent the /// attributes associated with functions and their calls. /// //===----------------------------------------------------------------------===// #ifndef LLVM_IR_ATTRIBUTES_H #define LLVM_IR_ATTRIBUTES_H #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/FoldingSet.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PointerLikeTypeTraits.h" #include <bitset> #include <cassert> #include <map> #include <string> namespace llvm { class AttrBuilder; class AttributeImpl; class AttributeSetImpl; class AttributeSetNode; class Constant; template<typename T> struct DenseMapInfo; class LLVMContext; class Type; //===----------------------------------------------------------------------===// /// \class /// \brief Functions, function parameters, and return types can have attributes /// to indicate how they should be treated by optimizations and code /// generation. This class represents one of those attributes. It's light-weight /// and should be passed around by-value. class Attribute { public: /// This enumeration lists the attributes that can be associated with /// parameters, function results, or the function itself. /// /// Note: The `uwtable' attribute is about the ABI or the user mandating an /// entry in the unwind table. The `nounwind' attribute is about an exception /// passing by the function. /// /// In a theoretical system that uses tables for profiling and SjLj for /// exceptions, they would be fully independent. In a normal system that uses /// tables for both, the semantics are: /// /// nil = Needs an entry because an exception might pass by. 
/// nounwind = No need for an entry /// uwtable = Needs an entry because the ABI says so and because /// an exception might pass by. /// uwtable + nounwind = Needs an entry because the ABI says so. enum AttrKind { // IR-Level Attributes None, ///< No attributes have been set Alignment, ///< Alignment of parameter (5 bits) ///< stored as log2 of alignment with +1 bias ///< 0 means unaligned (different from align(1)) AlwaysInline, ///< inline=always Builtin, ///< Callee is recognized as a builtin, despite ///< nobuiltin attribute on its declaration. ByVal, ///< Pass structure by value InAlloca, ///< Pass structure in an alloca Cold, ///< Marks function as being in a cold path. Convergent, ///< Can only be moved to control-equivalent blocks InlineHint, ///< Source said inlining was desirable InReg, ///< Force argument to be passed in register JumpTable, ///< Build jump-instruction tables and replace refs. MinSize, ///< Function must be optimized for size first Naked, ///< Naked function Nest, ///< Nested function static chain NoAlias, ///< Considered to not alias after call NoBuiltin, ///< Callee isn't recognized as a builtin NoCapture, ///< Function creates no aliases of pointer NoDuplicate, ///< Call cannot be duplicated NoImplicitFloat, ///< Disable implicit floating point insts NoInline, ///< inline=never NonLazyBind, ///< Function is called early and/or ///< often, so lazy binding isn't worthwhile NonNull, ///< Pointer is known to be not null Dereferenceable, ///< Pointer is known to be dereferenceable DereferenceableOrNull, ///< Pointer is either null or dereferenceable NoRedZone, ///< Disable redzone NoReturn, ///< Mark the function as not returning NoUnwind, ///< Function doesn't unwind stack OptimizeForSize, ///< opt_size OptimizeNone, ///< Function must not be optimized. ReadNone, ///< Function does not access memory ReadOnly, ///< Function only reads from memory ArgMemOnly, ///< Function can access memory only using pointers ///< based on its arguments. 
Returned, ///< Return value is always equal to this argument ReturnsTwice, ///< Function can return twice SExt, ///< Sign extended before/after call StackAlignment, ///< Alignment of stack for function (3 bits) ///< stored as log2 of alignment with +1 bias 0 ///< means unaligned (different from ///< alignstack=(1)) StackProtect, ///< Stack protection. StackProtectReq, ///< Stack protection required. StackProtectStrong, ///< Strong Stack protection. SafeStack, ///< Safe Stack protection. StructRet, ///< Hidden pointer to structure to return SanitizeAddress, ///< AddressSanitizer is on. SanitizeThread, ///< ThreadSanitizer is on. SanitizeMemory, ///< MemorySanitizer is on. UWTable, ///< Function must be in an unwind table ZExt, ///< Zero extended before/after call EndAttrKinds ///< Sentinel value useful for loops }; private: AttributeImpl *pImpl; Attribute(AttributeImpl *A) : pImpl(A) {} public: Attribute() : pImpl(nullptr) {} //===--------------------------------------------------------------------===// // Attribute Construction //===--------------------------------------------------------------------===// /// \brief Return a uniquified Attribute object. static Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val = 0); static Attribute get(LLVMContext &Context, StringRef Kind, StringRef Val = StringRef()); /// \brief Return a uniquified Attribute object that has the specific /// alignment set. 
static Attribute getWithAlignment(LLVMContext &Context, uint64_t Align); static Attribute getWithStackAlignment(LLVMContext &Context, uint64_t Align); static Attribute getWithDereferenceableBytes(LLVMContext &Context, uint64_t Bytes); static Attribute getWithDereferenceableOrNullBytes(LLVMContext &Context, uint64_t Bytes); //===--------------------------------------------------------------------===// // Attribute Accessors //===--------------------------------------------------------------------===// /// \brief Return true if the attribute is an Attribute::AttrKind type. bool isEnumAttribute() const; /// \brief Return true if the attribute is an integer attribute. bool isIntAttribute() const; /// \brief Return true if the attribute is a string (target-dependent) /// attribute. bool isStringAttribute() const; /// \brief Return true if the attribute is present. bool hasAttribute(AttrKind Val) const; /// \brief Return true if the target-dependent attribute is present. bool hasAttribute(StringRef Val) const; /// \brief Return the attribute's kind as an enum (Attribute::AttrKind). This /// requires the attribute to be an enum or alignment attribute. Attribute::AttrKind getKindAsEnum() const; /// \brief Return the attribute's value as an integer. This requires that the /// attribute be an alignment attribute. uint64_t getValueAsInt() const; /// \brief Return the attribute's kind as a string. This requires the /// attribute to be a string attribute. StringRef getKindAsString() const; /// \brief Return the attribute's value as a string. This requires the /// attribute to be a string attribute. StringRef getValueAsString() const; /// \brief Returns the alignment field of an attribute as a byte alignment /// value. unsigned getAlignment() const; /// \brief Returns the stack alignment field of an attribute as a byte /// alignment value. 
unsigned getStackAlignment() const; /// \brief Returns the number of dereferenceable bytes from the /// dereferenceable attribute (or zero if unknown). uint64_t getDereferenceableBytes() const; /// \brief Returns the number of dereferenceable_or_null bytes from the /// dereferenceable_or_null attribute (or zero if unknown). uint64_t getDereferenceableOrNullBytes() const; /// \brief The Attribute is converted to a string of equivalent mnemonic. This /// is, presumably, for writing out the mnemonics for the assembly writer. std::string getAsString(bool InAttrGrp = false) const; /// \brief Equality and non-equality operators. bool operator==(Attribute A) const { return pImpl == A.pImpl; } bool operator!=(Attribute A) const { return pImpl != A.pImpl; } /// \brief Less-than operator. Useful for sorting the attributes list. bool operator<(Attribute A) const; void Profile(FoldingSetNodeID &ID) const { ID.AddPointer(pImpl); } }; //===----------------------------------------------------------------------===// /// \class /// \brief This class holds the attributes for a function, its return value, and /// its parameters. You access the attributes for each of them via an index into /// the AttributeSet object. The function attributes are at index /// `AttributeSet::FunctionIndex', the return value is at index /// `AttributeSet::ReturnIndex', and the attributes for the parameters start at /// index `1'. class AttributeSet { public: enum AttrIndex : unsigned { ReturnIndex = 0U, FunctionIndex = ~0U }; private: friend class AttrBuilder; friend class AttributeSetImpl; template <typename Ty> friend struct DenseMapInfo; /// \brief The attributes that we are managing. This can be null to represent /// the empty attributes list. AttributeSetImpl *pImpl; /// \brief The attributes for the specified index are returned. AttributeSetNode *getAttributes(unsigned Index) const; /// \brief Create an AttributeSet with the specified parameters in it. 
static AttributeSet get(LLVMContext &C, ArrayRef<std::pair<unsigned, Attribute> > Attrs); static AttributeSet get(LLVMContext &C, ArrayRef<std::pair<unsigned, AttributeSetNode*> > Attrs); static AttributeSet getImpl(LLVMContext &C, ArrayRef<std::pair<unsigned, AttributeSetNode*> > Attrs); explicit AttributeSet(AttributeSetImpl *LI) : pImpl(LI) {} public: AttributeSet() : pImpl(nullptr) {} //===--------------------------------------------------------------------===// // AttributeSet Construction and Mutation //===--------------------------------------------------------------------===// /// \brief Return an AttributeSet with the specified parameters in it. static AttributeSet get(LLVMContext &C, ArrayRef<AttributeSet> Attrs); static AttributeSet get(LLVMContext &C, unsigned Index, ArrayRef<Attribute::AttrKind> Kind); static AttributeSet get(LLVMContext &C, unsigned Index, const AttrBuilder &B); /// \brief Add an attribute to the attribute set at the given index. Because /// attribute sets are immutable, this returns a new set. AttributeSet addAttribute(LLVMContext &C, unsigned Index, Attribute::AttrKind Attr) const; /// \brief Add an attribute to the attribute set at the given index. Because /// attribute sets are immutable, this returns a new set. AttributeSet addAttribute(LLVMContext &C, unsigned Index, StringRef Kind) const; AttributeSet addAttribute(LLVMContext &C, unsigned Index, StringRef Kind, StringRef Value) const; /// \brief Add attributes to the attribute set at the given index. Because /// attribute sets are immutable, this returns a new set. AttributeSet addAttributes(LLVMContext &C, unsigned Index, AttributeSet Attrs) const; /// \brief Remove the specified attribute at the specified index from this /// attribute list. Because attribute lists are immutable, this returns the /// new list. 
AttributeSet removeAttribute(LLVMContext &C, unsigned Index, Attribute::AttrKind Attr) const; /// \brief Remove the specified attributes at the specified index from this /// attribute list. Because attribute lists are immutable, this returns the /// new list. AttributeSet removeAttributes(LLVMContext &C, unsigned Index, AttributeSet Attrs) const; /// \brief Remove the specified attributes at the specified index from this /// attribute list. Because attribute lists are immutable, this returns the /// new list. AttributeSet removeAttributes(LLVMContext &C, unsigned Index, const AttrBuilder &Attrs) const; /// \brief Add the dereferenceable attribute to the attribute set at the given /// index. Because attribute sets are immutable, this returns a new set. AttributeSet addDereferenceableAttr(LLVMContext &C, unsigned Index, uint64_t Bytes) const; /// \brief Add the dereferenceable_or_null attribute to the attribute set at /// the given index. Because attribute sets are immutable, this returns a new /// set. AttributeSet addDereferenceableOrNullAttr(LLVMContext &C, unsigned Index, uint64_t Bytes) const; //===--------------------------------------------------------------------===// // AttributeSet Accessors //===--------------------------------------------------------------------===// /// \brief Retrieve the LLVM context. LLVMContext &getContext() const; /// \brief The attributes for the specified index are returned. AttributeSet getParamAttributes(unsigned Index) const; /// \brief The attributes for the ret value are returned. AttributeSet getRetAttributes() const; /// \brief The function attributes are returned. AttributeSet getFnAttributes() const; /// \brief Return true if the attribute exists at the given index. bool hasAttribute(unsigned Index, Attribute::AttrKind Kind) const; /// \brief Return true if the attribute exists at the given index. bool hasAttribute(unsigned Index, StringRef Kind) const; /// \brief Return true if attribute exists at the given index. 
bool hasAttributes(unsigned Index) const; /// \brief Return true if the specified attribute is set for at least one /// parameter or for the return value. bool hasAttrSomewhere(Attribute::AttrKind Attr) const; /// \brief Return the attribute object that exists at the given index. Attribute getAttribute(unsigned Index, Attribute::AttrKind Kind) const; /// \brief Return the attribute object that exists at the given index. Attribute getAttribute(unsigned Index, StringRef Kind) const; /// \brief Return the alignment for the specified function parameter. unsigned getParamAlignment(unsigned Index) const; /// \brief Get the stack alignment. unsigned getStackAlignment(unsigned Index) const; /// \brief Get the number of dereferenceable bytes (or zero if unknown). uint64_t getDereferenceableBytes(unsigned Index) const; /// \brief Get the number of dereferenceable_or_null bytes (or zero if /// unknown). uint64_t getDereferenceableOrNullBytes(unsigned Index) const; /// \brief Return the attributes at the index as a string. std::string getAsString(unsigned Index, bool InAttrGrp = false) const; typedef ArrayRef<Attribute>::iterator iterator; iterator begin(unsigned Slot) const; iterator end(unsigned Slot) const; /// operator==/!= - Provide equality predicates. bool operator==(const AttributeSet &RHS) const { return pImpl == RHS.pImpl; } bool operator!=(const AttributeSet &RHS) const { return pImpl != RHS.pImpl; } //===--------------------------------------------------------------------===// // AttributeSet Introspection //===--------------------------------------------------------------------===// // FIXME: Remove this. uint64_t Raw(unsigned Index) const; /// \brief Return a raw pointer that uniquely identifies this attribute list. void *getRawPointer() const { return pImpl; } /// \brief Return true if there are no attributes. bool isEmpty() const { return getNumSlots() == 0; } /// \brief Return the number of slots used in this attribute list. 
This is /// the number of arguments that have an attribute set on them (including the /// function itself). unsigned getNumSlots() const; /// \brief Return the index for the given slot. unsigned getSlotIndex(unsigned Slot) const; /// \brief Return the attributes at the given slot. AttributeSet getSlotAttributes(unsigned Slot) const; void dump() const; }; //===----------------------------------------------------------------------===// /// \class /// \brief Provide DenseMapInfo for AttributeSet. template<> struct DenseMapInfo<AttributeSet> { static inline AttributeSet getEmptyKey() { uintptr_t Val = static_cast<uintptr_t>(-1); Val <<= PointerLikeTypeTraits<void*>::NumLowBitsAvailable; return AttributeSet(reinterpret_cast<AttributeSetImpl*>(Val)); } static inline AttributeSet getTombstoneKey() { uintptr_t Val = static_cast<uintptr_t>(-2); Val <<= PointerLikeTypeTraits<void*>::NumLowBitsAvailable; return AttributeSet(reinterpret_cast<AttributeSetImpl*>(Val)); } static unsigned getHashValue(AttributeSet AS) { return (unsigned((uintptr_t)AS.pImpl) >> 4) ^ (unsigned((uintptr_t)AS.pImpl) >> 9); } static bool isEqual(AttributeSet LHS, AttributeSet RHS) { return LHS == RHS; } }; // // /////////////////////////////////////////////////////////////////////////////// /// \class /// \brief This class is used in conjunction with the Attribute::get method to /// create an Attribute object. The object itself is uniquified. The Builder's /// value, however, is not. So this can be used as a quick way to test for /// equality, presence of attributes, etc. 
class AttrBuilder { std::bitset<Attribute::EndAttrKinds> Attrs; std::map<std::string, std::string> TargetDepAttrs; uint64_t Alignment; uint64_t StackAlignment; uint64_t DerefBytes; uint64_t DerefOrNullBytes; public: AttrBuilder() : Attrs(0), Alignment(0), StackAlignment(0), DerefBytes(0), DerefOrNullBytes(0) {} explicit AttrBuilder(uint64_t Val) : Attrs(0), Alignment(0), StackAlignment(0), DerefBytes(0), DerefOrNullBytes(0) { addRawValue(Val); } AttrBuilder(const Attribute &A) : Attrs(0), Alignment(0), StackAlignment(0), DerefBytes(0), DerefOrNullBytes(0) { addAttribute(A); } AttrBuilder(AttributeSet AS, unsigned Idx); void clear(); /// \brief Add an attribute to the builder. AttrBuilder &addAttribute(Attribute::AttrKind Val); /// \brief Add the Attribute object to the builder. AttrBuilder &addAttribute(Attribute A); /// \brief Add the target-dependent attribute to the builder. AttrBuilder &addAttribute(StringRef A, StringRef V = StringRef()); /// \brief Remove an attribute from the builder. AttrBuilder &removeAttribute(Attribute::AttrKind Val); /// \brief Remove the attributes from the builder. AttrBuilder &removeAttributes(AttributeSet A, uint64_t Index); /// \brief Remove the target-dependent attribute to the builder. AttrBuilder &removeAttribute(StringRef A); /// \brief Add the attributes from the builder. AttrBuilder &merge(const AttrBuilder &B); /// \brief Remove the attributes from the builder. AttrBuilder &remove(const AttrBuilder &B); /// \brief Return true if the builder has any attribute that's in the /// specified builder. bool overlaps(const AttrBuilder &B) const; /// \brief Return true if the builder has the specified attribute. bool contains(Attribute::AttrKind A) const { assert((unsigned)A < Attribute::EndAttrKinds && "Attribute out of range!"); return Attrs[A]; } /// \brief Return true if the builder has the specified target-dependent /// attribute. bool contains(StringRef A) const; /// \brief Return true if the builder has IR-level attributes. 
bool hasAttributes() const; /// \brief Return true if the builder has any attribute that's in the /// specified attribute. bool hasAttributes(AttributeSet A, uint64_t Index) const; /// \brief Return true if the builder has an alignment attribute. bool hasAlignmentAttr() const; /// \brief Retrieve the alignment attribute, if it exists. uint64_t getAlignment() const { return Alignment; } /// \brief Retrieve the stack alignment attribute, if it exists. uint64_t getStackAlignment() const { return StackAlignment; } /// \brief Retrieve the number of dereferenceable bytes, if the dereferenceable /// attribute exists (zero is returned otherwise). uint64_t getDereferenceableBytes() const { return DerefBytes; } /// \brief Retrieve the number of dereferenceable_or_null bytes, if the /// dereferenceable_or_null attribute exists (zero is returned otherwise). uint64_t getDereferenceableOrNullBytes() const { return DerefOrNullBytes; } /// \brief This turns an int alignment (which must be a power of 2) into the /// form used internally in Attribute. AttrBuilder &addAlignmentAttr(unsigned Align); /// \brief This turns an int stack alignment (which must be a power of 2) into /// the form used internally in Attribute. AttrBuilder &addStackAlignmentAttr(unsigned Align); /// \brief This turns the number of dereferenceable bytes into the form used /// internally in Attribute. AttrBuilder &addDereferenceableAttr(uint64_t Bytes); /// \brief This turns the number of dereferenceable_or_null bytes into the /// form used internally in Attribute. AttrBuilder &addDereferenceableOrNullAttr(uint64_t Bytes); /// \brief Return true if the builder contains no target-independent /// attributes. bool empty() const { return Attrs.none(); } // Iterators for target-dependent attributes. 
typedef std::pair<std::string, std::string> td_type; typedef std::map<std::string, std::string>::iterator td_iterator; typedef std::map<std::string, std::string>::const_iterator td_const_iterator; typedef llvm::iterator_range<td_iterator> td_range; typedef llvm::iterator_range<td_const_iterator> td_const_range; td_iterator td_begin() { return TargetDepAttrs.begin(); } td_iterator td_end() { return TargetDepAttrs.end(); } td_const_iterator td_begin() const { return TargetDepAttrs.begin(); } td_const_iterator td_end() const { return TargetDepAttrs.end(); } td_range td_attrs() { return td_range(td_begin(), td_end()); } td_const_range td_attrs() const { return td_const_range(td_begin(), td_end()); } bool td_empty() const { return TargetDepAttrs.empty(); } bool operator==(const AttrBuilder &B); bool operator!=(const AttrBuilder &B) { return !(*this == B); } // FIXME: Remove this in 4.0. /// \brief Add the raw value to the internal representation. AttrBuilder &addRawValue(uint64_t Val); }; namespace AttributeFuncs { /// \brief Which attributes cannot be applied to a type. AttrBuilder typeIncompatible(const Type *Ty); } // end AttributeFuncs namespace } // end llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/CMakeLists.txt
set(LLVM_TARGET_DEFINITIONS Intrinsics.td) tablegen(LLVM Intrinsics.gen -gen-intrinsic) add_public_tablegen_target(intrinsics_gen)
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/Value.def
//===-------- llvm/IR/Value.def - File that describes Values ---v-*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains descriptions of the various LLVM values. This is // used as a central place for enumerating the different values. // //===----------------------------------------------------------------------===// // Provide definitions of macros so that users of this file do not have to // define everything to use it... // #if !(defined HANDLE_GLOBAL_VALUE || defined HANDLE_CONSTANT || \ defined HANDLE_INSTRUCTION || defined HANDLE_INLINE_ASM_VALUE || \ defined HANDLE_METADATA_VALUE || defined HANDLE_VALUE || \ defined HANDLE_CONSTANT_MARKER) #error "Missing macro definition of HANDLE_VALUE*" #endif #ifndef HANDLE_GLOBAL_VALUE #define HANDLE_GLOBAL_VALUE(ValueName) HANDLE_CONSTANT(ValueName) #endif #ifndef HANDLE_CONSTANT #define HANDLE_CONSTANT(ValueName) HANDLE_VALUE(ValueName) #endif #ifndef HANDLE_INSTRUCTION #define HANDLE_INSTRUCTION(ValueName) HANDLE_VALUE(ValueName) #endif #ifndef HANDLE_INLINE_ASM_VALUE #define HANDLE_INLINE_ASM_VALUE(ValueName) HANDLE_VALUE(ValueName) #endif #ifndef HANDLE_METADATA_VALUE #define HANDLE_METADATA_VALUE(ValueName) HANDLE_VALUE(ValueName) #endif #ifndef HANDLE_VALUE #define HANDLE_VALUE(ValueName) #endif #ifndef HANDLE_CONSTANT_MARKER #define HANDLE_CONSTANT_MARKER(MarkerName, ValueName) #endif HANDLE_VALUE(Argument) HANDLE_VALUE(BasicBlock) HANDLE_GLOBAL_VALUE(Function) HANDLE_GLOBAL_VALUE(GlobalAlias) HANDLE_GLOBAL_VALUE(GlobalVariable) HANDLE_CONSTANT(UndefValue) HANDLE_CONSTANT(BlockAddress) HANDLE_CONSTANT(ConstantExpr) HANDLE_CONSTANT(ConstantAggregateZero) HANDLE_CONSTANT(ConstantDataArray) HANDLE_CONSTANT(ConstantDataVector) HANDLE_CONSTANT(ConstantInt) HANDLE_CONSTANT(ConstantFP) 
HANDLE_CONSTANT(ConstantArray) HANDLE_CONSTANT(ConstantStruct) HANDLE_CONSTANT(ConstantVector) HANDLE_CONSTANT(ConstantPointerNull) HANDLE_METADATA_VALUE(MetadataAsValue) HANDLE_INLINE_ASM_VALUE(InlineAsm) HANDLE_INSTRUCTION(Instruction) // Enum values starting at InstructionVal are used for Instructions; // don't add new values here! HANDLE_CONSTANT_MARKER(ConstantFirstVal, Function) HANDLE_CONSTANT_MARKER(ConstantLastVal, ConstantPointerNull) #undef HANDLE_GLOBAL_VALUE #undef HANDLE_CONSTANT #undef HANDLE_INSTRUCTION #undef HANDLE_METADATA_VALUE #undef HANDLE_INLINE_ASM_VALUE #undef HANDLE_VALUE #undef HANDLE_CONSTANT_MARKER
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/LLVMContext.h
//===-- llvm/LLVMContext.h - Class for managing "global" state --*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file declares LLVMContext, a container of "global" state in LLVM, such // as the global type and constant uniquing tables. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_LLVMCONTEXT_H #define LLVM_IR_LLVMCONTEXT_H #include "llvm-c/Core.h" #include "llvm/Support/CBindingWrapping.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/Options.h" namespace llvm { class LLVMContextImpl; class StringRef; class Twine; class Instruction; class Module; class SMDiagnostic; class DiagnosticInfo; template <typename T> class SmallVectorImpl; class Function; class DebugLoc; /// This is an important class for using LLVM in a threaded context. It /// (opaquely) owns and manages the core "global" data of LLVM's core /// infrastructure, including the type and constant uniquing tables. /// LLVMContext itself provides no locking guarantees, so you should be careful /// to have one context per thread. class LLVMContext { public: LLVMContextImpl *const pImpl; LLVMContext(); ~LLVMContext(); // Pinned metadata names, which always have the same value. This is a // compile-time performance optimization, not a correctness optimization. 
enum { MD_dbg = 0, // "dbg" MD_tbaa = 1, // "tbaa" MD_prof = 2, // "prof" MD_fpmath = 3, // "fpmath" MD_range = 4, // "range" MD_tbaa_struct = 5, // "tbaa.struct" MD_invariant_load = 6, // "invariant.load" MD_alias_scope = 7, // "alias.scope" MD_noalias = 8, // "noalias", MD_nontemporal = 9, // "nontemporal" MD_mem_parallel_loop_access = 10, // "llvm.mem.parallel_loop_access" MD_nonnull = 11, // "nonnull" MD_dereferenceable = 12, // "dereferenceable" MD_dereferenceable_or_null = 13 // "dereferenceable_or_null" }; /// getMDKindID - Return a unique non-zero ID for the specified metadata kind. /// This ID is uniqued across modules in the current LLVMContext. unsigned getMDKindID(StringRef Name) const; // HLSL Change - Begin /// Return a unique non-zero ID for the specified metadata kind if it exists. bool findMDKindID(StringRef Name, unsigned *ID) const; // HLSL Change - End /// getMDKindNames - Populate client supplied SmallVector with the name for /// custom metadata IDs registered in this LLVMContext. void getMDKindNames(SmallVectorImpl<StringRef> &Result) const; typedef void (*InlineAsmDiagHandlerTy)(const SMDiagnostic&, void *Context, unsigned LocCookie); /// Defines the type of a diagnostic handler. /// \see LLVMContext::setDiagnosticHandler. /// \see LLVMContext::diagnose. typedef void (*DiagnosticHandlerTy)(const DiagnosticInfo &DI, void *Context); /// Defines the type of a yield callback. /// \see LLVMContext::setYieldCallback. typedef void (*YieldCallbackTy)(LLVMContext *Context, void *OpaqueHandle); /// setInlineAsmDiagnosticHandler - This method sets a handler that is invoked /// when problems with inline asm are detected by the backend. The first /// argument is a function pointer and the second is a context pointer that /// gets passed into the DiagHandler. /// /// LLVMContext doesn't take ownership or interpret either of these /// pointers. 
void setInlineAsmDiagnosticHandler(InlineAsmDiagHandlerTy DiagHandler, void *DiagContext = nullptr); /// getInlineAsmDiagnosticHandler - Return the diagnostic handler set by /// setInlineAsmDiagnosticHandler. InlineAsmDiagHandlerTy getInlineAsmDiagnosticHandler() const; /// getInlineAsmDiagnosticContext - Return the diagnostic context set by /// setInlineAsmDiagnosticHandler. void *getInlineAsmDiagnosticContext() const; /// setDiagnosticHandler - This method sets a handler that is invoked /// when the backend needs to report anything to the user. The first /// argument is a function pointer and the second is a context pointer that /// gets passed into the DiagHandler. The third argument should be set to /// true if the handler only expects enabled diagnostics. /// /// LLVMContext doesn't take ownership or interpret either of these /// pointers. void setDiagnosticHandler(DiagnosticHandlerTy DiagHandler, void *DiagContext = nullptr, bool RespectFilters = false); /// getDiagnosticHandler - Return the diagnostic handler set by /// setDiagnosticHandler. DiagnosticHandlerTy getDiagnosticHandler() const; /// getDiagnosticContext - Return the diagnostic context set by /// setDiagnosticContext. void *getDiagnosticContext() const; /// \brief Report a message to the currently installed diagnostic handler. /// /// This function returns, in particular in the case of error reporting /// (DI.Severity == \a DS_Error), so the caller should leave the compilation /// process in a self-consistent state, even though the generated code /// need not be correct. /// /// The diagnostic message will be implicitly prefixed with a severity keyword /// according to \p DI.getSeverity(), i.e., "error: " for \a DS_Error, /// "warning: " for \a DS_Warning, and "note: " for \a DS_Note. void diagnose(const DiagnosticInfo &DI); /// \brief Registers a yield callback with the given context. 
/// /// The yield callback function may be called by LLVM to transfer control back /// to the client that invoked the LLVM compilation. This can be used to yield /// control of the thread, or perform periodic work needed by the client. /// There is no guaranteed frequency at which callbacks must occur; in fact, /// the client is not guaranteed to ever receive this callback. It is at the /// sole discretion of LLVM to do so and only if it can guarantee that /// suspending the thread won't block any forward progress in other LLVM /// contexts in the same process. /// /// At a suspend point, the state of the current LLVM context is intentionally /// undefined. No assumptions about it can or should be made. Only LLVM /// context API calls that explicitly state that they can be used during a /// yield callback are allowed to be used. Any other API calls into the /// context are not supported until the yield callback function returns /// control to LLVM. Other LLVM contexts are unaffected by this restriction. void setYieldCallback(YieldCallbackTy Callback, void *OpaqueHandle); /// \brief Calls the yield callback (if applicable). /// /// This transfers control of the current thread back to the client, which may /// suspend the current thread. Only call this method when LLVM doesn't hold /// any global mutex or cannot block the execution in another LLVM context. void yield(); /// emitError - Emit an error message to the currently installed error handler /// with optional location information. This function returns, so code should /// be prepared to drop the erroneous construct on the floor and "not crash". /// The generated code need not be correct. The error message will be /// implicitly prefixed with "error: " and should not end with a ".". 
void emitError(unsigned LocCookie, const Twine &ErrorStr); void emitError(const Instruction *I, const Twine &ErrorStr); void emitError(const Twine &ErrorStr); void emitWarning(const Twine &WarningStr); // HLSL Change /// \brief Query for a debug option's value. /// /// This function returns typed data populated from command line parsing. template <typename ValT, typename Base, ValT(Base::*Mem)> ValT getOption() const { return OptionRegistry::instance().template get<ValT, Base, Mem>(); } private: LLVMContext(LLVMContext&) = delete; void operator=(LLVMContext&) = delete; /// addModule - Register a module as being instantiated in this context. If /// the context is deleted, the module will be deleted as well. void addModule(Module*); /// removeModule - Unregister a module from this context. void removeModule(Module*); // Module needs access to the add/removeModule methods. friend class Module; }; /// getGlobalContext - Returns a global context. This is for LLVM clients that /// only care about operating on a single thread. extern LLVMContext &getGlobalContext(); // Create wrappers for C Binding types (see CBindingWrapping.h). DEFINE_SIMPLE_CONVERSION_FUNCTIONS(LLVMContext, LLVMContextRef) /* Specialized opaque context conversions. */ inline LLVMContext **unwrap(LLVMContextRef* Tys) { return reinterpret_cast<LLVMContext**>(Tys); } inline LLVMContextRef *wrap(const LLVMContext **Tys) { return reinterpret_cast<LLVMContextRef*>(const_cast<LLVMContext**>(Tys)); } } #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/Value.h
//===-- llvm/Value.h - Definition of the Value class ------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file declares the Value class. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_VALUE_H #define LLVM_IR_VALUE_H #include "llvm-c/Core.h" #include "llvm/ADT/iterator_range.h" #include "llvm/IR/Use.h" #include "llvm/Support/CBindingWrapping.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" namespace llvm { class APInt; class Argument; class AssemblyAnnotationWriter; class BasicBlock; class Constant; class DataLayout; class Function; class GlobalAlias; class GlobalObject; class GlobalValue; class GlobalVariable; class InlineAsm; class Instruction; class LLVMContext; class Module; class ModuleSlotTracker; class StringRef; class Twine; class Type; class ValueHandleBase; class ValueSymbolTable; class raw_ostream; template<typename ValueTy> class StringMapEntry; typedef StringMapEntry<Value*> ValueName; //===----------------------------------------------------------------------===// // Value Class // // /////////////////////////////////////////////////////////////////////////////// /// \brief LLVM Value Representation /// /// This is a very important LLVM class. It is the base class of all values /// computed by a program that may be used as operands to other values. Value is /// the super class of other important classes such as Instruction and Function. /// All Values have a Type. Type is not a subclass of Value. Some values can /// have a name and they belong to some Module. Setting the name on the Value /// automatically updates the module's symbol table. /// /// Every value has a "use list" that keeps track of which other Values are /// using this Value. 
A Value can also have an arbitrary number of ValueHandle /// objects that watch it and listen to RAUW and Destroy events. See /// llvm/IR/ValueHandle.h for details. class Value { Type *VTy; Use *UseList; friend class ValueAsMetadata; // Allow access to IsUsedByMD. friend class ValueHandleBase; const unsigned char SubclassID; // Subclass identifier (for isa/dyn_cast) unsigned char HasValueHandle : 1; // Has a ValueHandle pointing to this? protected: /// \brief Hold subclass data that can be dropped. /// /// This member is similar to SubclassData, however it is for holding /// information which may be used to aid optimization, but which may be /// cleared to zero without affecting conservative interpretation. unsigned char SubclassOptionalData : 7; private: /// \brief Hold arbitrary subclass data. /// /// This member is defined by this class, but is not used for anything. /// Subclasses can use it to hold whatever state they find useful. This /// field is initialized to zero by the ctor. unsigned short SubclassData; protected: /// \brief The number of operands in the subclass. /// /// This member is defined by this class, but not used for anything. /// Subclasses can use it to store their number of operands, if they have /// any. /// /// This is stored here to save space in User on 64-bit hosts. Since most /// instances of Value have operands, 32-bit hosts aren't significantly /// affected. /// /// Note, this should *NOT* be used directly by any class other than User. /// User uses this value to find the Use list. 
enum : unsigned { NumUserOperandsBits = 29 }; unsigned NumUserOperands : NumUserOperandsBits; bool IsUsedByMD : 1; bool HasName : 1; bool HasHungOffUses : 1; private: template <typename UseT> // UseT == 'Use' or 'const Use' class use_iterator_impl { UseT *U; explicit use_iterator_impl(UseT *u) : U(u) {} friend class Value; public: using iterator_category = std::forward_iterator_tag; using value_type = UseT *; using difference_type = std::ptrdiff_t; using pointer = value_type *; using reference = value_type &; use_iterator_impl() : U() {} bool operator==(const use_iterator_impl &x) const { return U == x.U; } bool operator!=(const use_iterator_impl &x) const { return !operator==(x); } use_iterator_impl &operator++() { // Preincrement assert(U && "Cannot increment end iterator!"); U = U->getNext(); return *this; } use_iterator_impl operator++(int) { // Postincrement auto tmp = *this; ++*this; return tmp; } UseT &operator*() const { assert(U && "Cannot dereference end iterator!"); return *U; } UseT *operator->() const { return &operator*(); } operator use_iterator_impl<const UseT>() const { return use_iterator_impl<const UseT>(U); } }; template <typename UserTy> // UserTy == 'User' or 'const User' class user_iterator_impl { use_iterator_impl<Use> UI; explicit user_iterator_impl(Use *U) : UI(U) {} friend class Value; public: using iterator_category = std::forward_iterator_tag; using value_type = UserTy *; using difference_type = std::ptrdiff_t; using pointer = value_type *; using reference = value_type &; user_iterator_impl() {} bool operator==(const user_iterator_impl &x) const { return UI == x.UI; } bool operator!=(const user_iterator_impl &x) const { return !operator==(x); } /// \brief Returns true if this iterator is equal to user_end() on the value. 
bool atEnd() const { return *this == user_iterator_impl(); } user_iterator_impl &operator++() { // Preincrement ++UI; return *this; } user_iterator_impl operator++(int) { // Postincrement auto tmp = *this; ++*this; return tmp; } // Retrieve a pointer to the current User. UserTy *operator*() const { return UI->getUser(); } UserTy *operator->() const { return operator*(); } operator user_iterator_impl<const UserTy>() const { return user_iterator_impl<const UserTy>(*UI); } Use &getUse() const { return *UI; } }; void operator=(const Value &) = delete; Value(const Value &) = delete; protected: Value(Type *Ty, unsigned scid); public: virtual ~Value(); /// \brief Support for debugging, callable in GDB: V->dump() void dump() const; /// \brief Implement operator<< on Value. /// @{ void print(raw_ostream &O) const; void print(raw_ostream &O, ModuleSlotTracker &MST) const; /// @} /// \brief Print the name of this Value out to the specified raw_ostream. /// /// This is useful when you just want to print 'int %reg126', not the /// instruction that generated it. If you specify a Module for context, then /// even constanst get pretty-printed; for example, the type of a null /// pointer is printed symbolically. /// @{ void printAsOperand(raw_ostream &O, bool PrintType = true, const Module *M = nullptr) const; void printAsOperand(raw_ostream &O, bool PrintType, ModuleSlotTracker &MST) const; /// @} /// \brief All values are typed, get the type of this value. Type *getType() const { return VTy; } /// \brief All values hold a context through their type. LLVMContext &getContext() const; // \brief All values can potentially be named. bool hasName() const { return HasName; } ValueName *getValueName() const; void setValueName(ValueName *VN); private: void destroyValueName(); void setNameImpl(const Twine &Name); public: /// \brief Return a constant reference to the value's name. /// /// This is cheap and guaranteed to return the same reference as long as the /// value is not modified. 
StringRef getName() const; /// \brief Change the name of the value. /// /// Choose a new unique name if the provided name is taken. /// /// \param Name The new name; or "" if the value's name should be removed. void setName(const Twine &Name); /// \brief Transfer the name from V to this value. /// /// After taking V's name, sets V's name to empty. /// /// \note It is an error to call V->takeName(V). void takeName(Value *V); /// \brief Change all uses of this to point to a new Value. /// /// Go through the uses list for this definition and make each use point to /// "V" instead of "this". After this completes, 'this's use list is /// guaranteed to be empty. void replaceAllUsesWith(Value *V); /// replaceUsesOutsideBlock - Go through the uses list for this definition and /// make each use point to "V" instead of "this" when the use is outside the /// block. 'This's use list is expected to have at least one element. /// Unlike replaceAllUsesWith this function does not support basic block /// values or constant users. void replaceUsesOutsideBlock(Value *V, BasicBlock *BB); //---------------------------------------------------------------------- // Methods for handling the chain of uses of this Value. 
// bool use_empty() const { return UseList == nullptr; } typedef use_iterator_impl<Use> use_iterator; typedef use_iterator_impl<const Use> const_use_iterator; use_iterator use_begin() { return use_iterator(UseList); } const_use_iterator use_begin() const { return const_use_iterator(UseList); } use_iterator use_end() { return use_iterator(); } const_use_iterator use_end() const { return const_use_iterator(); } iterator_range<use_iterator> uses() { return iterator_range<use_iterator>(use_begin(), use_end()); } iterator_range<const_use_iterator> uses() const { return iterator_range<const_use_iterator>(use_begin(), use_end()); } bool user_empty() const { return UseList == nullptr; } typedef user_iterator_impl<User> user_iterator; typedef user_iterator_impl<const User> const_user_iterator; user_iterator user_begin() { return user_iterator(UseList); } const_user_iterator user_begin() const { return const_user_iterator(UseList); } user_iterator user_end() { return user_iterator(); } const_user_iterator user_end() const { return const_user_iterator(); } User *user_back() { return *user_begin(); } const User *user_back() const { return *user_begin(); } iterator_range<user_iterator> users() { return iterator_range<user_iterator>(user_begin(), user_end()); } iterator_range<const_user_iterator> users() const { return iterator_range<const_user_iterator>(user_begin(), user_end()); } /// \brief Return true if there is exactly one user of this value. /// /// This is specialized because it is a common request and does not require /// traversing the whole use list. bool hasOneUse() const { const_use_iterator I = use_begin(), E = use_end(); if (I == E) return false; return ++I == E; } /// \brief Return true if this Value has exactly N users. bool hasNUses(unsigned N) const; /// \brief Return true if this value has N users or more. /// /// This is logically equivalent to getNumUses() >= N. 
bool hasNUsesOrMore(unsigned N) const; /// \brief Check if this value is used in the specified basic block. bool isUsedInBasicBlock(const BasicBlock *BB) const; /// \brief This method computes the number of uses of this Value. /// /// This is a linear time operation. Use hasOneUse, hasNUses, or /// hasNUsesOrMore to check for specific values. unsigned getNumUses() const; /// \brief This method should only be used by the Use class. void addUse(Use &U) { U.addToList(&UseList); } /// \brief Concrete subclass of this. /// /// An enumeration for keeping track of the concrete subclass of Value that /// is actually instantiated. Values of this enumeration are kept in the /// Value classes SubclassID field. They are used for concrete type /// identification. enum ValueTy { #define HANDLE_VALUE(Name) Name##Val, #include "llvm/IR/Value.def" // Markers: #define HANDLE_CONSTANT_MARKER(Marker, Constant) Marker = Constant##Val, #include "llvm/IR/Value.def" }; /// \brief Return an ID for the concrete type of this object. /// /// This is used to implement the classof checks. This should not be used /// for any other purpose, as the values may change as LLVM evolves. Also, /// note that for instructions, the Instruction's opcode is added to /// InstructionVal. So this means three things: /// # there is no value with code InstructionVal (no opcode==0). /// # there are more possible values for the value type than in ValueTy enum. /// # the InstructionVal enumerator must be the highest valued enumerator in /// the ValueTy enum. unsigned getValueID() const { return SubclassID; } /// \brief Return the raw optional flags value contained in this value. /// /// This should only be used when testing two Values for equivalence. unsigned getRawSubclassOptionalData() const { return SubclassOptionalData; } /// \brief Clear the optional flags contained in this value. void clearSubclassOptionalData() { SubclassOptionalData = 0; } /// \brief Check the optional flags for equality. 
bool hasSameSubclassOptionalData(const Value *V) const { return SubclassOptionalData == V->SubclassOptionalData; } /// \brief Clear any optional flags not set in the given Value. void intersectOptionalDataWith(const Value *V) { SubclassOptionalData &= V->SubclassOptionalData; } /// \brief Return true if there is a value handle associated with this value. bool hasValueHandle() const { return HasValueHandle; } /// \brief Return true if there is metadata referencing this value. bool isUsedByMetadata() const { return IsUsedByMD; } /// \brief Strip off pointer casts, all-zero GEPs, and aliases. /// /// Returns the original uncasted value. If this is called on a non-pointer /// value, it returns 'this'. Value *stripPointerCasts(); const Value *stripPointerCasts() const { return const_cast<Value*>(this)->stripPointerCasts(); } /// \brief Strip off pointer casts and all-zero GEPs. /// /// Returns the original uncasted value. If this is called on a non-pointer /// value, it returns 'this'. Value *stripPointerCastsNoFollowAliases(); const Value *stripPointerCastsNoFollowAliases() const { return const_cast<Value*>(this)->stripPointerCastsNoFollowAliases(); } /// \brief Strip off pointer casts and all-constant inbounds GEPs. /// /// Returns the original pointer value. If this is called on a non-pointer /// value, it returns 'this'. Value *stripInBoundsConstantOffsets(); const Value *stripInBoundsConstantOffsets() const { return const_cast<Value*>(this)->stripInBoundsConstantOffsets(); } /// \brief Accumulate offsets from \a stripInBoundsConstantOffsets(). /// /// Stores the resulting constant offset stripped into the APInt provided. /// The provided APInt will be extended or truncated as needed to be the /// correct bitwidth for an offset of this pointer type. /// /// If this is called on a non-pointer value, it returns 'this'. 
Value *stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset); const Value *stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const { return const_cast<Value *>(this) ->stripAndAccumulateInBoundsConstantOffsets(DL, Offset); } /// \brief Strip off pointer casts and inbounds GEPs. /// /// Returns the original pointer value. If this is called on a non-pointer /// value, it returns 'this'. Value *stripInBoundsOffsets(); const Value *stripInBoundsOffsets() const { return const_cast<Value*>(this)->stripInBoundsOffsets(); } /// \brief Translate PHI node to its predecessor from the given basic block. /// /// If this value is a PHI node with CurBB as its parent, return the value in /// the PHI node corresponding to PredBB. If not, return ourself. This is /// useful if you want to know the value something has in a predecessor /// block. Value *DoPHITranslation(const BasicBlock *CurBB, const BasicBlock *PredBB); const Value *DoPHITranslation(const BasicBlock *CurBB, const BasicBlock *PredBB) const{ return const_cast<Value*>(this)->DoPHITranslation(CurBB, PredBB); } /// \brief The maximum alignment for instructions. /// /// This is the greatest alignment value supported by load, store, and alloca /// instructions, and global values. static const unsigned MaxAlignmentExponent = 29; static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent; /// \brief Mutate the type of this Value to be of the specified type. /// /// Note that this is an extremely dangerous operation which can create /// completely invalid IR very easily. It is strongly recommended that you /// recreate IR objects with the right types instead of mutating them in /// place. void mutateType(Type *Ty) { VTy = Ty; } /// \brief Sort the use-list. /// /// Sorts the Value's use-list by Cmp using a stable mergesort. Cmp is /// expected to compare two \a Use references. template <class Compare> void sortUseList(Compare Cmp); /// \brief Reverse the use-list. 
void reverseUseList(); private: /// \brief Merge two lists together. /// /// Merges \c L and \c R using \c Cmp. To enable stable sorts, always pushes /// "equal" items from L before items from R. /// /// \return the first element in the list. /// /// \note Completely ignores \a Use::Prev (doesn't read, doesn't update). template <class Compare> static Use *mergeUseLists(Use *L, Use *R, Compare Cmp) { Use *Merged; // HLSL Change Begins. Copied from LLVM Version 8.0.0. // MergeUseListsImpl(L, R, &Merged, Cmp); Use **Next = &Merged; while (true) { if (!L) { *Next = R; break; } if (!R) { *Next = L; break; } if (Cmp(*R, *L)) { *Next = R; Next = &R->Next; R = R->Next; } else { *Next = L; Next = &L->Next; L = L->Next; } } // HLSL Change Ends. return Merged; } /// \brief Tail-recursive helper for \a mergeUseLists(). /// /// \param[out] Next the first element in the list. // HLSL Change Begins. //template <class Compare> //static void mergeUseListsImpl(Use *L, Use *R, Use **Next, Compare Cmp); // HLSL Change Ends. protected: unsigned short getSubclassDataFromValue() const { return SubclassData; } void setValueSubclassData(unsigned short D) { SubclassData = D; } }; inline raw_ostream &operator<<(raw_ostream &OS, const Value &V) { V.print(OS); return OS; } void Use::set(Value *V) { if (Val) removeFromList(); Val = V; if (V) V->addUse(*this); } template <class Compare> void Value::sortUseList(Compare Cmp) { if (!UseList || !UseList->Next) // No need to sort 0 or 1 uses. return; // Note: this function completely ignores Prev pointers until the end when // they're fixed en masse. // Create a binomial vector of sorted lists, visiting uses one at a time and // merging lists as necessary. const unsigned MaxSlots = 32; Use *Slots[MaxSlots]; // Collect the first use, turning it into a single-item list. Use *Next = UseList->Next; UseList->Next = nullptr; unsigned NumSlots = 1; Slots[0] = UseList; // Collect all but the last use. 
while (Next->Next) { Use *Current = Next; Next = Current->Next; // Turn Current into a single-item list. Current->Next = nullptr; // Save Current in the first available slot, merging on collisions. unsigned I; for (I = 0; I < NumSlots; ++I) { if (!Slots[I]) break; // Merge two lists, doubling the size of Current and emptying slot I. // // Since the uses in Slots[I] originally preceded those in Current, send // Slots[I] in as the left parameter to maintain a stable sort. Current = mergeUseLists(Slots[I], Current, Cmp); Slots[I] = nullptr; } // Check if this is a new slot. if (I == NumSlots) { ++NumSlots; assert(NumSlots <= MaxSlots && "Use list bigger than 2^32"); } // Found an open slot. Slots[I] = Current; } // Merge all the lists together. assert(Next && "Expected one more Use"); assert(!Next->Next && "Expected only one Use"); UseList = Next; for (unsigned I = 0; I < NumSlots; ++I) if (Slots[I]) // Since the uses in Slots[I] originally preceded those in UseList, send // Slots[I] in as the left parameter to maintain a stable sort. UseList = mergeUseLists(Slots[I], UseList, Cmp); // Fix the Prev pointers. for (Use *I = UseList, **Prev = &UseList; I; I = I->Next) { I->setPrev(Prev); Prev = &I->Next; } } // HLSL Change Begins. /* template <class Compare> void Value::mergeUseListsImpl(Use *L, Use *R, Use **Next, Compare Cmp) { if (!L) { *Next = R; return; } if (!R) { *Next = L; return; } if (Cmp(*R, *L)) { *Next = R; mergeUseListsImpl(L, R->Next, &R->Next, Cmp); return; } *Next = L; mergeUseListsImpl(L->Next, R, &L->Next, Cmp); } */ // HLSL Change Ends. // isa - Provide some specializations of isa so that we don't have to include // the subtype header files to test to see if the value is a subclass... 
// template <> struct isa_impl<Constant, Value> { static inline bool doit(const Value &Val) { return Val.getValueID() >= Value::ConstantFirstVal && Val.getValueID() <= Value::ConstantLastVal; } }; template <> struct isa_impl<Argument, Value> { static inline bool doit (const Value &Val) { return Val.getValueID() == Value::ArgumentVal; } }; template <> struct isa_impl<InlineAsm, Value> { static inline bool doit(const Value &Val) { return Val.getValueID() == Value::InlineAsmVal; } }; template <> struct isa_impl<Instruction, Value> { static inline bool doit(const Value &Val) { return Val.getValueID() >= Value::InstructionVal; } }; template <> struct isa_impl<BasicBlock, Value> { static inline bool doit(const Value &Val) { return Val.getValueID() == Value::BasicBlockVal; } }; template <> struct isa_impl<Function, Value> { static inline bool doit(const Value &Val) { return Val.getValueID() == Value::FunctionVal; } }; template <> struct isa_impl<GlobalVariable, Value> { static inline bool doit(const Value &Val) { return Val.getValueID() == Value::GlobalVariableVal; } }; template <> struct isa_impl<GlobalAlias, Value> { static inline bool doit(const Value &Val) { return Val.getValueID() == Value::GlobalAliasVal; } }; template <> struct isa_impl<GlobalValue, Value> { static inline bool doit(const Value &Val) { return isa<GlobalObject>(Val) || isa<GlobalAlias>(Val); } }; template <> struct isa_impl<GlobalObject, Value> { static inline bool doit(const Value &Val) { return isa<GlobalVariable>(Val) || isa<Function>(Val); } }; // Value* is only 4-byte aligned. template<> class PointerLikeTypeTraits<Value*> { typedef Value* PT; public: static inline void *getAsVoidPointer(PT P) { return P; } static inline PT getFromVoidPointer(void *P) { return static_cast<PT>(P); } enum { NumLowBitsAvailable = 2 }; }; // Create wrappers for C Binding types (see CBindingWrapping.h). DEFINE_ISA_CONVERSION_FUNCTIONS(Value, LLVMValueRef) /* Specialized opaque value conversions. 
*/ inline Value **unwrap(LLVMValueRef *Vals) { return reinterpret_cast<Value**>(Vals); } template<typename T> inline T **unwrap(LLVMValueRef *Vals, unsigned Length) { #ifdef DEBUG for (LLVMValueRef *I = Vals, *E = Vals + Length; I != E; ++I) cast<T>(*I); #endif (void)Length; return reinterpret_cast<T**>(Vals); } inline LLVMValueRef *wrap(const Value **Vals) { return reinterpret_cast<LLVMValueRef*>(const_cast<Value**>(Vals)); } } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/NoFolder.h
//===- NoFolder.h - Constant folding helper ---------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the NoFolder class, a helper for IRBuilder. It provides // IRBuilder with a set of methods for creating unfolded constants. This is // useful for learners trying to understand how LLVM IR works, and who don't // want details to be hidden by the constant folder. For general constant // creation and folding, use ConstantExpr and the routines in // llvm/Analysis/ConstantFolding.h. // // Note: since it is not actually possible to create unfolded constants, this // class returns instructions rather than constants. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_NOFOLDER_H #define LLVM_IR_NOFOLDER_H #include "llvm/ADT/ArrayRef.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Instructions.h" namespace llvm { /// NoFolder - Create "constants" (actually, instructions) with no folding. 
class NoFolder { public: explicit NoFolder() {} //===--------------------------------------------------------------------===// // Binary Operators //===--------------------------------------------------------------------===// Instruction *CreateAdd(Constant *LHS, Constant *RHS, bool HasNUW = false, bool HasNSW = false) const { BinaryOperator *BO = BinaryOperator::CreateAdd(LHS, RHS); if (HasNUW) BO->setHasNoUnsignedWrap(); if (HasNSW) BO->setHasNoSignedWrap(); return BO; } Instruction *CreateNSWAdd(Constant *LHS, Constant *RHS) const { return BinaryOperator::CreateNSWAdd(LHS, RHS); } Instruction *CreateNUWAdd(Constant *LHS, Constant *RHS) const { return BinaryOperator::CreateNUWAdd(LHS, RHS); } Instruction *CreateFAdd(Constant *LHS, Constant *RHS) const { return BinaryOperator::CreateFAdd(LHS, RHS); } Instruction *CreateSub(Constant *LHS, Constant *RHS, bool HasNUW = false, bool HasNSW = false) const { BinaryOperator *BO = BinaryOperator::CreateSub(LHS, RHS); if (HasNUW) BO->setHasNoUnsignedWrap(); if (HasNSW) BO->setHasNoSignedWrap(); return BO; } Instruction *CreateNSWSub(Constant *LHS, Constant *RHS) const { return BinaryOperator::CreateNSWSub(LHS, RHS); } Instruction *CreateNUWSub(Constant *LHS, Constant *RHS) const { return BinaryOperator::CreateNUWSub(LHS, RHS); } Instruction *CreateFSub(Constant *LHS, Constant *RHS) const { return BinaryOperator::CreateFSub(LHS, RHS); } Instruction *CreateMul(Constant *LHS, Constant *RHS, bool HasNUW = false, bool HasNSW = false) const { BinaryOperator *BO = BinaryOperator::CreateMul(LHS, RHS); if (HasNUW) BO->setHasNoUnsignedWrap(); if (HasNSW) BO->setHasNoSignedWrap(); return BO; } Instruction *CreateNSWMul(Constant *LHS, Constant *RHS) const { return BinaryOperator::CreateNSWMul(LHS, RHS); } Instruction *CreateNUWMul(Constant *LHS, Constant *RHS) const { return BinaryOperator::CreateNUWMul(LHS, RHS); } Instruction *CreateFMul(Constant *LHS, Constant *RHS) const { return BinaryOperator::CreateFMul(LHS, RHS); } Instruction 
*CreateUDiv(Constant *LHS, Constant *RHS, bool isExact = false) const { if (!isExact) return BinaryOperator::CreateUDiv(LHS, RHS); return BinaryOperator::CreateExactUDiv(LHS, RHS); } Instruction *CreateExactUDiv(Constant *LHS, Constant *RHS) const { return BinaryOperator::CreateExactUDiv(LHS, RHS); } Instruction *CreateSDiv(Constant *LHS, Constant *RHS, bool isExact = false) const { if (!isExact) return BinaryOperator::CreateSDiv(LHS, RHS); return BinaryOperator::CreateExactSDiv(LHS, RHS); } Instruction *CreateExactSDiv(Constant *LHS, Constant *RHS) const { return BinaryOperator::CreateExactSDiv(LHS, RHS); } Instruction *CreateFDiv(Constant *LHS, Constant *RHS) const { return BinaryOperator::CreateFDiv(LHS, RHS); } Instruction *CreateURem(Constant *LHS, Constant *RHS) const { return BinaryOperator::CreateURem(LHS, RHS); } Instruction *CreateSRem(Constant *LHS, Constant *RHS) const { return BinaryOperator::CreateSRem(LHS, RHS); } Instruction *CreateFRem(Constant *LHS, Constant *RHS) const { return BinaryOperator::CreateFRem(LHS, RHS); } Instruction *CreateShl(Constant *LHS, Constant *RHS, bool HasNUW = false, bool HasNSW = false) const { BinaryOperator *BO = BinaryOperator::CreateShl(LHS, RHS); if (HasNUW) BO->setHasNoUnsignedWrap(); if (HasNSW) BO->setHasNoSignedWrap(); return BO; } Instruction *CreateLShr(Constant *LHS, Constant *RHS, bool isExact = false) const { if (!isExact) return BinaryOperator::CreateLShr(LHS, RHS); return BinaryOperator::CreateExactLShr(LHS, RHS); } Instruction *CreateAShr(Constant *LHS, Constant *RHS, bool isExact = false) const { if (!isExact) return BinaryOperator::CreateAShr(LHS, RHS); return BinaryOperator::CreateExactAShr(LHS, RHS); } Instruction *CreateAnd(Constant *LHS, Constant *RHS) const { return BinaryOperator::CreateAnd(LHS, RHS); } Instruction *CreateOr(Constant *LHS, Constant *RHS) const { return BinaryOperator::CreateOr(LHS, RHS); } Instruction *CreateXor(Constant *LHS, Constant *RHS) const { return 
BinaryOperator::CreateXor(LHS, RHS); } Instruction *CreateBinOp(Instruction::BinaryOps Opc, Constant *LHS, Constant *RHS) const { return BinaryOperator::Create(Opc, LHS, RHS); } //===--------------------------------------------------------------------===// // Unary Operators //===--------------------------------------------------------------------===// Instruction *CreateNeg(Constant *C, bool HasNUW = false, bool HasNSW = false) const { BinaryOperator *BO = BinaryOperator::CreateNeg(C); if (HasNUW) BO->setHasNoUnsignedWrap(); if (HasNSW) BO->setHasNoSignedWrap(); return BO; } Instruction *CreateNSWNeg(Constant *C) const { return BinaryOperator::CreateNSWNeg(C); } Instruction *CreateNUWNeg(Constant *C) const { return BinaryOperator::CreateNUWNeg(C); } Instruction *CreateFNeg(Constant *C) const { return BinaryOperator::CreateFNeg(C); } Instruction *CreateNot(Constant *C) const { return BinaryOperator::CreateNot(C); } //===--------------------------------------------------------------------===// // Memory Instructions //===--------------------------------------------------------------------===// Constant *CreateGetElementPtr(Type *Ty, Constant *C, ArrayRef<Constant *> IdxList) const { return ConstantExpr::getGetElementPtr(Ty, C, IdxList); } Constant *CreateGetElementPtr(Type *Ty, Constant *C, Constant *Idx) const { // This form of the function only exists to avoid ambiguous overload // warnings about whether to convert Idx to ArrayRef<Constant *> or // ArrayRef<Value *>. 
return ConstantExpr::getGetElementPtr(Ty, C, Idx); } Instruction *CreateGetElementPtr(Type *Ty, Constant *C, ArrayRef<Value *> IdxList) const { return GetElementPtrInst::Create(Ty, C, IdxList); } Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C, ArrayRef<Constant *> IdxList) const { return ConstantExpr::getInBoundsGetElementPtr(Ty, C, IdxList); } Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C, Constant *Idx) const { // This form of the function only exists to avoid ambiguous overload // warnings about whether to convert Idx to ArrayRef<Constant *> or // ArrayRef<Value *>. return ConstantExpr::getInBoundsGetElementPtr(Ty, C, Idx); } Instruction *CreateInBoundsGetElementPtr(Type *Ty, Constant *C, ArrayRef<Value *> IdxList) const { return GetElementPtrInst::CreateInBounds(Ty, C, IdxList); } //===--------------------------------------------------------------------===// // Cast/Conversion Operators //===--------------------------------------------------------------------===// Instruction *CreateCast(Instruction::CastOps Op, Constant *C, Type *DestTy) const { return CastInst::Create(Op, C, DestTy); } Instruction *CreatePointerCast(Constant *C, Type *DestTy) const { return CastInst::CreatePointerCast(C, DestTy); } Instruction *CreateIntCast(Constant *C, Type *DestTy, bool isSigned) const { return CastInst::CreateIntegerCast(C, DestTy, isSigned); } Instruction *CreateFPCast(Constant *C, Type *DestTy) const { return CastInst::CreateFPCast(C, DestTy); } Instruction *CreateBitCast(Constant *C, Type *DestTy) const { return CreateCast(Instruction::BitCast, C, DestTy); } Instruction *CreateIntToPtr(Constant *C, Type *DestTy) const { return CreateCast(Instruction::IntToPtr, C, DestTy); } Instruction *CreatePtrToInt(Constant *C, Type *DestTy) const { return CreateCast(Instruction::PtrToInt, C, DestTy); } Instruction *CreateZExtOrBitCast(Constant *C, Type *DestTy) const { return CastInst::CreateZExtOrBitCast(C, DestTy); } Instruction 
*CreateSExtOrBitCast(Constant *C, Type *DestTy) const { return CastInst::CreateSExtOrBitCast(C, DestTy); } Instruction *CreateTruncOrBitCast(Constant *C, Type *DestTy) const { return CastInst::CreateTruncOrBitCast(C, DestTy); } //===--------------------------------------------------------------------===// // Compare Instructions //===--------------------------------------------------------------------===// Instruction *CreateICmp(CmpInst::Predicate P, Constant *LHS, Constant *RHS) const { return new ICmpInst(P, LHS, RHS); } Instruction *CreateFCmp(CmpInst::Predicate P, Constant *LHS, Constant *RHS) const { return new FCmpInst(P, LHS, RHS); } //===--------------------------------------------------------------------===// // Other Instructions //===--------------------------------------------------------------------===// Instruction *CreateSelect(Constant *C, Constant *True, Constant *False) const { return SelectInst::Create(C, True, False); } Instruction *CreateExtractElement(Constant *Vec, Constant *Idx) const { return ExtractElementInst::Create(Vec, Idx); } Instruction *CreateInsertElement(Constant *Vec, Constant *NewElt, Constant *Idx) const { return InsertElementInst::Create(Vec, NewElt, Idx); } Instruction *CreateShuffleVector(Constant *V1, Constant *V2, Constant *Mask) const { return new ShuffleVectorInst(V1, V2, Mask); } Instruction *CreateExtractValue(Constant *Agg, ArrayRef<unsigned> IdxList) const { return ExtractValueInst::Create(Agg, IdxList); } Instruction *CreateInsertValue(Constant *Agg, Constant *Val, ArrayRef<unsigned> IdxList) const { return InsertValueInst::Create(Agg, Val, IdxList); } }; } #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/Comdat.h
//===-- llvm/IR/Comdat.h - Comdat definitions -------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // /// @file /// This file contains the declaration of the Comdat class, which represents a /// single COMDAT in LLVM. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_COMDAT_H #define LLVM_IR_COMDAT_H #include "llvm/ADT/StringRef.h" #include "llvm/Support/Compiler.h" namespace llvm { class raw_ostream; template <typename ValueTy> class StringMapEntry; // This is a Name X SelectionKind pair. The reason for having this be an // independent object instead of just adding the name and the SelectionKind // to a GlobalObject is that it is invalid to have two Comdats with the same // name but different SelectionKind. This structure makes that unrepresentable. class Comdat { public: enum SelectionKind { Any, ///< The linker may choose any COMDAT. ExactMatch, ///< The data referenced by the COMDAT must be the same. Largest, ///< The linker will choose the largest COMDAT. NoDuplicates, ///< No other Module may specify this COMDAT. SameSize, ///< The data referenced by the COMDAT must be the same size. }; Comdat(Comdat &&C); SelectionKind getSelectionKind() const { return SK; } void setSelectionKind(SelectionKind Val) { SK = Val; } StringRef getName() const; void print(raw_ostream &OS) const; void dump() const; private: friend class Module; Comdat(); Comdat(SelectionKind SK, StringMapEntry<Comdat> *Name); Comdat(const Comdat &) = delete; // Points to the map in Module. StringMapEntry<Comdat> *Name; SelectionKind SK; }; inline raw_ostream &operator<<(raw_ostream &OS, const Comdat &C) { C.print(OS); return OS; } } // end llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/Use.h
//===-- llvm/Use.h - Definition of the Use class ----------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This defines the Use class. The Use class represents the operand of an
/// instruction or some other User instance which refers to a Value. The Use
/// class keeps the "use list" of the referenced value up to date.
///
/// Pointer tagging is used to efficiently find the User corresponding to a Use
/// without having to store a User pointer in every Use. A User is preceded in
/// memory by all the Uses corresponding to its operands, and the low bits of
/// one of the fields (Prev) of the Use class are used to encode offsets to be
/// able to find that User given a pointer to any Use. For details, see:
///
///   http://www.llvm.org/docs/ProgrammersManual.html#UserLayout
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_USE_H
#define LLVM_IR_USE_H

#include "llvm-c/Core.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/Support/CBindingWrapping.h"
#include "llvm/Support/Compiler.h"
#include <cstddef>
#include <iterator>

namespace llvm {

class Value;
class User;
class Use;
template <typename> struct simplify_type;

// Use** is only 4-byte aligned.
// This specialization lets PointerIntPair store a Use** while stealing only
// the two low bits that the 4-byte alignment guarantees are zero.
template <> class PointerLikeTypeTraits<Use **> {
public:
  static inline void *getAsVoidPointer(Use **P) { return P; }
  static inline Use **getFromVoidPointer(void *P) {
    return static_cast<Use **>(P);
  }
  enum { NumLowBitsAvailable = 2 };
};

/// \brief A Use represents the edge between a Value definition and its users.
///
/// This is notionally a two-dimensional linked list. It supports traversing
/// all of the uses for a particular value definition. It also supports jumping
/// directly to the used value when we arrive from the User's operands, and
/// jumping directly to the User when we arrive from the Value's uses.
///
/// The pointer to the used Value is explicit, and the pointer to the User is
/// implicit. The implicit pointer is found via a waymarking algorithm
/// described in the programmer's manual:
///
///   http://www.llvm.org/docs/ProgrammersManual.html#the-waymarking-algorithm
///
/// This is essentially the single most memory intensive object in LLVM because
/// of the number of uses in the system. At the same time, the constant time
/// operations it allows are essential to many optimizations having reasonable
/// time complexity.
class Use {
public:
  /// \brief Provide a fast substitute to std::swap<Use>
  /// that also works with less standard-compliant compilers
  void swap(Use &RHS);

  // A type for the word following an array of hung-off Uses in memory, which is
  // a pointer back to their User with the bottom bit set.
  typedef PointerIntPair<User *, 1, unsigned> UserRef;

private:
  Use(const Use &U) = delete;

  /// Destructor - Only for zap()
  ~Use() {
    if (Val)
      removeFromList();
  }

  // Waymarking tags stored in the low bits of Prev; a walk over consecutive
  // Uses reads these digits to locate the owning User (see initTags()).
  enum PrevPtrTag { zeroDigitTag, oneDigitTag, stopTag, fullStopTag };

  /// Constructor
  Use(PrevPtrTag tag) : Val(nullptr) { Prev.setInt(tag); }

public:
  operator Value *() const { return Val; }
  Value *get() const { return Val; }

  /// \brief Returns the User that contains this Use.
  ///
  /// For an instruction operand, for example, this will return the
  /// instruction.
  User *getUser() const;

  inline void set(Value *Val);

  Value *operator=(Value *RHS) {
    set(RHS);
    return RHS;
  }
  const Use &operator=(const Use &RHS) {
    set(RHS.Val);
    return *this;
  }

  Value *operator->() { return Val; }
  const Value *operator->() const { return Val; }

  Use *getNext() const { return Next; }

  /// \brief Return the operand # of this use in its User.
  unsigned getOperandNo() const;

  /// \brief Initializes the waymarking tags on an array of Uses.
  ///
  /// This sets up the array of Uses such that getUser() can find the User from
  /// any of those Uses.
  static Use *initTags(Use *Start, Use *Stop);

  /// \brief Destroys Use operands when the number of operands of
  /// a User changes.
  static void zap(Use *Start, const Use *Stop, bool del = false);

private:
  const Use *getImpliedUser() const;

  // The referenced Value; null means this Use is not on any use list.
  Value *Val;
  // Next Use of the same Value (singly linked forward).
  Use *Next;
  // Pointer to the previous link's Next field, with waymarking tag bits.
  PointerIntPair<Use **, 2, PrevPtrTag> Prev;

  void setPrev(Use **NewPrev) { Prev.setPointer(NewPrev); }

  // Splice this Use onto the front of the list headed by *List.
  void addToList(Use **List) {
    Next = *List;
    if (Next)
      Next->setPrev(&Next);
    setPrev(List);
    *List = this;
  }

  // Unlink this Use; tag bits in Prev are preserved by getPointer().
  void removeFromList() {
    Use **StrippedPrev = Prev.getPointer();
    *StrippedPrev = Next;
    if (Next)
      Next->setPrev(StrippedPrev);
  }

  friend class Value;
};

/// \brief Allow clients to treat uses just like values when using
/// casting operators.
template <> struct simplify_type<Use> {
  typedef Value *SimpleType;
  static SimpleType getSimplifiedValue(Use &Val) { return Val.get(); }
};
template <> struct simplify_type<const Use> {
  typedef /*const*/ Value *SimpleType;
  static SimpleType getSimplifiedValue(const Use &Val) { return Val.get(); }
};

// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(Use, LLVMUseRef)

}

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/Metadata.h
//===- llvm/IR/Metadata.h - Metadata definitions ----------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// @file
/// This file contains the declarations for metadata subclasses.
/// They represent the different flavors of metadata that live in LLVM.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_METADATA_H
#define LLVM_IR_METADATA_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/MetadataTracking.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/ErrorHandling.h"
#include <type_traits>

namespace llvm {

class LLVMContext;
class Module;
class ModuleSlotTracker;

template<typename ValueSubClass, typename ItemParentClass>
class SymbolTableListTraits;

enum LLVMConstants : uint32_t {
  DEBUG_METADATA_VERSION = 3 // Current debug info version number.
};

/// \brief Root of the metadata hierarchy.
///
/// This is a root class for typeless data in the IR.
class Metadata {
  friend class ReplaceableMetadataImpl;

  /// \brief RTTI.
  const unsigned char SubclassID;

protected:
  /// \brief Active type of storage.
  enum StorageType { Uniqued, Distinct, Temporary };

  /// \brief Storage flag for non-uniqued, otherwise unowned, metadata.
  unsigned Storage : 2;
  // TODO: expose remaining bits to subclasses.

  // Scratch storage subclasses may use freely; zero-initialized here.
  unsigned short SubclassData16;
  unsigned SubclassData32;

public:
  enum MetadataKind {
    MDTupleKind,
    DILocationKind,
    GenericDINodeKind,
    DISubrangeKind,
    DIEnumeratorKind,
    DIBasicTypeKind,
    DIDerivedTypeKind,
    DICompositeTypeKind,
    DISubroutineTypeKind,
    DIFileKind,
    DICompileUnitKind,
    DISubprogramKind,
    DILexicalBlockKind,
    DILexicalBlockFileKind,
    DINamespaceKind,
    DIModuleKind,
    DITemplateTypeParameterKind,
    DITemplateValueParameterKind,
    DIGlobalVariableKind,
    DILocalVariableKind,
    DIExpressionKind,
    DIObjCPropertyKind,
    DIImportedEntityKind,
    ConstantAsMetadataKind,
    LocalAsMetadataKind,
    MDStringKind
  };

protected:
  Metadata(unsigned ID, StorageType Storage)
      : SubclassID(ID), Storage(Storage), SubclassData16(0), SubclassData32(0) {
  }
  // Non-virtual; deletion always happens through the concrete subclass.
  ~Metadata() = default;

  /// \brief Default handling of a changed operand, which asserts.
  ///
  /// If subclasses pass themselves in as owners to a tracking node reference,
  /// they must provide an implementation of this method.
  void handleChangedOperand(void *, Metadata *) {
    llvm_unreachable("Unimplemented in Metadata subclass");
  }

public:
  unsigned getMetadataID() const { return SubclassID; }

  /// \brief User-friendly dump.
  ///
  /// If \c M is provided, metadata nodes will be numbered canonically;
  /// otherwise, pointer addresses are substituted.
  ///
  /// Note: this uses an explicit overload instead of default arguments so that
  /// the nullptr version is easy to call from a debugger.
  ///
  /// @{
  void dump() const;
  void dump(const Module *M) const;
  /// @}

  /// \brief Print.
  ///
  /// Prints definition of \c this.
  ///
  /// If \c M is provided, metadata nodes will be numbered canonically;
  /// otherwise, pointer addresses are substituted.
  /// @{
  void print(raw_ostream &OS, const Module *M = nullptr) const;
  void print(raw_ostream &OS, ModuleSlotTracker &MST,
             const Module *M = nullptr) const;
  /// @}

  /// \brief Print as operand.
  ///
  /// Prints reference of \c this.
  ///
  /// If \c M is provided, metadata nodes will be numbered canonically;
  /// otherwise, pointer addresses are substituted.
  /// @{
  void printAsOperand(raw_ostream &OS, const Module *M = nullptr) const;
  void printAsOperand(raw_ostream &OS, ModuleSlotTracker &MST,
                      const Module *M = nullptr) const;
  /// @}
};

#define HANDLE_METADATA(CLASS) class CLASS;
#include "llvm/IR/Metadata.def"

// Provide specializations of isa so that we don't need definitions of
// subclasses to see if the metadata is a subclass.
#define HANDLE_METADATA_LEAF(CLASS)                                            \
  template <> struct isa_impl<CLASS, Metadata> {                               \
    static inline bool doit(const Metadata &MD) {                              \
      return MD.getMetadataID() == Metadata::CLASS##Kind;                      \
    }                                                                          \
  };
#include "llvm/IR/Metadata.def"

inline raw_ostream &operator<<(raw_ostream &OS, const Metadata &MD) {
  MD.print(OS);
  return OS;
}

/// \brief Metadata wrapper in the Value hierarchy.
///
/// A member of the \a Value hierarchy to represent a reference to metadata.
/// This allows, e.g., instrinsics to have metadata as operands.
///
/// Notably, this is the only thing in either hierarchy that is allowed to
/// reference \a LocalAsMetadata.
class MetadataAsValue : public Value {
  friend class ReplaceableMetadataImpl;
  friend class LLVMContextImpl;

  // The wrapped metadata; null only after dropUse() during teardown.
  Metadata *MD;

  MetadataAsValue(Type *Ty, Metadata *MD);
  ~MetadataAsValue() override;

  /// \brief Drop use of metadata (during teardown).
  void dropUse() { MD = nullptr; }

public:
  static MetadataAsValue *get(LLVMContext &Context, Metadata *MD);
  static MetadataAsValue *getIfExists(LLVMContext &Context, Metadata *MD);
  Metadata *getMetadata() const { return MD; }

  static bool classof(const Value *V) {
    return V->getValueID() == MetadataAsValueVal;
  }

private:
  void handleChangedMetadata(Metadata *MD);
  void track();
  void untrack();
};

/// \brief Shared implementation of use-lists for replaceable metadata.
///
/// Most metadata cannot be RAUW'ed. This is a shared implementation of
/// use-lists and associated API for the two that support it (\a ValueAsMetadata
/// and \a TempMDNode).
class ReplaceableMetadataImpl {
  friend class MetadataTracking;

public:
  typedef MetadataTracking::OwnerTy OwnerTy;

private:
  LLVMContext &Context;
  // Monotonic counter so RAUW visits users in insertion order.
  uint64_t NextIndex;
  SmallDenseMap<void *, std::pair<OwnerTy, uint64_t>, 4> UseMap;

public:
  ReplaceableMetadataImpl(LLVMContext &Context)
      : Context(Context), NextIndex(0) {}
  ~ReplaceableMetadataImpl() {
    assert(UseMap.empty() && "Cannot destroy in-use replaceable metadata");
  }

  LLVMContext &getContext() const { return Context; }

  /// \brief Replace all uses of this with MD.
  ///
  /// Replace all uses of this with \c MD, which is allowed to be null.
  void replaceAllUsesWith(Metadata *MD);

  /// \brief Resolve all uses of this.
  ///
  /// Resolve all uses of this, turning off RAUW permanently. If \c
  /// ResolveUsers, call \a MDNode::resolve() on any users whose last operand
  /// is resolved.
  void resolveAllUses(bool ResolveUsers = true);

private:
  void addRef(void *Ref, OwnerTy Owner);
  void dropRef(void *Ref);
  void moveRef(void *Ref, void *New, const Metadata &MD);

  static ReplaceableMetadataImpl *get(Metadata &MD);
};

/// \brief Value wrapper in the Metadata hierarchy.
///
/// This is a custom value handle that allows other metadata to refer to
/// classes in the Value hierarchy.
///
/// Because of full uniquing support, each value is only wrapped by a single \a
/// ValueAsMetadata object, so the lookup maps are far more efficient than
/// those using ValueHandleBase.
class ValueAsMetadata : public Metadata, ReplaceableMetadataImpl {
  friend class ReplaceableMetadataImpl;
  friend class LLVMContextImpl;

  // The wrapped value; constructor asserts it is non-null.
  Value *V;

  /// \brief Drop users without RAUW (during teardown).
  void dropUsers() {
    ReplaceableMetadataImpl::resolveAllUses(/* ResolveUsers */ false);
  }

protected:
  ValueAsMetadata(unsigned ID, Value *V)
      : Metadata(ID, Uniqued), ReplaceableMetadataImpl(V->getContext()), V(V) {
    assert(V && "Expected valid value");
  }
  ~ValueAsMetadata() = default;

public:
  static ValueAsMetadata *get(Value *V);
  // Convenience wrappers around get() that cast to the concrete subclass.
  static ConstantAsMetadata *getConstant(Value *C) {
    return cast<ConstantAsMetadata>(get(C));
  }
  static LocalAsMetadata *getLocal(Value *Local) {
    return cast<LocalAsMetadata>(get(Local));
  }

  static ValueAsMetadata *getIfExists(Value *V);
  static ConstantAsMetadata *getConstantIfExists(Value *C) {
    return cast_or_null<ConstantAsMetadata>(getIfExists(C));
  }
  static LocalAsMetadata *getLocalIfExists(Value *Local) {
    return cast_or_null<LocalAsMetadata>(getIfExists(Local));
  }

  Value *getValue() const { return V; }
  Type *getType() const { return V->getType(); }
  LLVMContext &getContext() const { return V->getContext(); }

  static void handleDeletion(Value *V);
  static void handleRAUW(Value *From, Value *To);

protected:
  /// \brief Handle collisions after \a Value::replaceAllUsesWith().
  ///
  /// RAUW isn't supported directly for \a ValueAsMetadata, but if the wrapped
  /// \a Value gets RAUW'ed and the target already exists, this is used to
  /// merge the two metadata nodes.
  void replaceAllUsesWith(Metadata *MD) {
    ReplaceableMetadataImpl::replaceAllUsesWith(MD);
  }

public:
  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == LocalAsMetadataKind ||
           MD->getMetadataID() == ConstantAsMetadataKind;
  }
};

class ConstantAsMetadata : public ValueAsMetadata {
  friend class ValueAsMetadata;

  ConstantAsMetadata(Constant *C)
      : ValueAsMetadata(ConstantAsMetadataKind, C) {}

public:
  static ConstantAsMetadata *get(Constant *C) {
    return ValueAsMetadata::getConstant(C);
  }
  static ConstantAsMetadata *getIfExists(Constant *C) {
    return ValueAsMetadata::getConstantIfExists(C);
  }

  // Narrowed accessor: the wrapped Value is known to be a Constant.
  Constant *getValue() const {
    return cast<Constant>(ValueAsMetadata::getValue());
  }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == ConstantAsMetadataKind;
  }
};

class LocalAsMetadata : public ValueAsMetadata {
  friend class ValueAsMetadata;

  LocalAsMetadata(Value *Local)
      : ValueAsMetadata(LocalAsMetadataKind, Local) {
    assert(!isa<Constant>(Local) && "Expected local value");
  }

public:
  static LocalAsMetadata *get(Value *Local) {
    return ValueAsMetadata::getLocal(Local);
  }
  static LocalAsMetadata *getIfExists(Value *Local) {
    return ValueAsMetadata::getLocalIfExists(Local);
  }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == LocalAsMetadataKind;
  }
};

/// \brief Transitional API for extracting constants from Metadata.
///
/// This namespace contains transitional functions for metadata that points to
/// \a Constants.
///
/// In prehistory -- when metadata was a subclass of \a Value -- \a MDNode
/// operands could refer to any \a Value. There's was a lot of code like this:
///
/// \code
///     MDNode *N = ...;
///     auto *CI = dyn_cast<ConstantInt>(N->getOperand(2));
/// \endcode
///
/// Now that \a Value and \a Metadata are in separate hierarchies, maintaining
/// the semantics for \a isa(), \a cast(), \a dyn_cast() (etc.) requires three
/// steps: cast in the \a Metadata hierarchy, extraction of the \a Value, and
/// cast in the \a Value hierarchy. Besides creating boiler-plate, this
/// requires subtle control flow changes.
///
/// The end-goal is to create a new type of metadata, called (e.g.) \a MDInt,
/// so that metadata can refer to numbers without traversing a bridge to the \a
/// Value hierarchy. In this final state, the code above would look like this:
///
/// \code
///     MDNode *N = ...;
///     auto *MI = dyn_cast<MDInt>(N->getOperand(2));
/// \endcode
///
/// The API in this namespace supports the transition. \a MDInt doesn't exist
/// yet, and even once it does, changing each metadata schema to use it is its
/// own mini-project. In the meantime this API prevents us from introducing
/// complex and bug-prone control flow that will disappear in the end. In
/// particular, the above code looks like this:
///
/// \code
///     MDNode *N = ...;
///     auto *CI = mdconst::dyn_extract<ConstantInt>(N->getOperand(2));
/// \endcode
///
/// The full set of provided functions includes:
///
///   mdconst::hasa                <=> isa
///   mdconst::extract             <=> cast
///   mdconst::extract_or_null     <=> cast_or_null
///   mdconst::dyn_extract         <=> dyn_cast
///   mdconst::dyn_extract_or_null <=> dyn_cast_or_null
///
/// The target of the cast must be a subclass of \a Constant.
namespace mdconst {

namespace detail {
// Declared-only helper: used inside unevaluated sizeof() contexts to
// manufacture a T& without requiring T to be constructible.
template <class T> T &make();
// SFINAE trait: true iff *make<T>() converts to Result (i.e. T dereferences
// to something usable as Result).
template <class T, class Result> struct HasDereference {
  typedef char Yes[1];
  typedef char No[2];
  template <size_t N> struct SFINAE {};

  template <class U, class V>
  static Yes &hasDereference(SFINAE<sizeof(static_cast<V>(*make<U>()))> * = 0);
  template <class U, class V> static No &hasDereference(...);

  static const bool value =
      sizeof(hasDereference<T, Result>(nullptr)) == sizeof(Yes);
};
// Valid pointer-like argument: target V derives from Constant and M
// dereferences to a Metadata.
template <class V, class M> struct IsValidPointer {
  static const bool value = std::is_base_of<Constant, V>::value &&
                            HasDereference<M, const Metadata &>::value;
};
// Valid reference argument: target V derives from Constant and M converts
// to a Metadata reference.
template <class V, class M> struct IsValidReference {
  static const bool value = std::is_base_of<Constant, V>::value &&
                            std::is_convertible<M, const Metadata &>::value;
};
} // end namespace detail

/// \brief Check whether Metadata has a Value.
///
/// As an analogue to \a isa(), check whether \c MD has an \a Value inside of
/// type \c X.
template <class X, class Y>
inline typename std::enable_if<detail::IsValidPointer<X, Y>::value, bool>::type
hasa(Y &&MD) {
  assert(MD && "Null pointer sent into hasa");
  if (auto *V = dyn_cast<ConstantAsMetadata>(MD))
    return isa<X>(V->getValue());
  return false;
}
template <class X, class Y>
inline
    typename std::enable_if<detail::IsValidReference<X, Y &>::value, bool>::type
    hasa(Y &MD) {
  return hasa(&MD);
}

/// \brief Extract a Value from Metadata.
///
/// As an analogue to \a cast(), extract the \a Value subclass \c X from \c MD.
template <class X, class Y>
inline typename std::enable_if<detail::IsValidPointer<X, Y>::value, X *>::type
extract(Y &&MD) {
  return cast<X>(cast<ConstantAsMetadata>(MD)->getValue());
}
template <class X, class Y>
inline
    typename std::enable_if<detail::IsValidReference<X, Y &>::value, X *>::type
    extract(Y &MD) {
  return extract(&MD);
}

/// \brief Extract a Value from Metadata, allowing null.
///
/// As an analogue to \a cast_or_null(), extract the \a Value subclass \c X
/// from \c MD, allowing \c MD to be null.
template <class X, class Y>
inline typename std::enable_if<detail::IsValidPointer<X, Y>::value, X *>::type
extract_or_null(Y &&MD) {
  if (auto *V = cast_or_null<ConstantAsMetadata>(MD))
    return cast<X>(V->getValue());
  return nullptr;
}

/// \brief Extract a Value from Metadata, if any.
///
/// As an analogue to \a dyn_cast_or_null(), extract the \a Value subclass \c X
/// from \c MD, return null if \c MD doesn't contain a \a Value or if the \a
/// Value it does contain is of the wrong subclass.
template <class X, class Y>
inline typename std::enable_if<detail::IsValidPointer<X, Y>::value, X *>::type
dyn_extract(Y &&MD) {
  if (auto *V = dyn_cast<ConstantAsMetadata>(MD))
    return dyn_cast<X>(V->getValue());
  return nullptr;
}

/// \brief Extract a Value from Metadata, if any, allowing null.
///
/// As an analogue to \a dyn_cast_or_null(), extract the \a Value subclass \c X
/// from \c MD, return null if \c MD doesn't contain a \a Value or if the \a
/// Value it does contain is of the wrong subclass, allowing \c MD to be null.
template <class X, class Y>
inline typename std::enable_if<detail::IsValidPointer<X, Y>::value, X *>::type
dyn_extract_or_null(Y &&MD) {
  if (auto *V = dyn_cast_or_null<ConstantAsMetadata>(MD))
    return dyn_cast<X>(V->getValue());
  return nullptr;
}

} // end namespace mdconst

//===----------------------------------------------------------------------===//
/// \brief A single uniqued string.
///
/// These are used to efficiently contain a byte sequence for metadata.
///
/// MDString is always unnamed.
class MDString : public Metadata {
  friend class StringMapEntry<MDString>;

  MDString(const MDString &) = delete;
  MDString &operator=(MDString &&) = delete;
  MDString &operator=(const MDString &) = delete;

  // Backing StringMap entry; presumably owns the character data returned by
  // getString() -- defined out of line, so confirm against Metadata.cpp.
  StringMapEntry<MDString> *Entry;
  MDString() : Metadata(MDStringKind, Uniqued), Entry(nullptr) {}
  // NOTE(review): the move constructor deliberately leaves Entry
  // uninitialized; only StringMapEntry (a friend) may move-construct and is
  // expected to fix up Entry afterwards.
  MDString(MDString &&) : Metadata(MDStringKind, Uniqued) {}

public:
  static MDString *get(LLVMContext &Context, StringRef Str);
  static MDString *get(LLVMContext &Context, const char *Str) {
    return get(Context, Str ? StringRef(Str) : StringRef());
  }

  StringRef getString() const;

  unsigned getLength() const { return (unsigned)getString().size(); }

  typedef StringRef::iterator iterator;

  /// \brief Pointer to the first byte of the string.
  iterator begin() const { return getString().begin(); }

  /// \brief Pointer to one byte past the end of the string.
  iterator end() const { return getString().end(); }

  const unsigned char *bytes_begin() const { return getString().bytes_begin(); }
  const unsigned char *bytes_end() const { return getString().bytes_end(); }

  /// \brief Methods for support type inquiry through isa, cast, and dyn_cast.
  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == MDStringKind;
  }
};

/// \brief A collection of metadata nodes that might be associated with a
/// memory access used by the alias-analysis infrastructure.
struct AAMDNodes {
  explicit AAMDNodes(MDNode *T = nullptr, MDNode *S = nullptr,
                     MDNode *N = nullptr)
      : TBAA(T), Scope(S), NoAlias(N) {}

  bool operator==(const AAMDNodes &A) const {
    return TBAA == A.TBAA && Scope == A.Scope && NoAlias == A.NoAlias;
  }

  bool operator!=(const AAMDNodes &A) const { return !(*this == A); }

  // True if any of the three tags is present.
  explicit operator bool() const { return TBAA || Scope || NoAlias; }

  /// \brief The tag for type-based alias analysis.
  MDNode *TBAA;

  /// \brief The tag for alias scope specification (used with noalias).
  MDNode *Scope;

  /// \brief The tag specifying the noalias scope.
  MDNode *NoAlias;
};

// Specialize DenseMapInfo for AAMDNodes.
template<> struct DenseMapInfo<AAMDNodes> { static inline AAMDNodes getEmptyKey() { return AAMDNodes(DenseMapInfo<MDNode *>::getEmptyKey(), 0, 0); } static inline AAMDNodes getTombstoneKey() { return AAMDNodes(DenseMapInfo<MDNode *>::getTombstoneKey(), 0, 0); } static unsigned getHashValue(const AAMDNodes &Val) { return DenseMapInfo<MDNode *>::getHashValue(Val.TBAA) ^ DenseMapInfo<MDNode *>::getHashValue(Val.Scope) ^ DenseMapInfo<MDNode *>::getHashValue(Val.NoAlias); } static bool isEqual(const AAMDNodes &LHS, const AAMDNodes &RHS) { return LHS == RHS; } }; /// \brief Tracking metadata reference owned by Metadata. /// /// Similar to \a TrackingMDRef, but it's expected to be owned by an instance /// of \a Metadata, which has the option of registering itself for callbacks to /// re-unique itself. /// /// In particular, this is used by \a MDNode. class MDOperand { MDOperand(MDOperand &&) = delete; MDOperand(const MDOperand &) = delete; MDOperand &operator=(MDOperand &&) = delete; MDOperand &operator=(const MDOperand &) = delete; Metadata *MD; public: MDOperand() : MD(nullptr) {} ~MDOperand() { untrack(); } Metadata *get() const { return MD; } operator Metadata *() const { return get(); } Metadata *operator->() const { return get(); } Metadata &operator*() const { return *get(); } void reset() { untrack(); MD = nullptr; } void reset(Metadata *MD, Metadata *Owner) { untrack(); this->MD = MD; track(Owner); } private: void track(Metadata *Owner) { if (MD) { if (Owner) MetadataTracking::track(this, *MD, *Owner); else MetadataTracking::track(MD); } } void untrack() { assert(static_cast<void *>(this) == &MD && "Expected same address"); if (MD) MetadataTracking::untrack(MD); } }; template <> struct simplify_type<MDOperand> { typedef Metadata *SimpleType; static SimpleType getSimplifiedValue(MDOperand &MD) { return MD.get(); } }; template <> struct simplify_type<const MDOperand> { typedef Metadata *SimpleType; static SimpleType getSimplifiedValue(const MDOperand &MD) { return 
MD.get(); } }; /// \brief Pointer to the context, with optional RAUW support. /// /// Either a raw (non-null) pointer to the \a LLVMContext, or an owned pointer /// to \a ReplaceableMetadataImpl (which has a reference to \a LLVMContext). class ContextAndReplaceableUses { PointerUnion<LLVMContext *, ReplaceableMetadataImpl *> Ptr; ContextAndReplaceableUses() = delete; ContextAndReplaceableUses(ContextAndReplaceableUses &&) = delete; ContextAndReplaceableUses(const ContextAndReplaceableUses &) = delete; ContextAndReplaceableUses &operator=(ContextAndReplaceableUses &&) = delete; ContextAndReplaceableUses & operator=(const ContextAndReplaceableUses &) = delete; public: ContextAndReplaceableUses(LLVMContext &Context) : Ptr(&Context) {} ContextAndReplaceableUses( std::unique_ptr<ReplaceableMetadataImpl> ReplaceableUses) : Ptr(ReplaceableUses.release()) { assert(getReplaceableUses() && "Expected non-null replaceable uses"); } ~ContextAndReplaceableUses() { delete getReplaceableUses(); } operator LLVMContext &() { return getContext(); } /// \brief Whether this contains RAUW support. bool hasReplaceableUses() const { return Ptr.is<ReplaceableMetadataImpl *>(); } LLVMContext &getContext() const { if (hasReplaceableUses()) return getReplaceableUses()->getContext(); return *Ptr.get<LLVMContext *>(); } ReplaceableMetadataImpl *getReplaceableUses() const { if (hasReplaceableUses()) return Ptr.get<ReplaceableMetadataImpl *>(); return nullptr; } /// \brief Assign RAUW support to this. /// /// Make this replaceable, taking ownership of \c ReplaceableUses (which must /// not be null). void makeReplaceable(std::unique_ptr<ReplaceableMetadataImpl> ReplaceableUses) { assert(ReplaceableUses && "Expected non-null replaceable uses"); assert(&ReplaceableUses->getContext() == &getContext() && "Expected same context"); delete getReplaceableUses(); Ptr = ReplaceableUses.release(); } /// \brief Drop RAUW support. /// /// Cede ownership of RAUW support, returning it. 
// Transfer ownership of the replaceable-uses side table out of this
// reference, collapsing the pointer union back to the bare LLVMContext.
// (Member of ContextAndReplaceableUses; the class header precedes this chunk.)
std::unique_ptr<ReplaceableMetadataImpl> takeReplaceableUses() {
  assert(hasReplaceableUses() && "Expected to own replaceable uses");
  std::unique_ptr<ReplaceableMetadataImpl> ReplaceableUses(
      getReplaceableUses());
  Ptr = &ReplaceableUses->getContext();
  return ReplaceableUses;
}
};

/// Deleter used by the Temp##CLASS unique_ptr typedefs generated below;
/// forwards to MDNode::deleteTemporary() (defined inline after MDTuple).
struct TempMDNodeDeleter {
  inline void operator()(MDNode *Node) const;
};

// Generate TempMDTuple, TempDILocation, ...: owning smart-pointer typedefs
// for temporary (RAUW-able) nodes of each concrete MDNode subclass.
#define HANDLE_MDNODE_LEAF(CLASS)                                              \
  typedef std::unique_ptr<CLASS, TempMDNodeDeleter> Temp##CLASS;
#define HANDLE_MDNODE_BRANCH(CLASS) HANDLE_MDNODE_LEAF(CLASS)
#include "llvm/IR/Metadata.def"

/// \brief Metadata node.
///
/// Metadata nodes can be uniqued, like constants, or distinct. Temporary
/// metadata nodes (with full support for RAUW) can be used to delay uniquing
/// until forward references are known. The basic metadata node is an \a
/// MDTuple.
///
/// There is limited support for RAUW at construction time. At construction
/// time, if any operand is a temporary node (or an unresolved uniqued node,
/// which indicates a transitive temporary operand), the node itself will be
/// unresolved. As soon as all operands become resolved, it will drop RAUW
/// support permanently.
///
/// If an unresolved node is part of a cycle, \a resolveCycles() needs
/// to be called on some member of the cycle once all temporary nodes have been
/// replaced.
class MDNode : public Metadata {
  friend class ReplaceableMetadataImpl;
  friend class LLVMContextImpl;

  MDNode(const MDNode &) = delete;
  void operator=(const MDNode &) = delete;
  // Plain new is forbidden; allocation must go through the
  // (size_t, unsigned NumOps) form so operand storage is co-allocated.
  void *operator new(size_t) = delete;

  unsigned NumOperands;
  // Count of operands still unresolved; RAUW support is dropped permanently
  // once this reaches zero on a uniqued node.
  unsigned NumUnresolved;

protected:
  // Either the LLVMContext, or (while unresolved) the replaceable-uses table.
  ContextAndReplaceableUses Context;

  // Operands are hung off BEFORE the node object itself; see mutable_begin().
  void *operator new(size_t Size, unsigned NumOps);
  void operator delete(void *Mem);

  /// \brief Required by std, but never called.
  void operator delete(void *Mem, unsigned) {
    //llvm_unreachable("Constructor throws?"); // HLSL Change - why, yes; yes it does (under OOM)
    MDNode::operator delete(Mem);
  }

  /// \brief Required by std, but never called.
  void operator delete(void *, unsigned, bool) {
    llvm_unreachable("Constructor throws?");
  }

  MDNode(LLVMContext &Context, unsigned ID, StorageType Storage,
         ArrayRef<Metadata *> Ops1, ArrayRef<Metadata *> Ops2 = None);
  ~MDNode() = default;

  void dropAllReferences();

  // Operand storage is the NumOperands MDOperands laid out immediately
  // before 'this' in memory.
  MDOperand *mutable_begin() { return mutable_end() - NumOperands; }
  MDOperand *mutable_end() { return reinterpret_cast<MDOperand *>(this); }

  typedef iterator_range<MDOperand *> mutable_op_range;
  mutable_op_range mutable_operands() {
    return mutable_op_range(mutable_begin(), mutable_end());
  }

public:
  // Convenience forwarders to MDTuple; defined inline after MDTuple below.
  static inline MDTuple *get(LLVMContext &Context, ArrayRef<Metadata *> MDs);
  static inline MDTuple *getIfExists(LLVMContext &Context,
                                     ArrayRef<Metadata *> MDs);
  static inline MDTuple *getDistinct(LLVMContext &Context,
                                     ArrayRef<Metadata *> MDs);
  static inline TempMDTuple getTemporary(LLVMContext &Context,
                                         ArrayRef<Metadata *> MDs);

  /// \brief Create a (temporary) clone of this.
  TempMDNode clone() const;

  /// \brief Deallocate a node created by getTemporary.
  ///
  /// Calls \c replaceAllUsesWith(nullptr) before deleting, so any remaining
  /// references will be reset.
  static void deleteTemporary(MDNode *N);

  LLVMContext &getContext() const { return Context.getContext(); }

  /// \brief Replace a specific operand.
  void replaceOperandWith(unsigned I, Metadata *New);

  /// \brief Check if node is fully resolved.
  ///
  /// If \a isTemporary(), this always returns \c false; if \a isDistinct(),
  /// this always returns \c true.
  ///
  /// If \a isUniqued(), returns \c true if this has already dropped RAUW
  /// support (because all operands are resolved).
  ///
  /// As forward declarations are resolved, their containers should get
  /// resolved automatically. However, if this (or one of its operands) is
  /// involved in a cycle, \a resolveCycles() needs to be called explicitly.
  bool isResolved() const { return !Context.hasReplaceableUses(); }

  bool isUniqued() const { return Storage == Uniqued; }
  bool isDistinct() const { return Storage == Distinct; }
  bool isTemporary() const { return Storage == Temporary; }

  /// \brief RAUW a temporary.
  ///
  /// \pre \a isTemporary() must be \c true.
  void replaceAllUsesWith(Metadata *MD) {
    assert(isTemporary() && "Expected temporary node");
    assert(!isResolved() && "Expected RAUW support");
    Context.getReplaceableUses()->replaceAllUsesWith(MD);
  }

  /// \brief Resolve cycles.
  ///
  /// Once all forward declarations have been resolved, force cycles to be
  /// resolved.
  ///
  /// \pre No operands (or operands' operands, etc.) have \a isTemporary().
  void resolveCycles();

  /// \brief Replace a temporary node with a permanent one.
  ///
  /// Try to create a uniqued version of \c N -- in place, if possible -- and
  /// return it. If \c N cannot be uniqued, return a distinct node instead.
  template <class T>
  static typename std::enable_if<std::is_base_of<MDNode, T>::value, T *>::type
  replaceWithPermanent(std::unique_ptr<T, TempMDNodeDeleter> N) {
    return cast<T>(N.release()->replaceWithPermanentImpl());
  }

  /// \brief Replace a temporary node with a uniqued one.
  ///
  /// Create a uniqued version of \c N -- in place, if possible -- and return
  /// it. Takes ownership of the temporary node.
  ///
  /// \pre N does not self-reference.
  template <class T>
  static typename std::enable_if<std::is_base_of<MDNode, T>::value, T *>::type
  replaceWithUniqued(std::unique_ptr<T, TempMDNodeDeleter> N) {
    return cast<T>(N.release()->replaceWithUniquedImpl());
  }

  /// \brief Replace a temporary node with a distinct one.
  ///
  /// Create a distinct version of \c N -- in place, if possible -- and return
  /// it. Takes ownership of the temporary node.
  template <class T>
  static typename std::enable_if<std::is_base_of<MDNode, T>::value, T *>::type
  replaceWithDistinct(std::unique_ptr<T, TempMDNodeDeleter> N) {
    return cast<T>(N.release()->replaceWithDistinctImpl());
  }

private:
  MDNode *replaceWithPermanentImpl();
  MDNode *replaceWithUniquedImpl();
  MDNode *replaceWithDistinctImpl();

protected:
  /// \brief Set an operand.
  ///
  /// Sets the operand directly, without worrying about uniquing.
  void setOperand(unsigned I, Metadata *New);

  void storeDistinctInContext();
  template <class T, class StoreT>
  static T *storeImpl(T *N, StorageType Storage, StoreT &Store);

private:
  void handleChangedOperand(void *Ref, Metadata *New);

  void resolve();
  void resolveAfterOperandChange(Metadata *Old, Metadata *New);
  void decrementUnresolvedOperandCount();
  unsigned countUnresolvedOperands();

  /// \brief Mutate this to be "uniqued".
  ///
  /// Mutate this so that \a isUniqued().
  /// \pre \a isTemporary().
  /// \pre already added to uniquing set.
  void makeUniqued();

  /// \brief Mutate this to be "distinct".
  ///
  /// Mutate this so that \a isDistinct().
  /// \pre \a isTemporary().
  void makeDistinct();

public: // HLSL Change - make deleteAsSubclass accessible
  void deleteAsSubclass();
private:
  MDNode *uniquify();
  void eraseFromStore();

  // Trait detecting subclasses that cache a hash (e.g. MDTuple); the
  // dispatch helpers below recompute/reset the hash only when one exists.
  template <class NodeTy> struct HasCachedHash;
  template <class NodeTy>
  static void dispatchRecalculateHash(NodeTy *N, std::true_type) {
    N->recalculateHash();
  }
  template <class NodeTy>
  static void dispatchRecalculateHash(NodeTy *N, std::false_type) {}
  template <class NodeTy>
  static void dispatchResetHash(NodeTy *N, std::true_type) {
    N->setHash(0);
  }
  template <class NodeTy>
  static void dispatchResetHash(NodeTy *N, std::false_type) {}

public:
  typedef const MDOperand *op_iterator;
  typedef iterator_range<op_iterator> op_range;

  op_iterator op_begin() const {
    return const_cast<MDNode *>(this)->mutable_begin();
  }
  op_iterator op_end() const {
    return const_cast<MDNode *>(this)->mutable_end();
  }
  op_range operands() const { return op_range(op_begin(), op_end()); }

  const MDOperand &getOperand(unsigned I) const {
    assert(I < NumOperands && "Out of range");
    return op_begin()[I];
  }

  /// \brief Return number of MDNode operands.
  unsigned getNumOperands() const { return NumOperands; }

  /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Metadata *MD) {
    switch (MD->getMetadataID()) {
    default:
      return false;
#define HANDLE_MDNODE_LEAF(CLASS)                                              \
  case CLASS##Kind:                                                            \
    return true;
#include "llvm/IR/Metadata.def"
    }
  }

  /// \brief Check whether MDNode is a vtable access.
  bool isTBAAVtableAccess() const;

  /// \brief Methods for metadata merging.
  static MDNode *concatenate(MDNode *A, MDNode *B);
  static MDNode *intersect(MDNode *A, MDNode *B);
  static MDNode *getMostGenericTBAA(MDNode *A, MDNode *B);
  static MDNode *getMostGenericFPMath(MDNode *A, MDNode *B);
  static MDNode *getMostGenericRange(MDNode *A, MDNode *B);
  static MDNode *getMostGenericAliasScope(MDNode *A, MDNode *B);

  /// \brief Methods to print body of node, ie. without the '<addr> = ' prefix
  void printAsBody(raw_ostream &OS, const Module *M = nullptr) const; // HLSL Change
  void printAsBody(raw_ostream &OS, ModuleSlotTracker &MST,
                   const Module *M = nullptr) const; // HLSL Change
};

/// \brief Tuple of metadata.
///
/// This is the simple \a MDNode arbitrary tuple. Nodes are uniqued by
/// default based on their operands.
class MDTuple : public MDNode {
  friend class LLVMContextImpl;
  friend class MDNode;

  MDTuple(LLVMContext &C, StorageType Storage, unsigned Hash,
          ArrayRef<Metadata *> Vals)
      : MDNode(C, MDTupleKind, Storage, Vals) {
    setHash(Hash);
  }
  ~MDTuple() { dropAllReferences(); }

  // The operand hash is cached in Metadata's SubclassData32 field.
  void setHash(unsigned Hash) { SubclassData32 = Hash; }
  void recalculateHash();

  static MDTuple *getImpl(LLVMContext &Context, ArrayRef<Metadata *> MDs,
                          StorageType Storage, bool ShouldCreate = true);

  TempMDTuple cloneImpl() const {
    return getTemporary(getContext(),
                        SmallVector<Metadata *, 4>(op_begin(), op_end()));
  }

public:
  /// \brief Get the hash, if any.
  unsigned getHash() const { return SubclassData32; }

  static MDTuple *get(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
    return getImpl(Context, MDs, Uniqued);
  }
  static MDTuple *getIfExists(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
    return getImpl(Context, MDs, Uniqued, /* ShouldCreate */ false);
  }

  /// \brief Return a distinct node.
  ///
  /// Return a distinct node -- i.e., a node that is not uniqued.
  static MDTuple *getDistinct(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
    return getImpl(Context, MDs, Distinct);
  }

  /// \brief Return a temporary node.
  ///
  /// For use in constructing cyclic MDNode structures. A temporary MDNode is
  /// not uniqued, may be RAUW'd, and must be manually deleted with
  /// deleteTemporary.
  static TempMDTuple getTemporary(LLVMContext &Context,
                                  ArrayRef<Metadata *> MDs) {
    return TempMDTuple(getImpl(Context, MDs, Temporary));
  }

  /// \brief Return a (temporary) clone of this.
  TempMDTuple clone() const { return cloneImpl(); }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == MDTupleKind;
  }
};

// Out-of-line definitions of MDNode's convenience forwarders, now that
// MDTuple is complete.
MDTuple *MDNode::get(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
  return MDTuple::get(Context, MDs);
}
MDTuple *MDNode::getIfExists(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
  return MDTuple::getIfExists(Context, MDs);
}
MDTuple *MDNode::getDistinct(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
  return MDTuple::getDistinct(Context, MDs);
}
TempMDTuple MDNode::getTemporary(LLVMContext &Context,
                                 ArrayRef<Metadata *> MDs) {
  return MDTuple::getTemporary(Context, MDs);
}

void TempMDNodeDeleter::operator()(MDNode *Node) const {
  MDNode::deleteTemporary(Node);
}

/// \brief Typed iterator through MDNode operands.
///
/// An iterator that transforms an \a MDNode::iterator into an iterator over a
/// particular Metadata subclass.
template <class T> class TypedMDOperandIterator {
  MDNode::op_iterator I = nullptr;

public:
  using iterator_category = std::input_iterator_tag;
  using value_type = T *;
  using difference_type = std::ptrdiff_t;
  using pointer = void;
  using reference = T *;

  TypedMDOperandIterator() = default;
  explicit TypedMDOperandIterator(MDNode::op_iterator I) : I(I) {}

  // Null operands yield nullptr rather than asserting (cast_or_null).
  T *operator*() const { return cast_or_null<T>(*I); }
  TypedMDOperandIterator &operator++() {
    ++I;
    return *this;
  }
  TypedMDOperandIterator operator++(int) {
    TypedMDOperandIterator Temp(*this);
    ++I;
    return Temp;
  }
  bool operator==(const TypedMDOperandIterator &X) const { return I == X.I; }
  bool operator!=(const TypedMDOperandIterator &X) const { return I != X.I; }
};

/// \brief Typed, array-like tuple of metadata.
///
/// This is a wrapper for \a MDTuple that makes it act like an array holding a
/// particular type of metadata.
template <class T> class MDTupleTypedArrayWrapper {
  // Underlying tuple; may be null (an empty array).
  const MDTuple *N = nullptr;

public:
  MDTupleTypedArrayWrapper() = default;
  MDTupleTypedArrayWrapper(const MDTuple *N) : N(N) {}

  // Implicit conversion from a wrapper over a convertible element type.
  template <class U>
  MDTupleTypedArrayWrapper(
      const MDTupleTypedArrayWrapper<U> &Other,
      typename std::enable_if<std::is_convertible<U *, T *>::value>::type * =
          nullptr)
      : N(Other.get()) {}

  // Explicit (checked-by-caller) conversion from a non-convertible type.
  template <class U>
  explicit MDTupleTypedArrayWrapper(
      const MDTupleTypedArrayWrapper<U> &Other,
      typename std::enable_if<!std::is_convertible<U *, T *>::value>::type * =
          nullptr)
      : N(Other.get()) {}

  explicit operator bool() const { return get(); }
  explicit operator MDTuple *() const { return get(); }

  MDTuple *get() const { return const_cast<MDTuple *>(N); }
  MDTuple *operator->() const { return get(); }
  MDTuple &operator*() const { return *get(); }

  // FIXME: Fix callers and remove condition on N.
  unsigned size() const { return N ? N->getNumOperands() : 0u; }
  // NOTE(review): unlike size(), this does not guard against null N;
  // indexing an empty array dereferences null.
  T *operator[](unsigned I) const { return cast_or_null<T>(N->getOperand(I)); }

  // FIXME: Fix callers and remove condition on N.
  typedef TypedMDOperandIterator<T> iterator;
  iterator begin() const { return N ? iterator(N->op_begin()) : iterator(); }
  iterator end() const { return N ? iterator(N->op_end()) : iterator(); }
};

// Generate a CLASS##Array wrapper typedef for every metadata class.
#define HANDLE_METADATA(CLASS)                                                 \
  typedef MDTupleTypedArrayWrapper<CLASS> CLASS##Array;
#include "llvm/IR/Metadata.def"

// //
///////////////////////////////////////////////////////////////////////////////

/// \brief A tuple of MDNodes.
///
/// Despite its name, a NamedMDNode isn't itself an MDNode. NamedMDNodes belong
/// to modules, have names, and contain lists of MDNodes.
///
/// TODO: Inherit from Metadata.
class NamedMDNode : public ilist_node<NamedMDNode> {
  friend class SymbolTableListTraits<NamedMDNode, Module>;
  friend struct ilist_traits<NamedMDNode>;
  friend class LLVMContextImpl;
  friend class Module;
  NamedMDNode(const NamedMDNode &) = delete;

  std::string Name;
  Module *Parent;
  // Type-erased operand storage; kept as void* so this header need not see
  // TrackingMDRef's full definition.
  void *Operands; // SmallVector<TrackingMDRef, 4>

  void setParent(Module *M) { Parent = M; }

  explicit NamedMDNode(const Twine &N);

  // Index-based bidirectional iterator over operands, parameterized on
  // constness via T1 (result type) / T2 (value_type).
  template<class T1, class T2>
  class op_iterator_impl {
    const NamedMDNode *Node;
    unsigned Idx;
    op_iterator_impl(const NamedMDNode *N, unsigned i) : Node(N), Idx(i) { }

    friend class NamedMDNode;

  public:
    using iterator_category = std::bidirectional_iterator_tag;
    using value_type = T2;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type *;
    using reference = value_type &;

    op_iterator_impl() : Node(nullptr), Idx(0) { }

    // NOTE(review): equality compares Idx only, not Node -- comparing
    // iterators from different NamedMDNodes is not meaningful.
    bool operator==(const op_iterator_impl &o) const { return Idx == o.Idx; }
    bool operator!=(const op_iterator_impl &o) const { return Idx != o.Idx; }

    op_iterator_impl &operator++() {
      ++Idx;
      return *this;
    }
    op_iterator_impl operator++(int) {
      op_iterator_impl tmp(*this);
      operator++();
      return tmp;
    }
    op_iterator_impl &operator--() {
      --Idx;
      return *this;
    }
    op_iterator_impl operator--(int) {
      op_iterator_impl tmp(*this);
      operator--();
      return tmp;
    }

    // Returns the operand by value (an MDNode* for both iterator flavors).
    T1 operator*() const { return Node->getOperand(Idx); }
  };

public:
  /// \brief Drop all references and remove the node from parent module.
  void eraseFromParent();

  /// \brief Remove all uses and clear node vector.
  void dropAllReferences();

  ~NamedMDNode();

  /// \brief Get the module that holds this named metadata collection.
  inline Module *getParent() { return Parent; }
  inline const Module *getParent() const { return Parent; }

  MDNode *getOperand(unsigned i) const;
  unsigned getNumOperands() const;
  void addOperand(MDNode *M);
  void setOperand(unsigned I, MDNode *New);
  StringRef getName() const;
  void print(raw_ostream &ROS) const;
  void dump() const;

  // ---------------------------------------------------------------------------
  // Operand Iterator interface...
  //
  typedef op_iterator_impl<MDNode *, MDNode> op_iterator;

  op_iterator op_begin() { return op_iterator(this, 0); }
  op_iterator op_end()   { return op_iterator(this, getNumOperands()); }

  typedef op_iterator_impl<const MDNode *, MDNode> const_op_iterator;

  const_op_iterator op_begin() const { return const_op_iterator(this, 0); }
  const_op_iterator op_end()   const {
    return const_op_iterator(this, getNumOperands());
  }

  inline iterator_range<op_iterator> operands() {
    return iterator_range<op_iterator>(op_begin(), op_end());
  }
  inline iterator_range<const_op_iterator> operands() const {
    return iterator_range<const_op_iterator>(op_begin(), op_end());
  }
};

} // end llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/DebugLoc.h
//===- DebugLoc.h - Debug Location Information ------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines a number of light weight data structures used // to describe and track debug location information. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_DEBUGLOC_H #define LLVM_IR_DEBUGLOC_H #include "llvm/IR/TrackingMDRef.h" #include "llvm/Support/DataTypes.h" namespace llvm { class LLVMContext; class raw_ostream; class DILocation; /// \brief A debug info location. /// /// This class is a wrapper around a tracking reference to an \a DILocation /// pointer. /// /// To avoid extra includes, \a DebugLoc doubles the \a DILocation API with a /// one based on relatively opaque \a MDNode pointers. class DebugLoc { TrackingMDNodeRef Loc; public: DebugLoc() {} DebugLoc(DebugLoc &&X) : Loc(std::move(X.Loc)) {} DebugLoc(const DebugLoc &X) : Loc(X.Loc) {} DebugLoc &operator=(DebugLoc &&X) { Loc = std::move(X.Loc); return *this; } DebugLoc &operator=(const DebugLoc &X) { Loc = X.Loc; return *this; } /// \brief Construct from an \a DILocation. DebugLoc(const DILocation *L); /// \brief Construct from an \a MDNode. /// /// Note: if \c N is not an \a DILocation, a verifier check will fail, and /// accessors will crash. However, construction from other nodes is /// supported in order to handle forward references when reading textual /// IR. explicit DebugLoc(const MDNode *N); /// \brief Get the underlying \a DILocation. /// /// \pre !*this or \c isa<DILocation>(getAsMDNode()). /// @{ DILocation *get() const; operator DILocation *() const { return get(); } DILocation *operator->() const { return get(); } DILocation &operator*() const { return *get(); } /// @} /// \brief Check for null. 
/// /// Check for null in a way that is safe with broken debug info. Unlike /// the conversion to \c DILocation, this doesn't require that \c Loc is of /// the right type. Important for cases like \a llvm::StripDebugInfo() and /// \a Instruction::hasMetadata(). explicit operator bool() const { return Loc; } /// \brief Check whether this has a trivial destructor. bool hasTrivialDestructor() const { return Loc.hasTrivialDestructor(); } /// \brief Create a new DebugLoc. /// /// Create a new DebugLoc at the specified line/col and scope/inline. This /// forwards to \a DILocation::get(). /// /// If \c !Scope, returns a default-constructed \a DebugLoc. /// /// FIXME: Remove this. Users should use DILocation::get(). static DebugLoc get(unsigned Line, unsigned Col, const MDNode *Scope, const MDNode *InlinedAt = nullptr); unsigned getLine() const; unsigned getCol() const; MDNode *getScope() const; DILocation *getInlinedAt() const; /// \brief Get the fully inlined-at scope for a DebugLoc. /// /// Gets the inlined-at scope for a DebugLoc. MDNode *getInlinedAtScope() const; /// \brief Find the debug info location for the start of the function. /// /// Walk up the scope chain of given debug loc and find line number info /// for the function. /// /// FIXME: Remove this. Users should use DILocation/DILocalScope API to /// find the subprogram, and then DILocation::get(). DebugLoc getFnDebugLoc() const; /// \brief Return \c this as a bar \a MDNode. MDNode *getAsMDNode() const { return Loc; } bool operator==(const DebugLoc &DL) const { return Loc == DL.Loc; } bool operator!=(const DebugLoc &DL) const { return Loc != DL.Loc; } void dump() const; /// \brief prints source location /path/to/file.exe:line:col @[inlined at] void print(raw_ostream &OS) const; }; } // end namespace llvm #endif /* LLVM_SUPPORT_DEBUGLOC_H */
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/MDBuilder.h
//===---- llvm/MDBuilder.h - Builder for LLVM metadata ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the MDBuilder class, which is used as a convenient way to
// create LLVM metadata with a consistent and simplified interface.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_MDBUILDER_H
#define LLVM_IR_MDBUILDER_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataTypes.h"
#include <utility>

namespace llvm {

class APInt;
template <typename T> class ArrayRef;
class LLVMContext;
class Constant;
class ConstantAsMetadata;
class MDNode;
class MDString;

class MDBuilder {
  // All created nodes are uniqued/allocated in this context.
  LLVMContext &Context;

public:
  MDBuilder(LLVMContext &context) : Context(context) {}

  /// \brief Return the given string as metadata.
  MDString *createString(StringRef Str);

  /// \brief Return the given constant as metadata.
  ConstantAsMetadata *createConstant(Constant *C);

  //===------------------------------------------------------------------===//
  // FPMath metadata.
  //===------------------------------------------------------------------===//

  /// \brief Return metadata with the given settings.  The special value 0.0
  /// for the Accuracy parameter indicates the default (maximal precision)
  /// setting.
  MDNode *createFPMath(float Accuracy);

  //===------------------------------------------------------------------===//
  // Prof metadata.
  //===------------------------------------------------------------------===//

  /// \brief Return metadata containing two branch weights.
  MDNode *createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight);

  /// \brief Return metadata containing a number of branch weights.
  MDNode *createBranchWeights(ArrayRef<uint32_t> Weights);

  /// Return metadata containing the entry count for a function.
  MDNode *createFunctionEntryCount(uint64_t Count);

  //===------------------------------------------------------------------===//
  // Range metadata.
  //===------------------------------------------------------------------===//

  /// \brief Return metadata describing the range [Lo, Hi).
  MDNode *createRange(const APInt &Lo, const APInt &Hi);

  /// \brief Return metadata describing the range [Lo, Hi).
  MDNode *createRange(Constant *Lo, Constant *Hi);

  //===------------------------------------------------------------------===//
  // AA metadata.
  //===------------------------------------------------------------------===//

protected:
  /// \brief Return metadata appropriate for a AA root node (scope or TBAA).
  /// Each returned node is distinct from all other metadata and will never
  /// be identified (uniqued) with anything else.
  MDNode *createAnonymousAARoot(StringRef Name = StringRef(),
                                MDNode *Extra = nullptr);

public:
  /// \brief Return metadata appropriate for a TBAA root node. Each returned
  /// node is distinct from all other metadata and will never be identified
  /// (uniqued) with anything else.
  MDNode *createAnonymousTBAARoot() {
    return createAnonymousAARoot();
  }

  /// \brief Return metadata appropriate for an alias scope domain node.
  /// Each returned node is distinct from all other metadata and will never
  /// be identified (uniqued) with anything else.
  MDNode *createAnonymousAliasScopeDomain(StringRef Name = StringRef()) {
    return createAnonymousAARoot(Name);
  }

  /// \brief Return metadata appropriate for an alias scope root node.
  /// Each returned node is distinct from all other metadata and will never
  /// be identified (uniqued) with anything else.
  MDNode *createAnonymousAliasScope(MDNode *Domain,
                                    StringRef Name = StringRef()) {
    return createAnonymousAARoot(Name, Domain);
  }

  /// \brief Return metadata appropriate for a TBAA root node with the given
  /// name.  This may be identified (uniqued) with other roots with the same
  /// name.
  MDNode *createTBAARoot(StringRef Name);

  /// \brief Return metadata appropriate for an alias scope domain node with
  /// the given name. This may be identified (uniqued) with other roots with
  /// the same name.
  MDNode *createAliasScopeDomain(StringRef Name);

  /// \brief Return metadata appropriate for an alias scope node with
  /// the given name. This may be identified (uniqued) with other scopes with
  /// the same name and domain.
  MDNode *createAliasScope(StringRef Name, MDNode *Domain);

  /// \brief Return metadata for a non-root TBAA node with the given name,
  /// parent in the TBAA tree, and value for 'pointsToConstantMemory'.
  MDNode *createTBAANode(StringRef Name, MDNode *Parent,
                         bool isConstant = false);

  // One field of a tbaa.struct descriptor: byte offset, byte size, and the
  // TBAA tag describing the field's contents.
  struct TBAAStructField {
    uint64_t Offset;
    uint64_t Size;
    MDNode *TBAA;
    TBAAStructField(uint64_t Offset, uint64_t Size, MDNode *TBAA) :
      Offset(Offset), Size(Size), TBAA(TBAA) {}
  };

  /// \brief Return metadata for a tbaa.struct node with the given
  /// struct field descriptions.
  MDNode *createTBAAStructNode(ArrayRef<TBAAStructField> Fields);

  /// \brief Return metadata for a TBAA struct node in the type DAG
  /// with the given name, a list of pairs (offset, field type in the type DAG).
  MDNode *
  createTBAAStructTypeNode(StringRef Name,
                           ArrayRef<std::pair<MDNode *, uint64_t>> Fields);

  /// \brief Return metadata for a TBAA scalar type node with the
  /// given name, an offset and a parent in the TBAA type DAG.
  MDNode *createTBAAScalarTypeNode(StringRef Name, MDNode *Parent,
                                   uint64_t Offset = 0);

  /// \brief Return metadata for a TBAA tag node with the given
  /// base type, access type and offset relative to the base type.
  MDNode *createTBAAStructTagNode(MDNode *BaseType, MDNode *AccessType,
                                  uint64_t Offset, bool IsConstant = false);
};

} // end namespace llvm

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/CFG.h
//===- CFG.h - Process LLVM structures as graphs ----------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines specializations of GraphTraits that allow Function and
// BasicBlock graphs to be treated as proper graphs for generic algorithms.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_CFG_H
#define LLVM_IR_CFG_H

#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"

namespace llvm {

//===----------------------------------------------------------------------===//
// BasicBlock pred_iterator definition
//===----------------------------------------------------------------------===//

// Iterates over a block's predecessors by walking the block's use list and
// keeping only uses that come from terminator instructions.
template <class Ptr, class USE_iterator> // Predecessor Iterator
class PredIterator {
public:
  using iterator_category = std::forward_iterator_tag;
  using value_type = Ptr;
  using difference_type = std::ptrdiff_t;
  using pointer = Ptr *;
  using reference = Ptr *;

private:
  typedef PredIterator<Ptr, USE_iterator> Self;
  USE_iterator It;

  inline void advancePastNonTerminators() {
    // Loop to ignore non-terminator uses (for example BlockAddresses).
    while (!It.atEnd() && !isa<TerminatorInst>(*It))
      ++It;
  }

public:
  PredIterator() {}
  explicit inline PredIterator(Ptr *bb) : It(bb->user_begin()) {
    advancePastNonTerminators();
  }
  inline PredIterator(Ptr *bb, bool) : It(bb->user_end()) {}

  inline bool operator==(const Self& x) const { return It == x.It; }
  inline bool operator!=(const Self& x) const { return !operator==(x); }

  // The predecessor is the parent block of the terminator that uses us.
  inline reference operator*() const {
    assert(!It.atEnd() && "pred_iterator out of range!");
    return cast<TerminatorInst>(*It)->getParent();
  }
  // NOTE(review): returns 'pointer*' (address of the prvalue returned by
  // operator*) -- looks like it would not compile if instantiated; it
  // survives only because no caller uses '->' on pred_iterator. Verify
  // before relying on it.
  inline pointer *operator->() const { return &operator*(); }

  inline Self& operator++() {   // Preincrement
    assert(!It.atEnd() && "pred_iterator out of range!");
    ++It; advancePastNonTerminators();
    return *this;
  }

  inline Self operator++(int) { // Postincrement
    Self tmp = *this; ++*this; return tmp;
  }

  /// getOperandNo - Return the operand number in the predecessor's
  /// terminator of the successor.
  unsigned getOperandNo() const {
    return It.getOperandNo();
  }

  /// getUse - Return the operand Use in the predecessor's terminator
  /// of the successor.
  Use &getUse() const {
    return It.getUse();
  }
};

typedef PredIterator<BasicBlock, Value::user_iterator> pred_iterator;
typedef PredIterator<const BasicBlock,
                     Value::const_user_iterator> const_pred_iterator;
typedef llvm::iterator_range<pred_iterator> pred_range;
typedef llvm::iterator_range<const_pred_iterator> pred_const_range;

inline pred_iterator pred_begin(BasicBlock *BB) { return pred_iterator(BB); }
inline const_pred_iterator pred_begin(const BasicBlock *BB) {
  return const_pred_iterator(BB);
}
inline pred_iterator pred_end(BasicBlock *BB) { return pred_iterator(BB, true);}
inline const_pred_iterator pred_end(const BasicBlock *BB) {
  return const_pred_iterator(BB, true);
}
inline bool pred_empty(const BasicBlock *BB) {
  return pred_begin(BB) == pred_end(BB);
}
inline pred_range predecessors(BasicBlock *BB) {
  return pred_range(pred_begin(BB), pred_end(BB));
}
inline pred_const_range predecessors(const BasicBlock *BB) {
  return pred_const_range(pred_begin(BB), pred_end(BB));
}

//===----------------------------------------------------------------------===//
// BasicBlock succ_iterator definition
// //
///////////////////////////////////////////////////////////////////////////////

// Random-access iterator over a terminator's successors, addressed by index
// 0 .. getNumSuccessors().
template <class Term_, class BB_>           // Successor Iterator
class SuccIterator {
public:
  using iterator_category = std::random_access_iterator_tag;
  using value_type = BB_;
  using difference_type = int;
  using pointer = BB_ *;
  using reference = BB_ *;

private:
  Term_ Term;
  unsigned idx;
  typedef SuccIterator<Term_, BB_> Self;

  // Valid positions include the one-past-the-end index.
  inline bool index_is_valid(int idx) {
    // HLSL Change Begin
    return idx >= 0 && (unsigned)idx <= Term->getNumSuccessors();
    // HLSL Change End
  }

  /// \brief Proxy object to allow write access in operator[]
  class SuccessorProxy {
    Self it;

  public:
    explicit SuccessorProxy(const Self &it) : it(it) {}

    SuccessorProxy(const SuccessorProxy&) = default;

    // Assigning from another proxy copies the referenced successor through
    // the reference-typed overload below.
    SuccessorProxy &operator=(SuccessorProxy r) {
      *this = reference(r);
      return *this;
    }

    SuccessorProxy &operator=(reference r) {
      it.Term->setSuccessor(it.idx, r);
      return *this;
    }

    operator reference() const { return *it; }
  };

public:
  explicit inline SuccIterator(Term_ T) : Term(T), idx(0) {// begin iterator
  }
  inline SuccIterator(Term_ T, bool)                       // end iterator
    : Term(T) {
    if (Term)
      idx = Term->getNumSuccessors();
    else
      // Term == NULL happens, if a basic block is not fully constructed and
      // consequently getTerminator() returns NULL. In this case we construct a
      // SuccIterator which describes a basic block that has zero successors.
      // Defining SuccIterator for incomplete and malformed CFGs is especially
      // useful for debugging.
      idx = 0;
  }

  /// getSuccessorIndex - This is used to interface between code that wants to
  /// operate on terminator instructions directly.
  unsigned getSuccessorIndex() const { return idx; }

  // NOTE(review): equality compares idx only; Term agreement is asserted
  // only in the relational operators below.
  inline bool operator==(const Self& x) const { return idx == x.idx; }
  inline bool operator!=(const Self& x) const { return !operator==(x); }

  inline reference operator*() const { return Term->getSuccessor(idx); }
  inline pointer operator->() const { return operator*(); }

  inline Self& operator++() { ++idx; return *this; } // Preincrement

  inline Self operator++(int) { // Postincrement
    Self tmp = *this; ++*this; return tmp;
  }

  inline Self& operator--() { --idx; return *this; }  // Predecrement
  inline Self operator--(int) { // Postdecrement
    Self tmp = *this; --*this; return tmp;
  }

  inline bool operator<(const Self& x) const {
    assert(Term == x.Term && "Cannot compare iterators of different blocks!");
    return idx < x.idx;
  }

  inline bool operator<=(const Self& x) const {
    assert(Term == x.Term && "Cannot compare iterators of different blocks!");
    return idx <= x.idx;
  }
  inline bool operator>=(const Self& x) const {
    assert(Term == x.Term && "Cannot compare iterators of different blocks!");
    return idx >= x.idx;
  }

  inline bool operator>(const Self& x) const {
    assert(Term == x.Term && "Cannot compare iterators of different blocks!");
    return idx > x.idx;
  }

  inline Self& operator+=(int Right) {
    unsigned new_idx = idx + Right;
    assert(index_is_valid(new_idx) && "Iterator index out of bound");
    idx = new_idx;
    return *this;
  }

  inline Self operator+(int Right) const {
    Self tmp = *this;
    tmp += Right;
    return tmp;
  }

  inline Self& operator-=(int Right) {
    return operator+=(-Right);
  }

  inline Self operator-(int Right) const {
    return operator+(-Right);
  }

  inline int operator-(const Self& x) const {
    assert(Term == x.Term && "Cannot work on iterators of different blocks!");
    int distance = idx - x.idx;
    return distance;
  }

  // Writable element access via the proxy, so 'succ_it[i] = BB' rewrites the
  // terminator's successor.
  inline SuccessorProxy operator[](int offset) {
    Self tmp = *this;
    tmp += offset;
    return SuccessorProxy(tmp);
  }

  /// Get the source BB of this iterator.
  inline BB_ *getSource() {
    assert(Term && "Source not available, if basic block was malformed");
    return Term->getParent();
  }
};

typedef SuccIterator<TerminatorInst*, BasicBlock> succ_iterator;
typedef SuccIterator<const TerminatorInst*,
                     const BasicBlock> succ_const_iterator;
typedef llvm::iterator_range<succ_iterator> succ_range;
typedef llvm::iterator_range<succ_const_iterator> succ_const_range;

inline succ_iterator succ_begin(BasicBlock *BB) {
  return succ_iterator(BB->getTerminator());
}
inline succ_const_iterator succ_begin(const BasicBlock *BB) {
  return succ_const_iterator(BB->getTerminator());
}
inline succ_iterator succ_end(BasicBlock *BB) {
  return succ_iterator(BB->getTerminator(), true);
}
inline succ_const_iterator succ_end(const BasicBlock *BB) {
  return succ_const_iterator(BB->getTerminator(), true);
}
inline bool succ_empty(const BasicBlock *BB) {
  return succ_begin(BB) == succ_end(BB);
}
inline succ_range successors(BasicBlock *BB) {
  return succ_range(succ_begin(BB), succ_end(BB));
}
inline succ_const_range successors(const BasicBlock *BB) {
  return succ_const_range(succ_begin(BB), succ_end(BB));
}

template <typename T, typename U> struct isPodLike<SuccIterator<T, U> > {
  static const bool value = isPodLike<T>::value;
};

//===--------------------------------------------------------------------===//
// GraphTraits specializations for basic block graphs (CFGs)
//===--------------------------------------------------------------------===//

// Provide specializations of GraphTraits to be able to treat a function as a
// graph of basic blocks...

template <> struct GraphTraits<BasicBlock*> {
  typedef BasicBlock NodeType;
  typedef succ_iterator ChildIteratorType;

  static NodeType *getEntryNode(BasicBlock *BB) { return BB; }
  static inline ChildIteratorType child_begin(NodeType *N) {
    return succ_begin(N);
  }
  static inline ChildIteratorType child_end(NodeType *N) {
    return succ_end(N);
  }
};

template <> struct GraphTraits<const BasicBlock*> {
  typedef const BasicBlock NodeType;
  typedef succ_const_iterator ChildIteratorType;

  static NodeType *getEntryNode(const BasicBlock *BB) { return BB; }

  static inline ChildIteratorType child_begin(NodeType *N) {
    return succ_begin(N);
  }
  static inline ChildIteratorType child_end(NodeType *N) {
    return succ_end(N);
  }
};

// Provide specializations of GraphTraits to be able to treat a function as a
// graph of basic blocks... and to walk it in inverse order.  Inverse order for
// a function is considered to be when traversing the predecessor edges of a BB
// instead of the successor edges.
//
template <> struct GraphTraits<Inverse<BasicBlock*> > {
  typedef BasicBlock NodeType;
  typedef pred_iterator ChildIteratorType;
  static NodeType *getEntryNode(Inverse<BasicBlock *> G) { return G.Graph; }
  static inline ChildIteratorType child_begin(NodeType *N) {
    return pred_begin(N);
  }
  static inline ChildIteratorType child_end(NodeType *N) {
    return pred_end(N);
  }
};

template <> struct GraphTraits<Inverse<const BasicBlock*> > {
  typedef const BasicBlock NodeType;
  typedef const_pred_iterator ChildIteratorType;
  static NodeType *getEntryNode(Inverse<const BasicBlock*> G) {
    return G.Graph;
  }
  static inline ChildIteratorType child_begin(NodeType *N) {
    return pred_begin(N);
  }
  static inline ChildIteratorType child_end(NodeType *N) {
    return pred_end(N);
  }
};

//===--------------------------------------------------------------------===//
// GraphTraits specializations for function basic block graphs (CFGs)
//===--------------------------------------------------------------------===//

// Provide specializations of GraphTraits to be able to treat a function as a
// graph of basic blocks... these are the same as the basic block iterators,
// except that the root node is implicitly the first node of the function.
//
template <> struct GraphTraits<Function*> : public GraphTraits<BasicBlock*> {
  static NodeType *getEntryNode(Function *F) { return &F->getEntryBlock(); }

  // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
  typedef Function::iterator nodes_iterator;
  static nodes_iterator nodes_begin(Function *F) { return F->begin(); }
  static nodes_iterator nodes_end  (Function *F) { return F->end(); }
  static size_t         size       (Function *F) { return F->size(); }
};
template <> struct GraphTraits<const Function*> :
  public GraphTraits<const BasicBlock*> {
  static NodeType *getEntryNode(const Function *F) {return &F->getEntryBlock();}

  // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
  typedef Function::const_iterator nodes_iterator;
  static nodes_iterator nodes_begin(const Function *F) { return F->begin(); }
  static nodes_iterator nodes_end  (const Function *F) { return F->end(); }
  static size_t         size       (const Function *F) { return F->size(); }
};

// Provide specializations of GraphTraits to be able to treat a function as a
// graph of basic blocks... and to walk it in inverse order.  Inverse order for
// a function is considered to be when traversing the predecessor edges of a BB
// instead of the successor edges.
//
template <> struct GraphTraits<Inverse<Function*> > :
  public GraphTraits<Inverse<BasicBlock*> > {
  static NodeType *getEntryNode(Inverse<Function*> G) {
    return &G.Graph->getEntryBlock();
  }
};
template <> struct GraphTraits<Inverse<const Function*> > :
  public GraphTraits<Inverse<const BasicBlock*> > {
  static NodeType *getEntryNode(Inverse<const Function *> G) {
    return &G.Graph->getEntryBlock();
  }
};

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/OperandTraits.h
//===-- llvm/OperandTraits.h - OperandTraits class definition ---*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the traits classes that are handy for enforcing the correct // layout of various User subclasses. It also provides the means for accessing // the operands in the most efficient manner. // #ifndef LLVM_IR_OPERANDTRAITS_H #define LLVM_IR_OPERANDTRAITS_H #include "llvm/IR/User.h" namespace llvm { //===----------------------------------------------------------------------===// // FixedNumOperand Trait Class //===----------------------------------------------------------------------===// /// FixedNumOperandTraits - determine the allocation regime of the Use array /// when it is a prefix to the User object, and the number of Use objects is /// known at compile time. template <typename SubClass, unsigned ARITY> struct FixedNumOperandTraits { static Use *op_begin(SubClass* U) { return reinterpret_cast<Use*>(U) - ARITY; } static Use *op_end(SubClass* U) { return reinterpret_cast<Use*>(U); } static unsigned operands(const User*) { return ARITY; } }; //===----------------------------------------------------------------------===// // OptionalOperand Trait Class //===----------------------------------------------------------------------===// /// OptionalOperandTraits - when the number of operands may change at runtime. /// Naturally it may only decrease, because the allocations may not change. 
template <typename SubClass, unsigned ARITY = 1> struct OptionalOperandTraits : public FixedNumOperandTraits<SubClass, ARITY> { static unsigned operands(const User *U) { return U->getNumOperands(); } }; //===----------------------------------------------------------------------===// // VariadicOperand Trait Class //===----------------------------------------------------------------------===// /// VariadicOperandTraits - determine the allocation regime of the Use array /// when it is a prefix to the User object, and the number of Use objects is /// only known at allocation time. template <typename SubClass, unsigned MINARITY = 0> struct VariadicOperandTraits { static Use *op_begin(SubClass* U) { return reinterpret_cast<Use*>(U) - static_cast<User*>(U)->getNumOperands(); } static Use *op_end(SubClass* U) { return reinterpret_cast<Use*>(U); } static unsigned operands(const User *U) { return U->getNumOperands(); } }; //===----------------------------------------------------------------------===// // HungoffOperand Trait Class // // /////////////////////////////////////////////////////////////////////////////// /// HungoffOperandTraits - determine the allocation regime of the Use array /// when it is not a prefix to the User object, but allocated at an unrelated /// heap address. /// Assumes that the User subclass that is determined by this traits class /// has an OperandList member of type User::op_iterator. [Note: this is now /// trivially satisfied, because User has that member for historic reasons.] /// /// This is the traits class that is needed when the Use array must be /// resizable. template <unsigned MINARITY = 1> struct HungoffOperandTraits { static Use *op_begin(User* U) { return U->getOperandList(); } static Use *op_end(User* U) { return U->getOperandList() + U->getNumOperands(); } static unsigned operands(const User *U) { return U->getNumOperands(); } }; /// Macro for generating in-class operand accessor declarations. 
/// It should only be called in the public section of the interface. /// #define DECLARE_TRANSPARENT_OPERAND_ACCESSORS(VALUECLASS) \ public: \ inline VALUECLASS *getOperand(unsigned) const; \ inline void setOperand(unsigned, VALUECLASS*); \ inline op_iterator op_begin(); \ inline const_op_iterator op_begin() const; \ inline op_iterator op_end(); \ inline const_op_iterator op_end() const; \ protected: \ template <int> inline Use &Op(); \ template <int> inline const Use &Op() const; \ public: \ inline unsigned getNumOperands() const /// Macro for generating out-of-class operand accessor definitions #define DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CLASS, VALUECLASS) \ CLASS::op_iterator CLASS::op_begin() { \ return OperandTraits<CLASS>::op_begin(this); \ } \ CLASS::const_op_iterator CLASS::op_begin() const { \ return OperandTraits<CLASS>::op_begin(const_cast<CLASS*>(this)); \ } \ CLASS::op_iterator CLASS::op_end() { \ return OperandTraits<CLASS>::op_end(this); \ } \ CLASS::const_op_iterator CLASS::op_end() const { \ return OperandTraits<CLASS>::op_end(const_cast<CLASS*>(this)); \ } \ VALUECLASS *CLASS::getOperand(unsigned i_nocapture) const { \ assert(i_nocapture < OperandTraits<CLASS>::operands(this) \ && "getOperand() out of range!"); \ return cast_or_null<VALUECLASS>( \ OperandTraits<CLASS>::op_begin(const_cast<CLASS*>(this))[i_nocapture].get()); \ } \ void CLASS::setOperand(unsigned i_nocapture, VALUECLASS *Val_nocapture) { \ assert(i_nocapture < OperandTraits<CLASS>::operands(this) \ && "setOperand() out of range!"); \ OperandTraits<CLASS>::op_begin(this)[i_nocapture] = Val_nocapture; \ } \ unsigned CLASS::getNumOperands() const { \ return OperandTraits<CLASS>::operands(this); \ } \ template <int Idx_nocapture> Use &CLASS::Op() { \ return this->OpFrom<Idx_nocapture>(this); \ } \ template <int Idx_nocapture> const Use &CLASS::Op() const { \ return this->OpFrom<Idx_nocapture>(this); \ } } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/Operator.h
//===-- llvm/Operator.h - Operator utility subclass -------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines various classes for working with Instructions and // ConstantExprs. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_OPERATOR_H #define LLVM_IR_OPERATOR_H #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Type.h" namespace llvm { class GetElementPtrInst; class BinaryOperator; class ConstantExpr; /// This is a utility class that provides an abstraction for the common /// functionality between Instructions and ConstantExprs. class Operator : public User { private: // The Operator class is intended to be used as a utility, and is never itself // instantiated. void *operator new(size_t, unsigned) = delete; void *operator new(size_t s) = delete; Operator() = delete; protected: // NOTE: Cannot use = delete because it's not legal to delete // an overridden method that's not deleted in the base class. Cannot leave // this unimplemented because that leads to an ODR-violation. ~Operator() override; public: /// Return the opcode for this Instruction or ConstantExpr. unsigned getOpcode() const { if (const Instruction *I = dyn_cast<Instruction>(this)) return I->getOpcode(); return cast<ConstantExpr>(this)->getOpcode(); } /// If V is an Instruction or ConstantExpr, return its opcode. /// Otherwise return UserOp1. 
static unsigned getOpcode(const Value *V) { if (const Instruction *I = dyn_cast<Instruction>(V)) return I->getOpcode(); if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) return CE->getOpcode(); return Instruction::UserOp1; } static inline bool classof(const Instruction *) { return true; } static inline bool classof(const ConstantExpr *) { return true; } static inline bool classof(const Value *V) { return isa<Instruction>(V) || isa<ConstantExpr>(V); } }; /// Utility class for integer arithmetic operators which may exhibit overflow - /// Add, Sub, and Mul. It does not include SDiv, despite that operator having /// the potential for overflow. class OverflowingBinaryOperator : public Operator { public: enum { NoUnsignedWrap = (1 << 0), NoSignedWrap = (1 << 1) }; private: friend class BinaryOperator; friend class ConstantExpr; void setHasNoUnsignedWrap(bool B) { SubclassOptionalData = (SubclassOptionalData & ~NoUnsignedWrap) | (B ? NoUnsignedWrap : 0); // HLSL Change - fix bool arithmetic operator } void setHasNoSignedWrap(bool B) { SubclassOptionalData = (SubclassOptionalData & ~NoSignedWrap) | (B ? NoSignedWrap : 0); // HLSL Change - fix bool arithmetic operator } public: /// Test whether this operation is known to never /// undergo unsigned overflow, aka the nuw property. bool hasNoUnsignedWrap() const { return SubclassOptionalData & NoUnsignedWrap; } /// Test whether this operation is known to never /// undergo signed overflow, aka the nsw property. 
bool hasNoSignedWrap() const { return (SubclassOptionalData & NoSignedWrap) != 0; } static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::Add || I->getOpcode() == Instruction::Sub || I->getOpcode() == Instruction::Mul || I->getOpcode() == Instruction::Shl; } static inline bool classof(const ConstantExpr *CE) { return CE->getOpcode() == Instruction::Add || CE->getOpcode() == Instruction::Sub || CE->getOpcode() == Instruction::Mul || CE->getOpcode() == Instruction::Shl; } static inline bool classof(const Value *V) { return (isa<Instruction>(V) && classof(cast<Instruction>(V))) || (isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V))); } }; /// A udiv or sdiv instruction, which can be marked as "exact", /// indicating that no bits are destroyed. class PossiblyExactOperator : public Operator { public: enum { IsExact = (1 << 0) }; private: friend class BinaryOperator; friend class ConstantExpr; void setIsExact(bool B) { SubclassOptionalData = (SubclassOptionalData & ~IsExact) | (B ? IsExact : 0); // HLSL Change - fix bool arithmetic operator } public: /// Test whether this division is known to be exact, with zero remainder. bool isExact() const { return SubclassOptionalData & IsExact; } static bool isPossiblyExactOpcode(unsigned OpC) { return OpC == Instruction::SDiv || OpC == Instruction::UDiv || OpC == Instruction::AShr || OpC == Instruction::LShr; } static inline bool classof(const ConstantExpr *CE) { return isPossiblyExactOpcode(CE->getOpcode()); } static inline bool classof(const Instruction *I) { return isPossiblyExactOpcode(I->getOpcode()); } static inline bool classof(const Value *V) { return (isa<Instruction>(V) && classof(cast<Instruction>(V))) || (isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V))); } }; /// Convenience struct for specifying and reasoning about fast-math flags. 
class FastMathFlags { private: friend class FPMathOperator; unsigned Flags; FastMathFlags(unsigned F) : Flags(F) { } public: enum { UnsafeAlgebra = (1 << 0), NoNaNs = (1 << 1), NoInfs = (1 << 2), NoSignedZeros = (1 << 3), AllowReciprocal = (1 << 4) }; FastMathFlags() : Flags(0) { } /// Whether any flag is set bool any() const { return Flags != 0; } /// Set all the flags to false void clear() { Flags = 0; } /// Flag queries bool noNaNs() const { return 0 != (Flags & NoNaNs); } bool noInfs() const { return 0 != (Flags & NoInfs); } bool noSignedZeros() const { return 0 != (Flags & NoSignedZeros); } bool allowReciprocal() const { return 0 != (Flags & AllowReciprocal); } bool unsafeAlgebra() const { return 0 != (Flags & UnsafeAlgebra); } /// Flag setters void setNoNaNs() { Flags |= NoNaNs; } void setNoInfs() { Flags |= NoInfs; } void setNoSignedZeros() { Flags |= NoSignedZeros; } void setAllowReciprocal() { Flags |= AllowReciprocal; } void setUnsafeAlgebra() { Flags |= UnsafeAlgebra; setNoNaNs(); setNoInfs(); setNoSignedZeros(); setAllowReciprocal(); } // HLSL Change Begins. void setUnsafeAlgebraHLSL() { Flags |= UnsafeAlgebra; // HLSL has NaNs. setNoInfs(); setNoSignedZeros(); setAllowReciprocal(); } // HLSL Change Ends. void operator&=(const FastMathFlags &OtherFlags) { Flags &= OtherFlags.Flags; } }; /// Utility class for floating point operations which can have /// information about relaxed accuracy requirements attached to them. class FPMathOperator : public Operator { private: friend class Instruction; void setHasUnsafeAlgebra(bool B) { SubclassOptionalData = (SubclassOptionalData & ~FastMathFlags::UnsafeAlgebra) | (B ? FastMathFlags::UnsafeAlgebra : 0); // HLSL Change - fix bool arithmetic operator // Unsafe algebra implies all the others if (B) { setHasNoNaNs(true); setHasNoInfs(true); setHasNoSignedZeros(true); setHasAllowReciprocal(true); } } void setHasNoNaNs(bool B) { SubclassOptionalData = (SubclassOptionalData & ~FastMathFlags::NoNaNs) | (B ? 
FastMathFlags::NoNaNs : 0); // HLSL Change - fix bool arithmetic operator } void setHasNoInfs(bool B) { SubclassOptionalData = (SubclassOptionalData & ~FastMathFlags::NoInfs) | (B ? FastMathFlags::NoInfs : 0); // HLSL Change - fix bool arithmetic operator } void setHasNoSignedZeros(bool B) { SubclassOptionalData = (SubclassOptionalData & ~FastMathFlags::NoSignedZeros) | (B ? FastMathFlags::NoSignedZeros : 0); // HLSL Change - fix bool arithmetic operator } void setHasAllowReciprocal(bool B) { SubclassOptionalData = (SubclassOptionalData & ~FastMathFlags::AllowReciprocal) | (B ? FastMathFlags::AllowReciprocal : 0); // HLSL Change - fix bool arithmetic operator } /// Convenience function for setting multiple fast-math flags. /// FMF is a mask of the bits to set. void setFastMathFlags(FastMathFlags FMF) { SubclassOptionalData |= FMF.Flags; } /// Convenience function for copying all fast-math flags. /// All values in FMF are transferred to this operator. void copyFastMathFlags(FastMathFlags FMF) { SubclassOptionalData = FMF.Flags; } public: /// Test whether this operation is permitted to be /// algebraically transformed, aka the 'A' fast-math property. bool hasUnsafeAlgebra() const { return (SubclassOptionalData & FastMathFlags::UnsafeAlgebra) != 0; } /// Test whether this operation's arguments and results are to be /// treated as non-NaN, aka the 'N' fast-math property. bool hasNoNaNs() const { return (SubclassOptionalData & FastMathFlags::NoNaNs) != 0; } /// Test whether this operation's arguments and results are to be /// treated as NoN-Inf, aka the 'I' fast-math property. bool hasNoInfs() const { return (SubclassOptionalData & FastMathFlags::NoInfs) != 0; } /// Test whether this operation can treat the sign of zero /// as insignificant, aka the 'S' fast-math property. 
bool hasNoSignedZeros() const { return (SubclassOptionalData & FastMathFlags::NoSignedZeros) != 0; } /// Test whether this operation is permitted to use /// reciprocal instead of division, aka the 'R' fast-math property. bool hasAllowReciprocal() const { return (SubclassOptionalData & FastMathFlags::AllowReciprocal) != 0; } /// Convenience function for getting all the fast-math flags FastMathFlags getFastMathFlags() const { return FastMathFlags(SubclassOptionalData); } /// \brief Get the maximum error permitted by this operation in ULPs. An /// accuracy of 0.0 means that the operation should be performed with the /// default precision. float getFPAccuracy() const; static inline bool classof(const Instruction *I) { return I->getType()->isFPOrFPVectorTy() || I->getOpcode() == Instruction::FCmp; } static inline bool classof(const Value *V) { return isa<Instruction>(V) && classof(cast<Instruction>(V)); } }; /// A helper template for defining operators for individual opcodes. template<typename SuperClass, unsigned Opc> class ConcreteOperator : public SuperClass { public: static inline bool classof(const Instruction *I) { return I->getOpcode() == Opc; } static inline bool classof(const ConstantExpr *CE) { return CE->getOpcode() == Opc; } static inline bool classof(const Value *V) { return (isa<Instruction>(V) && classof(cast<Instruction>(V))) || (isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V))); } }; class AddOperator : public ConcreteOperator<OverflowingBinaryOperator, Instruction::Add> { }; class SubOperator : public ConcreteOperator<OverflowingBinaryOperator, Instruction::Sub> { }; class MulOperator : public ConcreteOperator<OverflowingBinaryOperator, Instruction::Mul> { }; class ShlOperator : public ConcreteOperator<OverflowingBinaryOperator, Instruction::Shl> { }; class SDivOperator : public ConcreteOperator<PossiblyExactOperator, Instruction::SDiv> { }; class UDivOperator : public ConcreteOperator<PossiblyExactOperator, Instruction::UDiv> { }; class 
AShrOperator : public ConcreteOperator<PossiblyExactOperator, Instruction::AShr> { }; class LShrOperator : public ConcreteOperator<PossiblyExactOperator, Instruction::LShr> { }; class ZExtOperator : public ConcreteOperator<Operator, Instruction::ZExt> {}; class GEPOperator : public ConcreteOperator<Operator, Instruction::GetElementPtr> { enum { IsInBounds = (1 << 0) }; friend class GetElementPtrInst; friend class ConstantExpr; void setIsInBounds(bool B) { SubclassOptionalData = (SubclassOptionalData & ~IsInBounds) | (B * IsInBounds); } public: /// Test whether this is an inbounds GEP, as defined by LangRef.html. bool isInBounds() const { return SubclassOptionalData & IsInBounds; } inline op_iterator idx_begin() { return op_begin()+1; } inline const_op_iterator idx_begin() const { return op_begin()+1; } inline op_iterator idx_end() { return op_end(); } inline const_op_iterator idx_end() const { return op_end(); } Value *getPointerOperand() { return getOperand(0); } const Value *getPointerOperand() const { return getOperand(0); } static unsigned getPointerOperandIndex() { return 0U; // get index for modifying correct operand } /// Method to return the pointer operand as a PointerType. Type *getPointerOperandType() const { return getPointerOperand()->getType(); } Type *getSourceElementType() const; /// Method to return the address space of the pointer operand. unsigned getPointerAddressSpace() const { return getPointerOperandType()->getPointerAddressSpace(); } unsigned getNumIndices() const { // Note: always non-negative return getNumOperands() - 1; } bool hasIndices() const { return getNumOperands() > 1; } /// Return true if all of the indices of this GEP are zeros. /// If so, the result pointer and the first operand have the same /// value, just potentially different types. 
bool hasAllZeroIndices() const { for (const_op_iterator I = idx_begin(), E = idx_end(); I != E; ++I) { if (ConstantInt *C = dyn_cast<ConstantInt>(I)) if (C->isZero()) continue; return false; } return true; } /// Return true if all of the indices of this GEP are constant integers. /// If so, the result pointer and the first operand have /// a constant offset between them. bool hasAllConstantIndices() const { for (const_op_iterator I = idx_begin(), E = idx_end(); I != E; ++I) { if (!isa<ConstantInt>(I)) return false; } return true; } /// \brief Accumulate the constant address offset of this GEP if possible. /// /// This routine accepts an APInt into which it will accumulate the constant /// offset of this GEP if the GEP is in fact constant. If the GEP is not /// all-constant, it returns false and the value of the offset APInt is /// undefined (it is *not* preserved!). The APInt passed into this routine /// must be at exactly as wide as the IntPtr type for the address space of the /// base GEP pointer. bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const; }; class PtrToIntOperator : public ConcreteOperator<Operator, Instruction::PtrToInt> { friend class PtrToInt; friend class ConstantExpr; public: Value *getPointerOperand() { return getOperand(0); } const Value *getPointerOperand() const { return getOperand(0); } static unsigned getPointerOperandIndex() { return 0U; // get index for modifying correct operand } /// Method to return the pointer operand as a PointerType. Type *getPointerOperandType() const { return getPointerOperand()->getType(); } /// Method to return the address space of the pointer operand. 
unsigned getPointerAddressSpace() const { return cast<PointerType>(getPointerOperandType())->getAddressSpace(); } }; class BitCastOperator : public ConcreteOperator<Operator, Instruction::BitCast> { friend class BitCastInst; friend class ConstantExpr; public: Type *getSrcTy() const { return getOperand(0)->getType(); } Type *getDestTy() const { return getType(); } }; // HLSL CHANGE: Add this helper class from upstream. class AddrSpaceCastOperator : public ConcreteOperator<Operator, Instruction::AddrSpaceCast> { friend class AddrSpaceCastInst; friend class ConstantExpr; public: Value *getPointerOperand() { return getOperand(0); } const Value *getPointerOperand() const { return getOperand(0); } unsigned getSrcAddressSpace() const { return getPointerOperand()->getType()->getPointerAddressSpace(); } unsigned getDestAddressSpace() const { return getType()->getPointerAddressSpace(); } }; } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/DiagnosticInfo.h
//===- llvm/Support/DiagnosticInfo.h - Diagnostic Declaration ---*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file declares the different classes involved in low level diagnostics. // // Diagnostics reporting is still done as part of the LLVMContext. //===----------------------------------------------------------------------===// #ifndef LLVM_IR_DIAGNOSTICINFO_H #define LLVM_IR_DIAGNOSTICINFO_H #include "llvm-c/Core.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/IR/DebugLoc.h" #include "llvm/IR/Module.h" #include "llvm/Support/Casting.h" #include <functional> namespace llvm { // Forward declarations. class DiagnosticPrinter; class Function; class Instruction; class LLVMContextImpl; class Twine; class Value; class DebugLoc; class SMDiagnostic; /// \brief Defines the different supported severity of a diagnostic. enum DiagnosticSeverity { DS_Error, DS_Warning, DS_Remark, // A note attaches additional information to one of the previous diagnostic // types. DS_Note }; /// \brief Defines the different supported kind of a diagnostic. /// This enum should be extended with a new ID for each added concrete subclass. enum DiagnosticKind { DK_Bitcode, DK_InlineAsm, DK_StackSize, DK_Linker, DK_DebugMetadataVersion, DK_SampleProfile, DK_OptimizationRemark, DK_OptimizationRemarkMissed, DK_OptimizationRemarkAnalysis, DK_OptimizationFailure, DK_MIRParser, DK_DXIL, // HLSL Change DK_FirstPluginKind }; /// \brief Get the next available kind ID for a plugin diagnostic. /// Each time this function is called, it returns a different number. /// Therefore, a plugin that wants to "identify" its own classes /// with a dynamic identifier, just have to use this method to get a new ID /// and assign it to each of its classes. 
/// The returned ID will be greater than or equal to DK_FirstPluginKind. /// Thus, the plugin identifiers will not conflict with the /// DiagnosticKind values. int getNextAvailablePluginDiagnosticKind(); /// \brief This is the base abstract class for diagnostic reporting in /// the backend. /// The print method must be overloaded by the subclasses to print a /// user-friendly message in the client of the backend (let us call it a /// frontend). class DiagnosticInfo { private: /// Kind defines the kind of report this is about. const /* DiagnosticKind */ int Kind; /// Severity gives the severity of the diagnostic. const DiagnosticSeverity Severity; public: DiagnosticInfo(/* DiagnosticKind */ int Kind, DiagnosticSeverity Severity) : Kind(Kind), Severity(Severity) {} virtual ~DiagnosticInfo() {} /* DiagnosticKind */ int getKind() const { return Kind; } DiagnosticSeverity getSeverity() const { return Severity; } /// Print using the given \p DP a user-friendly message. /// This is the default message that will be printed to the user. /// It is used when the frontend does not directly take advantage /// of the information contained in fields of the subclasses. /// The printed message must not end with '.' nor start with a severity /// keyword. virtual void print(DiagnosticPrinter &DP) const = 0; }; typedef std::function<void(const DiagnosticInfo &)> DiagnosticHandlerFunction; /// Diagnostic information for inline asm reporting. /// This is basically a message and an optional location. class DiagnosticInfoInlineAsm : public DiagnosticInfo { private: /// Optional line information. 0 if not set. unsigned LocCookie; /// Message to be reported. const Twine &MsgStr; /// Optional origin of the problem. const Instruction *Instr; public: /// \p MsgStr is the message to be reported to the frontend. /// This class does not copy \p MsgStr, therefore the reference must be valid /// for the whole life time of the Diagnostic. 
DiagnosticInfoInlineAsm(const Twine &MsgStr, DiagnosticSeverity Severity = DS_Error) : DiagnosticInfo(DK_InlineAsm, Severity), LocCookie(0), MsgStr(MsgStr), Instr(nullptr) {} /// \p LocCookie if non-zero gives the line number for this report. /// \p MsgStr gives the message. /// This class does not copy \p MsgStr, therefore the reference must be valid /// for the whole life time of the Diagnostic. DiagnosticInfoInlineAsm(unsigned LocCookie, const Twine &MsgStr, DiagnosticSeverity Severity = DS_Error) : DiagnosticInfo(DK_InlineAsm, Severity), LocCookie(LocCookie), MsgStr(MsgStr), Instr(nullptr) {} /// \p Instr gives the original instruction that triggered the diagnostic. /// \p MsgStr gives the message. /// This class does not copy \p MsgStr, therefore the reference must be valid /// for the whole life time of the Diagnostic. /// Same for \p I. DiagnosticInfoInlineAsm(const Instruction &I, const Twine &MsgStr, DiagnosticSeverity Severity = DS_Error); unsigned getLocCookie() const { return LocCookie; } const Twine &getMsgStr() const { return MsgStr; } const Instruction *getInstruction() const { return Instr; } /// \see DiagnosticInfo::print. void print(DiagnosticPrinter &DP) const override; static bool classof(const DiagnosticInfo *DI) { return DI->getKind() == DK_InlineAsm; } }; /// Diagnostic information for stack size reporting. /// This is basically a function and a size. class DiagnosticInfoStackSize : public DiagnosticInfo { private: /// The function that is concerned by this stack size diagnostic. const Function &Fn; /// The computed stack size. unsigned StackSize; public: /// \p The function that is concerned by this stack size diagnostic. /// \p The computed stack size. 
DiagnosticInfoStackSize(const Function &Fn, unsigned StackSize, DiagnosticSeverity Severity = DS_Warning) : DiagnosticInfo(DK_StackSize, Severity), Fn(Fn), StackSize(StackSize) {} const Function &getFunction() const { return Fn; } unsigned getStackSize() const { return StackSize; } /// \see DiagnosticInfo::print. void print(DiagnosticPrinter &DP) const override; static bool classof(const DiagnosticInfo *DI) { return DI->getKind() == DK_StackSize; } }; /// Diagnostic information for debug metadata version reporting. /// This is basically a module and a version. class DiagnosticInfoDebugMetadataVersion : public DiagnosticInfo { private: /// The module that is concerned by this debug metadata version diagnostic. const Module &M; /// The actual metadata version. unsigned MetadataVersion; public: /// \p The module that is concerned by this debug metadata version diagnostic. /// \p The actual metadata version. DiagnosticInfoDebugMetadataVersion(const Module &M, unsigned MetadataVersion, DiagnosticSeverity Severity = DS_Warning) : DiagnosticInfo(DK_DebugMetadataVersion, Severity), M(M), MetadataVersion(MetadataVersion) {} const Module &getModule() const { return M; } unsigned getMetadataVersion() const { return MetadataVersion; } /// \see DiagnosticInfo::print. void print(DiagnosticPrinter &DP) const override; static bool classof(const DiagnosticInfo *DI) { return DI->getKind() == DK_DebugMetadataVersion; } }; /// Diagnostic information for the sample profiler. 
class DiagnosticInfoSampleProfile : public DiagnosticInfo { public: DiagnosticInfoSampleProfile(const char *FileName, unsigned LineNum, const Twine &Msg, DiagnosticSeverity Severity = DS_Error) : DiagnosticInfo(DK_SampleProfile, Severity), FileName(FileName), LineNum(LineNum), Msg(Msg) {} DiagnosticInfoSampleProfile(const char *FileName, const Twine &Msg, DiagnosticSeverity Severity = DS_Error) : DiagnosticInfo(DK_SampleProfile, Severity), FileName(FileName), LineNum(0), Msg(Msg) {} DiagnosticInfoSampleProfile(const Twine &Msg, DiagnosticSeverity Severity = DS_Error) : DiagnosticInfo(DK_SampleProfile, Severity), FileName(nullptr), LineNum(0), Msg(Msg) {} /// \see DiagnosticInfo::print. void print(DiagnosticPrinter &DP) const override; static bool classof(const DiagnosticInfo *DI) { return DI->getKind() == DK_SampleProfile; } const char *getFileName() const { return FileName; } unsigned getLineNum() const { return LineNum; } const Twine &getMsg() const { return Msg; } private: /// Name of the input file associated with this diagnostic. const char *FileName; /// Line number where the diagnostic occurred. If 0, no line number will /// be emitted in the message. unsigned LineNum; /// Message to report. const Twine &Msg; }; /// Common features for diagnostics dealing with optimization remarks. class DiagnosticInfoOptimizationBase : public DiagnosticInfo { public: /// \p PassName is the name of the pass emitting this diagnostic. /// \p Fn is the function where the diagnostic is being emitted. \p DLoc is /// the location information to use in the diagnostic. If line table /// information is available, the diagnostic will include the source code /// location. \p Msg is the message to show. Note that this class does not /// copy this message, so this reference must be valid for the whole life time /// of the diagnostic. 
DiagnosticInfoOptimizationBase(enum DiagnosticKind Kind, enum DiagnosticSeverity Severity, const char *PassName, const Function &Fn, const DebugLoc &DLoc, const Twine &Msg) : DiagnosticInfo(Kind, Severity), PassName(PassName), Fn(Fn), DLoc(DLoc), Msg(Msg) {} /// \see DiagnosticInfo::print. void print(DiagnosticPrinter &DP) const override; static bool classof(const DiagnosticInfo *DI) { return DI->getKind() == DK_OptimizationRemark; } /// Return true if this optimization remark is enabled by one of /// of the LLVM command line flags (-pass-remarks, -pass-remarks-missed, /// or -pass-remarks-analysis). Note that this only handles the LLVM /// flags. We cannot access Clang flags from here (they are handled /// in BackendConsumer::OptimizationRemarkHandler). virtual bool isEnabled() const = 0; /// Return true if location information is available for this diagnostic. bool isLocationAvailable() const; /// Return a string with the location information for this diagnostic /// in the format "file:line:col". If location information is not available, /// it returns "<unknown>:0:0". const std::string getLocationStr() const; /// Return location information for this diagnostic in three parts: /// the source file name, line number and column. void getLocation(StringRef *Filename, unsigned *Line, unsigned *Column) const; StringRef getPassName() const { return PassName; } const Function &getFunction() const { return Fn; } const DebugLoc &getDebugLoc() const { return DLoc; } const Twine &getMsg() const { return Msg; } private: /// Name of the pass that triggers this report. If this matches the /// regular expression given in -Rpass=regexp, then the remark will /// be emitted. const char *PassName; /// Function where this diagnostic is triggered. const Function &Fn; /// Debug location where this diagnostic is triggered. DebugLoc DLoc; /// Message to report. const Twine &Msg; }; /// Diagnostic information for applied optimization remarks. 
class DiagnosticInfoOptimizationRemark : public DiagnosticInfoOptimizationBase {
public:
  /// \p PassName is the name of the pass emitting this diagnostic. If
  /// this name matches the regular expression given in -Rpass=, then the
  /// diagnostic will be emitted. \p Fn is the function where the diagnostic
  /// is being emitted. \p DLoc is the location information to use in the
  /// diagnostic. If line table information is available, the diagnostic
  /// will include the source code location. \p Msg is the message to show.
  /// Note that this class does not copy this message, so this reference
  /// must be valid for the whole life time of the diagnostic.
  DiagnosticInfoOptimizationRemark(const char *PassName, const Function &Fn,
                                   const DebugLoc &DLoc, const Twine &Msg)
      : DiagnosticInfoOptimizationBase(DK_OptimizationRemark, DS_Remark,
                                       PassName, Fn, DLoc, Msg) {}

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_OptimizationRemark;
  }

  /// \see DiagnosticInfoOptimizationBase::isEnabled.
  bool isEnabled() const override;
};

/// Diagnostic information for missed-optimization remarks.
class DiagnosticInfoOptimizationRemarkMissed
    : public DiagnosticInfoOptimizationBase {
public:
  /// \p PassName is the name of the pass emitting this diagnostic. If
  /// this name matches the regular expression given in -Rpass-missed=, then the
  /// diagnostic will be emitted. \p Fn is the function where the diagnostic
  /// is being emitted. \p DLoc is the location information to use in the
  /// diagnostic. If line table information is available, the diagnostic
  /// will include the source code location. \p Msg is the message to show.
  /// Note that this class does not copy this message, so this reference
  /// must be valid for the whole life time of the diagnostic.
  DiagnosticInfoOptimizationRemarkMissed(const char *PassName,
                                         const Function &Fn,
                                         const DebugLoc &DLoc, const Twine &Msg)
      : DiagnosticInfoOptimizationBase(DK_OptimizationRemarkMissed, DS_Remark,
                                       PassName, Fn, DLoc, Msg) {}

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_OptimizationRemarkMissed;
  }

  /// \see DiagnosticInfoOptimizationBase::isEnabled.
  bool isEnabled() const override;
};

/// Diagnostic information for optimization analysis remarks.
class DiagnosticInfoOptimizationRemarkAnalysis
    : public DiagnosticInfoOptimizationBase {
public:
  /// \p PassName is the name of the pass emitting this diagnostic. If
  /// this name matches the regular expression given in -Rpass-analysis=, then
  /// the diagnostic will be emitted. \p Fn is the function where the diagnostic
  /// is being emitted. \p DLoc is the location information to use in the
  /// diagnostic. If line table information is available, the diagnostic will
  /// include the source code location. \p Msg is the message to show. Note that
  /// this class does not copy this message, so this reference must be valid for
  /// the whole life time of the diagnostic.
  DiagnosticInfoOptimizationRemarkAnalysis(const char *PassName,
                                           const Function &Fn,
                                           const DebugLoc &DLoc,
                                           const Twine &Msg)
      : DiagnosticInfoOptimizationBase(DK_OptimizationRemarkAnalysis, DS_Remark,
                                       PassName, Fn, DLoc, Msg) {}

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_OptimizationRemarkAnalysis;
  }

  /// \see DiagnosticInfoOptimizationBase::isEnabled.
  bool isEnabled() const override;
};

/// Diagnostic information for machine IR parser.
class DiagnosticInfoMIRParser : public DiagnosticInfo {
  /// The wrapped source-manager diagnostic. Held by reference, not copied.
  const SMDiagnostic &Diagnostic;

public:
  DiagnosticInfoMIRParser(DiagnosticSeverity Severity,
                          const SMDiagnostic &Diagnostic)
      : DiagnosticInfo(DK_MIRParser, Severity), Diagnostic(Diagnostic) {}

  const SMDiagnostic &getDiagnostic() const { return Diagnostic; }

  void print(DiagnosticPrinter &DP) const override;

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_MIRParser;
  }
};

// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(DiagnosticInfo, LLVMDiagnosticInfoRef)

/// Emit an optimization-applied message. \p PassName is the name of the pass
/// emitting the message. If -Rpass= is given and \p PassName matches the
/// regular expression in -Rpass, then the remark will be emitted. \p Fn is
/// the function triggering the remark, \p DLoc is the debug location where
/// the diagnostic is generated. \p Msg is the message string to use.
void emitOptimizationRemark(LLVMContext &Ctx, const char *PassName,
                            const Function &Fn, const DebugLoc &DLoc,
                            const Twine &Msg);

/// Emit an optimization-missed message. \p PassName is the name of the
/// pass emitting the message. If -Rpass-missed= is given and \p PassName
/// matches the regular expression in -Rpass, then the remark will be
/// emitted. \p Fn is the function triggering the remark, \p DLoc is the
/// debug location where the diagnostic is generated. \p Msg is the
/// message string to use.
void emitOptimizationRemarkMissed(LLVMContext &Ctx, const char *PassName,
                                  const Function &Fn, const DebugLoc &DLoc,
                                  const Twine &Msg);

/// Emit an optimization analysis remark message. \p PassName is the name of
/// the pass emitting the message. If -Rpass-analysis= is given and \p
/// PassName matches the regular expression in -Rpass, then the remark will be
/// emitted. \p Fn is the function triggering the remark, \p DLoc is the debug
/// location where the diagnostic is generated. \p Msg is the message string
/// to use.
void emitOptimizationRemarkAnalysis(LLVMContext &Ctx, const char *PassName,
                                    const Function &Fn, const DebugLoc &DLoc,
                                    const Twine &Msg);

/// Diagnostic information for optimization failures.
class DiagnosticInfoOptimizationFailure
    : public DiagnosticInfoOptimizationBase {
public:
  /// \p Fn is the function where the diagnostic is being emitted. \p DLoc is
  /// the location information to use in the diagnostic. If line table
  /// information is available, the diagnostic will include the source code
  /// location. \p Msg is the message to show. Note that this class does not
  /// copy this message, so this reference must be valid for the whole life time
  /// of the diagnostic.
  DiagnosticInfoOptimizationFailure(const Function &Fn, const DebugLoc &DLoc,
                                    const Twine &Msg)
      : DiagnosticInfoOptimizationBase(DK_OptimizationFailure, DS_Warning,
                                       nullptr, Fn, DLoc, Msg) {}

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_OptimizationFailure;
  }

  /// \see DiagnosticInfoOptimizationBase::isEnabled.
  bool isEnabled() const override;
};

/// Emit a warning when loop vectorization is specified but fails. \p Fn is the
/// function triggering the warning, \p DLoc is the debug location where the
/// diagnostic is generated. \p Msg is the message string to use.
void emitLoopVectorizeWarning(LLVMContext &Ctx, const Function &Fn,
                              const DebugLoc &DLoc, const Twine &Msg);

/// Emit a warning when loop interleaving is specified but fails. \p Fn is the
/// function triggering the warning, \p DLoc is the debug location where the
/// diagnostic is generated. \p Msg is the message string to use.
void emitLoopInterleaveWarning(LLVMContext &Ctx, const Function &Fn, const DebugLoc &DLoc, const Twine &Msg); // HLSL Change start - Dxil Diagnostic Info reporter /// Diagnostic information for Dxil errors /// Intended for use in post-codegen passes /// where location information is stored in metadata class DiagnosticInfoDxil : public DiagnosticInfo { private: // Function const Function *Func; bool HasLocation = false; unsigned Line = 0; unsigned Column = 0; StringRef FileName; /// Message to be reported. const Twine &MsgStr; public: /// This class does not copy \p MsgStr, therefore the reference must be valid /// for the whole life time of the Diagnostic. /// DiagnosticInfoDxil(const Function *F, const Twine &MsgStr, DiagnosticSeverity Severity) : DiagnosticInfo(DK_DXIL, Severity), Func(F), MsgStr(MsgStr) {} DiagnosticInfoDxil(const Function *F, const DILocation *Loc, const Twine &MsgStr, DiagnosticSeverity Severity = DS_Error); DiagnosticInfoDxil(const Function *F, const DIGlobalVariable *DGV, const Twine &MsgStr, DiagnosticSeverity Severity = DS_Error); const Function *getFunction() const { return Func; } const Twine &getMsgStr() const { return MsgStr; } bool hasLocation() const { return HasLocation; } unsigned getLine() const { return Line; } unsigned getColumn() const { return Column; } StringRef getFileName() const { return FileName; } /// \see DiagnosticInfo::print. void print(DiagnosticPrinter &DP) const override; static bool classof(const DiagnosticInfo *DI) { return DI->getKind() == DK_DXIL; } }; // HLSL Change end - Dxil Diagnostic Info reporter } // End namespace llvm #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/GlobalValue.h
//===-- llvm/GlobalValue.h - Class to represent a global value --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a common base class of all globally definable objects.  As such,
// it is subclassed by GlobalVariable, GlobalAlias and by Function.  This is
// used because you can do certain things with these global objects that you
// can't do to anything else.  For example, use the address of one as a
// constant.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_GLOBALVALUE_H
#define LLVM_IR_GLOBALVALUE_H

#include "llvm/IR/Constant.h"
#include "llvm/IR/DerivedTypes.h"
#include <system_error>

namespace llvm {

class Comdat;
class PointerType;
class Module;

namespace Intrinsic {
  enum ID : unsigned;
}

class GlobalValue : public Constant {
  GlobalValue(const GlobalValue &) = delete;
public:
  /// @brief An enumeration for the kinds of linkage for global values.
  enum LinkageTypes {
    ExternalLinkage = 0,///< Externally visible function
    AvailableExternallyLinkage, ///< Available for inspection, not emission.
    LinkOnceAnyLinkage, ///< Keep one copy of function when linking (inline)
    LinkOnceODRLinkage, ///< Same, but only replaced by something equivalent.
    WeakAnyLinkage,     ///< Keep one copy of named function when linking (weak)
    WeakODRLinkage,     ///< Same, but only replaced by something equivalent.
    AppendingLinkage,   ///< Special purpose, only applies to global arrays
    InternalLinkage,    ///< Rename collisions when linking (static functions).
    PrivateLinkage,     ///< Like Internal, but omit from symbol table.
    ExternalWeakLinkage,///< ExternalWeak linkage description.
    CommonLinkage       ///< Tentative definitions.
  };

  /// @brief An enumeration for the kinds of visibility of global values.
  enum VisibilityTypes {
    DefaultVisibility = 0,  ///< The GV is visible
    HiddenVisibility,       ///< The GV is hidden
    ProtectedVisibility     ///< The GV is protected
  };

  /// @brief Storage classes of global values for PE targets.
  enum DLLStorageClassTypes {
    DefaultStorageClass   = 0,
    DLLImportStorageClass = 1, ///< Function to be imported from DLL
    DLLExportStorageClass = 2  ///< Function to be accessible from DLL.
  };

protected:
  GlobalValue(PointerType *Ty, ValueTy VTy, Use *Ops, unsigned NumOps,
              LinkageTypes Linkage, const Twine &Name)
      : Constant(Ty, VTy, Ops, NumOps), Linkage(Linkage),
        Visibility(DefaultVisibility), UnnamedAddr(0),
        DllStorageClass(DefaultStorageClass), ThreadLocal(NotThreadLocal),
        IntID((Intrinsic::ID)0U), Parent(nullptr) {
    setName(Name);
  }

  // Note: VC++ treats enums as signed, so an extra bit is required to prevent
  // Linkage and Visibility from turning into negative values.
  LinkageTypes Linkage : 5;   // The linkage of this global
  unsigned Visibility : 2;    // The visibility style of this global
  unsigned UnnamedAddr : 1;   // This value's address is not significant
  unsigned DllStorageClass : 2; // DLL storage class

  unsigned ThreadLocal : 3; // Is this symbol "Thread Local", if so, what is
                            // the desired model?
  // Bits available to subclasses; see the padding arithmetic below.
  static const unsigned GlobalValueSubClassDataBits = 19;

private:
  // Give subclasses access to what otherwise would be wasted padding.
  // (19 + 3 + 2 + 1 + 2 + 5) == 32.
  unsigned SubClassData : GlobalValueSubClassDataBits;

  friend class Constant;
  void destroyConstantImpl();
  Value *handleOperandChangeImpl(Value *From, Value *To, Use *U);

protected:
  /// \brief The intrinsic ID for this subclass (which must be a Function).
  ///
  /// This member is defined by this class, but not used for anything.
  /// Subclasses can use it to store their intrinsic ID, if they have one.
  ///
  /// This is stored here to save space in Function on 64-bit hosts.
  Intrinsic::ID IntID;

  unsigned getGlobalValueSubClassData() const {
    return SubClassData;
  }
  void setGlobalValueSubClassData(unsigned V) {
    // Guard against values that would overflow the 19-bit field.
    assert(V < (1 << GlobalValueSubClassDataBits) && "It will not fit");
    SubClassData = V;
  }

  Module *Parent;             // The containing module.
public:
  enum ThreadLocalMode {
    NotThreadLocal = 0,
    GeneralDynamicTLSModel,
    LocalDynamicTLSModel,
    InitialExecTLSModel,
    LocalExecTLSModel
  };

  ~GlobalValue() override {
    removeDeadConstantUsers();   // remove any dead constants using this.
  }

  unsigned getAlignment() const;

  bool hasUnnamedAddr() const { return UnnamedAddr; }
  void setUnnamedAddr(bool Val) { UnnamedAddr = Val; }

  bool hasComdat() const { return getComdat() != nullptr; }
  Comdat *getComdat();
  const Comdat *getComdat() const {
    return const_cast<GlobalValue *>(this)->getComdat();
  }

  VisibilityTypes getVisibility() const { return VisibilityTypes(Visibility); }
  bool hasDefaultVisibility() const { return Visibility == DefaultVisibility; }
  bool hasHiddenVisibility() const { return Visibility == HiddenVisibility; }
  bool hasProtectedVisibility() const {
    return Visibility == ProtectedVisibility;
  }
  void setVisibility(VisibilityTypes V) {
    assert((!hasLocalLinkage() || V == DefaultVisibility) &&
           "local linkage requires default visibility");
    Visibility = V;
  }

  /// If the value is "Thread Local", its value isn't shared by the threads.
  bool isThreadLocal() const { return getThreadLocalMode() != NotThreadLocal; }
  void setThreadLocal(bool Val) {
    setThreadLocalMode(Val ? GeneralDynamicTLSModel : NotThreadLocal);
  }
  void setThreadLocalMode(ThreadLocalMode Val) {
    // Functions cannot be thread-local; only NotThreadLocal is legal for them.
    assert(Val == NotThreadLocal || getValueID() != Value::FunctionVal);
    ThreadLocal = Val;
  }
  ThreadLocalMode getThreadLocalMode() const {
    return static_cast<ThreadLocalMode>(ThreadLocal);
  }

  DLLStorageClassTypes getDLLStorageClass() const {
    return DLLStorageClassTypes(DllStorageClass);
  }
  bool hasDLLImportStorageClass() const {
    return DllStorageClass == DLLImportStorageClass;
  }
  bool hasDLLExportStorageClass() const {
    return DllStorageClass == DLLExportStorageClass;
  }
  void setDLLStorageClass(DLLStorageClassTypes C) { DllStorageClass = C; }

  bool hasSection() const { return !StringRef(getSection()).empty(); }
  // It is unfortunate that we have to use "char *" in here since this is
  // always non NULL, but:
  // * The C API expects a null terminated string, so we cannot use StringRef.
  // * The C API expects us to own it, so we cannot use a std:string.
  // * For GlobalAliases we can fail to find the section and we have to
  //   return "", so we cannot use a "const std::string &".
  const char *getSection() const;

  /// Global values are always pointers.
  PointerType *getType() const { return cast<PointerType>(User::getType()); }

  Type *getValueType() const { return getType()->getElementType(); }

  static LinkageTypes getLinkOnceLinkage(bool ODR) {
    return ODR ? LinkOnceODRLinkage : LinkOnceAnyLinkage;
  }
  static LinkageTypes getWeakLinkage(bool ODR) {
    return ODR ? WeakODRLinkage : WeakAnyLinkage;
  }

  static bool isExternalLinkage(LinkageTypes Linkage) {
    return Linkage == ExternalLinkage;
  }
  static bool isAvailableExternallyLinkage(LinkageTypes Linkage) {
    return Linkage == AvailableExternallyLinkage;
  }
  static bool isLinkOnceODRLinkage(LinkageTypes Linkage) {
    return Linkage == LinkOnceODRLinkage;
  }
  static bool isLinkOnceLinkage(LinkageTypes Linkage) {
    return Linkage == LinkOnceAnyLinkage || Linkage == LinkOnceODRLinkage;
  }
  static bool isWeakAnyLinkage(LinkageTypes Linkage) {
    return Linkage == WeakAnyLinkage;
  }
  static bool isWeakODRLinkage(LinkageTypes Linkage) {
    return Linkage == WeakODRLinkage;
  }
  static bool isWeakLinkage(LinkageTypes Linkage) {
    return isWeakAnyLinkage(Linkage) || isWeakODRLinkage(Linkage);
  }
  static bool isAppendingLinkage(LinkageTypes Linkage) {
    return Linkage == AppendingLinkage;
  }
  static bool isInternalLinkage(LinkageTypes Linkage) {
    return Linkage == InternalLinkage;
  }
  static bool isPrivateLinkage(LinkageTypes Linkage) {
    return Linkage == PrivateLinkage;
  }
  static bool isLocalLinkage(LinkageTypes Linkage) {
    return isInternalLinkage(Linkage) || isPrivateLinkage(Linkage);
  }
  static bool isExternalWeakLinkage(LinkageTypes Linkage) {
    return Linkage == ExternalWeakLinkage;
  }
  static bool isCommonLinkage(LinkageTypes Linkage) {
    return Linkage == CommonLinkage;
  }

  /// Whether the definition of this global may be discarded if it is not used
  /// in its compilation unit.
  static bool isDiscardableIfUnused(LinkageTypes Linkage) {
    return isLinkOnceLinkage(Linkage) || isLocalLinkage(Linkage);
  }

  /// Whether the definition of this global may be replaced by something
  /// non-equivalent at link time. For example, if a function has weak linkage
  /// then the code defining it may be replaced by different code.
  static bool mayBeOverridden(LinkageTypes Linkage) {
    return Linkage == WeakAnyLinkage || Linkage == LinkOnceAnyLinkage ||
           Linkage == CommonLinkage || Linkage == ExternalWeakLinkage;
  }

  /// Whether the definition of this global may be replaced at link time.  NB:
  /// Using this method outside of the code generators is almost always a
  /// mistake: when working at the IR level use mayBeOverridden instead as it
  /// knows about ODR semantics.
  static bool isWeakForLinker(LinkageTypes Linkage)  {
    return Linkage == WeakAnyLinkage || Linkage == WeakODRLinkage ||
           Linkage == LinkOnceAnyLinkage || Linkage == LinkOnceODRLinkage ||
           Linkage == CommonLinkage || Linkage == ExternalWeakLinkage;
  }

  bool hasExternalLinkage() const { return isExternalLinkage(Linkage); }
  bool hasAvailableExternallyLinkage() const {
    return isAvailableExternallyLinkage(Linkage);
  }
  bool hasLinkOnceLinkage() const { return isLinkOnceLinkage(Linkage); }
  bool hasLinkOnceODRLinkage() const { return isLinkOnceODRLinkage(Linkage); }
  bool hasWeakLinkage() const { return isWeakLinkage(Linkage); }
  bool hasWeakAnyLinkage() const { return isWeakAnyLinkage(Linkage); }
  bool hasWeakODRLinkage() const { return isWeakODRLinkage(Linkage); }
  bool hasAppendingLinkage() const { return isAppendingLinkage(Linkage); }
  bool hasInternalLinkage() const { return isInternalLinkage(Linkage); }
  bool hasPrivateLinkage() const { return isPrivateLinkage(Linkage); }
  bool hasLocalLinkage() const { return isLocalLinkage(Linkage); }
  bool hasExternalWeakLinkage() const { return isExternalWeakLinkage(Linkage); }
  bool hasCommonLinkage() const { return isCommonLinkage(Linkage); }

  void setLinkage(LinkageTypes LT) {
    // Local linkage requires default visibility (see the assert in
    // setVisibility), so reset it here to keep the invariant.
    if (isLocalLinkage(LT))
      Visibility = DefaultVisibility;
    Linkage = LT;
  }
  LinkageTypes getLinkage() const { return Linkage; }

  bool isDiscardableIfUnused() const {
    return isDiscardableIfUnused(Linkage);
  }

  bool mayBeOverridden() const { return mayBeOverridden(Linkage); }

  bool isWeakForLinker() const { return isWeakForLinker(Linkage); }

  /// Copy all additional attributes (those not needed to create a GlobalValue)
  /// from the GlobalValue Src to this one.
  virtual void copyAttributesFrom(const GlobalValue *Src);

  /// If special LLVM prefix that is used to inform the asm printer to not emit
  /// usual symbol prefix before the symbol name is used then return linkage
  /// name after skipping this special LLVM prefix.
  static StringRef getRealLinkageName(StringRef Name) {
    if (!Name.empty() && Name[0] == '\1')
      return Name.substr(1);
    return Name;
  }

/// @name Materialization
/// Materialization is used to construct functions only as they're needed. This
/// is useful to reduce memory usage in LLVM or parsing work done by the
/// BitcodeReader to load the Module.
/// @{

  /// If this function's Module is being lazily streamed in functions from disk
  /// or some other source, this method can be used to check to see if the
  /// function has been read in yet or not.
  bool isMaterializable() const;

  /// Returns true if this function was loaded from a GVMaterializer that's
  /// still attached to its Module and that knows how to dematerialize the
  /// function.
  bool isDematerializable() const;

  /// Make sure this GlobalValue is fully read. If the module is corrupt, this
  /// returns true and fills in the optional string with information about the
  /// problem.  If successful, this returns false.
  std::error_code materialize();

  /// If this GlobalValue is read in, and if the GVMaterializer supports it,
  /// release the memory for the function, and set it up to be materialized
  /// lazily.  If !isDematerializable(), this method is a noop.
  void dematerialize();

/// @}

  /// Return true if the primary definition of this global value is outside of
  /// the current translation unit.
  bool isDeclaration() const;

  bool isDeclarationForLinker() const {
    // available_externally definitions are emitted nowhere, so the linker
    // treats them as declarations.
    if (hasAvailableExternallyLinkage())
      return true;

    return isDeclaration();
  }

  /// Returns true if this global's definition will be the one chosen by the
  /// linker.
  bool isStrongDefinitionForLinker() const {
    return !(isDeclarationForLinker() || isWeakForLinker());
  }

  /// This method unlinks 'this' from the containing module, but does not delete
  /// it.
  virtual void removeFromParent() = 0;

  /// This method unlinks 'this' from the containing module and deletes it.
  virtual void eraseFromParent() = 0;

  /// Get the module that this global value is contained inside of...
  Module *getParent() { return Parent; }
  const Module *getParent() const { return Parent; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    return V->getValueID() == Value::FunctionVal ||
           V->getValueID() == Value::GlobalVariableVal ||
           V->getValueID() == Value::GlobalAliasVal;
  }
};

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/IRPrintingPasses.h
//===- IRPrintingPasses.h - Passes to print out IR constructs ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines passes to print out IR in various granularities. The
/// PrintModulePass pass simply prints out the entire module when it is
/// executed. The PrintFunctionPass class is designed to be pipelined with
/// other FunctionPass's, and prints out the functions of the module as they
/// are processed.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_IRPRINTINGPASSES_H
#define LLVM_IR_IRPRINTINGPASSES_H

#include "llvm/ADT/StringRef.h"
#include <string>

namespace llvm {
class BasicBlockPass;
class Function;
class FunctionPass;
class Module;
class ModulePass;
class PreservedAnalyses;
class raw_ostream;

/// \brief Create and return a pass that writes the module to the specified
/// \c raw_ostream.
ModulePass *createPrintModulePass(raw_ostream &OS,
                                  const std::string &Banner = "",
                                  bool ShouldPreserveUseListOrder = false);

/// \brief Create and return a pass that prints functions to the specified
/// \c raw_ostream as they are processed.
FunctionPass *createPrintFunctionPass(raw_ostream &OS,
                                      const std::string &Banner = "");

/// \brief Create and return a pass that writes the BB to the specified
/// \c raw_ostream.
BasicBlockPass *createPrintBasicBlockPass(raw_ostream &OS,
                                          const std::string &Banner = "");

/// \brief Pass for printing a Module as LLVM's text IR assembly.
///
/// Note: This pass is for use with the new pass manager. Use the create...Pass
/// functions above to create passes for use with the legacy pass manager.
class PrintModulePass {
  raw_ostream &OS;
  std::string Banner;
  bool ShouldPreserveUseListOrder;

public:
  PrintModulePass();
  PrintModulePass(raw_ostream &OS, const std::string &Banner = "",
                  bool ShouldPreserveUseListOrder = false);

  PreservedAnalyses run(Module &M);

  static StringRef name() { return "PrintModulePass"; }
};

/// \brief Pass for printing a Function as LLVM's text IR assembly.
///
/// Note: This pass is for use with the new pass manager. Use the create...Pass
/// functions above to create passes for use with the legacy pass manager.
class PrintFunctionPass {
  raw_ostream &OS;
  std::string Banner;

public:
  PrintFunctionPass();
  PrintFunctionPass(raw_ostream &OS, const std::string &Banner = "");

  PreservedAnalyses run(Function &F);

  static StringRef name() { return "PrintFunctionPass"; }
};

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/DebugInfo.h
//===- DebugInfo.h - Debug Information Helpers ------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a bunch of datatypes that are useful for creating and
// walking debug info in LLVM IR form. They essentially provide wrappers around
// the information in the global variables that's needed when constructing the
// DWARF information.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_DEBUGINFO_H
#define LLVM_IR_DEBUGINFO_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
#include <iterator>

namespace llvm {
class Module;
class DbgDeclareInst;
class DbgValueInst;

/// \brief Maps from type identifier to the actual MDNode.
typedef DenseMap<const MDString *, DIType *> DITypeIdentifierMap;

/// \brief Find subprogram that is enclosing this scope.
DISubprogram *getDISubprogram(const MDNode *Scope);

/// \brief Find debug info for a given function.
///
/// \returns a valid subprogram, if found. Otherwise, return \c nullptr.
DISubprogram *getDISubprogram(const Function *F);

/// \brief Find underlying composite type.
DICompositeTypeBase *getDICompositeType(DIType *T);

/// \brief Generate map by visiting all retained types.
DITypeIdentifierMap generateDITypeIdentifierMap(const NamedMDNode *CU_Nodes);

/// \brief Strip debug info in the module if it exists.
///
/// To do this, we remove all calls to the debugger intrinsics and any named
/// metadata for debugging. We also remove debug locations for instructions.
/// Return true if module is modified.
bool StripDebugInfo(Module &M);
/// Per-function variant of \c StripDebugInfo.
/// NOTE(review): presumably returns true if the function was modified, by
/// analogy with the module-level overload — confirm against implementation.
bool stripDebugInfo(Function &F);

/// \brief Return Debug Info Metadata Version by checking module flags.
unsigned getDebugMetadataVersionFromModule(const Module &M);

bool hasDebugInfo(const Module &M); // HLSL Change - Helper function to check if there's real debug info (variables, types)

/// \brief Utility to find all debug info in a module.
///
/// DebugInfoFinder tries to list all debug info MDNodes used in a module. To
/// list debug info MDNodes used by an instruction, DebugInfoFinder uses
/// processDeclare, processValue and processLocation to handle DbgDeclareInst,
/// DbgValueInst and DbgLoc attached to instructions. processModule will go
/// through all DICompileUnits in llvm.dbg.cu and list debug info MDNodes
/// used by the CUs.
class DebugInfoFinder {
public:
  /// The type-identifier map is built lazily (see TypeMapInitialized).
  DebugInfoFinder() : TypeMapInitialized(false) {}

  /// \brief Process entire module and collect debug info anchors.
  void processModule(const Module &M);

  /// \brief Process DbgDeclareInst.
  void processDeclare(const Module &M, const DbgDeclareInst *DDI);
  /// \brief Process DbgValueInst.
  void processValue(const Module &M, const DbgValueInst *DVI);
  /// \brief Process debug info location.
  void processLocation(const Module &M, const DILocation *Loc);

  /// \brief Clear all lists.
  /// NOTE(review): likely required before reusing a finder on a different
  /// module, since the collected lists and NodesSeen persist — confirm.
  void reset();

  // HLSL Change Begins.
  /// \brief Append new global variable.
  bool appendGlobalVariable(DIGlobalVariable *DIG);
  // HLSL Change Ends.

private:
  void InitializeTypeMap(const Module &M);

  void processType(DIType *DT);
  void processSubprogram(DISubprogram *SP);
  void processScope(DIScope *Scope);
  // add* helpers record a node; the deduplication set is NodesSeen below.
  bool addCompileUnit(DICompileUnit *CU);
  bool addGlobalVariable(DIGlobalVariable *DIG);
  bool addSubprogram(DISubprogram *SP);
  bool addType(DIType *DT);
  bool addScope(DIScope *Scope);

public:
  typedef SmallVectorImpl<DICompileUnit *>::const_iterator
      compile_unit_iterator;
  typedef SmallVectorImpl<DISubprogram *>::const_iterator subprogram_iterator;
  typedef SmallVectorImpl<DIGlobalVariable *>::const_iterator
      global_variable_iterator;
  typedef SmallVectorImpl<DIType *>::const_iterator type_iterator;
  typedef SmallVectorImpl<DIScope *>::const_iterator scope_iterator;

  /// Read-only range views over the collected debug-info nodes.
  iterator_range<compile_unit_iterator> compile_units() const {
    return iterator_range<compile_unit_iterator>(CUs.begin(), CUs.end());
  }

  iterator_range<subprogram_iterator> subprograms() const {
    return iterator_range<subprogram_iterator>(SPs.begin(), SPs.end());
  }

  iterator_range<global_variable_iterator> global_variables() const {
    return iterator_range<global_variable_iterator>(GVs.begin(), GVs.end());
  }

  iterator_range<type_iterator> types() const {
    return iterator_range<type_iterator>(TYs.begin(), TYs.end());
  }

  iterator_range<scope_iterator> scopes() const {
    return iterator_range<scope_iterator>(Scopes.begin(), Scopes.end());
  }

  unsigned compile_unit_count() const { return CUs.size(); }
  unsigned global_variable_count() const { return GVs.size(); }
  unsigned subprogram_count() const { return SPs.size(); }
  unsigned type_count() const { return TYs.size(); }
  unsigned scope_count() const { return Scopes.size(); }

private:
  // Collected nodes, in discovery order.
  SmallVector<DICompileUnit *, 8> CUs;
  SmallVector<DISubprogram *, 8> SPs;
  SmallVector<DIGlobalVariable *, 8> GVs;
  SmallVector<DIType *, 8> TYs;
  SmallVector<DIScope *, 8> Scopes;
  // Dedup set shared by the add* helpers above.
  SmallPtrSet<const MDNode *, 64> NodesSeen;
  DITypeIdentifierMap TypeIdentifierMap;

  /// \brief Specify if TypeIdentifierMap is initialized.
  bool TypeMapInitialized;
};

/// Build a map from each Function in \p M to its debug-info subprogram.
DenseMap<const Function *, DISubprogram *> makeSubprogramMap(const Module &M);

} // end namespace llvm

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/IntrinsicInst.h
//===-- llvm/IntrinsicInst.h - Intrinsic Instruction Wrappers ---*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines classes that make it really easy to deal with intrinsic // functions with the isa/dyncast family of functions. In particular, this // allows you to do things like: // // if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(Inst)) // ... MCI->getDest() ... MCI->getSource() ... // // All intrinsic function calls are instances of the call instruction, so these // are all subclasses of the CallInst class. Note that none of these classes // has state or virtual methods, which is an important part of this gross/neat // hack working. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_INTRINSICINST_H #define LLVM_IR_INTRINSICINST_H #include "llvm/IR/Constants.h" #include "llvm/IR/Function.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/Metadata.h" #include "llvm/IR/DebugInfoMetadata.h" // HLSL Change namespace llvm { /// IntrinsicInst - A useful wrapper class for inspecting calls to intrinsic /// functions. This allows the standard isa/dyncast/cast functionality to /// work with calls to intrinsic functions. class IntrinsicInst : public CallInst { IntrinsicInst() = delete; IntrinsicInst(const IntrinsicInst&) = delete; void operator=(const IntrinsicInst&) = delete; public: /// getIntrinsicID - Return the intrinsic ID of this intrinsic. 
/// Intrinsic::ID getIntrinsicID() const { return getCalledFunction()->getIntrinsicID(); } // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const CallInst *I) { if (const Function *CF = I->getCalledFunction()) return CF->isIntrinsic(); return false; } static inline bool classof(const Value *V) { return isa<CallInst>(V) && classof(cast<CallInst>(V)); } }; /// DbgInfoIntrinsic - This is the common base class for debug info intrinsics /// class DbgInfoIntrinsic : public IntrinsicInst { public: // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const IntrinsicInst *I) { switch (I->getIntrinsicID()) { case Intrinsic::dbg_declare: case Intrinsic::dbg_value: return true; default: return false; } } static inline bool classof(const Value *V) { return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); } static Value *StripCast(Value *C); }; /// DbgDeclareInst - This represents the llvm.dbg.declare instruction. /// class DbgDeclareInst : public DbgInfoIntrinsic { public: Value *getAddress() const; DILocalVariable *getVariable() const { return cast<DILocalVariable>(getRawVariable()); } DIExpression *getExpression() const { return cast<DIExpression>(getRawExpression()); } void setVariable(DIVariable *v) { setArgOperand(1, MetadataAsValue::get(getContext(), v)); } // HLSL Change Metadata *getRawVariable() const { return cast<MetadataAsValue>(getArgOperand(1))->getMetadata(); } Metadata *getRawExpression() const { return cast<MetadataAsValue>(getArgOperand(2))->getMetadata(); } // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const IntrinsicInst *I) { return I->getIntrinsicID() == Intrinsic::dbg_declare; } static inline bool classof(const Value *V) { return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); } }; /// DbgValueInst - This represents the llvm.dbg.value instruction. 
/// class DbgValueInst : public DbgInfoIntrinsic { public: const Value *getValue() const; Value *getValue(); uint64_t getOffset() const { return cast<ConstantInt>( const_cast<Value*>(getArgOperand(1)))->getZExtValue(); } DILocalVariable *getVariable() const { return cast<DILocalVariable>(getRawVariable()); } DIExpression *getExpression() const { return cast<DIExpression>(getRawExpression()); } void setVariable(DIVariable *v) { setArgOperand(2, MetadataAsValue::get(getContext(), v)); } // HLSL Change Metadata *getRawVariable() const { return cast<MetadataAsValue>(getArgOperand(2))->getMetadata(); } Metadata *getRawExpression() const { return cast<MetadataAsValue>(getArgOperand(3))->getMetadata(); } // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const IntrinsicInst *I) { return I->getIntrinsicID() == Intrinsic::dbg_value; } static inline bool classof(const Value *V) { return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); } }; /// MemIntrinsic - This is the common base class for memset/memcpy/memmove. 
/// class MemIntrinsic : public IntrinsicInst { public: Value *getRawDest() const { return const_cast<Value*>(getArgOperand(0)); } const Use &getRawDestUse() const { return getArgOperandUse(0); } Use &getRawDestUse() { return getArgOperandUse(0); } Value *getLength() const { return const_cast<Value*>(getArgOperand(2)); } const Use &getLengthUse() const { return getArgOperandUse(2); } Use &getLengthUse() { return getArgOperandUse(2); } ConstantInt *getAlignmentCst() const { return cast<ConstantInt>(const_cast<Value*>(getArgOperand(3))); } unsigned getAlignment() const { return getAlignmentCst()->getZExtValue(); } ConstantInt *getVolatileCst() const { return cast<ConstantInt>(const_cast<Value*>(getArgOperand(4))); } bool isVolatile() const { return !getVolatileCst()->isZero(); } unsigned getDestAddressSpace() const { return cast<PointerType>(getRawDest()->getType())->getAddressSpace(); } /// getDest - This is just like getRawDest, but it strips off any cast /// instructions that feed it, giving the original input. The returned /// value is guaranteed to be a pointer. Value *getDest() const { return getRawDest()->stripPointerCasts(); } /// set* - Set the specified arguments of the instruction. 
/// void setDest(Value *Ptr) { assert(getRawDest()->getType() == Ptr->getType() && "setDest called with pointer of wrong type!"); setArgOperand(0, Ptr); } void setLength(Value *L) { assert(getLength()->getType() == L->getType() && "setLength called with value of wrong type!"); setArgOperand(2, L); } void setAlignment(Constant* A) { setArgOperand(3, A); } void setVolatile(Constant* V) { setArgOperand(4, V); } Type *getAlignmentType() const { return getArgOperand(3)->getType(); } // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const IntrinsicInst *I) { switch (I->getIntrinsicID()) { case Intrinsic::memcpy: case Intrinsic::memmove: case Intrinsic::memset: return true; default: return false; } } static inline bool classof(const Value *V) { return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); } }; /// MemSetInst - This class wraps the llvm.memset intrinsic. /// class MemSetInst : public MemIntrinsic { public: /// get* - Return the arguments to the instruction. /// Value *getValue() const { return const_cast<Value*>(getArgOperand(1)); } const Use &getValueUse() const { return getArgOperandUse(1); } Use &getValueUse() { return getArgOperandUse(1); } void setValue(Value *Val) { assert(getValue()->getType() == Val->getType() && "setValue called with value of wrong type!"); setArgOperand(1, Val); } // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const IntrinsicInst *I) { return I->getIntrinsicID() == Intrinsic::memset; } static inline bool classof(const Value *V) { return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); } }; /// MemTransferInst - This class wraps the llvm.memcpy/memmove intrinsics. /// class MemTransferInst : public MemIntrinsic { public: /// get* - Return the arguments to the instruction. 
/// Value *getRawSource() const { return const_cast<Value*>(getArgOperand(1)); } const Use &getRawSourceUse() const { return getArgOperandUse(1); } Use &getRawSourceUse() { return getArgOperandUse(1); } /// getSource - This is just like getRawSource, but it strips off any cast /// instructions that feed it, giving the original input. The returned /// value is guaranteed to be a pointer. Value *getSource() const { return getRawSource()->stripPointerCasts(); } unsigned getSourceAddressSpace() const { return cast<PointerType>(getRawSource()->getType())->getAddressSpace(); } void setSource(Value *Ptr) { assert(getRawSource()->getType() == Ptr->getType() && "setSource called with pointer of wrong type!"); setArgOperand(1, Ptr); } // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const IntrinsicInst *I) { return I->getIntrinsicID() == Intrinsic::memcpy || I->getIntrinsicID() == Intrinsic::memmove; } static inline bool classof(const Value *V) { return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); } }; /// MemCpyInst - This class wraps the llvm.memcpy intrinsic. /// class MemCpyInst : public MemTransferInst { public: // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const IntrinsicInst *I) { return I->getIntrinsicID() == Intrinsic::memcpy; } static inline bool classof(const Value *V) { return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); } }; /// MemMoveInst - This class wraps the llvm.memmove intrinsic. /// class MemMoveInst : public MemTransferInst { public: // Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const IntrinsicInst *I) { return I->getIntrinsicID() == Intrinsic::memmove; } static inline bool classof(const Value *V) { return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); } }; /// VAStartInst - This represents the llvm.va_start intrinsic. 
  ///
  class VAStartInst : public IntrinsicInst {
  public:
    static inline bool classof(const IntrinsicInst *I) {
      return I->getIntrinsicID() == Intrinsic::vastart;
    }
    static inline bool classof(const Value *V) {
      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
    }

    // Operand 0 is the va_list being initialized.
    Value *getArgList() const { return const_cast<Value*>(getArgOperand(0)); }
  };

  /// VAEndInst - This represents the llvm.va_end intrinsic.
  ///
  class VAEndInst : public IntrinsicInst {
  public:
    static inline bool classof(const IntrinsicInst *I) {
      return I->getIntrinsicID() == Intrinsic::vaend;
    }
    static inline bool classof(const Value *V) {
      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
    }

    // Operand 0 is the va_list being torn down.
    Value *getArgList() const { return const_cast<Value*>(getArgOperand(0)); }
  };

  /// VACopyInst - This represents the llvm.va_copy intrinsic.
  ///
  class VACopyInst : public IntrinsicInst {
  public:
    static inline bool classof(const IntrinsicInst *I) {
      return I->getIntrinsicID() == Intrinsic::vacopy;
    }
    static inline bool classof(const Value *V) {
      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
    }

    // Operand 0 is the destination va_list, operand 1 the source.
    Value *getDest() const { return const_cast<Value*>(getArgOperand(0)); }
    Value *getSrc() const { return const_cast<Value*>(getArgOperand(1)); }
  };

  /// This represents the llvm.instrprof_increment intrinsic.
  ///
  /// Operand layout: 0 = name global, 1 = hash, 2 = counter count, 3 = index.
  class InstrProfIncrementInst : public IntrinsicInst {
  public:
    static inline bool classof(const IntrinsicInst *I) {
      return I->getIntrinsicID() == Intrinsic::instrprof_increment;
    }
    static inline bool classof(const Value *V) {
      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
    }

    GlobalVariable *getName() const {
      return cast<GlobalVariable>(
          const_cast<Value *>(getArgOperand(0))->stripPointerCasts());
    }

    ConstantInt *getHash() const {
      return cast<ConstantInt>(const_cast<Value *>(getArgOperand(1)));
    }

    ConstantInt *getNumCounters() const {
      return cast<ConstantInt>(const_cast<Value *>(getArgOperand(2)));
    }

    ConstantInt *getIndex() const {
      return cast<ConstantInt>(const_cast<Value *>(getArgOperand(3)));
    }
  };
}

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/Module.h
//===-- llvm/Module.h - C++ class to represent a VM module ------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// @file
/// Module.h This file contains the declarations for the Module class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_MODULE_H
#define LLVM_IR_MODULE_H

#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Comdat.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/CBindingWrapping.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/DataTypes.h"
#include <system_error>

// HLSL Change start
namespace hlsl {
class DxilModule;
class HLModule;
}
// HLSL Change end

namespace llvm {
class FunctionType;
class GVMaterializer;
class LLVMContext;
class RandomNumberGenerator;
class StructType;

template<> struct ilist_traits<Function>
  : public SymbolTableListTraits<Function, Module> {

  // createSentinel is used to get hold of the node that marks the end of the
  // list... (same trick used here as in ilist_traits<Instruction>)
  // HLSL Change Starts
  // Temporarily disable "downcast of address" UBSAN runtime error
  // https://github.com/microsoft/DirectXShaderCompiler/issues/6446
#ifdef __has_feature
#if __has_feature(undefined_behavior_sanitizer)
  __attribute__((no_sanitize("undefined")))
#endif // __has_feature(undefined_behavior_sanitizer)
#endif // defined(__has_feature)
  // HLSL Change Ends
  Function *createSentinel() const {
    return static_cast<Function*>(&Sentinel);
  }
  static void destroySentinel(Function*) {}

  Function *provideInitialHead() const { return createSentinel(); }
  Function *ensureHead(Function*) const { return createSentinel(); }
  static void noteHead(Function*, Function*) {}

private:
  // Embedded end-of-list marker; never a real Function.
  mutable ilist_node<Function> Sentinel;
};

template<> struct ilist_traits<GlobalVariable>
  : public SymbolTableListTraits<GlobalVariable, Module> {
  // createSentinel is used to create a node that marks the end of the list.
  // HLSL Change Starts
  // Temporarily disable "downcast of address" UBSAN runtime error
  // https://github.com/microsoft/DirectXShaderCompiler/issues/6446
#ifdef __has_feature
#if __has_feature(undefined_behavior_sanitizer)
  __attribute__((no_sanitize("undefined")))
#endif // __has_feature(undefined_behavior_sanitizer)
#endif // defined(__has_feature)
  // HLSL Change Ends
  GlobalVariable *createSentinel() const {
    return static_cast<GlobalVariable*>(&Sentinel);
  }
  static void destroySentinel(GlobalVariable*) {}

  GlobalVariable *provideInitialHead() const { return createSentinel(); }
  GlobalVariable *ensureHead(GlobalVariable*) const { return createSentinel(); }
  static void noteHead(GlobalVariable*, GlobalVariable*) {}

private:
  // Embedded end-of-list marker; never a real GlobalVariable.
  mutable ilist_node<GlobalVariable> Sentinel;
};

template<> struct ilist_traits<GlobalAlias>
  : public SymbolTableListTraits<GlobalAlias, Module> {
  // createSentinel is used to create a node that marks the end of the list.
  // HLSL Change Starts
  // Temporarily disable "downcast of address" UBSAN runtime error
  // https://github.com/microsoft/DirectXShaderCompiler/issues/6446
#ifdef __has_feature
#if __has_feature(undefined_behavior_sanitizer)
  __attribute__((no_sanitize("undefined")))
#endif // __has_feature(undefined_behavior_sanitizer)
#endif // defined(__has_feature)
  // HLSL Change Ends
  GlobalAlias *createSentinel() const {
    return static_cast<GlobalAlias*>(&Sentinel);
  }
  static void destroySentinel(GlobalAlias*) {}

  GlobalAlias *provideInitialHead() const { return createSentinel(); }
  GlobalAlias *ensureHead(GlobalAlias*) const { return createSentinel(); }
  static void noteHead(GlobalAlias*, GlobalAlias*) {}

private:
  // Embedded end-of-list marker; never a real GlobalAlias.
  mutable ilist_node<GlobalAlias> Sentinel;
};

template<> struct ilist_traits<NamedMDNode>
  : public ilist_default_traits<NamedMDNode> {
  // createSentinel is used to get hold of a node that marks the end of
  // the list...
  // HLSL Change Starts
  // Temporarily disable "downcast of address" UBSAN runtime error
  // https://github.com/microsoft/DirectXShaderCompiler/issues/6446
#ifdef __has_feature
#if __has_feature(undefined_behavior_sanitizer)
  __attribute__((no_sanitize("undefined")))
#endif // __has_feature(undefined_behavior_sanitizer)
#endif // defined(__has_feature)
  // HLSL Change Ends
  NamedMDNode *createSentinel() const {
    return static_cast<NamedMDNode*>(&Sentinel);
  }
  static void destroySentinel(NamedMDNode*) {}

  NamedMDNode *provideInitialHead() const { return createSentinel(); }
  NamedMDNode *ensureHead(NamedMDNode*) const { return createSentinel(); }
  static void noteHead(NamedMDNode*, NamedMDNode*) {}
  // NamedMDNodes are not in the Module's symbol table, so list maintenance
  // hooks are no-ops here.
  void addNodeToList(NamedMDNode *) {}
  void removeNodeFromList(NamedMDNode *) {}

private:
  // Embedded end-of-list marker; never a real NamedMDNode.
  mutable ilist_node<NamedMDNode> Sentinel;
};

/// A Module instance is used to store all the information related to an
/// LLVM module. Modules are the top level container of all other LLVM
/// Intermediate Representation (IR) objects.
Each module directly contains a /// list of globals variables, a list of functions, a list of libraries (or /// other modules) this module depends on, a symbol table, and various data /// about the target's characteristics. /// /// A module maintains a GlobalValRefMap object that is used to hold all /// constant references to global variables in the module. When a global /// variable is destroyed, it should have no entries in the GlobalValueRefMap. /// @brief The main container class for the LLVM Intermediate Representation. class Module { /// @name Types And Enumerations /// @{ public: /// The type for the list of global variables. typedef iplist<GlobalVariable> GlobalListType; /// The type for the list of functions. typedef iplist<Function> FunctionListType; /// The type for the list of aliases. typedef iplist<GlobalAlias> AliasListType; /// The type for the list of named metadata. typedef ilist<NamedMDNode> NamedMDListType; /// The type of the comdat "symbol" table. typedef StringMap<Comdat> ComdatSymTabType; /// The Global Variable iterator. typedef GlobalListType::iterator global_iterator; /// The Global Variable constant iterator. typedef GlobalListType::const_iterator const_global_iterator; /// The Function iterators. typedef FunctionListType::iterator iterator; /// The Function constant iterator typedef FunctionListType::const_iterator const_iterator; /// The Function reverse iterator. typedef FunctionListType::reverse_iterator reverse_iterator; /// The Function constant reverse iterator. typedef FunctionListType::const_reverse_iterator const_reverse_iterator; /// The Global Alias iterators. typedef AliasListType::iterator alias_iterator; /// The Global Alias constant iterator typedef AliasListType::const_iterator const_alias_iterator; /// The named metadata iterators. typedef NamedMDListType::iterator named_metadata_iterator; /// The named metadata constant iterators. 
typedef NamedMDListType::const_iterator const_named_metadata_iterator; /// This enumeration defines the supported behaviors of module flags. enum ModFlagBehavior { /// Emits an error if two values disagree, otherwise the resulting value is /// that of the operands. Error = 1, /// Emits a warning if two values disagree. The result value will be the /// operand for the flag from the first module being linked. Warning = 2, /// Adds a requirement that another module flag be present and have a /// specified value after linking is performed. The value must be a metadata /// pair, where the first element of the pair is the ID of the module flag /// to be restricted, and the second element of the pair is the value the /// module flag should be restricted to. This behavior can be used to /// restrict the allowable results (via triggering of an error) of linking /// IDs with the **Override** behavior. Require = 3, /// Uses the specified value, regardless of the behavior or value of the /// other module. If both modules specify **Override**, but the values /// differ, an error will be emitted. Override = 4, /// Appends the two values, which are required to be metadata nodes. Append = 5, /// Appends the two values, which are required to be metadata /// nodes. However, duplicate entries in the second list are dropped /// during the append operation. AppendUnique = 6, // Markers: ModFlagBehaviorFirstVal = Error, ModFlagBehaviorLastVal = AppendUnique }; /// Checks if Metadata represents a valid ModFlagBehavior, and stores the /// converted result in MFB. static bool isValidModFlagBehavior(Metadata *MD, ModFlagBehavior &MFB); struct ModuleFlagEntry { ModFlagBehavior Behavior; MDString *Key; Metadata *Val; ModuleFlagEntry(ModFlagBehavior B, MDString *K, Metadata *V) : Behavior(B), Key(K), Val(V) {} }; /// @} /// @name Member Variables /// @{ private: LLVMContext &Context; ///< The LLVMContext from which types and ///< constants are allocated. 
GlobalListType GlobalList; ///< The Global Variables in the module FunctionListType FunctionList; ///< The Functions in the module AliasListType AliasList; ///< The Aliases in the module NamedMDListType NamedMDList; ///< The named metadata in the module std::string GlobalScopeAsm; ///< Inline Asm at global scope. ValueSymbolTable *ValSymTab; ///< Symbol table for values ComdatSymTabType ComdatSymTab; ///< Symbol table for COMDATs std::unique_ptr<GVMaterializer> Materializer; ///< Used to materialize GlobalValues std::string ModuleID; ///< Human readable identifier for the module std::string TargetTriple; ///< Platform target triple Module compiled on ///< Format: (arch)(sub)-(vendor)-(sys0-(abi) void *NamedMDSymTab; ///< NamedMDNode names. DataLayout DL; ///< DataLayout associated with the module friend class Constant; // HLSL Change start hlsl::HLModule *TheHLModule = nullptr; hlsl::DxilModule *TheDxilModule = nullptr; // HLSL Change end /// @} /// @name Constructors /// @{ public: /// The Module constructor. Note that there is no default constructor. You /// must provide a name for the module upon construction. explicit Module(StringRef ModuleID, LLVMContext& C); /// The module destructor. This will dropAllReferences. ~Module(); /// @} /// @name Module Level Accessors /// @{ /// Get the module identifier which is, essentially, the name of the module. /// @returns the module identifier as a string const std::string &getModuleIdentifier() const { return ModuleID; } /// \brief Get a short "name" for the module. /// /// This is useful for debugging or logging. It is essentially a convenience /// wrapper around getModuleIdentifier(). StringRef getName() const { return ModuleID; } /// Get the data layout string for the module's target platform. This is /// equivalent to getDataLayout()->getStringRepresentation(). const std::string &getDataLayoutStr() const { return DL.getStringRepresentation(); } /// Get the data layout for the module's target platform. 
const DataLayout &getDataLayout() const; /// Get the target triple which is a string describing the target host. /// @returns a string containing the target triple. const std::string &getTargetTriple() const { return TargetTriple; } /// Get the global data context. /// @returns LLVMContext - a container for LLVM's global information LLVMContext &getContext() const { return Context; } /// Get any module-scope inline assembly blocks. /// @returns a string containing the module-scope inline assembly blocks. const std::string &getModuleInlineAsm() const { return GlobalScopeAsm; } /// Get a RandomNumberGenerator salted for use with this module. The /// RNG can be seeded via -rng-seed=<uint64> and is salted with the /// ModuleID and the provided pass salt. The returned RNG should not /// be shared across threads or passes. /// /// A unique RNG per pass ensures a reproducible random stream even /// when other randomness consuming passes are added or removed. In /// addition, the random stream will be reproducible across LLVM /// versions when the pass does not change. RandomNumberGenerator *createRNG(const Pass* P) const; /// @} /// @name Module Level Mutators /// @{ /// Set the module identifier. void setModuleIdentifier(StringRef ID) { ModuleID = ID; } /// Set the data layout void setDataLayout(StringRef Desc); void setDataLayout(const DataLayout &Other); /// Set the target triple. void setTargetTriple(StringRef T) { TargetTriple = T; } /// Set the module-scope inline assembly blocks. /// A trailing newline is added if the input doesn't have one. void setModuleInlineAsm(StringRef Asm) { GlobalScopeAsm = Asm; if (!GlobalScopeAsm.empty() && GlobalScopeAsm[GlobalScopeAsm.size()-1] != '\n') GlobalScopeAsm += '\n'; } /// Append to the module-scope inline assembly blocks. /// A trailing newline is added if the input doesn't have one. 
void appendModuleInlineAsm(StringRef Asm) { GlobalScopeAsm += Asm; if (!GlobalScopeAsm.empty() && GlobalScopeAsm[GlobalScopeAsm.size()-1] != '\n') GlobalScopeAsm += '\n'; } /// @} /// @name Generic Value Accessors /// @{ /// Return the global value in the module with the specified name, of /// arbitrary type. This method returns null if a global with the specified /// name is not found. GlobalValue *getNamedValue(StringRef Name) const; /// Return a unique non-zero ID for the specified metadata kind. This ID is /// uniqued across modules in the current LLVMContext. unsigned getMDKindID(StringRef Name) const; /// Populate client supplied SmallVector with the name for custom metadata IDs /// registered in this LLVMContext. void getMDKindNames(SmallVectorImpl<StringRef> &Result) const; /// Return the type with the specified name, or null if there is none by that /// name. StructType *getTypeByName(StringRef Name) const; std::vector<StructType *> getIdentifiedStructTypes() const; /// @} /// @name Function Accessors /// @{ /// Look up the specified function in the module symbol table. Four /// possibilities: /// 1. If it does not exist, add a prototype for the function and return it. /// 2. If it exists, and has a local linkage, the existing function is /// renamed and a new one is inserted. /// 3. Otherwise, if the existing function has the correct prototype, return /// the existing function. /// 4. Finally, the function exists but has the wrong prototype: return the /// function with a constantexpr cast to the right prototype. Constant *getOrInsertFunction(StringRef Name, FunctionType *T, AttributeSet AttributeList); Constant *getOrInsertFunction(StringRef Name, FunctionType *T); /// Look up the specified function in the module symbol table. If it does not /// exist, add a prototype for the function and return it. 
This function /// guarantees to return a constant of pointer to the specified function type /// or a ConstantExpr BitCast of that type if the named function has a /// different type. This version of the method takes a null terminated list of /// function arguments, which makes it easier for clients to use. Constant *getOrInsertFunction(StringRef Name, AttributeSet AttributeList, Type *RetTy, ...) LLVM_END_WITH_NULL; /// Same as above, but without the attributes. Constant *getOrInsertFunction(StringRef Name, Type *RetTy, ...) LLVM_END_WITH_NULL; /// Look up the specified function in the module symbol table. If it does not /// exist, return null. Function *getFunction(StringRef Name) const; /// @} /// @name Global Variable Accessors /// @{ /// Look up the specified global variable in the module symbol table. If it /// does not exist, return null. If AllowInternal is set to true, this /// function will return types that have InternalLinkage. By default, these /// types are not returned. GlobalVariable *getGlobalVariable(StringRef Name) const { return getGlobalVariable(Name, false); } GlobalVariable *getGlobalVariable(StringRef Name, bool AllowInternal) const { return const_cast<Module *>(this)->getGlobalVariable(Name, AllowInternal); } GlobalVariable *getGlobalVariable(StringRef Name, bool AllowInternal = false); /// Return the global variable in the module with the specified name, of /// arbitrary type. This method returns null if a global with the specified /// name is not found. GlobalVariable *getNamedGlobal(StringRef Name) { return getGlobalVariable(Name, true); } const GlobalVariable *getNamedGlobal(StringRef Name) const { return const_cast<Module *>(this)->getNamedGlobal(Name); } /// Look up the specified global in the module symbol table. /// 1. If it does not exist, add a declaration of the global and return it. /// 2. Else, the global exists but has the wrong type: return the function /// with a constantexpr cast to the right type. /// 3. 
Finally, if the existing global is the correct declaration, return /// the existing global. Constant *getOrInsertGlobal(StringRef Name, Type *Ty); /// @} /// @name Global Alias Accessors /// @{ /// Return the global alias in the module with the specified name, of /// arbitrary type. This method returns null if a global with the specified /// name is not found. GlobalAlias *getNamedAlias(StringRef Name) const; /// @} /// @name Named Metadata Accessors /// @{ /// Return the first NamedMDNode in the module with the specified name. This /// method returns null if a NamedMDNode with the specified name is not found. NamedMDNode *getNamedMetadata(const Twine &Name) const; /// Return the named MDNode in the module with the specified name. This method /// returns a new NamedMDNode if a NamedMDNode with the specified name is not /// found. NamedMDNode *getOrInsertNamedMetadata(StringRef Name); /// Remove the given NamedMDNode from this module and delete it. void eraseNamedMetadata(NamedMDNode *NMD); /// @} /// @name Comdat Accessors /// @{ /// Return the Comdat in the module with the specified name. It is created /// if it didn't already exist. Comdat *getOrInsertComdat(StringRef Name); /// @} /// @name Module Flags Accessors /// @{ /// Returns the module flags in the provided vector. void getModuleFlagsMetadata(SmallVectorImpl<ModuleFlagEntry> &Flags) const; /// Return the corresponding value if Key appears in module flags, otherwise /// return null. Metadata *getModuleFlag(StringRef Key) const; /// Returns the NamedMDNode in the module that represents module-level flags. /// This method returns null if there are no module-level flags. NamedMDNode *getModuleFlagsMetadata() const; /// Returns the NamedMDNode in the module that represents module-level flags. /// If module-level flags aren't found, it creates the named metadata that /// contains them. NamedMDNode *getOrInsertModuleFlagsMetadata(); /// Add a module-level flag to the module-level flags metadata. 
It will create /// the module-level flags named metadata if it doesn't already exist. void addModuleFlag(ModFlagBehavior Behavior, StringRef Key, Metadata *Val); void addModuleFlag(ModFlagBehavior Behavior, StringRef Key, Constant *Val); void addModuleFlag(ModFlagBehavior Behavior, StringRef Key, uint32_t Val); void addModuleFlag(MDNode *Node); /// @} /// @name Materialization /// @{ /// Sets the GVMaterializer to GVM. This module must not yet have a /// Materializer. To reset the materializer for a module that already has one, /// call MaterializeAllPermanently first. Destroying this module will destroy /// its materializer without materializing any more GlobalValues. Without /// destroying the Module, there is no way to detach or destroy a materializer /// without materializing all the GVs it controls, to avoid leaving orphan /// unmaterialized GVs. void setMaterializer(GVMaterializer *GVM); /// Retrieves the GVMaterializer, if any, for this Module. GVMaterializer *getMaterializer() const { return Materializer.get(); } /// Returns true if this GV was loaded from this Module's GVMaterializer and /// the GVMaterializer knows how to dematerialize the GV. bool isDematerializable(const GlobalValue *GV) const; /// Make sure the GlobalValue is fully read. If the module is corrupt, this /// returns true and fills in the optional string with information about the /// problem. If successful, this returns false. std::error_code materialize(GlobalValue *GV); /// If the GlobalValue is read in, and if the GVMaterializer supports it, /// release the memory for the function, and set it up to be materialized /// lazily. If !isDematerializable(), this method is a no-op. void dematerialize(GlobalValue *GV); /// Make sure all GlobalValues in this Module are fully read. std::error_code materializeAll(); /// Make sure all GlobalValues in this Module are fully read and clear the /// Materializer. If the module is corrupt, this DOES NOT clear the old /// Materializer. 
std::error_code materializeAllPermanently(); std::error_code materializeMetadata(); std::error_code materializeSelectNamedMetadata(ArrayRef<StringRef> NamedMetadata); // HLSL Change /// @} /// @name Direct access to the globals list, functions list, and symbol table /// @{ /// Get the Module's list of global variables (constant). const GlobalListType &getGlobalList() const { return GlobalList; } /// Get the Module's list of global variables. GlobalListType &getGlobalList() { return GlobalList; } static GlobalListType Module::*getSublistAccess(GlobalVariable*) { return &Module::GlobalList; } /// Get the Module's list of functions (constant). const FunctionListType &getFunctionList() const { return FunctionList; } /// Get the Module's list of functions. FunctionListType &getFunctionList() { return FunctionList; } static FunctionListType Module::*getSublistAccess(Function*) { return &Module::FunctionList; } /// Get the Module's list of aliases (constant). const AliasListType &getAliasList() const { return AliasList; } /// Get the Module's list of aliases. AliasListType &getAliasList() { return AliasList; } static AliasListType Module::*getSublistAccess(GlobalAlias*) { return &Module::AliasList; } /// Get the Module's list of named metadata (constant). const NamedMDListType &getNamedMDList() const { return NamedMDList; } /// Get the Module's list of named metadata. NamedMDListType &getNamedMDList() { return NamedMDList; } static NamedMDListType Module::*getSublistAccess(NamedMDNode*) { return &Module::NamedMDList; } /// Get the symbol table of global variable and function identifiers const ValueSymbolTable &getValueSymbolTable() const { return *ValSymTab; } /// Get the Module's symbol table of global variable and function identifiers. ValueSymbolTable &getValueSymbolTable() { return *ValSymTab; } /// Get the Module's symbol table for COMDATs (constant). 
const ComdatSymTabType &getComdatSymbolTable() const { return ComdatSymTab; } /// Get the Module's symbol table for COMDATs. ComdatSymTabType &getComdatSymbolTable() { return ComdatSymTab; } /// @} /// @name Global Variable Iteration /// @{ global_iterator global_begin() { return GlobalList.begin(); } const_global_iterator global_begin() const { return GlobalList.begin(); } global_iterator global_end () { return GlobalList.end(); } const_global_iterator global_end () const { return GlobalList.end(); } bool global_empty() const { return GlobalList.empty(); } iterator_range<global_iterator> globals() { return iterator_range<global_iterator>(global_begin(), global_end()); } iterator_range<const_global_iterator> globals() const { return iterator_range<const_global_iterator>(global_begin(), global_end()); } /// @} /// @name Function Iteration /// @{ iterator begin() { return FunctionList.begin(); } const_iterator begin() const { return FunctionList.begin(); } iterator end () { return FunctionList.end(); } const_iterator end () const { return FunctionList.end(); } reverse_iterator rbegin() { return FunctionList.rbegin(); } const_reverse_iterator rbegin() const{ return FunctionList.rbegin(); } reverse_iterator rend() { return FunctionList.rend(); } const_reverse_iterator rend() const { return FunctionList.rend(); } size_t size() const { return FunctionList.size(); } bool empty() const { return FunctionList.empty(); } iterator_range<iterator> functions() { return iterator_range<iterator>(begin(), end()); } iterator_range<const_iterator> functions() const { return iterator_range<const_iterator>(begin(), end()); } /// @} /// @name Alias Iteration /// @{ alias_iterator alias_begin() { return AliasList.begin(); } const_alias_iterator alias_begin() const { return AliasList.begin(); } alias_iterator alias_end () { return AliasList.end(); } const_alias_iterator alias_end () const { return AliasList.end(); } size_t alias_size () const { return AliasList.size(); } bool 
alias_empty() const { return AliasList.empty(); } iterator_range<alias_iterator> aliases() { return iterator_range<alias_iterator>(alias_begin(), alias_end()); } iterator_range<const_alias_iterator> aliases() const { return iterator_range<const_alias_iterator>(alias_begin(), alias_end()); } /// @} /// @name Named Metadata Iteration /// @{ named_metadata_iterator named_metadata_begin() { return NamedMDList.begin(); } const_named_metadata_iterator named_metadata_begin() const { return NamedMDList.begin(); } named_metadata_iterator named_metadata_end() { return NamedMDList.end(); } const_named_metadata_iterator named_metadata_end() const { return NamedMDList.end(); } size_t named_metadata_size() const { return NamedMDList.size(); } bool named_metadata_empty() const { return NamedMDList.empty(); } iterator_range<named_metadata_iterator> named_metadata() { return iterator_range<named_metadata_iterator>(named_metadata_begin(), named_metadata_end()); } iterator_range<const_named_metadata_iterator> named_metadata() const { return iterator_range<const_named_metadata_iterator>(named_metadata_begin(), named_metadata_end()); } /// Destroy ConstantArrays in LLVMContext if they are not used. /// ConstantArrays constructed during linking can cause quadratic memory /// explosion. Releasing all unused constants can cause a 20% LTO compile-time /// slowdown for a large application. /// /// NOTE: Constants are currently owned by LLVMContext. This can then only /// be called where all uses of the LLVMContext are understood. void dropTriviallyDeadConstantArrays(); /// @} /// @name Utility functions for printing and dumping Module objects /// @{ /// Print the module to an output stream with an optional /// AssemblyAnnotationWriter. If \c ShouldPreserveUseListOrder, then include /// uselistorder directives so that use-lists can be recreated when reading /// the assembly. 
void print(raw_ostream &OS, AssemblyAnnotationWriter *AAW, bool ShouldPreserveUseListOrder = false) const; /// Dump the module to stderr (for debugging). LLVM_DUMP_METHOD void dump() const; // HLSL Change - Add LLVM_DUMP_METHOD /// This function causes all the subinstructions to "let go" of all references /// that they are maintaining. This allows one to 'delete' a whole class at /// a time, even though there may be circular references... first all /// references are dropped, and all use counts go to zero. Then everything /// is delete'd for real. Note that no operations are valid on an object /// that has "dropped all references", except operator delete. void dropAllReferences(); /// @} /// @name Utility functions for querying Debug information. /// @{ /// \brief Returns the Dwarf Version by checking module flags. unsigned getDwarfVersion() const; /// @} /// @name Utility functions for querying and setting PIC level /// @{ /// \brief Returns the PIC level (small or large model) PICLevel::Level getPICLevel() const; /// \brief Set the PIC level (small or large model) void setPICLevel(PICLevel::Level PL); /// @} // HLSL Change start typedef void (*RemoveGlobalCallback)(llvm::Module*, llvm::GlobalObject*); typedef void(*ResetModuleCallback)(llvm::Module*); RemoveGlobalCallback pfnRemoveGlobal = nullptr; void CallRemoveGlobalHook(llvm::GlobalObject* G) { if (pfnRemoveGlobal) (*pfnRemoveGlobal)(this, G); } bool HasHLModule() const { return TheHLModule != nullptr; } void SetHLModule(hlsl::HLModule *pValue) { TheHLModule = pValue; } hlsl::HLModule &GetHLModule() const { return *TheHLModule; } hlsl::HLModule &GetOrCreateHLModule(bool skipInit = false); ResetModuleCallback pfnResetHLModule = nullptr; void ResetHLModule() { if (pfnResetHLModule) (*pfnResetHLModule)(this); } bool HasDxilModule() const { return TheDxilModule != nullptr; } void SetDxilModule(hlsl::DxilModule *pValue) { TheDxilModule = pValue; } hlsl::DxilModule &GetDxilModule() const { return *TheDxilModule; } 
hlsl::DxilModule &GetOrCreateDxilModule(bool skipInit = false); ResetModuleCallback pfnResetDxilModule = nullptr; void ResetDxilModule() { if (pfnResetDxilModule) (*pfnResetDxilModule)(this); } // HLSL Change end }; /// An raw_ostream inserter for modules. inline raw_ostream &operator<<(raw_ostream &O, const Module &M) { M.print(O, nullptr); return O; } // Create wrappers for C Binding types (see CBindingWrapping.h). DEFINE_SIMPLE_CONVERSION_FUNCTIONS(Module, LLVMModuleRef) /* LLVMModuleProviderRef exists for historical reasons, but now just holds a * Module. */ inline Module *unwrap(LLVMModuleProviderRef MP) { return reinterpret_cast<Module*>(MP); } } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/Intrinsics.h
//===-- llvm/Instrinsics.h - LLVM Intrinsic Function Handling ---*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines a set of enums which allow processing of intrinsic // functions. Values of these enum types are returned by // Function::getIntrinsicID. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_INTRINSICS_H #define LLVM_IR_INTRINSICS_H #include "llvm/ADT/ArrayRef.h" #include <string> namespace llvm { class Type; class FunctionType; class Function; class LLVMContext; class Module; class AttributeSet; /// This namespace contains an enum with a value for every intrinsic/builtin /// function known by LLVM. The enum values are returned by /// Function::getIntrinsicID(). namespace Intrinsic { enum ID : unsigned { not_intrinsic = 0, // Must be zero // Get the intrinsic enums generated from Intrinsics.td #define GET_INTRINSIC_ENUM_VALUES #include "llvm/IR/Intrinsics.gen" #undef GET_INTRINSIC_ENUM_VALUES , num_intrinsics }; /// Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx". std::string getName(ID id, ArrayRef<Type*> Tys = None); /// Return the function type for an intrinsic. FunctionType *getType(LLVMContext &Context, ID id, ArrayRef<Type*> Tys = None); /// Returns true if the intrinsic can be overloaded. bool isOverloaded(ID id); /// Returns true if the intrinsic is a leaf, i.e. it does not make any calls /// itself. Most intrinsics are leafs, the exceptions being the patchpoint /// and statepoint intrinsics. These call (or invoke) their "target" argument. bool isLeaf(ID id); /// Return the attributes for an intrinsic. AttributeSet getAttributes(LLVMContext &C, ID id); /// Create or insert an LLVM Function declaration for an intrinsic, and return /// it. 
/// /// The Tys parameter is for intrinsics with overloaded types (e.g., those /// using iAny, fAny, vAny, or iPTRAny). For a declaration of an overloaded /// intrinsic, Tys must provide exactly one type for each overloaded type in /// the intrinsic. Function *getDeclaration(Module *M, ID id, ArrayRef<Type*> Tys = None); /// Map a GCC builtin name to an intrinsic ID. ID getIntrinsicForGCCBuiltin(const char *Prefix, const char *BuiltinName); /// Map a MS builtin name to an intrinsic ID. ID getIntrinsicForMSBuiltin(const char *Prefix, const char *BuiltinName); /// This is a type descriptor which explains the type requirements of an /// intrinsic. This is returned by getIntrinsicInfoTableEntries. struct IITDescriptor { enum IITDescriptorKind { Void, VarArg, MMX, Metadata, Half, Float, Double, Integer, Vector, Pointer, Struct, Argument, ExtendArgument, TruncArgument, HalfVecArgument, SameVecWidthArgument, PtrToArgument, VecOfPtrsToElt } Kind; union { unsigned Integer_Width; unsigned Float_Width; unsigned Vector_Width; unsigned Pointer_AddressSpace; unsigned Struct_NumElements; unsigned Argument_Info; }; enum ArgKind { AK_Any, AK_AnyInteger, AK_AnyFloat, AK_AnyVector, AK_AnyPointer }; unsigned getArgumentNumber() const { assert(Kind == Argument || Kind == ExtendArgument || Kind == TruncArgument || Kind == HalfVecArgument || Kind == SameVecWidthArgument || Kind == PtrToArgument || Kind == VecOfPtrsToElt); return Argument_Info >> 3; } ArgKind getArgumentKind() const { assert(Kind == Argument || Kind == ExtendArgument || Kind == TruncArgument || Kind == HalfVecArgument || Kind == SameVecWidthArgument || Kind == PtrToArgument || Kind == VecOfPtrsToElt); return (ArgKind)(Argument_Info & 7); } static IITDescriptor get(IITDescriptorKind K, unsigned Field) { IITDescriptor Result = { K, { Field } }; return Result; } }; /// Return the IIT table descriptor for the specified intrinsic into an array /// of IITDescriptors. 
void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl<IITDescriptor> &T); } // End Intrinsic namespace } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/DebugInfoFlags.def
//===- llvm/IR/DebugInfoFlags.def - Debug info flag definitions -*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Macros for running through debug info flags. // //===----------------------------------------------------------------------===// #ifndef HANDLE_DI_FLAG #error "Missing macro definition of HANDLE_DI_FLAG" #endif HANDLE_DI_FLAG(1, Private) HANDLE_DI_FLAG(2, Protected) HANDLE_DI_FLAG(3, Public) HANDLE_DI_FLAG((1 << 2), FwdDecl) HANDLE_DI_FLAG((1 << 3), AppleBlock) HANDLE_DI_FLAG((1 << 4), BlockByrefStruct) HANDLE_DI_FLAG((1 << 5), Virtual) HANDLE_DI_FLAG((1 << 6), Artificial) HANDLE_DI_FLAG((1 << 7), Explicit) HANDLE_DI_FLAG((1 << 8), Prototyped) HANDLE_DI_FLAG((1 << 9), ObjcClassComplete) HANDLE_DI_FLAG((1 << 10), ObjectPointer) HANDLE_DI_FLAG((1 << 11), Vector) HANDLE_DI_FLAG((1 << 12), StaticMember) HANDLE_DI_FLAG((1 << 13), LValueReference) HANDLE_DI_FLAG((1 << 14), RValueReference) #undef HANDLE_DI_FLAG
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/ModuleSlotTracker.h
//===-- llvm/IR/ModuleSlotTracker.h -----------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_MODULESLOTTRACKER_H #define LLVM_IR_MODULESLOTTRACKER_H #include <memory> namespace llvm { class Module; class Function; class SlotTracker; /// Manage lifetime of a slot tracker for printing IR. /// /// Wrapper around the \a SlotTracker used internally by \a AsmWriter. This /// class allows callers to share the cost of incorporating the metadata in a /// module or a function. /// /// If the IR changes from underneath \a ModuleSlotTracker, strings like /// "<badref>" will be printed, or, worse, the wrong slots entirely. class ModuleSlotTracker { /// Storage for a slot tracker. std::unique_ptr<SlotTracker> MachineStorage; const Module *M = nullptr; const Function *F = nullptr; SlotTracker *Machine = nullptr; public: /// Wrap a preinitialized SlotTracker. ModuleSlotTracker(SlotTracker &Machine, const Module *M, const Function *F = nullptr); /// Construct a slot tracker from a module. /// /// If \a M is \c nullptr, uses a null slot tracker. Otherwise, initializes /// a slot tracker, and initializes all metadata slots. \c /// ShouldInitializeAllMetadata defaults to true because this is expected to /// be shared between multiple callers, and otherwise MDNode references will /// not match up. explicit ModuleSlotTracker(const Module *M, bool ShouldInitializeAllMetadata = true); /// Destructor to clean up storage. ~ModuleSlotTracker(); SlotTracker *getMachine() const { return Machine; } const Module *getModule() const { return M; } const Function *getCurrentFunction() const { return F; } /// Incorporate the given function. /// /// Purge the currently incorporated function and incorporate \c F. 
If \c F /// is currently incorporated, this is a no-op. void incorporateFunction(const Function &F); }; } // end namespace llvm #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/InstIterator.h
//===- InstIterator.h - Classes for inst iteration --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains definitions of two iterators for iterating over the // instructions in a function. This is effectively a wrapper around a two level // iterator that can probably be genericized later. // // Note that this iterator gets invalidated any time that basic blocks or // instructions are moved around. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_INSTITERATOR_H #define LLVM_IR_INSTITERATOR_H #include "llvm/IR/BasicBlock.h" #include "llvm/IR/Function.h" namespace llvm { // This class implements inst_begin() & inst_end() for // inst_iterator and const_inst_iterator's. // template <class BB_t, class BB_i_t, class BI_t, class II_t> class InstIterator { typedef BB_t BBty; typedef BB_i_t BBIty; typedef BI_t BIty; typedef II_t IIty; BB_t *BBs; // BasicBlocksType BB_i_t BB; // BasicBlocksType::iterator BI_t BI; // BasicBlock::iterator public: typedef std::bidirectional_iterator_tag iterator_category; typedef IIty value_type; typedef signed difference_type; typedef IIty* pointer; typedef IIty& reference; // Default constructor InstIterator() {} // Copy constructor... 
template<typename A, typename B, typename C, typename D> InstIterator(const InstIterator<A,B,C,D> &II) : BBs(II.BBs), BB(II.BB), BI(II.BI) {} template<typename A, typename B, typename C, typename D> InstIterator(InstIterator<A,B,C,D> &II) : BBs(II.BBs), BB(II.BB), BI(II.BI) {} template<class M> InstIterator(M &m) : BBs(&m.getBasicBlockList()), BB(BBs->begin()) { // begin ctor if (BB != BBs->end()) { BI = BB->begin(); advanceToNextBB(); } } template<class M> InstIterator(M &m, bool) : BBs(&m.getBasicBlockList()), BB(BBs->end()) { // end ctor } // Accessors to get at the underlying iterators... inline BBIty &getBasicBlockIterator() { return BB; } inline BIty &getInstructionIterator() { return BI; } inline reference operator*() const { return *BI; } inline pointer operator->() const { return &operator*(); } inline bool operator==(const InstIterator &y) const { return BB == y.BB && (BB == BBs->end() || BI == y.BI); } inline bool operator!=(const InstIterator& y) const { return !operator==(y); } InstIterator& operator++() { ++BI; advanceToNextBB(); return *this; } inline InstIterator operator++(int) { InstIterator tmp = *this; ++*this; return tmp; } InstIterator& operator--() { while (BB == BBs->end() || BI == BB->begin()) { --BB; BI = BB->end(); } --BI; return *this; } inline InstIterator operator--(int) { InstIterator tmp = *this; --*this; return tmp; } inline bool atEnd() const { return BB == BBs->end(); } private: inline void advanceToNextBB() { // The only way that the II could be broken is if it is now pointing to // the end() of the current BasicBlock and there are successor BBs. 
while (BI == BB->end()) { ++BB; if (BB == BBs->end()) break; BI = BB->begin(); } } }; typedef InstIterator<iplist<BasicBlock>, Function::iterator, BasicBlock::iterator, Instruction> inst_iterator; typedef InstIterator<const iplist<BasicBlock>, Function::const_iterator, BasicBlock::const_iterator, const Instruction> const_inst_iterator; inline inst_iterator inst_begin(Function *F) { return inst_iterator(*F); } inline inst_iterator inst_end(Function *F) { return inst_iterator(*F, true); } inline iterator_range<inst_iterator> inst_range(Function *F) { return iterator_range<inst_iterator>(inst_begin(F), inst_end(F)); } inline const_inst_iterator inst_begin(const Function *F) { return const_inst_iterator(*F); } inline const_inst_iterator inst_end(const Function *F) { return const_inst_iterator(*F, true); } inline iterator_range<const_inst_iterator> inst_range(const Function *F) { return iterator_range<const_inst_iterator>(inst_begin(F), inst_end(F)); } inline inst_iterator inst_begin(Function &F) { return inst_iterator(F); } inline inst_iterator inst_end(Function &F) { return inst_iterator(F, true); } inline iterator_range<inst_iterator> inst_range(Function &F) { return iterator_range<inst_iterator>(inst_begin(F), inst_end(F)); } inline const_inst_iterator inst_begin(const Function &F) { return const_inst_iterator(F); } inline const_inst_iterator inst_end(const Function &F) { return const_inst_iterator(F, true); } inline iterator_range<const_inst_iterator> inst_range(const Function &F) { return iterator_range<const_inst_iterator>(inst_begin(F), inst_end(F)); } } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/PatternMatch.h
//===- PatternMatch.h - Match on the LLVM IR --------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file provides a simple and efficient mechanism for performing general // tree-based pattern matches on the LLVM IR. The power of these routines is // that it allows you to write concise patterns that are expressive and easy to // understand. The other major advantage of this is that it allows you to // trivially capture/bind elements in the pattern to variables. For example, // you can do something like this: // // Value *Exp = ... // Value *X, *Y; ConstantInt *C1, *C2; // (X & C1) | (Y & C2) // if (match(Exp, m_Or(m_And(m_Value(X), m_ConstantInt(C1)), // m_And(m_Value(Y), m_ConstantInt(C2))))) { // ... Pattern is matched and variables are bound ... // } // // This is primarily useful to things like the instruction combiner, but can // also be useful for static analysis tools or code generators. 
// //===----------------------------------------------------------------------===// #ifndef LLVM_IR_PATTERNMATCH_H #define LLVM_IR_PATTERNMATCH_H #include "llvm/IR/CallSite.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/Operator.h" namespace llvm { namespace PatternMatch { template <typename Val, typename Pattern> bool match(Val *V, const Pattern &P) { return const_cast<Pattern &>(P).match(V); } template <typename SubPattern_t> struct OneUse_match { SubPattern_t SubPattern; OneUse_match(const SubPattern_t &SP) : SubPattern(SP) {} template <typename OpTy> bool match(OpTy *V) { return V->hasOneUse() && SubPattern.match(V); } }; template <typename T> inline OneUse_match<T> m_OneUse(const T &SubPattern) { return SubPattern; } template <typename Class> struct class_match { template <typename ITy> bool match(ITy *V) { return isa<Class>(V); } }; /// \brief Match an arbitrary value and ignore it. inline class_match<Value> m_Value() { return class_match<Value>(); } /// \brief Match an arbitrary binary operation and ignore it. inline class_match<BinaryOperator> m_BinOp() { return class_match<BinaryOperator>(); } /// \brief Matches any compare instruction and ignore it. inline class_match<CmpInst> m_Cmp() { return class_match<CmpInst>(); } /// \brief Match an arbitrary ConstantInt and ignore it. inline class_match<ConstantInt> m_ConstantInt() { return class_match<ConstantInt>(); } /// \brief Match an arbitrary undef constant. inline class_match<UndefValue> m_Undef() { return class_match<UndefValue>(); } /// \brief Match an arbitrary Constant and ignore it. 
inline class_match<Constant> m_Constant() { return class_match<Constant>(); }

/// Matching combinators
/// Matcher that succeeds if either of its two sub-matchers succeeds
/// (logical OR, left matcher tried first).
template <typename LTy, typename RTy> struct match_combine_or {
  LTy L;
  RTy R;

  match_combine_or(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}

  template <typename ITy> bool match(ITy *V) {
    if (L.match(V))
      return true;
    if (R.match(V))
      return true;
    return false;
  }
};

/// Matcher that succeeds only if both of its two sub-matchers succeed
/// (logical AND). Note: L may bind captures even when R later fails.
template <typename LTy, typename RTy> struct match_combine_and {
  LTy L;
  RTy R;

  match_combine_and(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}

  template <typename ITy> bool match(ITy *V) {
    if (L.match(V))
      if (R.match(V))
        return true;
    return false;
  }
};

/// Combine two pattern matchers matching L || R
template <typename LTy, typename RTy>
inline match_combine_or<LTy, RTy> m_CombineOr(const LTy &L, const RTy &R) {
  return match_combine_or<LTy, RTy>(L, R);
}

/// Combine two pattern matchers matching L && R
template <typename LTy, typename RTy>
inline match_combine_and<LTy, RTy> m_CombineAnd(const LTy &L, const RTy &R) {
  return match_combine_and<LTy, RTy>(L, R);
}

/// Matcher for any constant whose value is the type's null value
/// (integer 0, FP +0.0, null pointer, zeroinitializer aggregate).
struct match_zero {
  template <typename ITy> bool match(ITy *V) {
    if (const auto *C = dyn_cast<Constant>(V))
      return C->isNullValue();
    return false;
  }
};

/// \brief Match an arbitrary zero/null constant. This includes
/// zero_initializer for vectors and ConstantPointerNull for pointers.
inline match_zero m_Zero() { return match_zero(); }

/// Matcher for negative-zero constants; see m_NegZero below for the exact
/// floating-point semantics.
struct match_neg_zero {
  template <typename ITy> bool match(ITy *V) {
    if (const auto *C = dyn_cast<Constant>(V))
      return C->isNegativeZeroValue();
    return false;
  }
};

/// \brief Match an arbitrary zero/null constant. This includes
/// zero_initializer for vectors and ConstantPointerNull for pointers. For
/// floating point constants, this will match negative zero but not positive
/// zero.
inline match_neg_zero m_NegZero() { return match_neg_zero(); }

/// \brief Match an arbitrary zero/null constant. This includes
/// zero_initializer for vectors and ConstantPointerNull for pointers. For
/// floating point constants, this will match negative zero and positive zero.
inline match_combine_or<match_zero, match_neg_zero> m_AnyZero() {
  return m_CombineOr(m_Zero(), m_NegZero());
}

/// Matcher that binds the matched constant's APInt. Handles both scalar
/// ConstantInt and vectors with a ConstantInt splat value.
struct apint_match {
  const APInt *&Res;
  apint_match(const APInt *&R) : Res(R) {}
  template <typename ITy> bool match(ITy *V) {
    if (auto *CI = dyn_cast<ConstantInt>(V)) {
      Res = &CI->getValue();
      return true;
    }
    // For vectors, match a splat of a ConstantInt; getSplatValue() may
    // return null, hence dyn_cast_or_null.
    if (V->getType()->isVectorTy())
      if (const auto *C = dyn_cast<Constant>(V))
        if (auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue())) {
          Res = &CI->getValue();
          return true;
        }
    return false;
  }
};

/// \brief Match a ConstantInt or splatted ConstantVector, binding the
/// specified pointer to the contained APInt.
inline apint_match m_APInt(const APInt *&Res) { return Res; }

/// Matcher for a ConstantInt with the compile-time value \p Val, at any
/// bit width.
template <int64_t Val> struct constantint_match {
  template <typename ITy> bool match(ITy *V) {
    if (const auto *CI = dyn_cast<ConstantInt>(V)) {
      const APInt &CIV = CI->getValue();
      if (Val >= 0)
        return CIV == static_cast<uint64_t>(Val);
      // If Val is negative, and CI is shorter than it, truncate to the right
      // number of bits. If it is larger, then we have to sign extend. Just
      // compare their negated values.
      return -CIV == -Val;
    }
    return false;
  }
};

/// \brief Match a ConstantInt with a specific value.
template <int64_t Val> inline constantint_match<Val> m_ConstantInt() {
  return constantint_match<Val>();
}

/// \brief This helper class is used to match scalar and vector constants that
/// satisfy a specified predicate.
template <typename Predicate> struct cst_pred_ty : public Predicate {
  template <typename ITy> bool match(ITy *V) {
    if (const auto *CI = dyn_cast<ConstantInt>(V))
      return this->isValue(CI->getValue());
    // Vectors match when their integer splat value satisfies the predicate.
    if (V->getType()->isVectorTy())
      if (const auto *C = dyn_cast<Constant>(V))
        if (const auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue()))
          return this->isValue(CI->getValue());
    return false;
  }
};

/// \brief This helper class is used to match scalar and vector constants that
/// satisfy a specified predicate, and bind them to an APInt.
template <typename Predicate> struct api_pred_ty : public Predicate {
  const APInt *&Res;
  api_pred_ty(const APInt *&R) : Res(R) {}
  template <typename ITy> bool match(ITy *V) {
    if (const auto *CI = dyn_cast<ConstantInt>(V))
      if (this->isValue(CI->getValue())) {
        Res = &CI->getValue();
        return true;
      }
    if (V->getType()->isVectorTy())
      if (const auto *C = dyn_cast<Constant>(V))
        if (auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue()))
          if (this->isValue(CI->getValue())) {
            Res = &CI->getValue();
            return true;
          }
    return false;
  }
};

/// Predicate: is the APInt exactly 1?
struct is_one {
  bool isValue(const APInt &C) { return C == 1; }
};

/// \brief Match an integer 1 or a vector with all elements equal to 1.
inline cst_pred_ty<is_one> m_One() { return cst_pred_ty<is_one>(); }
inline api_pred_ty<is_one> m_One(const APInt *&V) { return V; }

/// Predicate: are all bits of the APInt set?
struct is_all_ones {
  bool isValue(const APInt &C) { return C.isAllOnesValue(); }
};

/// \brief Match an integer or vector with all bits set to true.
inline cst_pred_ty<is_all_ones> m_AllOnes() {
  return cst_pred_ty<is_all_ones>();
}
inline api_pred_ty<is_all_ones> m_AllOnes(const APInt *&V) { return V; }

/// Predicate: is only the sign bit of the APInt set?
struct is_sign_bit {
  bool isValue(const APInt &C) { return C.isSignBit(); }
};

/// \brief Match an integer or vector with only the sign bit(s) set.
inline cst_pred_ty<is_sign_bit> m_SignBit() {
  return cst_pred_ty<is_sign_bit>();
}
inline api_pred_ty<is_sign_bit> m_SignBit(const APInt *&V) { return V; }

/// Predicate: is the APInt a power of two?
struct is_power2 {
  bool isValue(const APInt &C) { return C.isPowerOf2(); }
};

/// \brief Match an integer or vector power of 2.
inline cst_pred_ty<is_power2> m_Power2() { return cst_pred_ty<is_power2>(); }
inline api_pred_ty<is_power2> m_Power2(const APInt *&V) { return V; }

/// Predicate: is the APInt the maximum signed value for its bit width?
struct is_maxsignedvalue {
  bool isValue(const APInt &C) { return C.isMaxSignedValue(); }
};

/// \brief Match an integer or vector with the maximum signed value.
inline cst_pred_ty<is_maxsignedvalue> m_MaxSignedValue() {
  return cst_pred_ty<is_maxsignedvalue>();
}
inline api_pred_ty<is_maxsignedvalue> m_MaxSignedValue(const APInt *&V) {
  return V;
}

/// Matcher that succeeds when the value is an instance of \p Class, and
/// binds the matched value to the captured reference.
template <typename Class> struct bind_ty {
  Class *&VR;
  bind_ty(Class *&V) : VR(V) {}

  template <typename ITy> bool match(ITy *V) {
    if (auto *CV = dyn_cast<Class>(V)) {
      VR = CV;
      return true;
    }
    return false;
  }
};

/// \brief Match a value, capturing it if we match.
inline bind_ty<Value> m_Value(Value *&V) { return V; }

/// \brief Match an instruction, capturing it if we match.
inline bind_ty<Instruction> m_Instruction(Instruction *&I) { return I; }

/// \brief Match a binary operator, capturing it if we match.
inline bind_ty<BinaryOperator> m_BinOp(BinaryOperator *&I) { return I; }

/// \brief Match a ConstantInt, capturing the value if we match.
inline bind_ty<ConstantInt> m_ConstantInt(ConstantInt *&CI) { return CI; }

/// \brief Match a Constant, capturing the value if we match.
inline bind_ty<Constant> m_Constant(Constant *&C) { return C; }

/// \brief Match a ConstantFP, capturing the value if we match.
inline bind_ty<ConstantFP> m_ConstantFP(ConstantFP *&C) { return C; }

/// \brief Match a specified Value*. Succeeds only on pointer identity with
/// the given value.
struct specificval_ty {
  const Value *Val;
  specificval_ty(const Value *V) : Val(V) {}

  template <typename ITy> bool match(ITy *V) { return V == Val; }
};

/// \brief Match if we have a specific specified value.
inline specificval_ty m_Specific(const Value *V) { return V; }

/// \brief Match a specified floating point value or vector of all elements of
/// that value.
struct specific_fpval {
  double Val;
  specific_fpval(double V) : Val(V) {}

  template <typename ITy> bool match(ITy *V) {
    if (const auto *CFP = dyn_cast<ConstantFP>(V))
      return CFP->isExactlyValue(Val);
    if (V->getType()->isVectorTy())
      if (const auto *C = dyn_cast<Constant>(V))
        if (auto *CFP = dyn_cast_or_null<ConstantFP>(C->getSplatValue()))
          return CFP->isExactlyValue(Val);
    return false;
  }
};

/// \brief Match a specific floating point value or vector with all elements
/// equal to the value.
inline specific_fpval m_SpecificFP(double V) { return specific_fpval(V); }

/// \brief Match a float 1.0 or vector with all elements equal to 1.0.
inline specific_fpval m_FPOne() { return m_SpecificFP(1.0); }

/// Matcher that binds a ConstantInt's zero-extended value into a uint64_t;
/// only ConstantInts of width <= 64 bits match.
struct bind_const_intval_ty {
  uint64_t &VR;
  bind_const_intval_ty(uint64_t &V) : VR(V) {}

  template <typename ITy> bool match(ITy *V) {
    if (const auto *CV = dyn_cast<ConstantInt>(V))
      if (CV->getBitWidth() <= 64) {
        VR = CV->getZExtValue();
        return true;
      }
    return false;
  }
};

/// \brief Match a specified integer value or vector of all elements of that
/// value.
struct specific_intval {
  uint64_t Val;
  specific_intval(uint64_t V) : Val(V) {}

  template <typename ITy> bool match(ITy *V) {
    const auto *CI = dyn_cast<ConstantInt>(V);
    if (!CI && V->getType()->isVectorTy())
      if (const auto *C = dyn_cast<Constant>(V))
        CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue());
    if (CI && CI->getBitWidth() <= 64)
      return CI->getZExtValue() == Val;
    return false;
  }
};

/// \brief Match a specific integer value or vector with all elements equal to
/// the value.
inline specific_intval m_SpecificInt(uint64_t V) { return specific_intval(V); }

/// \brief Match a ConstantInt and bind to its value. This does not match
/// ConstantInts wider than 64-bits.
inline bind_const_intval_ty m_ConstantInt(uint64_t &V) { return V; }

//===----------------------------------------------------------------------===//
// Matcher for any binary operator.
//
template <typename LHS_t, typename RHS_t> struct AnyBinaryOp_match {
  LHS_t L;
  RHS_t R;

  AnyBinaryOp_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    // Matches only BinaryOperator instructions, not constant expressions.
    if (auto *I = dyn_cast<BinaryOperator>(V))
      return L.match(I->getOperand(0)) && R.match(I->getOperand(1));
    return false;
  }
};

/// \brief Match any binary operator whose operands match L and R.
template <typename LHS, typename RHS>
inline AnyBinaryOp_match<LHS, RHS> m_BinOp(const LHS &L, const RHS &R) {
  return AnyBinaryOp_match<LHS, RHS>(L, R);
}

//===----------------------------------------------------------------------===//
// Matchers for specific binary operators.
//

template <typename LHS_t, typename RHS_t, unsigned Opcode>
struct BinaryOp_match {
  LHS_t L;
  RHS_t R;

  BinaryOp_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    // Fast path: compare the value ID directly instead of dyn_cast'ing and
    // then inspecting the opcode.
    if (V->getValueID() == Value::InstructionVal + Opcode) {
      auto *I = cast<BinaryOperator>(V);
      return L.match(I->getOperand(0)) && R.match(I->getOperand(1));
    }
    // Also match constant expressions with the same opcode.
    if (auto *CE = dyn_cast<ConstantExpr>(V))
      return CE->getOpcode() == Opcode && L.match(CE->getOperand(0)) &&
             R.match(CE->getOperand(1));
    return false;
  }
};

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Add> m_Add(const LHS &L,
                                                        const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Add>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::FAdd> m_FAdd(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::FAdd>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Sub> m_Sub(const LHS &L,
                                                        const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Sub>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::FSub> m_FSub(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::FSub>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Mul> m_Mul(const LHS &L,
                                                        const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Mul>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::FMul> m_FMul(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::FMul>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::UDiv> m_UDiv(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::UDiv>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::SDiv> m_SDiv(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::SDiv>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::FDiv> m_FDiv(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::FDiv>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::URem> m_URem(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::URem>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::SRem> m_SRem(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::SRem>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::FRem> m_FRem(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::FRem>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::And> m_And(const LHS &L,
                                                        const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::And>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Or> m_Or(const LHS &L,
                                                      const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Or>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Xor> m_Xor(const LHS &L,
                                                        const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Xor>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Shl> m_Shl(const LHS &L,
                                                        const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Shl>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::LShr> m_LShr(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::LShr>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::AShr> m_AShr(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::AShr>(L, R);
}

/// Matcher for an overflowing binary operator (add/sub/mul/shl) that carries
/// at least the wrap flags requested in \p WrapFlags.
template <typename LHS_t, typename RHS_t, unsigned Opcode,
          unsigned WrapFlags = 0>
struct OverflowingBinaryOp_match {
  LHS_t L;
  RHS_t R;

  OverflowingBinaryOp_match(const LHS_t &LHS, const RHS_t &RHS)
      : L(LHS), R(RHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *Op = dyn_cast<OverflowingBinaryOperator>(V)) {
      if (Op->getOpcode() != Opcode)
        return false;
      // Each requested wrap flag must be present on the operation.
      if (WrapFlags & OverflowingBinaryOperator::NoUnsignedWrap &&
          !Op->hasNoUnsignedWrap())
        return false;
      if (WrapFlags & OverflowingBinaryOperator::NoSignedWrap &&
          !Op->hasNoSignedWrap())
        return false;
      return L.match(Op->getOperand(0)) && R.match(Op->getOperand(1));
    }
    return false;
  }
};

template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
                                 OverflowingBinaryOperator::NoSignedWrap>
m_NSWAdd(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
                                   OverflowingBinaryOperator::NoSignedWrap>(
      L, R);
}
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
                                 OverflowingBinaryOperator::NoSignedWrap>
m_NSWSub(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
                                   OverflowingBinaryOperator::NoSignedWrap>(
      L, R);
}
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
                                 OverflowingBinaryOperator::NoSignedWrap>
m_NSWMul(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
                                   OverflowingBinaryOperator::NoSignedWrap>(
      L, R);
}
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
                                 OverflowingBinaryOperator::NoSignedWrap>
m_NSWShl(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
                                   OverflowingBinaryOperator::NoSignedWrap>(
      L, R);
}

template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
                                 OverflowingBinaryOperator::NoUnsignedWrap>
m_NUWAdd(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
                                   OverflowingBinaryOperator::NoUnsignedWrap>(
      L, R);
}
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
                                 OverflowingBinaryOperator::NoUnsignedWrap>
m_NUWSub(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
                                   OverflowingBinaryOperator::NoUnsignedWrap>(
      L, R);
}
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
                                 OverflowingBinaryOperator::NoUnsignedWrap>
m_NUWMul(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
                                   OverflowingBinaryOperator::NoUnsignedWrap>(
      L, R);
}
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
                                 OverflowingBinaryOperator::NoUnsignedWrap>
m_NUWShl(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
                                   OverflowingBinaryOperator::NoUnsignedWrap>(
      L, R);
}

//===----------------------------------------------------------------------===//
// Class that matches two different binary ops.
//
template <typename LHS_t, typename RHS_t, unsigned Opc1, unsigned Opc2>
struct BinOp2_match {
  LHS_t L;
  RHS_t R;

  BinOp2_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    // Same shape as BinaryOp_match, but accepts either of two opcodes.
    if (V->getValueID() == Value::InstructionVal + Opc1 ||
        V->getValueID() == Value::InstructionVal + Opc2) {
      auto *I = cast<BinaryOperator>(V);
      return L.match(I->getOperand(0)) && R.match(I->getOperand(1));
    }
    if (auto *CE = dyn_cast<ConstantExpr>(V))
      return (CE->getOpcode() == Opc1 || CE->getOpcode() == Opc2) &&
             L.match(CE->getOperand(0)) && R.match(CE->getOperand(1));
    return false;
  }
};

/// \brief Matches LShr or AShr.
template <typename LHS, typename RHS>
inline BinOp2_match<LHS, RHS, Instruction::LShr, Instruction::AShr>
m_Shr(const LHS &L, const RHS &R) {
  return BinOp2_match<LHS, RHS, Instruction::LShr, Instruction::AShr>(L, R);
}

/// \brief Matches LShr or Shl.
template <typename LHS, typename RHS>
inline BinOp2_match<LHS, RHS, Instruction::LShr, Instruction::Shl>
m_LogicalShift(const LHS &L, const RHS &R) {
  return BinOp2_match<LHS, RHS, Instruction::LShr, Instruction::Shl>(L, R);
}

/// \brief Matches UDiv and SDiv.
template <typename LHS, typename RHS>
inline BinOp2_match<LHS, RHS, Instruction::SDiv, Instruction::UDiv>
m_IDiv(const LHS &L, const RHS &R) {
  return BinOp2_match<LHS, RHS, Instruction::SDiv, Instruction::UDiv>(L, R);
}

//===----------------------------------------------------------------------===//
// Class that matches exact binary ops.
//
template <typename SubPattern_t> struct Exact_match {
  SubPattern_t SubPattern;

  Exact_match(const SubPattern_t &SP) : SubPattern(SP) {}

  template <typename OpTy> bool match(OpTy *V) {
    // Only matches when the operation carries the 'exact' flag AND the
    // sub-pattern matches the same value.
    if (PossiblyExactOperator *PEO = dyn_cast<PossiblyExactOperator>(V))
      return PEO->isExact() && SubPattern.match(V);
    return false;
  }
};

/// \brief Match an 'exact' binary operation whose sub-pattern also matches.
template <typename T> inline Exact_match<T> m_Exact(const T &SubPattern) {
  return SubPattern;
}

//===----------------------------------------------------------------------===//
// Matchers for CmpInst classes
//

template <typename LHS_t, typename RHS_t, typename Class, typename PredicateTy>
struct CmpClass_match {
  PredicateTy &Predicate;
  LHS_t L;
  RHS_t R;

  CmpClass_match(PredicateTy &Pred, const LHS_t &LHS, const RHS_t &RHS)
      : Predicate(Pred), L(LHS), R(RHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (Class *I = dyn_cast<Class>(V))
      if (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) {
        // Side effect: on success, write the compare predicate out.
        Predicate = I->getPredicate();
        return true;
      }
    return false;
  }
};

/// \brief Match any compare (integer or FP), capturing its predicate.
template <typename LHS, typename RHS>
inline CmpClass_match<LHS, RHS, CmpInst, CmpInst::Predicate>
m_Cmp(CmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
  return CmpClass_match<LHS, RHS, CmpInst, CmpInst::Predicate>(Pred, L, R);
}

/// \brief Match an integer compare, capturing its predicate.
template <typename LHS, typename RHS>
inline CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>
m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
  return CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>(Pred, L, R);
}

/// \brief Match a floating point compare, capturing its predicate.
template <typename LHS, typename RHS>
inline CmpClass_match<LHS, RHS, FCmpInst, FCmpInst::Predicate>
m_FCmp(FCmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
  return CmpClass_match<LHS, RHS, FCmpInst, FCmpInst::Predicate>(Pred, L, R);
}

//===----------------------------------------------------------------------===//
// Matchers for SelectInst classes
//

template <typename Cond_t, typename LHS_t, typename RHS_t>
struct SelectClass_match {
  Cond_t C;
  LHS_t L;
  RHS_t R;

  SelectClass_match(const Cond_t &Cond, const LHS_t &LHS, const RHS_t &RHS)
      : C(Cond), L(LHS), R(RHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *I = dyn_cast<SelectInst>(V))
      return C.match(I->getOperand(0)) && L.match(I->getOperand(1)) &&
             R.match(I->getOperand(2));
    return false;
  }
};

/// \brief Match a select whose condition, true value, and false value match
/// C, L, and R respectively.
template <typename Cond, typename LHS, typename RHS>
inline SelectClass_match<Cond, LHS, RHS> m_Select(const Cond &C, const LHS &L,
                                                  const RHS &R) {
  return SelectClass_match<Cond, LHS, RHS>(C, L, R);
}

/// \brief This matches a select of two constants, e.g.:
/// m_SelectCst<-1, 0>(m_Value(V))
template <int64_t L, int64_t R, typename Cond>
inline SelectClass_match<Cond, constantint_match<L>, constantint_match<R>>
m_SelectCst(const Cond &C) {
  return m_Select(C, m_ConstantInt<L>(), m_ConstantInt<R>());
}

//===----------------------------------------------------------------------===//
// Matchers for CastInst classes
//

template <typename Op_t, unsigned Opcode> struct CastClass_match {
  Op_t Op;

  CastClass_match(const Op_t &OpMatch) : Op(OpMatch) {}

  template <typename OpTy> bool match(OpTy *V) {
    // Operator covers both cast instructions and cast constant expressions.
    if (auto *O = dyn_cast<Operator>(V))
      return O->getOpcode() == Opcode && Op.match(O->getOperand(0));
    return false;
  }
};

/// \brief Matches BitCast.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::BitCast> m_BitCast(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::BitCast>(Op);
}

/// \brief Matches PtrToInt.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::PtrToInt> m_PtrToInt(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::PtrToInt>(Op);
}

/// \brief Matches Trunc.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::Trunc> m_Trunc(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::Trunc>(Op);
}

/// \brief Matches SExt.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::SExt> m_SExt(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::SExt>(Op);
}

/// \brief Matches ZExt.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::ZExt> m_ZExt(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::ZExt>(Op);
}

/// \brief Matches UIToFP.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::UIToFP> m_UIToFP(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::UIToFP>(Op);
}

/// \brief Matches SIToFP.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::SIToFP> m_SIToFP(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::SIToFP>(Op);
}

//===----------------------------------------------------------------------===//
// Matchers for unary operators
//

template <typename LHS_t> struct not_match {
  LHS_t L;

  not_match(const LHS_t &LHS) : L(LHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    // A bitwise 'not' is canonically 'xor X, -1'.
    if (auto *O = dyn_cast<Operator>(V))
      if (O->getOpcode() == Instruction::Xor)
        return matchIfNot(O->getOperand(0), O->getOperand(1));
    return false;
  }

private:
  bool matchIfNot(Value *LHS, Value *RHS) {
    return (isa<ConstantInt>(RHS) || isa<ConstantDataVector>(RHS) ||
            // FIXME: Remove CV.
            isa<ConstantVector>(RHS)) &&
           cast<Constant>(RHS)->isAllOnesValue() && L.match(LHS);
  }
};

/// \brief Match a bitwise 'not' as 'xor V, all-ones'.
template <typename LHS> inline not_match<LHS> m_Not(const LHS &L) { return L; }

template <typename LHS_t> struct neg_match {
  LHS_t L;

  neg_match(const LHS_t &LHS) : L(LHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    // An integer negate is canonically 'sub 0, X'.
    if (auto *O = dyn_cast<Operator>(V))
      if (O->getOpcode() == Instruction::Sub)
        return matchIfNeg(O->getOperand(0), O->getOperand(1));
    return false;
  }

private:
  bool matchIfNeg(Value *LHS, Value *RHS) {
    return ((isa<ConstantInt>(LHS) && cast<ConstantInt>(LHS)->isZero()) ||
            isa<ConstantAggregateZero>(LHS)) &&
           L.match(RHS);
  }
};

/// \brief Match an integer negate.
template <typename LHS> inline neg_match<LHS> m_Neg(const LHS &L) { return L; }

template <typename LHS_t> struct fneg_match {
  LHS_t L;

  fneg_match(const LHS_t &LHS) : L(LHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    // A floating point negate is canonically 'fsub -0.0, X'.
    if (auto *O = dyn_cast<Operator>(V))
      if (O->getOpcode() == Instruction::FSub)
        return matchIfFNeg(O->getOperand(0), O->getOperand(1));
    return false;
  }

private:
  bool matchIfFNeg(Value *LHS, Value *RHS) {
    if (const auto *C = dyn_cast<ConstantFP>(LHS))
      return C->isNegativeZeroValue() && L.match(RHS);
    return false;
  }
};

/// \brief Match a floating point negate.
template <typename LHS> inline fneg_match<LHS> m_FNeg(const LHS &L) {
  return L;
}

//===----------------------------------------------------------------------===//
// Matchers for control flow.
//

struct br_match {
  BasicBlock *&Succ;
  br_match(BasicBlock *&Succ) : Succ(Succ) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *BI = dyn_cast<BranchInst>(V))
      if (BI->isUnconditional()) {
        Succ = BI->getSuccessor(0);
        return true;
      }
    return false;
  }
};

/// \brief Match an unconditional branch, capturing its successor.
inline br_match m_UnconditionalBr(BasicBlock *&Succ) { return br_match(Succ); }

template <typename Cond_t> struct brc_match {
  Cond_t Cond;
  BasicBlock *&T, *&F;
  brc_match(const Cond_t &C, BasicBlock *&t, BasicBlock *&f)
      : Cond(C), T(t), F(f) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *BI = dyn_cast<BranchInst>(V))
      if (BI->isConditional() && Cond.match(BI->getCondition())) {
        T = BI->getSuccessor(0);
        F = BI->getSuccessor(1);
        return true;
      }
    return false;
  }
};

/// \brief Match a conditional branch whose condition matches C, capturing the
/// taken and not-taken successors.
template <typename Cond_t>
inline brc_match<Cond_t> m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F) {
  return brc_match<Cond_t>(C, T, F);
}

//===----------------------------------------------------------------------===//
// Matchers for max/min idioms, eg: "select (sgt x, y), x, y" -> smax(x,y).
//
template <typename CmpInst_t, typename LHS_t, typename RHS_t, typename Pred_t>
struct MaxMin_match {
  LHS_t L;
  RHS_t R;

  MaxMin_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    // Look for "(x pred y) ? x : y" or "(x pred y) ? y : x".
    auto *SI = dyn_cast<SelectInst>(V);
    if (!SI)
      return false;
    auto *Cmp = dyn_cast<CmpInst_t>(SI->getCondition());
    if (!Cmp)
      return false;
    // At this point we have a select conditioned on a comparison. Check that
    // it is the values returned by the select that are being compared.
    Value *TrueVal = SI->getTrueValue();
    Value *FalseVal = SI->getFalseValue();
    Value *LHS = Cmp->getOperand(0);
    Value *RHS = Cmp->getOperand(1);
    if ((TrueVal != LHS || FalseVal != RHS) &&
        (TrueVal != RHS || FalseVal != LHS))
      return false;
    // Normalize the predicate so it reads as "(LHS pred RHS) ? LHS : RHS".
    typename CmpInst_t::Predicate Pred =
        LHS == TrueVal ? Cmp->getPredicate() : Cmp->getSwappedPredicate();
    // Does "(x pred y) ? x : y" represent the desired max/min operation?
    if (!Pred_t::match(Pred))
      return false;
    // It does! Bind the operands.
    return L.match(LHS) && R.match(RHS);
  }
};

/// \brief Helper class for identifying signed max predicates.
struct smax_pred_ty {
  static bool match(ICmpInst::Predicate Pred) {
    return Pred == CmpInst::ICMP_SGT || Pred == CmpInst::ICMP_SGE;
  }
};

/// \brief Helper class for identifying signed min predicates.
struct smin_pred_ty {
  static bool match(ICmpInst::Predicate Pred) {
    return Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_SLE;
  }
};

/// \brief Helper class for identifying unsigned max predicates.
struct umax_pred_ty {
  static bool match(ICmpInst::Predicate Pred) {
    return Pred == CmpInst::ICMP_UGT || Pred == CmpInst::ICMP_UGE;
  }
};

/// \brief Helper class for identifying unsigned min predicates.
struct umin_pred_ty {
  static bool match(ICmpInst::Predicate Pred) {
    return Pred == CmpInst::ICMP_ULT || Pred == CmpInst::ICMP_ULE;
  }
};

/// \brief Helper class for identifying ordered max predicates.
struct ofmax_pred_ty {
  static bool match(FCmpInst::Predicate Pred) {
    return Pred == CmpInst::FCMP_OGT || Pred == CmpInst::FCMP_OGE;
  }
};

/// \brief Helper class for identifying ordered min predicates.
struct ofmin_pred_ty {
  static bool match(FCmpInst::Predicate Pred) {
    return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE;
  }
};

/// \brief Helper class for identifying unordered max predicates.
struct ufmax_pred_ty {
  static bool match(FCmpInst::Predicate Pred) {
    return Pred == CmpInst::FCMP_UGT || Pred == CmpInst::FCMP_UGE;
  }
};

/// \brief Helper class for identifying unordered min predicates.
struct ufmin_pred_ty {
  static bool match(FCmpInst::Predicate Pred) {
    return Pred == CmpInst::FCMP_ULT || Pred == CmpInst::FCMP_ULE;
  }
};

/// \brief Match a signed maximum idiom.
template <typename LHS, typename RHS>
inline MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty> m_SMax(const LHS &L,
                                                             const RHS &R) {
  return MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty>(L, R);
}

/// \brief Match a signed minimum idiom.
template <typename LHS, typename RHS>
inline MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty> m_SMin(const LHS &L,
                                                             const RHS &R) {
  return MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty>(L, R);
}

/// \brief Match an unsigned maximum idiom.
template <typename LHS, typename RHS>
inline MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty> m_UMax(const LHS &L,
                                                             const RHS &R) {
  return MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty>(L, R);
}

/// \brief Match an unsigned minimum idiom.
template <typename LHS, typename RHS>
inline MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty> m_UMin(const LHS &L,
                                                             const RHS &R) {
  return MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty>(L, R);
}

/// \brief Match an 'ordered' floating point maximum function.
/// Floating point has one special value 'NaN'. Therefore, there is no total
/// order. However, if we can ignore the 'NaN' value (for example, because of a
/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'maximum'
/// semantics. In the presence of 'NaN' we have to preserve the original
/// select(fcmp(ogt/ge, L, R), L, R) semantics matched by this predicate.
///
///                         max(L, R)  iff L and R are not NaN
///  m_OrdFMax(L, R) =      R          iff L or R are NaN
template <typename LHS, typename RHS>
inline MaxMin_match<FCmpInst, LHS, RHS, ofmax_pred_ty> m_OrdFMax(const LHS &L,
                                                                 const RHS &R) {
  return MaxMin_match<FCmpInst, LHS, RHS, ofmax_pred_ty>(L, R);
}

/// \brief Match an 'ordered' floating point minimum function.
/// Floating point has one special value 'NaN'. Therefore, there is no total
/// order. However, if we can ignore the 'NaN' value (for example, because of a
/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'minimum'
/// semantics. In the presence of 'NaN' we have to preserve the original
/// select(fcmp(olt/le, L, R), L, R) semantics matched by this predicate.
///
///                         min(L, R)  iff L and R are not NaN
///  m_OrdFMin(L, R) =      R          iff L or R are NaN
template <typename LHS, typename RHS>
inline MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty> m_OrdFMin(const LHS &L,
                                                                 const RHS &R) {
  return MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty>(L, R);
}

/// \brief Match an 'unordered' floating point maximum function.
/// Floating point has one special value 'NaN'. Therefore, there is no total
/// order. However, if we can ignore the 'NaN' value (for example, because of a
/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'maximum'
/// semantics. In the presence of 'NaN' we have to preserve the original
/// select(fcmp(ugt/ge, L, R), L, R) semantics matched by this predicate.
///
///                         max(L, R)    iff L and R are not NaN
///  m_UnordFMax(L, R) =    L            iff L or R are NaN
template <typename LHS, typename RHS>
inline MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>
m_UnordFMax(const LHS &L, const RHS &R) {
  return MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>(L, R);
}

//===----------------------------------------------------------------------===//
// Matchers for overflow check patterns: e.g.
// (a + b) u< a
//

template <typename LHS_t, typename RHS_t, typename Sum_t>
struct UAddWithOverflow_match {
  LHS_t L;
  RHS_t R;
  Sum_t S;

  UAddWithOverflow_match(const LHS_t &L, const RHS_t &R, const Sum_t &S)
      : L(L), R(R), S(S) {}

  template <typename OpTy> bool match(OpTy *V) {
    Value *ICmpLHS, *ICmpRHS;
    ICmpInst::Predicate Pred;
    if (!m_ICmp(Pred, m_Value(ICmpLHS), m_Value(ICmpRHS)).match(V))
      return false;

    Value *AddLHS, *AddRHS;
    auto AddExpr = m_Add(m_Value(AddLHS), m_Value(AddRHS));

    // (a + b) u< a, (a + b) u< b
    if (Pred == ICmpInst::ICMP_ULT)
      if (AddExpr.match(ICmpLHS) && (ICmpRHS == AddLHS || ICmpRHS == AddRHS))
        return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpLHS);

    // a >u (a + b), b >u (a + b)
    if (Pred == ICmpInst::ICMP_UGT)
      if (AddExpr.match(ICmpRHS) && (ICmpLHS == AddLHS || ICmpLHS == AddRHS))
        return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpRHS);

    return false;
  }
};

/// \brief Match an icmp instruction checking for unsigned overflow on addition.
///
/// S is matched to the addition whose result is being checked for overflow, and
/// L and R are matched to the LHS and RHS of S.
template <typename LHS_t, typename RHS_t, typename Sum_t>
UAddWithOverflow_match<LHS_t, RHS_t, Sum_t>
m_UAddWithOverflow(const LHS_t &L, const RHS_t &R, const Sum_t &S) {
  return UAddWithOverflow_match<LHS_t, RHS_t, Sum_t>(L, R, S);
}

/// \brief Match an 'unordered' floating point minimum function.
/// Floating point has one special value 'NaN'. Therefore, there is no total
/// order. However, if we can ignore the 'NaN' value (for example, because of a
/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'minimum'
/// semantics. In the presence of 'NaN' we have to preserve the original
/// select(fcmp(ult/le, L, R), L, R) semantics matched by this predicate.
///
///                          min(L, R)   iff L and R are not NaN
///  m_UnordFMin(L, R) =     L           iff L or R are NaN
template <typename LHS, typename RHS>
inline MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>
m_UnordFMin(const LHS &L, const RHS &R) {
  return MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>(L, R);
}

/// Matcher for a specific call argument: succeeds when the value is a call
/// and the operand at index OpI matches Val.
template <typename Opnd_t> struct Argument_match {
  unsigned OpI;
  Opnd_t Val;
  Argument_match(unsigned OpIdx, const Opnd_t &V) : OpI(OpIdx), Val(V) {}

  template <typename OpTy> bool match(OpTy *V) {
    CallSite CS(V);
    return CS.isCall() && Val.match(CS.getArgument(OpI));
  }
};

/// \brief Match an argument.
template <unsigned OpI, typename Opnd_t>
inline Argument_match<Opnd_t> m_Argument(const Opnd_t &Op) {
  return Argument_match<Opnd_t>(OpI, Op);
}

/// \brief Intrinsic matchers.
struct IntrinsicID_match {
  unsigned ID;
  IntrinsicID_match(Intrinsic::ID IntrID) : ID(IntrID) {}

  template <typename OpTy> bool match(OpTy *V) {
    // Only direct calls to a known intrinsic match; indirect calls have no
    // called function and are rejected.
    if (const auto *CI = dyn_cast<CallInst>(V))
      if (const auto *F = CI->getCalledFunction())
        return F->getIntrinsicID() == ID;
    return false;
  }
};

/// Intrinsic matches are combinations of ID matchers, and argument
/// matchers. Higher arity matcher are defined recursively in terms of and-ing
/// them with lower arity matchers. Here's some convenient typedefs for up to
/// several arguments, and more can be added as needed
template <typename T0 = void, typename T1 = void, typename T2 = void,
          typename T3 = void, typename T4 = void, typename T5 = void,
          typename T6 = void, typename T7 = void, typename T8 = void,
          typename T9 = void, typename T10 = void>
struct m_Intrinsic_Ty;
template <typename T0> struct m_Intrinsic_Ty<T0> {
  typedef match_combine_and<IntrinsicID_match, Argument_match<T0>> Ty;
};
template <typename T0, typename T1> struct m_Intrinsic_Ty<T0, T1> {
  typedef match_combine_and<typename m_Intrinsic_Ty<T0>::Ty, Argument_match<T1>>
      Ty;
};
template <typename T0, typename T1, typename T2>
struct m_Intrinsic_Ty<T0, T1, T2> {
  typedef match_combine_and<typename m_Intrinsic_Ty<T0, T1>::Ty,
                            Argument_match<T2>> Ty;
};
template <typename T0, typename T1, typename T2, typename T3>
struct m_Intrinsic_Ty<T0, T1, T2, T3> {
  typedef match_combine_and<typename m_Intrinsic_Ty<T0, T1, T2>::Ty,
                            Argument_match<T3>> Ty;
};

/// \brief Match intrinsic calls like this:
/// m_Intrinsic<Intrinsic::fabs>(m_Value(X))
template <Intrinsic::ID IntrID> inline IntrinsicID_match m_Intrinsic() {
  return IntrinsicID_match(IntrID);
}

template <Intrinsic::ID IntrID, typename T0>
inline typename m_Intrinsic_Ty<T0>::Ty m_Intrinsic(const T0 &Op0) {
  return m_CombineAnd(m_Intrinsic<IntrID>(), m_Argument<0>(Op0));
}

template <Intrinsic::ID IntrID, typename T0, typename T1>
inline typename m_Intrinsic_Ty<T0, T1>::Ty m_Intrinsic(const T0 &Op0,
                                                       const T1 &Op1) {
  return m_CombineAnd(m_Intrinsic<IntrID>(Op0), m_Argument<1>(Op1));
}

template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2>
inline typename m_Intrinsic_Ty<T0, T1, T2>::Ty
m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2) {
  return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1), m_Argument<2>(Op2));
}

template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2,
          typename T3>
inline typename m_Intrinsic_Ty<T0, T1, T2, T3>::Ty m_Intrinsic(const T0
&Op0, const T1 &Op1, const T2 &Op2, const T3 &Op3) { return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1, Op2), m_Argument<3>(Op3)); } // Helper intrinsic matching specializations. template <typename Opnd0> inline typename m_Intrinsic_Ty<Opnd0>::Ty m_BSwap(const Opnd0 &Op0) { return m_Intrinsic<Intrinsic::bswap>(Op0); } template <typename Opnd0, typename Opnd1> inline typename m_Intrinsic_Ty<Opnd0, Opnd1>::Ty m_FMin(const Opnd0 &Op0, const Opnd1 &Op1) { return m_Intrinsic<Intrinsic::minnum>(Op0, Op1); } template <typename Opnd0, typename Opnd1> inline typename m_Intrinsic_Ty<Opnd0, Opnd1>::Ty m_FMax(const Opnd0 &Op0, const Opnd1 &Op1) { return m_Intrinsic<Intrinsic::maxnum>(Op0, Op1); } } // end namespace PatternMatch } // end namespace llvm #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/Metadata.def
//===- llvm/IR/Metadata.def - Metadata definitions --------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Macros for running through all types of metadata.
//
//===----------------------------------------------------------------------===//

// The includer must define at least one HANDLE_* macro; including this file
// with none defined is almost certainly a mistake.
#if !(defined HANDLE_METADATA || defined HANDLE_METADATA_LEAF ||               \
      defined HANDLE_METADATA_BRANCH || defined HANDLE_MDNODE_LEAF ||          \
      defined HANDLE_MDNODE_BRANCH ||                                          \
      defined HANDLE_SPECIALIZED_MDNODE_LEAF ||                                \
      defined HANDLE_SPECIALIZED_MDNODE_BRANCH)
#error "Missing macro definition of HANDLE_METADATA*"
#endif

// Any HANDLE_* macro the includer leaves undefined defaults to forwarding to
// the next-more-general handler, so defining only HANDLE_METADATA visits
// every class exactly once.

// Handler for all types of metadata.
#ifndef HANDLE_METADATA
#define HANDLE_METADATA(CLASS)
#endif

// Handler for leaf nodes in the class hierarchy.
#ifndef HANDLE_METADATA_LEAF
#define HANDLE_METADATA_LEAF(CLASS) HANDLE_METADATA(CLASS)
#endif

// Handler for non-leaf nodes in the class hierarchy.
#ifndef HANDLE_METADATA_BRANCH
#define HANDLE_METADATA_BRANCH(CLASS) HANDLE_METADATA(CLASS)
#endif

// Handler for leaf nodes under MDNode.
#ifndef HANDLE_MDNODE_LEAF
#define HANDLE_MDNODE_LEAF(CLASS) HANDLE_METADATA_LEAF(CLASS)
#endif

// Handler for non-leaf nodes under MDNode.
#ifndef HANDLE_MDNODE_BRANCH
#define HANDLE_MDNODE_BRANCH(CLASS) HANDLE_METADATA_BRANCH(CLASS)
#endif

// Handler for specialized leaf nodes under MDNode.
#ifndef HANDLE_SPECIALIZED_MDNODE_LEAF
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) HANDLE_MDNODE_LEAF(CLASS)
#endif

// Handler for specialized non-leaf nodes under MDNode.
#ifndef HANDLE_SPECIALIZED_MDNODE_BRANCH
#define HANDLE_SPECIALIZED_MDNODE_BRANCH(CLASS) HANDLE_MDNODE_BRANCH(CLASS)
#endif

// The concrete metadata class hierarchy.  Each invocation expands to one of
// the handlers above according to the class's position in the tree.
HANDLE_METADATA_LEAF(MDString)
HANDLE_METADATA_BRANCH(ValueAsMetadata)
HANDLE_METADATA_LEAF(ConstantAsMetadata)
HANDLE_METADATA_LEAF(LocalAsMetadata)
HANDLE_MDNODE_BRANCH(MDNode)
HANDLE_MDNODE_LEAF(MDTuple)
HANDLE_SPECIALIZED_MDNODE_LEAF(DILocation)
HANDLE_SPECIALIZED_MDNODE_LEAF(DIExpression)
HANDLE_SPECIALIZED_MDNODE_BRANCH(DINode)
HANDLE_SPECIALIZED_MDNODE_LEAF(GenericDINode)
HANDLE_SPECIALIZED_MDNODE_LEAF(DISubrange)
HANDLE_SPECIALIZED_MDNODE_LEAF(DIEnumerator)
HANDLE_SPECIALIZED_MDNODE_BRANCH(DIScope)
HANDLE_SPECIALIZED_MDNODE_BRANCH(DIType)
HANDLE_SPECIALIZED_MDNODE_LEAF(DIBasicType)
HANDLE_SPECIALIZED_MDNODE_BRANCH(DIDerivedTypeBase)
HANDLE_SPECIALIZED_MDNODE_LEAF(DIDerivedType)
HANDLE_SPECIALIZED_MDNODE_BRANCH(DICompositeTypeBase)
HANDLE_SPECIALIZED_MDNODE_LEAF(DICompositeType)
HANDLE_SPECIALIZED_MDNODE_LEAF(DISubroutineType)
HANDLE_SPECIALIZED_MDNODE_LEAF(DIFile)
HANDLE_SPECIALIZED_MDNODE_LEAF(DICompileUnit)
HANDLE_SPECIALIZED_MDNODE_BRANCH(DILocalScope)
HANDLE_SPECIALIZED_MDNODE_LEAF(DISubprogram)
HANDLE_SPECIALIZED_MDNODE_BRANCH(DILexicalBlockBase)
HANDLE_SPECIALIZED_MDNODE_LEAF(DILexicalBlock)
HANDLE_SPECIALIZED_MDNODE_LEAF(DILexicalBlockFile)
HANDLE_SPECIALIZED_MDNODE_LEAF(DINamespace)
HANDLE_SPECIALIZED_MDNODE_LEAF(DIModule)
HANDLE_SPECIALIZED_MDNODE_BRANCH(DITemplateParameter)
HANDLE_SPECIALIZED_MDNODE_LEAF(DITemplateTypeParameter)
HANDLE_SPECIALIZED_MDNODE_LEAF(DITemplateValueParameter)
HANDLE_SPECIALIZED_MDNODE_BRANCH(DIVariable)
HANDLE_SPECIALIZED_MDNODE_LEAF(DIGlobalVariable)
HANDLE_SPECIALIZED_MDNODE_LEAF(DILocalVariable)
HANDLE_SPECIALIZED_MDNODE_LEAF(DIObjCProperty)
HANDLE_SPECIALIZED_MDNODE_LEAF(DIImportedEntity)

// Clean up after ourselves so the includer doesn't have to.
#undef HANDLE_METADATA
#undef HANDLE_METADATA_LEAF
#undef HANDLE_METADATA_BRANCH
#undef HANDLE_MDNODE_LEAF
#undef HANDLE_MDNODE_BRANCH
#undef HANDLE_SPECIALIZED_MDNODE_LEAF
#undef HANDLE_SPECIALIZED_MDNODE_BRANCH
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/GetElementPtrTypeIterator.h
//===- GetElementPtrTypeIterator.h ------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an iterator for walking through the types indexed by
// getelementptr instructions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_GETELEMENTPTRTYPEITERATOR_H
#define LLVM_IR_GETELEMENTPTRTYPEITERATOR_H

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/User.h"
#include "llvm/ADT/PointerIntPair.h"

namespace llvm {
  /// Forward iterator that walks a GEP's index operands in lock-step with the
  /// type being indexed at each level.
  template<typename ItTy = User::const_op_iterator>
  class generic_gep_type_iterator {
    // Iterator over the GEP's index operands.
    ItTy OpIt;
    // The current type.  The int flag is set while the iterator still sits on
    // the pointer-operand level (see begin(Ty, AddrSpace, It)); in that state
    // operator* returns CurTy's pointer type in address space AddrSpace, and
    // the first preincrement clears the flag instead of stepping into CurTy.
    PointerIntPair<Type *, 1> CurTy;
    unsigned AddrSpace; // Only read while CurTy's flag is set.
    generic_gep_type_iterator() {}
  public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = Type *;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type *;
    using reference = value_type &;

    // Begin iterating the indexed types, starting directly at element type Ty.
    static generic_gep_type_iterator begin(Type *Ty, ItTy It) {
      generic_gep_type_iterator I;
      I.CurTy.setPointer(Ty);
      I.OpIt = It;
      return I;
    }
    // Begin iterating from the pointer-operand level: the first dereference
    // yields Ty's pointer type in AddrSpace.
    static generic_gep_type_iterator begin(Type *Ty, unsigned AddrSpace,
                                           ItTy It) {
      generic_gep_type_iterator I;
      I.CurTy.setPointer(Ty);
      I.CurTy.setInt(true);
      I.AddrSpace = AddrSpace;
      I.OpIt = It;
      return I;
    }
    static generic_gep_type_iterator end(ItTy It) {
      generic_gep_type_iterator I;
      I.OpIt = It;
      return I;
    }

    // Equality compares only the operand iterator; the type tracks it.
    bool operator==(const generic_gep_type_iterator& x) const {
      return OpIt == x.OpIt;
    }
    bool operator!=(const generic_gep_type_iterator& x) const {
      return !operator==(x);
    }

    Type *operator*() const {
      if (CurTy.getInt())
        return CurTy.getPointer()->getPointerTo(AddrSpace);
      return CurTy.getPointer();
    }

    // Return the type selected by the current index operand.
    Type *getIndexedType() const {
      if (CurTy.getInt())
        return CurTy.getPointer();
      CompositeType *CT = cast<CompositeType>(CurTy.getPointer());
      return CT->getTypeAtIndex(getOperand());
    }

    // This is a non-standard operator->.  It allows you to call methods on the
    // current type directly.
    Type *operator->() const { return operator*(); }

    // The GEP index operand corresponding to the current level.
    Value *getOperand() const { return *OpIt; }

    generic_gep_type_iterator& operator++() {   // Preincrement
      if (CurTy.getInt()) {
        // Leaving the pointer-operand level; CurTy already holds the element
        // type.
        CurTy.setInt(false);
      } else if (CompositeType *CT =
                     dyn_cast<CompositeType>(CurTy.getPointer())) {
        CurTy.setPointer(CT->getTypeAtIndex(getOperand()));
      } else {
        // Non-composite type: there is nothing further to index into.
        CurTy.setPointer(nullptr);
      }
      ++OpIt;
      return *this;
    }

    generic_gep_type_iterator operator++(int) { // Postincrement
      generic_gep_type_iterator tmp = *this; ++*this; return tmp;
    }
  };

  typedef generic_gep_type_iterator<> gep_type_iterator;

  // Iterate a GEP instruction/operator, starting at the pointer-operand level
  // (the first index operand is GEP operand 1).
  inline gep_type_iterator gep_type_begin(const User *GEP) {
    auto *GEPOp = cast<GEPOperator>(GEP);
    return gep_type_iterator::begin(
        GEPOp->getSourceElementType(),
        cast<PointerType>(GEPOp->getPointerOperandType()->getScalarType())
            ->getAddressSpace(),
        GEP->op_begin() + 1);
  }
  inline gep_type_iterator gep_type_end(const User *GEP) {
    return gep_type_iterator::end(GEP->op_end());
  }
  inline gep_type_iterator gep_type_begin(const User &GEP) {
    auto &GEPOp = cast<GEPOperator>(GEP);
    return gep_type_iterator::begin(
        GEPOp.getSourceElementType(),
        cast<PointerType>(GEPOp.getPointerOperandType()->getScalarType())
            ->getAddressSpace(),
        GEP.op_begin() + 1);
  }
  inline gep_type_iterator gep_type_end(const User &GEP) {
    return gep_type_iterator::end(GEP.op_end());
  }

  // Iterate an explicit (type, index-list) pair, e.g. for constant folding.
  template<typename T>
  inline generic_gep_type_iterator<const T *>
  gep_type_begin(Type *Op0, ArrayRef<T> A) {
    return generic_gep_type_iterator<const T *>::begin(Op0, A.begin());
  }

  template<typename T>
  inline generic_gep_type_iterator<const T *>
  gep_type_end(Type * /*Op0*/, ArrayRef<T> A) {
    return generic_gep_type_iterator<const T *>::end(A.end());
  }
} // end namespace llvm

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/CallSite.h
//===- CallSite.h - Abstract Call & Invoke instrs ---------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the CallSite class, which is a handy wrapper for code that // wants to treat Call and Invoke instructions in a generic way. When in non- // mutation context (e.g. an analysis) ImmutableCallSite should be used. // Finally, when some degree of customization is necessary between these two // extremes, CallSiteBase<> can be supplied with fine-tuned parameters. // // NOTE: These classes are supposed to have "value semantics". So they should be // passed by value, not by reference; they should not be "new"ed or "delete"d. // They are efficiently copyable, assignable and constructable, with cost // equivalent to copying a pointer (notice that they have only a single data // member). The internal representation carries a flag which indicates which of // the two variants is enclosed. This allows for cheaper checks when various // accessors of CallSite are employed. 
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_CALLSITE_H
#define LLVM_IR_CALLSITE_H

#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instructions.h"

namespace llvm {

class CallInst;
class InvokeInst;

template <typename FunTy = const Function,
          typename BBTy = const BasicBlock,
          typename ValTy = const Value,
          typename UserTy = const User,
          typename InstrTy = const Instruction,
          typename CallTy = const CallInst,
          typename InvokeTy = const InvokeInst,
          typename IterTy = User::const_op_iterator>
class CallSiteBase {
protected:
  // The enclosed instruction.  The bool flag is true for a CallInst and false
  // for an InvokeInst or a null instruction.
  PointerIntPair<InstrTy*, 1, bool> I;

  CallSiteBase() : I(nullptr, false) {}
  CallSiteBase(CallTy *CI) : I(CI, true) { assert(CI); }
  CallSiteBase(InvokeTy *II) : I(II, false) { assert(II); }
  explicit CallSiteBase(ValTy *II) { *this = get(II); }

private:
  /// CallSiteBase::get - This static method is sort of like a constructor. It
  /// will create an appropriate call site for a Call or Invoke instruction, but
  /// it can also create a null initialized CallSiteBase object for something
  /// which is NOT a call site.
  ///
  static CallSiteBase get(ValTy *V) {
    if (InstrTy *II = dyn_cast<InstrTy>(V)) {
      if (II->getOpcode() == Instruction::Call)
        return CallSiteBase(static_cast<CallTy*>(II));
      else if (II->getOpcode() == Instruction::Invoke)
        return CallSiteBase(static_cast<InvokeTy*>(II));
    }
    return CallSiteBase();
  }
public:
  /// isCall - true if a CallInst is enclosed.
  /// Note that !isCall() does not mean it is an InvokeInst enclosed,
  /// it also could signify a NULL Instruction pointer.
  bool isCall() const { return I.getInt(); }

  /// isInvoke - true if a InvokeInst is enclosed.
  ///
  bool isInvoke() const { return getInstruction() && !I.getInt(); }

  InstrTy *getInstruction() const { return I.getPointer(); }
  InstrTy *operator->() const { return I.getPointer(); }
  explicit operator bool() const { return I.getPointer(); }

  /// Get the basic block containing the call site
  BBTy* getParent() const { return getInstruction()->getParent(); }

  /// getCalledValue - Return the pointer to function that is being called.
  ///
  ValTy *getCalledValue() const {
    assert(getInstruction() && "Not a call or invoke instruction!");
    return *getCallee();
  }

  /// getCalledFunction - Return the function being called if this is a direct
  /// call, otherwise return null (if it's an indirect call).
  ///
  FunTy *getCalledFunction() const {
    return dyn_cast<FunTy>(getCalledValue());
  }

  /// setCalledFunction - Set the callee to the specified value.
  ///
  void setCalledFunction(Value *V) {
    assert(getInstruction() && "Not a call or invoke instruction!");
    *getCallee() = V;
  }

  /// isCallee - Determine whether the passed iterator points to the
  /// callee operand's Use.
  bool isCallee(Value::const_user_iterator UI) const {
    return isCallee(&UI.getUse());
  }

  /// Determine whether this Use is the callee operand's Use.
  bool isCallee(const Use *U) const { return getCallee() == U; }

  ValTy *getArgument(unsigned ArgNo) const {
    assert(arg_begin() + ArgNo < arg_end() && "Argument # out of range!");
    return *(arg_begin() + ArgNo);
  }

  void setArgument(unsigned ArgNo, Value* newVal) {
    assert(getInstruction() && "Not a call or invoke instruction!");
    assert(arg_begin() + ArgNo < arg_end() && "Argument # out of range!");
    // Argument operands come first, so the operand index equals ArgNo for
    // both calls and invokes.
    getInstruction()->setOperand(ArgNo, newVal);
  }

  /// Given a value use iterator, returns the argument that corresponds to it.
  /// Iterator must actually correspond to an argument.
  unsigned getArgumentNo(Value::const_user_iterator I) const {
    return getArgumentNo(&I.getUse());
  }

  /// Given a use for an argument, get the argument number that corresponds to
  /// it.
  unsigned getArgumentNo(const Use *U) const {
    assert(getInstruction() && "Not a call or invoke instruction!");
    assert(arg_begin() <= U && U < arg_end() && "Argument # out of range!");
    return U - arg_begin();
  }

  /// arg_iterator - The type of iterator to use when looping over actual
  /// arguments at this call site.
  typedef IterTy arg_iterator;

  /// arg_begin/arg_end - Return iterators corresponding to the actual argument
  /// list for a call site.
  IterTy arg_begin() const {
    assert(getInstruction() && "Not a call or invoke instruction!");
    // Skip non-arguments
    return (*this)->op_begin();
  }

  IterTy arg_end() const { return (*this)->op_end() - getArgumentEndOffset(); }
  iterator_range<IterTy> args() const {
    return iterator_range<IterTy>(arg_begin(), arg_end());
  }
  bool arg_empty() const { return arg_end() == arg_begin(); }
  unsigned arg_size() const { return unsigned(arg_end() - arg_begin()); }

  /// getType - Return the type of the instruction that generated this call site
  ///
  Type *getType() const { return (*this)->getType(); }

  /// getCaller - Return the caller function for this call site
  ///
  FunTy *getCaller() const { return (*this)->getParent()->getParent(); }

  /// \brief Tests if this call site must be tail call optimized. Only a
  /// CallInst can be tail call optimized.
  bool isMustTailCall() const {
    return isCall() && cast<CallInst>(getInstruction())->isMustTailCall();
  }

  /// \brief Tests if this call site is marked as a tail call.
  bool isTailCall() const {
    return isCall() && cast<CallInst>(getInstruction())->isTailCall();
  }

  // Forward an accessor to the concrete instruction, dispatching on whether a
  // CallInst or an InvokeInst is enclosed.
#define CALLSITE_DELEGATE_GETTER(METHOD)   \
  InstrTy *II = getInstruction();          \
  return isCall()                          \
    ? cast<CallInst>(II)->METHOD           \
    : cast<InvokeInst>(II)->METHOD

#define CALLSITE_DELEGATE_SETTER(METHOD)   \
  InstrTy *II = getInstruction();          \
  if (isCall())                            \
    cast<CallInst>(II)->METHOD;            \
  else                                     \
    cast<InvokeInst>(II)->METHOD

  unsigned getNumArgOperands() const {
    CALLSITE_DELEGATE_GETTER(getNumArgOperands());
  }

  ValTy *getArgOperand(unsigned i) const {
    CALLSITE_DELEGATE_GETTER(getArgOperand(i));
  }

  bool isInlineAsm() const {
    if (isCall())
      return cast<CallInst>(getInstruction())->isInlineAsm();
    return false;
  }

  /// getCallingConv/setCallingConv - get or set the calling convention of the
  /// call.
  CallingConv::ID getCallingConv() const {
    CALLSITE_DELEGATE_GETTER(getCallingConv());
  }
  void setCallingConv(CallingConv::ID CC) {
    CALLSITE_DELEGATE_SETTER(setCallingConv(CC));
  }

  FunctionType *getFunctionType() const {
    CALLSITE_DELEGATE_GETTER(getFunctionType());
  }

  void mutateFunctionType(FunctionType *Ty) const {
    CALLSITE_DELEGATE_SETTER(mutateFunctionType(Ty));
  }

  /// getAttributes/setAttributes - get or set the parameter attributes of
  /// the call.
  const AttributeSet &getAttributes() const {
    CALLSITE_DELEGATE_GETTER(getAttributes());
  }
  void setAttributes(const AttributeSet &PAL) {
    CALLSITE_DELEGATE_SETTER(setAttributes(PAL));
  }

  /// \brief Return true if this function has the given attribute.
  bool hasFnAttr(Attribute::AttrKind A) const {
    CALLSITE_DELEGATE_GETTER(hasFnAttr(A));
  }

  /// \brief Return true if the call or the callee has the given attribute.
  bool paramHasAttr(unsigned i, Attribute::AttrKind A) const {
    CALLSITE_DELEGATE_GETTER(paramHasAttr(i, A));
  }

  /// @brief Extract the alignment for a call or parameter (0=unknown).
  uint16_t getParamAlignment(uint16_t i) const {
    CALLSITE_DELEGATE_GETTER(getParamAlignment(i));
  }

  /// @brief Extract the number of dereferenceable bytes for a call or
  /// parameter (0=unknown).
  uint64_t getDereferenceableBytes(uint16_t i) const {
    CALLSITE_DELEGATE_GETTER(getDereferenceableBytes(i));
  }

  /// @brief Extract the number of dereferenceable_or_null bytes for a call or
  /// parameter (0=unknown).
  uint64_t getDereferenceableOrNullBytes(uint16_t i) const {
    CALLSITE_DELEGATE_GETTER(getDereferenceableOrNullBytes(i));
  }

  /// \brief Return true if the call should not be treated as a call to a
  /// builtin.
  bool isNoBuiltin() const {
    CALLSITE_DELEGATE_GETTER(isNoBuiltin());
  }

  /// @brief Return true if the call should not be inlined.
  bool isNoInline() const {
    CALLSITE_DELEGATE_GETTER(isNoInline());
  }
  void setIsNoInline(bool Value = true) {
    CALLSITE_DELEGATE_SETTER(setIsNoInline(Value));
  }

  /// @brief Determine if the call does not access memory.
  bool doesNotAccessMemory() const {
    CALLSITE_DELEGATE_GETTER(doesNotAccessMemory());
  }
  void setDoesNotAccessMemory() {
    CALLSITE_DELEGATE_SETTER(setDoesNotAccessMemory());
  }

  /// @brief Determine if the call does not access or only reads memory.
  bool onlyReadsMemory() const {
    CALLSITE_DELEGATE_GETTER(onlyReadsMemory());
  }
  void setOnlyReadsMemory() {
    CALLSITE_DELEGATE_SETTER(setOnlyReadsMemory());
  }

  /// @brief Determine if the call can access memmory only using pointers based
  /// on its arguments.
  bool onlyAccessesArgMemory() const {
    CALLSITE_DELEGATE_GETTER(onlyAccessesArgMemory());
  }
  void setOnlyAccessesArgMemory() {
    CALLSITE_DELEGATE_SETTER(setOnlyAccessesArgMemory());
  }

  /// @brief Determine if the call cannot return.
  bool doesNotReturn() const {
    CALLSITE_DELEGATE_GETTER(doesNotReturn());
  }
  void setDoesNotReturn() {
    CALLSITE_DELEGATE_SETTER(setDoesNotReturn());
  }

  /// @brief Determine if the call cannot unwind.
  bool doesNotThrow() const {
    CALLSITE_DELEGATE_GETTER(doesNotThrow());
  }
  void setDoesNotThrow() {
    CALLSITE_DELEGATE_SETTER(setDoesNotThrow());
  }

#undef CALLSITE_DELEGATE_GETTER
#undef CALLSITE_DELEGATE_SETTER

  // NOTE: In the attribute queries below, index 0 is the return value and
  // argument attributes start at index 1, hence the "ArgNo + 1".

  /// @brief Determine whether this argument is not captured.
  bool doesNotCapture(unsigned ArgNo) const {
    return paramHasAttr(ArgNo + 1, Attribute::NoCapture);
  }

  /// @brief Determine whether this argument is passed by value.
  bool isByValArgument(unsigned ArgNo) const {
    return paramHasAttr(ArgNo + 1, Attribute::ByVal);
  }

  /// @brief Determine whether this argument is passed in an alloca.
  bool isInAllocaArgument(unsigned ArgNo) const {
    return paramHasAttr(ArgNo + 1, Attribute::InAlloca);
  }

  /// @brief Determine whether this argument is passed by value or in an alloca.
  bool isByValOrInAllocaArgument(unsigned ArgNo) const {
    return paramHasAttr(ArgNo + 1, Attribute::ByVal) ||
           paramHasAttr(ArgNo + 1, Attribute::InAlloca);
  }

  /// @brief Determine if there are is an inalloca argument. Only the last
  /// argument can have the inalloca attribute.
  bool hasInAllocaArgument() const {
    return paramHasAttr(arg_size(), Attribute::InAlloca);
  }

  bool doesNotAccessMemory(unsigned ArgNo) const {
    return paramHasAttr(ArgNo + 1, Attribute::ReadNone);
  }

  bool onlyReadsMemory(unsigned ArgNo) const {
    return paramHasAttr(ArgNo + 1, Attribute::ReadOnly) ||
           paramHasAttr(ArgNo + 1, Attribute::ReadNone);
  }

  /// @brief Return true if the return value is known to be not null.
  /// This may be because it has the nonnull attribute, or because at least
  /// one byte is dereferenceable and the pointer is in addrspace(0).
  bool isReturnNonNull() const {
    if (paramHasAttr(0, Attribute::NonNull))
      return true;
    else if (getDereferenceableBytes(0) > 0 &&
             getType()->getPointerAddressSpace() == 0)
      return true;

    return false;
  }

  /// hasArgument - Returns true if this CallSite passes the given Value* as an
  /// argument to the called function.
  bool hasArgument(const Value *Arg) const {
    for (arg_iterator AI = this->arg_begin(), E = this->arg_end(); AI != E;
         ++AI)
      if (AI->get() == Arg)
        return true;
    return false;
  }

private:
  // Number of trailing operands that are not arguments: a call has just the
  // callee at the end; an invoke additionally has its normal and unwind
  // destination blocks.
  unsigned getArgumentEndOffset() const {
    if (isCall())
      return 1; // Skip Callee
    else
      return 3; // Skip BB, BB, Callee
  }

  IterTy getCallee() const {
    if (isCall()) // Skip Callee
      return cast<CallInst>(getInstruction())->op_end() - 1;
    else // Skip BB, BB, Callee
      return cast<InvokeInst>(getInstruction())->op_end() - 3;
  }
};

class CallSite : public CallSiteBase<Function, BasicBlock, Value, User,
                                     Instruction, CallInst, InvokeInst,
                                     User::op_iterator> {
public:
  CallSite() {}
  CallSite(CallSiteBase B) : CallSiteBase(B) {}
  CallSite(CallInst *CI) : CallSiteBase(CI) {}
  CallSite(InvokeInst *II) : CallSiteBase(II) {}
  explicit CallSite(Instruction *II) : CallSiteBase(II) {}
  explicit CallSite(Value *V) : CallSiteBase(V) {}

  bool operator==(const CallSite &CS) const { return I == CS.I; }
  bool operator!=(const CallSite &CS) const { return I != CS.I; }
  bool operator<(const CallSite &CS) const {
    return getInstruction() < CS.getInstruction();
  }

private:
  // Deliberately left undefined: hides the base-class accessor so mutable
  // CallSite clients go through the public setters instead.
  User::op_iterator getCallee() const;
};

/// ImmutableCallSite - establish a view to a call site for examination
class ImmutableCallSite : public CallSiteBase<> {
public:
  ImmutableCallSite() {}
  ImmutableCallSite(const CallInst *CI) : CallSiteBase(CI) {}
  ImmutableCallSite(const InvokeInst *II) : CallSiteBase(II) {}
  explicit ImmutableCallSite(const Instruction *II) : CallSiteBase(II) {}
  explicit ImmutableCallSite(const Value *V) : CallSiteBase(V) {}
  ImmutableCallSite(CallSite CS) : CallSiteBase(CS.getInstruction()) {}
};

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/IR/DIBuilder.h
//===- DIBuilder.h - Debug Information Builder ------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines a DIBuilder that is useful for creating debugging // information entries in LLVM IR form. // //===----------------------------------------------------------------------===// #ifndef LLVM_IR_DIBUILDER_H #define LLVM_IR_DIBUILDER_H #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/StringRef.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/TrackingMDRef.h" #include "llvm/IR/ValueHandle.h" #include "llvm/Support/DataTypes.h" namespace llvm { class BasicBlock; class Instruction; class Function; class Module; class Value; class Constant; class LLVMContext; class StringRef; class DIBuilder { Module &M; LLVMContext &VMContext; DICompileUnit *CUNode; ///< The one compile unit created by this DIBuiler. Function *DeclareFn; ///< llvm.dbg.declare Function *ValueFn; ///< llvm.dbg.value SmallVector<Metadata *, 4> AllEnumTypes; /// Track the RetainTypes, since they can be updated later on. SmallVector<TrackingMDNodeRef, 4> AllRetainTypes; SmallVector<Metadata *, 4> AllSubprograms; SmallVector<Metadata *, 4> AllGVs; SmallVector<TrackingMDNodeRef, 4> AllImportedModules; /// Track nodes that may be unresolved. SmallVector<TrackingMDNodeRef, 4> UnresolvedNodes; bool AllowUnresolvedNodes; /// Each subprogram's preserved local variables. DenseMap<MDNode *, std::vector<TrackingMDNodeRef>> PreservedVariables; DIBuilder(const DIBuilder &) = delete; void operator=(const DIBuilder &) = delete; /// Create a temporary. /// /// Create an \a temporary node and track it in \a UnresolvedNodes. void trackIfUnresolved(MDNode *N); public: /// Construct a builder for a module. 
/// /// If \c AllowUnresolved, collect unresolved nodes attached to the module /// in order to resolve cycles during \a finalize(). explicit DIBuilder(Module &M, bool AllowUnresolved = true); enum DebugEmissionKind { FullDebug=1, LineTablesOnly }; /// Construct any deferred debug info descriptors. void finalize(); /// A CompileUnit provides an anchor for all debugging /// information generated during this instance of compilation. /// \param Lang Source programming language, eg. dwarf::DW_LANG_C99 /// \param File File name /// \param Dir Directory /// \param Producer Identify the producer of debugging information /// and code. Usually this is a compiler /// version string. /// \param isOptimized A boolean flag which indicates whether optimization /// is enabled or not. /// \param Flags This string lists command line options. This /// string is directly embedded in debug info /// output which may be used by a tool /// analyzing generated debugging information. /// \param RV This indicates runtime version for languages like /// Objective-C. /// \param SplitName The name of the file that we'll split debug info /// out into. /// \param Kind The kind of debug information to generate. /// \param DWOId The DWOId if this is a split skeleton compile unit. /// \param EmitDebugInfo A boolean flag which indicates whether /// debug information should be written to /// the final output or not. When this is /// false, debug information annotations will /// be present in the IL but they are not /// written to the final assembly or object /// file. This supports tracking source /// location information in the back end /// without actually changing the output /// (e.g., when using optimization remarks). 
DICompileUnit * createCompileUnit(unsigned Lang, StringRef File, StringRef Dir, StringRef Producer, bool isOptimized, StringRef Flags, unsigned RV, StringRef SplitName = StringRef(), DebugEmissionKind Kind = FullDebug, uint64_t DWOId = 0, bool EmitDebugInfo = true); /// Create a file descriptor to hold debugging information /// for a file. DIFile *createFile(StringRef Filename, StringRef Directory); /// Create a single enumerator value. DIEnumerator *createEnumerator(StringRef Name, int64_t Val); /// Create a DWARF unspecified type. DIBasicType *createUnspecifiedType(StringRef Name); /// Create C++11 nullptr type. DIBasicType *createNullPtrType(); /// Create debugging information entry for a basic /// type. /// \param Name Type name. /// \param SizeInBits Size of the type. /// \param AlignInBits Type alignment. /// \param Encoding DWARF encoding code, e.g. dwarf::DW_ATE_float. DIBasicType *createBasicType(StringRef Name, uint64_t SizeInBits, uint64_t AlignInBits, unsigned Encoding); /// Create debugging information entry for a qualified /// type, e.g. 'const int'. /// \param Tag Tag identifing type, e.g. dwarf::TAG_volatile_type /// \param FromTy Base Type. DIDerivedType *createQualifiedType(unsigned Tag, DIType *FromTy); /// Create debugging information entry for a pointer. /// \param PointeeTy Type pointed by this pointer. /// \param SizeInBits Size. /// \param AlignInBits Alignment. (optional) /// \param Name Pointer type name. (optional) DIDerivedType *createPointerType(DIType *PointeeTy, uint64_t SizeInBits, uint64_t AlignInBits = 0, StringRef Name = ""); /// Create debugging information entry for a pointer to member. /// \param PointeeTy Type pointed to by this pointer. /// \param SizeInBits Size. /// \param AlignInBits Alignment. (optional) /// \param Class Type for which this pointer points to members of. 
DIDerivedType *createMemberPointerType(DIType *PointeeTy, DIType *Class, uint64_t SizeInBits, uint64_t AlignInBits = 0); /// Create debugging information entry for a c++ /// style reference or rvalue reference type. DIDerivedType *createReferenceType(unsigned Tag, DIType *RTy); /// Create debugging information entry for a typedef. /// \param Ty Original type. /// \param Name Typedef name. /// \param File File where this type is defined. /// \param LineNo Line number. /// \param Context The surrounding context for the typedef. DIDerivedType *createTypedef(DIType *Ty, StringRef Name, DIFile *File, unsigned LineNo, DIScope *Context); /// Create debugging information entry for a 'friend'. DIDerivedType *createFriend(DIType *Ty, DIType *FriendTy); /// Create debugging information entry to establish /// inheritance relationship between two types. /// \param Ty Original type. /// \param BaseTy Base type. Ty is inherits from base. /// \param BaseOffset Base offset. /// \param Flags Flags to describe inheritance attribute, /// e.g. private DIDerivedType *createInheritance(DIType *Ty, DIType *BaseTy, uint64_t BaseOffset, unsigned Flags); /// Create debugging information entry for a member. /// \param Scope Member scope. /// \param Name Member name. /// \param File File where this member is defined. /// \param LineNo Line number. /// \param SizeInBits Member size. /// \param AlignInBits Member alignment. /// \param OffsetInBits Member offset. /// \param Flags Flags to encode member attribute, e.g. private /// \param Ty Parent type. DIDerivedType *createMemberType(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo, uint64_t SizeInBits, uint64_t AlignInBits, uint64_t OffsetInBits, unsigned Flags, DIType *Ty); /// Create debugging information entry for a /// C++ static data member. /// \param Scope Member scope. /// \param Name Member name. /// \param File File where this member is declared. /// \param LineNo Line number. /// \param Ty Type of the static member. 
/// \param Flags Flags to encode member attribute, e.g. private. /// \param Val Const initializer of the member. DIDerivedType *createStaticMemberType(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo, DIType *Ty, unsigned Flags, llvm::Constant *Val); /// Create debugging information entry for Objective-C /// instance variable. /// \param Name Member name. /// \param File File where this member is defined. /// \param LineNo Line number. /// \param SizeInBits Member size. /// \param AlignInBits Member alignment. /// \param OffsetInBits Member offset. /// \param Flags Flags to encode member attribute, e.g. private /// \param Ty Parent type. /// \param PropertyNode Property associated with this ivar. DIDerivedType *createObjCIVar(StringRef Name, DIFile *File, unsigned LineNo, uint64_t SizeInBits, uint64_t AlignInBits, uint64_t OffsetInBits, unsigned Flags, DIType *Ty, MDNode *PropertyNode); /// Create debugging information entry for Objective-C /// property. /// \param Name Property name. /// \param File File where this property is defined. /// \param LineNumber Line number. /// \param GetterName Name of the Objective C property getter selector. /// \param SetterName Name of the Objective C property setter selector. /// \param PropertyAttributes Objective C property attributes. /// \param Ty Type. DIObjCProperty *createObjCProperty(StringRef Name, DIFile *File, unsigned LineNumber, StringRef GetterName, StringRef SetterName, unsigned PropertyAttributes, DIType *Ty); /// Create debugging information entry for a class. /// \param Scope Scope in which this class is defined. /// \param Name class name. /// \param File File where this member is defined. /// \param LineNumber Line number. /// \param SizeInBits Member size. /// \param AlignInBits Member alignment. /// \param OffsetInBits Member offset. /// \param Flags Flags to encode member attribute, e.g. private /// \param Elements class members. 
/// \param VTableHolder Debug info of the base class that contains vtable /// for this type. This is used in /// DW_AT_containing_type. See DWARF documentation /// for more info. /// \param TemplateParms Template type parameters. /// \param UniqueIdentifier A unique identifier for the class. DICompositeType *createClassType(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber, uint64_t SizeInBits, uint64_t AlignInBits, uint64_t OffsetInBits, unsigned Flags, DIType *DerivedFrom, DINodeArray Elements, DIType *VTableHolder = nullptr, MDNode *TemplateParms = nullptr, StringRef UniqueIdentifier = ""); /// Create debugging information entry for a struct. /// \param Scope Scope in which this struct is defined. /// \param Name Struct name. /// \param File File where this member is defined. /// \param LineNumber Line number. /// \param SizeInBits Member size. /// \param AlignInBits Member alignment. /// \param Flags Flags to encode member attribute, e.g. private /// \param Elements Struct elements. /// \param RunTimeLang Optional parameter, Objective-C runtime version. /// \param UniqueIdentifier A unique identifier for the struct. DICompositeType *createStructType( DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber, uint64_t SizeInBits, uint64_t AlignInBits, unsigned Flags, DIType *DerivedFrom, DINodeArray Elements, unsigned RunTimeLang = 0, DIType *VTableHolder = nullptr, StringRef UniqueIdentifier = ""); /// Create debugging information entry for an union. /// \param Scope Scope in which this union is defined. /// \param Name Union name. /// \param File File where this member is defined. /// \param LineNumber Line number. /// \param SizeInBits Member size. /// \param AlignInBits Member alignment. /// \param Flags Flags to encode member attribute, e.g. private /// \param Elements Union elements. /// \param RunTimeLang Optional parameter, Objective-C runtime version. /// \param UniqueIdentifier A unique identifier for the union. 
  DICompositeType *createUnionType(DIScope *Scope, StringRef Name,
                                   DIFile *File, unsigned LineNumber,
                                   uint64_t SizeInBits, uint64_t AlignInBits,
                                   unsigned Flags, DINodeArray Elements,
                                   unsigned RunTimeLang = 0,
                                   StringRef UniqueIdentifier = "");

  /// Create debugging information for template
  /// type parameter.
  /// \param Scope Scope in which this type is defined.
  /// \param Name  Type parameter name.
  /// \param Ty    Parameter type.
  DITemplateTypeParameter *
  createTemplateTypeParameter(DIScope *Scope, StringRef Name, DIType *Ty);

  /// Create debugging information for template
  /// value parameter.
  /// \param Scope Scope in which this type is defined.
  /// \param Name  Value parameter name.
  /// \param Ty    Parameter type.
  /// \param Val   Constant parameter value.
  DITemplateValueParameter *createTemplateValueParameter(DIScope *Scope,
                                                         StringRef Name,
                                                         DIType *Ty,
                                                         Constant *Val);

  /// Create debugging information for a template template parameter.
  /// \param Scope Scope in which this type is defined.
  /// \param Name  Value parameter name.
  /// \param Ty    Parameter type.
  /// \param Val   The fully qualified name of the template.
  DITemplateValueParameter *createTemplateTemplateParameter(DIScope *Scope,
                                                            StringRef Name,
                                                            DIType *Ty,
                                                            StringRef Val);

  /// Create debugging information for a template parameter pack.
  /// \param Scope Scope in which this type is defined.
  /// \param Name  Value parameter name.
  /// \param Ty    Parameter type.
  /// \param Val   An array of types in the pack.
  DITemplateValueParameter *createTemplateParameterPack(DIScope *Scope,
                                                        StringRef Name,
                                                        DIType *Ty,
                                                        DINodeArray Val);

  /// Create debugging information entry for an array.
  /// \param Size        Array size.
  /// \param AlignInBits Alignment.
  /// \param Ty          Element type.
  /// \param Subscripts  Subscripts.
  DICompositeType *createArrayType(uint64_t Size, uint64_t AlignInBits,
                                   DIType *Ty, DINodeArray Subscripts);

  /// Create debugging information entry for a vector type.
  /// \param Size        Array size.
  /// \param AlignInBits Alignment.
/// \param Ty Element type. /// \param Subscripts Subscripts. DICompositeType *createVectorType(uint64_t Size, uint64_t AlignInBits, DIType *Ty, DINodeArray Subscripts); /// Create debugging information entry for an /// enumeration. /// \param Scope Scope in which this enumeration is defined. /// \param Name Union name. /// \param File File where this member is defined. /// \param LineNumber Line number. /// \param SizeInBits Member size. /// \param AlignInBits Member alignment. /// \param Elements Enumeration elements. /// \param UnderlyingType Underlying type of a C++11/ObjC fixed enum. /// \param UniqueIdentifier A unique identifier for the enum. DICompositeType *createEnumerationType( DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber, uint64_t SizeInBits, uint64_t AlignInBits, DINodeArray Elements, DIType *UnderlyingType, StringRef UniqueIdentifier = ""); /// Create subroutine type. /// \param File File in which this subroutine is defined. /// \param ParameterTypes An array of subroutine parameter types. This /// includes return type at 0th index. /// \param Flags E.g.: LValueReference. /// These flags are used to emit dwarf attributes. DISubroutineType *createSubroutineType(DIFile *File, DITypeRefArray ParameterTypes, unsigned Flags = 0); /// Create a new DIType* with "artificial" flag set. DIType *createArtificialType(DIType *Ty); /// Create a new DIType* with the "object pointer" /// flag set. DIType *createObjectPointerType(DIType *Ty); /// Create a permanent forward-declared type. DICompositeType *createForwardDecl(unsigned Tag, StringRef Name, DIScope *Scope, DIFile *F, unsigned Line, unsigned RuntimeLang = 0, uint64_t SizeInBits = 0, uint64_t AlignInBits = 0, StringRef UniqueIdentifier = ""); /// Create a temporary forward-declared type. 
  DICompositeType *createReplaceableCompositeType(
      unsigned Tag, StringRef Name, DIScope *Scope, DIFile *F, unsigned Line,
      unsigned RuntimeLang = 0, uint64_t SizeInBits = 0,
      uint64_t AlignInBits = 0, unsigned Flags = DINode::FlagFwdDecl,
      StringRef UniqueIdentifier = "");

  /// Retain DIType* in a module even if it is not referenced
  /// through debug info anchors.
  void retainType(DIType *T);

  /// Create unspecified parameter type
  /// for a subroutine type.
  DIBasicType *createUnspecifiedParameter();

  /// Get a DINodeArray, create one if required.
  DINodeArray getOrCreateArray(ArrayRef<Metadata *> Elements);

  /// Get a DITypeRefArray, create one if required.
  DITypeRefArray getOrCreateTypeArray(ArrayRef<Metadata *> Elements);

  /// Create a descriptor for a value range. This
  /// implicitly uniques the values returned.
  DISubrange *getOrCreateSubrange(int64_t Lo, int64_t Count);

  /// Create a new descriptor for the specified
  /// variable.
  /// \param Context       Variable scope.
  /// \param Name          Name of the variable.
  /// \param LinkageName   Mangled name of the variable.
  /// \param File          File where this variable is defined.
  /// \param LineNo        Line number.
  /// \param Ty            Variable Type.
  /// \param isLocalToUnit Boolean flag indicate whether this variable is
  ///                      externally visible or not.
  /// \param Val           llvm::Value of the variable.
  /// \param Decl          Reference to the corresponding declaration.
  DIGlobalVariable *createGlobalVariable(DIScope *Context, StringRef Name,
                                         StringRef LinkageName, DIFile *File,
                                         unsigned LineNo, DIType *Ty,
                                         bool isLocalToUnit,
                                         llvm::Constant *Val,
                                         MDNode *Decl = nullptr);

  /// Identical to createGlobalVariable
  /// except that the resulting DbgNode is temporary and meant to be RAUWed.
  DIGlobalVariable *createTempGlobalVariableFwdDecl(
      DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *File,
      unsigned LineNo, DIType *Ty, bool isLocalToUnit, llvm::Constant *Val,
      MDNode *Decl = nullptr);

  /// Create a new descriptor for the specified
  /// local variable.
/// \param Tag Dwarf TAG. Usually DW_TAG_auto_variable or /// DW_TAG_arg_variable. /// \param Scope Variable scope. /// \param Name Variable name. /// \param File File where this variable is defined. /// \param LineNo Line number. /// \param Ty Variable Type /// \param AlwaysPreserve Boolean. Set to true if debug info for this /// variable should be preserved in optimized build. /// \param Flags Flags, e.g. artificial variable. /// \param ArgNo If this variable is an argument then this argument's /// number. 1 indicates 1st argument. DILocalVariable *createLocalVariable(unsigned Tag, DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo, DIType *Ty, bool AlwaysPreserve = false, unsigned Flags = 0, unsigned ArgNo = 0); /// Create a new descriptor for the specified /// variable which has a complex address expression for its address. /// \param Addr An array of complex address operations. DIExpression *createExpression(ArrayRef<uint64_t> Addr = None); DIExpression *createExpression(ArrayRef<int64_t> Addr); /// Create a descriptor to describe one part /// of aggregate variable that is fragmented across multiple Values. /// /// \param OffsetInBits Offset of the piece in bits. /// \param SizeInBits Size of the piece in bits. DIExpression *createBitPieceExpression(unsigned OffsetInBits, unsigned SizeInBits); /// Create a new descriptor for the specified subprogram. /// See comments in DISubprogram* for descriptions of these fields. /// \param Scope Function scope. /// \param Name Function name. /// \param LinkageName Mangled function name. /// \param File File where this variable is defined. /// \param LineNo Line number. /// \param Ty Function type. /// \param isLocalToUnit True if this function is not externally visible. /// \param isDefinition True if this is a function definition. /// \param ScopeLine Set to the beginning of the scope this starts /// \param Flags e.g. is this function prototyped or not. /// These flags are used to emit dwarf attributes. 
/// \param isOptimized True if optimization is ON. /// \param Fn llvm::Function pointer. /// \param TParam Function template parameters. DISubprogram * createFunction(DIScope *Scope, StringRef Name, StringRef LinkageName, DIFile *File, unsigned LineNo, DISubroutineType *Ty, bool isLocalToUnit, bool isDefinition, unsigned ScopeLine, unsigned Flags = 0, bool isOptimized = false, Function *Fn = nullptr, MDNode *TParam = nullptr, MDNode *Decl = nullptr); /// Identical to createFunction, /// except that the resulting DbgNode is meant to be RAUWed. DISubprogram *createTempFunctionFwdDecl( DIScope *Scope, StringRef Name, StringRef LinkageName, DIFile *File, unsigned LineNo, DISubroutineType *Ty, bool isLocalToUnit, bool isDefinition, unsigned ScopeLine, unsigned Flags = 0, bool isOptimized = false, Function *Fn = nullptr, MDNode *TParam = nullptr, MDNode *Decl = nullptr); /// FIXME: this is added for dragonegg. Once we update dragonegg /// to call resolve function, this will be removed. DISubprogram * createFunction(DIScopeRef Scope, StringRef Name, StringRef LinkageName, DIFile *File, unsigned LineNo, DISubroutineType *Ty, bool isLocalToUnit, bool isDefinition, unsigned ScopeLine, unsigned Flags = 0, bool isOptimized = false, Function *Fn = nullptr, MDNode *TParam = nullptr, MDNode *Decl = nullptr); /// Create a new descriptor for the specified C++ method. /// See comments in \a DISubprogram* for descriptions of these fields. /// \param Scope Function scope. /// \param Name Function name. /// \param LinkageName Mangled function name. /// \param File File where this variable is defined. /// \param LineNo Line number. /// \param Ty Function type. /// \param isLocalToUnit True if this function is not externally visible.. /// \param isDefinition True if this is a function definition. /// \param Virtuality Attributes describing virtualness. e.g. pure /// virtual function. /// \param VTableIndex Index no of this method in virtual table. 
/// \param VTableHolder Type that holds vtable. /// \param Flags e.g. is this function prototyped or not. /// This flags are used to emit dwarf attributes. /// \param isOptimized True if optimization is ON. /// \param Fn llvm::Function pointer. /// \param TParam Function template parameters. DISubprogram * createMethod(DIScope *Scope, StringRef Name, StringRef LinkageName, DIFile *File, unsigned LineNo, DISubroutineType *Ty, bool isLocalToUnit, bool isDefinition, unsigned Virtuality = 0, unsigned VTableIndex = 0, DIType *VTableHolder = nullptr, unsigned Flags = 0, bool isOptimized = false, Function *Fn = nullptr, MDNode *TParam = nullptr); /// This creates new descriptor for a namespace with the specified /// parent scope. /// \param Scope Namespace scope /// \param Name Name of this namespace /// \param File Source file /// \param LineNo Line number DINamespace *createNameSpace(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo); /// This creates new descriptor for a module with the specified /// parent scope. /// \param Scope Parent scope /// \param Name Name of this module /// \param ConfigurationMacros /// A space-separated shell-quoted list of -D macro /// definitions as they would appear on a command line. /// \param IncludePath The path to the module map file. /// \param ISysRoot The clang system root (value of -isysroot). DIModule *createModule(DIScope *Scope, StringRef Name, StringRef ConfigurationMacros, StringRef IncludePath, StringRef ISysRoot); /// This creates a descriptor for a lexical block with a new file /// attached. This merely extends the existing /// lexical block as it crosses a file. /// \param Scope Lexical block. /// \param File Source file. /// \param Discriminator DWARF path discriminator value. DILexicalBlockFile *createLexicalBlockFile(DIScope *Scope, DIFile *File, unsigned Discriminator = 0); /// This creates a descriptor for a lexical block with the /// specified parent context. /// \param Scope Parent lexical scope. 
/// \param File Source file. /// \param Line Line number. /// \param Col Column number. DILexicalBlock *createLexicalBlock(DIScope *Scope, DIFile *File, unsigned Line, unsigned Col); /// Create a descriptor for an imported module. /// \param Context The scope this module is imported into /// \param NS The namespace being imported here /// \param Line Line number DIImportedEntity *createImportedModule(DIScope *Context, DINamespace *NS, unsigned Line); /// Create a descriptor for an imported module. /// \param Context The scope this module is imported into /// \param NS An aliased namespace /// \param Line Line number DIImportedEntity *createImportedModule(DIScope *Context, DIImportedEntity *NS, unsigned Line); /// Create a descriptor for an imported module. /// \param Context The scope this module is imported into /// \param M The module being imported here /// \param Line Line number DIImportedEntity *createImportedModule(DIScope *Context, DIModule *M, unsigned Line); /// Create a descriptor for an imported function. /// \param Context The scope this module is imported into /// \param Decl The declaration (or definition) of a function, type, or /// variable /// \param Line Line number DIImportedEntity *createImportedDeclaration(DIScope *Context, DINode *Decl, unsigned Line, StringRef Name = ""); /// Insert a new llvm.dbg.declare intrinsic call. /// \param Storage llvm::Value of the variable /// \param VarInfo Variable's debug info descriptor. /// \param Expr A complex location expression. /// \param DL Debug info location. /// \param InsertAtEnd Location for the new intrinsic. Instruction *insertDeclare(llvm::Value *Storage, DILocalVariable *VarInfo, DIExpression *Expr, const DILocation *DL, BasicBlock *InsertAtEnd); /// Insert a new llvm.dbg.declare intrinsic call. /// \param Storage llvm::Value of the variable /// \param VarInfo Variable's debug info descriptor. /// \param Expr A complex location expression. /// \param DL Debug info location. 
  /// \param InsertBefore Location for the new intrinsic.
  Instruction *insertDeclare(llvm::Value *Storage, DILocalVariable *VarInfo,
                             DIExpression *Expr, const DILocation *DL,
                             Instruction *InsertBefore);

  /// Insert a new llvm.dbg.value intrinsic call.
  /// \param Val         llvm::Value of the variable
  /// \param Offset      Offset
  /// \param VarInfo     Variable's debug info descriptor.
  /// \param Expr        A complex location expression.
  /// \param DL          Debug info location.
  /// \param InsertAtEnd Location for the new intrinsic.
  Instruction *insertDbgValueIntrinsic(llvm::Value *Val, uint64_t Offset,
                                       DILocalVariable *VarInfo,
                                       DIExpression *Expr,
                                       const DILocation *DL,
                                       BasicBlock *InsertAtEnd);

  /// Insert a new llvm.dbg.value intrinsic call.
  /// \param Val          llvm::Value of the variable
  /// \param Offset       Offset
  /// \param VarInfo      Variable's debug info descriptor.
  /// \param Expr         A complex location expression.
  /// \param DL           Debug info location.
  /// \param InsertBefore Location for the new intrinsic.
  Instruction *insertDbgValueIntrinsic(llvm::Value *Val, uint64_t Offset,
                                       DILocalVariable *VarInfo,
                                       DIExpression *Expr,
                                       const DILocation *DL,
                                       Instruction *InsertBefore);

  /// Replace the vtable holder in the given composite type.
  ///
  /// If this creates a self reference, it may orphan some unresolved cycles
  /// in the operands of \c T, so \a DIBuilder needs to track that.
  void replaceVTableHolder(DICompositeType *&T,
                           DICompositeType *VTableHolder);

  /// Replace arrays on a composite type.
  ///
  /// If \c T is resolved, but the arrays aren't -- which can happen if \c T
  /// has a self-reference -- \a DIBuilder needs to track the array to
  /// resolve cycles.
  ///
  /// NOTE(review): the parameter name "TParems" looks like a typo for
  /// "TParams"; declaration parameter names don't affect callers, but the
  /// definition should be checked before renaming.
  void replaceArrays(DICompositeType *&T, DINodeArray Elements,
                     DINodeArray TParems = DINodeArray());

  /// Replace a temporary node.
  ///
  /// Call \a MDNode::replaceAllUsesWith() on \c N, replacing it with \c
  /// Replacement.
  ///
  /// If \c Replacement is the same as \c N.get(), instead call \a
  /// MDNode::replaceWithUniqued(). In this case, the uniqued node could
  /// have a different address, so we return the final address.
  template <class NodeTy>
  NodeTy *replaceTemporary(TempMDNode &&N, NodeTy *Replacement) {
    // Self-replacement: unique the temporary in place; the resulting node
    // may live at a different address, so return what replaceWithUniqued
    // gives back rather than the original pointer.
    if (N.get() == Replacement)
      return cast<NodeTy>(MDNode::replaceWithUniqued(std::move(N)));
    // Otherwise RAUW the temporary with the (distinct) replacement node.
    N->replaceAllUsesWith(Replacement);
    return Replacement;
  }
};

} // end namespace llvm

#endif