Unnamed: 0
int64
0
0
repo_id
stringlengths
5
186
file_path
stringlengths
15
223
content
stringlengths
1
32.8M
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/GenericDomTree.h
//===- GenericDomTree.h - Generic dominator trees for graphs ----*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// \file /// /// This file defines a set of templates that efficiently compute a dominator /// tree over a generic graph. This is used typically in LLVM for fast /// dominance queries on the CFG, but is fully generic w.r.t. the underlying /// graph types. /// //===----------------------------------------------------------------------===// #ifndef LLVM_SUPPORT_GENERICDOMTREE_H #define LLVM_SUPPORT_GENERICDOMTREE_H #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DepthFirstIterator.h" #include "llvm/ADT/GraphTraits.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/raw_ostream.h" #include <algorithm> namespace llvm { /// \brief Base class that other, more interesting dominator analyses /// inherit from. template <class NodeT> class DominatorBase { protected: std::vector<NodeT *> Roots; bool IsPostDominators; explicit DominatorBase(bool isPostDom) : Roots(), IsPostDominators(isPostDom) {} DominatorBase(DominatorBase &&Arg) : Roots(std::move(Arg.Roots)), IsPostDominators(std::move(Arg.IsPostDominators)) { Arg.Roots.clear(); } DominatorBase &operator=(DominatorBase &&RHS) { Roots = std::move(RHS.Roots); IsPostDominators = std::move(RHS.IsPostDominators); RHS.Roots.clear(); return *this; } public: /// getRoots - Return the root blocks of the current CFG. This may include /// multiple blocks if we are computing post dominators. For forward /// dominators, this will always be a single block (the entry node). 
/// const std::vector<NodeT *> &getRoots() const { return Roots; } /// isPostDominator - Returns true if analysis based of postdoms /// bool isPostDominator() const { return IsPostDominators; } }; template <class NodeT> class DominatorTreeBase; struct PostDominatorTree; /// \brief Base class for the actual dominator tree node. template <class NodeT> class DomTreeNodeBase { NodeT *TheBB; DomTreeNodeBase<NodeT> *IDom; std::vector<DomTreeNodeBase<NodeT> *> Children; mutable int DFSNumIn, DFSNumOut; template <class N> friend class DominatorTreeBase; friend struct PostDominatorTree; public: typedef typename std::vector<DomTreeNodeBase<NodeT> *>::iterator iterator; typedef typename std::vector<DomTreeNodeBase<NodeT> *>::const_iterator const_iterator; iterator begin() { return Children.begin(); } iterator end() { return Children.end(); } const_iterator begin() const { return Children.begin(); } const_iterator end() const { return Children.end(); } NodeT *getBlock() const { return TheBB; } DomTreeNodeBase<NodeT> *getIDom() const { return IDom; } const std::vector<DomTreeNodeBase<NodeT> *> &getChildren() const { return Children; } DomTreeNodeBase(NodeT *BB, DomTreeNodeBase<NodeT> *iDom) : TheBB(BB), IDom(iDom), DFSNumIn(-1), DFSNumOut(-1) {} std::unique_ptr<DomTreeNodeBase<NodeT>> addChild(std::unique_ptr<DomTreeNodeBase<NodeT>> C) { Children.push_back(C.get()); return C; } size_t getNumChildren() const { return Children.size(); } void clearAllChildren() { Children.clear(); } bool compare(const DomTreeNodeBase<NodeT> *Other) const { if (getNumChildren() != Other->getNumChildren()) return true; SmallPtrSet<const NodeT *, 4> OtherChildren; for (const_iterator I = Other->begin(), E = Other->end(); I != E; ++I) { const NodeT *Nd = (*I)->getBlock(); OtherChildren.insert(Nd); } for (const_iterator I = begin(), E = end(); I != E; ++I) { const NodeT *N = (*I)->getBlock(); if (OtherChildren.count(N) == 0) return true; } return false; } void setIDom(DomTreeNodeBase<NodeT> *NewIDom) { 
assert(IDom && "No immediate dominator?"); if (IDom != NewIDom) { typename std::vector<DomTreeNodeBase<NodeT> *>::iterator I = std::find(IDom->Children.begin(), IDom->Children.end(), this); assert(I != IDom->Children.end() && "Not in immediate dominator children set!"); // I am no longer your child... IDom->Children.erase(I); // Switch to new dominator IDom = NewIDom; IDom->Children.push_back(this); } } /// getDFSNumIn/getDFSNumOut - These are an internal implementation detail, do /// not call them. unsigned getDFSNumIn() const { return DFSNumIn; } unsigned getDFSNumOut() const { return DFSNumOut; } private: // Return true if this node is dominated by other. Use this only if DFS info // is valid. bool DominatedBy(const DomTreeNodeBase<NodeT> *other) const { return this->DFSNumIn >= other->DFSNumIn && this->DFSNumOut <= other->DFSNumOut; } }; template <class NodeT> raw_ostream &operator<<(raw_ostream &o, const DomTreeNodeBase<NodeT> *Node) { if (Node->getBlock()) Node->getBlock()->printAsOperand(o, false); else o << " <<exit node>>"; o << " {" << Node->getDFSNumIn() << "," << Node->getDFSNumOut() << "}"; return o << "\n"; } template <class NodeT> void PrintDomTree(const DomTreeNodeBase<NodeT> *N, raw_ostream &o, unsigned Lev) { o.indent(2 * Lev) << "[" << Lev << "] " << N; for (typename DomTreeNodeBase<NodeT>::const_iterator I = N->begin(), E = N->end(); I != E; ++I) PrintDomTree<NodeT>(*I, o, Lev + 1); } // The calculate routine is provided in a separate header but referenced here. template <class FuncT, class N> void Calculate(DominatorTreeBase<typename GraphTraits<N>::NodeType> &DT, FuncT &F); /// \brief Core dominator tree base class. /// /// This class is a generic template over graph nodes. It is instantiated for /// various graphs in the LLVM IR or in the code generator. 
template <class NodeT> class DominatorTreeBase : public DominatorBase<NodeT> { DominatorTreeBase(const DominatorTreeBase &) = delete; DominatorTreeBase &operator=(const DominatorTreeBase &) = delete; bool dominatedBySlowTreeWalk(const DomTreeNodeBase<NodeT> *A, const DomTreeNodeBase<NodeT> *B) const { assert(A != B); assert(isReachableFromEntry(B)); assert(isReachableFromEntry(A)); const DomTreeNodeBase<NodeT> *IDom; while ((IDom = B->getIDom()) != nullptr && IDom != A && IDom != B) B = IDom; // Walk up the tree return IDom != nullptr; } /// \brief Wipe this tree's state without releasing any resources. /// /// This is essentially a post-move helper only. It leaves the object in an /// assignable and destroyable state, but otherwise invalid. void wipe() { DomTreeNodes.clear(); IDoms.clear(); Vertex.clear(); Info.clear(); RootNode = nullptr; } protected: typedef DenseMap<NodeT *, std::unique_ptr<DomTreeNodeBase<NodeT>>> DomTreeNodeMapType; DomTreeNodeMapType DomTreeNodes; DomTreeNodeBase<NodeT> *RootNode; mutable bool DFSInfoValid; mutable unsigned int SlowQueries; // Information record used during immediate dominators computation. struct InfoRec { unsigned DFSNum; unsigned Parent; unsigned Semi; NodeT *Label; InfoRec() : DFSNum(0), Parent(0), Semi(0), Label(nullptr) {} }; DenseMap<NodeT *, NodeT *> IDoms; // Vertex - Map the DFS number to the NodeT* std::vector<NodeT *> Vertex; // Info - Collection of information used during the computation of idoms. DenseMap<NodeT *, InfoRec> Info; void reset() { DomTreeNodes.clear(); IDoms.clear(); this->Roots.clear(); Vertex.clear(); RootNode = nullptr; DFSInfoValid = false; SlowQueries = 0; } // NewBB is split and now it has one successor. Update dominator tree to // reflect this change. 
template <class N, class GraphT> void Split(DominatorTreeBase<typename GraphT::NodeType> &DT, typename GraphT::NodeType *NewBB) { assert(std::distance(GraphT::child_begin(NewBB), GraphT::child_end(NewBB)) == 1 && "NewBB should have a single successor!"); typename GraphT::NodeType *NewBBSucc = *GraphT::child_begin(NewBB); std::vector<typename GraphT::NodeType *> PredBlocks; typedef GraphTraits<Inverse<N>> InvTraits; for (typename InvTraits::ChildIteratorType PI = InvTraits::child_begin(NewBB), PE = InvTraits::child_end(NewBB); PI != PE; ++PI) PredBlocks.push_back(*PI); assert(!PredBlocks.empty() && "No predblocks?"); bool NewBBDominatesNewBBSucc = true; for (typename InvTraits::ChildIteratorType PI = InvTraits::child_begin(NewBBSucc), E = InvTraits::child_end(NewBBSucc); PI != E; ++PI) { typename InvTraits::NodeType *ND = *PI; if (ND != NewBB && !DT.dominates(NewBBSucc, ND) && DT.isReachableFromEntry(ND)) { NewBBDominatesNewBBSucc = false; break; } } // Find NewBB's immediate dominator and create new dominator tree node for // NewBB. NodeT *NewBBIDom = nullptr; unsigned i = 0; for (i = 0; i < PredBlocks.size(); ++i) if (DT.isReachableFromEntry(PredBlocks[i])) { NewBBIDom = PredBlocks[i]; break; } // It's possible that none of the predecessors of NewBB are reachable; // in that case, NewBB itself is unreachable, so nothing needs to be // changed. if (!NewBBIDom) return; for (i = i + 1; i < PredBlocks.size(); ++i) { if (DT.isReachableFromEntry(PredBlocks[i])) NewBBIDom = DT.findNearestCommonDominator(NewBBIDom, PredBlocks[i]); } // Create the new dominator tree node... and set the idom of NewBB. DomTreeNodeBase<NodeT> *NewBBNode = DT.addNewBlock(NewBB, NewBBIDom); // If NewBB strictly dominates other blocks, then it is now the immediate // dominator of NewBBSucc. Update the dominator tree as appropriate. 
if (NewBBDominatesNewBBSucc) { DomTreeNodeBase<NodeT> *NewBBSuccNode = DT.getNode(NewBBSucc); DT.changeImmediateDominator(NewBBSuccNode, NewBBNode); } } public: explicit DominatorTreeBase(bool isPostDom) : DominatorBase<NodeT>(isPostDom), DFSInfoValid(false), SlowQueries(0) {} DominatorTreeBase(DominatorTreeBase &&Arg) : DominatorBase<NodeT>( std::move(static_cast<DominatorBase<NodeT> &>(Arg))), DomTreeNodes(std::move(Arg.DomTreeNodes)), RootNode(std::move(Arg.RootNode)), DFSInfoValid(std::move(Arg.DFSInfoValid)), SlowQueries(std::move(Arg.SlowQueries)), IDoms(std::move(Arg.IDoms)), Vertex(std::move(Arg.Vertex)), Info(std::move(Arg.Info)) { Arg.wipe(); } DominatorTreeBase &operator=(DominatorTreeBase &&RHS) { DominatorBase<NodeT>::operator=( std::move(static_cast<DominatorBase<NodeT> &>(RHS))); DomTreeNodes = std::move(RHS.DomTreeNodes); RootNode = std::move(RHS.RootNode); DFSInfoValid = std::move(RHS.DFSInfoValid); SlowQueries = std::move(RHS.SlowQueries); IDoms = std::move(RHS.IDoms); Vertex = std::move(RHS.Vertex); Info = std::move(RHS.Info); RHS.wipe(); return *this; } /// compare - Return false if the other dominator tree base matches this /// dominator tree base. Otherwise return true. bool compare(const DominatorTreeBase &Other) const { const DomTreeNodeMapType &OtherDomTreeNodes = Other.DomTreeNodes; if (DomTreeNodes.size() != OtherDomTreeNodes.size()) return true; for (typename DomTreeNodeMapType::const_iterator I = this->DomTreeNodes.begin(), E = this->DomTreeNodes.end(); I != E; ++I) { NodeT *BB = I->first; typename DomTreeNodeMapType::const_iterator OI = OtherDomTreeNodes.find(BB); if (OI == OtherDomTreeNodes.end()) return true; DomTreeNodeBase<NodeT> &MyNd = *I->second; DomTreeNodeBase<NodeT> &OtherNd = *OI->second; if (MyNd.compare(&OtherNd)) return true; } return false; } void releaseMemory() { reset(); } /// getNode - return the (Post)DominatorTree node for the specified basic /// block. This is the same as using operator[] on this class. 
/// DomTreeNodeBase<NodeT> *getNode(NodeT *BB) const { auto I = DomTreeNodes.find(BB); if (I != DomTreeNodes.end()) return I->second.get(); return nullptr; } DomTreeNodeBase<NodeT> *operator[](NodeT *BB) const { return getNode(BB); } /// getRootNode - This returns the entry node for the CFG of the function. If /// this tree represents the post-dominance relations for a function, however, /// this root may be a node with the block == NULL. This is the case when /// there are multiple exit nodes from a particular function. Consumers of /// post-dominance information must be capable of dealing with this /// possibility. /// DomTreeNodeBase<NodeT> *getRootNode() { return RootNode; } const DomTreeNodeBase<NodeT> *getRootNode() const { return RootNode; } /// Get all nodes dominated by R, including R itself. void getDescendants(NodeT *R, SmallVectorImpl<NodeT *> &Result) const { Result.clear(); const DomTreeNodeBase<NodeT> *RN = getNode(R); if (!RN) return; // If R is unreachable, it will not be present in the DOM tree. SmallVector<const DomTreeNodeBase<NodeT> *, 8> WL; WL.push_back(RN); while (!WL.empty()) { const DomTreeNodeBase<NodeT> *N = WL.pop_back_val(); Result.push_back(N->getBlock()); WL.append(N->begin(), N->end()); } } /// properlyDominates - Returns true iff A dominates B and A != B. /// Note that this is not a constant time operation! /// bool properlyDominates(const DomTreeNodeBase<NodeT> *A, const DomTreeNodeBase<NodeT> *B) const { if (!A || !B) return false; if (A == B) return false; return dominates(A, B); } bool properlyDominates(const NodeT *A, const NodeT *B) const; /// isReachableFromEntry - Return true if A is dominated by the entry /// block of the function containing it. 
bool isReachableFromEntry(const NodeT *A) const { assert(!this->isPostDominator() && "This is not implemented for post dominators"); return isReachableFromEntry(getNode(const_cast<NodeT *>(A))); } bool isReachableFromEntry(const DomTreeNodeBase<NodeT> *A) const { return A; } /// dominates - Returns true iff A dominates B. Note that this is not a /// constant time operation! /// bool dominates(const DomTreeNodeBase<NodeT> *A, const DomTreeNodeBase<NodeT> *B) const { // A node trivially dominates itself. if (B == A) return true; // An unreachable node is dominated by anything. if (!isReachableFromEntry(B)) return true; // And dominates nothing. if (!isReachableFromEntry(A)) return false; // Compare the result of the tree walk and the dfs numbers, if expensive // checks are enabled. #ifdef XDEBUG assert((!DFSInfoValid || (dominatedBySlowTreeWalk(A, B) == B->DominatedBy(A))) && "Tree walk disagrees with dfs numbers!"); #endif if (DFSInfoValid) return B->DominatedBy(A); // If we end up with too many slow queries, just update the // DFS numbers on the theory that we are going to keep querying. SlowQueries++; if (SlowQueries > 32) { updateDFSNumbers(); return B->DominatedBy(A); } return dominatedBySlowTreeWalk(A, B); } bool dominates(const NodeT *A, const NodeT *B) const; NodeT *getRoot() const { assert(this->Roots.size() == 1 && "Should always have entry node!"); return this->Roots[0]; } /// findNearestCommonDominator - Find nearest common dominator basic block /// for basic block A and B. If there is no such block then return NULL. NodeT *findNearestCommonDominator(NodeT *A, NodeT *B) { assert(A->getParent() == B->getParent() && "Two blocks are not in same function"); // If either A or B is a entry block then it is nearest common dominator // (for forward-dominators). if (!this->isPostDominator()) { NodeT &Entry = A->getParent()->front(); if (A == &Entry || B == &Entry) return &Entry; } // If B dominates A then B is nearest common dominator. 
if (dominates(B, A)) return B; // If A dominates B then A is nearest common dominator. if (dominates(A, B)) return A; DomTreeNodeBase<NodeT> *NodeA = getNode(A); DomTreeNodeBase<NodeT> *NodeB = getNode(B); // If we have DFS info, then we can avoid all allocations by just querying // it from each IDom. Note that because we call 'dominates' twice above, we // expect to call through this code at most 16 times in a row without // building valid DFS information. This is important as below is a *very* // slow tree walk. if (DFSInfoValid) { DomTreeNodeBase<NodeT> *IDomA = NodeA->getIDom(); while (IDomA) { if (NodeB->DominatedBy(IDomA)) return IDomA->getBlock(); IDomA = IDomA->getIDom(); } return nullptr; } // Collect NodeA dominators set. SmallPtrSet<DomTreeNodeBase<NodeT> *, 16> NodeADoms; NodeADoms.insert(NodeA); DomTreeNodeBase<NodeT> *IDomA = NodeA->getIDom(); while (IDomA) { NodeADoms.insert(IDomA); IDomA = IDomA->getIDom(); } // Walk NodeB immediate dominators chain and find common dominator node. DomTreeNodeBase<NodeT> *IDomB = NodeB->getIDom(); while (IDomB) { if (NodeADoms.count(IDomB) != 0) return IDomB->getBlock(); IDomB = IDomB->getIDom(); } return nullptr; } const NodeT *findNearestCommonDominator(const NodeT *A, const NodeT *B) { // Cast away the const qualifiers here. This is ok since // const is re-introduced on the return type. return findNearestCommonDominator(const_cast<NodeT *>(A), const_cast<NodeT *>(B)); } //===--------------------------------------------------------------------===// // API to update (Post)DominatorTree information based on modifications to // the CFG... /// addNewBlock - Add a new node to the dominator tree information. This /// creates a new node as a child of DomBB dominator node,linking it into /// the children list of the immediate dominator. 
DomTreeNodeBase<NodeT> *addNewBlock(NodeT *BB, NodeT *DomBB) { assert(getNode(BB) == nullptr && "Block already in dominator tree!"); DomTreeNodeBase<NodeT> *IDomNode = getNode(DomBB); assert(IDomNode && "Not immediate dominator specified for block!"); DFSInfoValid = false; return (DomTreeNodes[BB] = IDomNode->addChild( llvm::make_unique<DomTreeNodeBase<NodeT>>(BB, IDomNode))).get(); } /// changeImmediateDominator - This method is used to update the dominator /// tree information when a node's immediate dominator changes. /// void changeImmediateDominator(DomTreeNodeBase<NodeT> *N, DomTreeNodeBase<NodeT> *NewIDom) { assert(N && NewIDom && "Cannot change null node pointers!"); DFSInfoValid = false; N->setIDom(NewIDom); } void changeImmediateDominator(NodeT *BB, NodeT *NewBB) { changeImmediateDominator(getNode(BB), getNode(NewBB)); } /// eraseNode - Removes a node from the dominator tree. Block must not /// dominate any other blocks. Removes node from its immediate dominator's /// children list. Deletes dominator node associated with basic block BB. void eraseNode(NodeT *BB) { DomTreeNodeBase<NodeT> *Node = getNode(BB); assert(Node && "Removing node that isn't in dominator tree."); assert(Node->getChildren().empty() && "Node is not a leaf node."); // Remove node from immediate dominator's children list. DomTreeNodeBase<NodeT> *IDom = Node->getIDom(); if (IDom) { typename std::vector<DomTreeNodeBase<NodeT> *>::iterator I = std::find(IDom->Children.begin(), IDom->Children.end(), Node); assert(I != IDom->Children.end() && "Not in immediate dominator children set!"); // I am no longer your child... IDom->Children.erase(I); } DomTreeNodes.erase(BB); } /// splitBlock - BB is split and now it has one successor. Update dominator /// tree to reflect this change. 
void splitBlock(NodeT *NewBB) { if (this->IsPostDominators) this->Split<Inverse<NodeT *>, GraphTraits<Inverse<NodeT *>>>(*this, NewBB); else this->Split<NodeT *, GraphTraits<NodeT *>>(*this, NewBB); } /// print - Convert to human readable form /// void print(raw_ostream &o) const { o << "=============================--------------------------------\n"; if (this->isPostDominator()) o << "Inorder PostDominator Tree: "; else o << "Inorder Dominator Tree: "; if (!this->DFSInfoValid) o << "DFSNumbers invalid: " << SlowQueries << " slow queries."; o << "\n"; // The postdom tree can have a null root if there are no returns. if (getRootNode()) PrintDomTree<NodeT>(getRootNode(), o, 1); } protected: template <class GraphT> friend typename GraphT::NodeType * Eval(DominatorTreeBase<typename GraphT::NodeType> &DT, typename GraphT::NodeType *V, unsigned LastLinked); template <class GraphT> friend unsigned DFSPass(DominatorTreeBase<typename GraphT::NodeType> &DT, typename GraphT::NodeType *V, unsigned N); template <class FuncT, class N> friend void Calculate(DominatorTreeBase<typename GraphTraits<N>::NodeType> &DT, FuncT &F); DomTreeNodeBase<NodeT> *getNodeForBlock(NodeT *BB) { if (DomTreeNodeBase<NodeT> *Node = getNode(BB)) return Node; // Haven't calculated this node yet? Get or calculate the node for the // immediate dominator. NodeT *IDom = getIDom(BB); assert(IDom || this->DomTreeNodes[nullptr]); DomTreeNodeBase<NodeT> *IDomNode = getNodeForBlock(IDom); // Add a new tree node for this NodeT, and link it as a child of // IDomNode return (this->DomTreeNodes[BB] = IDomNode->addChild( llvm::make_unique<DomTreeNodeBase<NodeT>>(BB, IDomNode))).get(); } NodeT *getIDom(NodeT *BB) const { return IDoms.lookup(BB); } void addRoot(NodeT *BB) { this->Roots.push_back(BB); } public: /// updateDFSNumbers - Assign In and Out numbers to the nodes while walking /// dominator tree in dfs order. 
void updateDFSNumbers() const { if (DFSInfoValid) { SlowQueries = 0; return; } unsigned DFSNum = 0; SmallVector<std::pair<const DomTreeNodeBase<NodeT> *, typename DomTreeNodeBase<NodeT>::const_iterator>, 32> WorkStack; const DomTreeNodeBase<NodeT> *ThisRoot = getRootNode(); if (!ThisRoot) return; // Even in the case of multiple exits that form the post dominator root // nodes, do not iterate over all exits, but start from the virtual root // node. Otherwise bbs, that are not post dominated by any exit but by the // virtual root node, will never be assigned a DFS number. WorkStack.push_back(std::make_pair(ThisRoot, ThisRoot->begin())); ThisRoot->DFSNumIn = DFSNum++; while (!WorkStack.empty()) { const DomTreeNodeBase<NodeT> *Node = WorkStack.back().first; typename DomTreeNodeBase<NodeT>::const_iterator ChildIt = WorkStack.back().second; // If we visited all of the children of this node, "recurse" back up the // stack setting the DFOutNum. if (ChildIt == Node->end()) { Node->DFSNumOut = DFSNum++; WorkStack.pop_back(); } else { // Otherwise, recursively visit this child. 
const DomTreeNodeBase<NodeT> *Child = *ChildIt; ++WorkStack.back().second; WorkStack.push_back(std::make_pair(Child, Child->begin())); Child->DFSNumIn = DFSNum++; } } SlowQueries = 0; DFSInfoValid = true; } /// recalculate - compute a dominator tree for the given function template <class FT> void recalculate(FT &F) { typedef GraphTraits<FT *> TraitsTy; reset(); this->Vertex.push_back(nullptr); if (!this->IsPostDominators) { // Initialize root NodeT *entry = TraitsTy::getEntryNode(&F); this->Roots.push_back(entry); this->IDoms[entry] = nullptr; this->DomTreeNodes[entry] = nullptr; Calculate<FT, NodeT *>(*this, F); } else { // Initialize the roots list for (typename TraitsTy::nodes_iterator I = TraitsTy::nodes_begin(&F), E = TraitsTy::nodes_end(&F); I != E; ++I) { if (TraitsTy::child_begin(I) == TraitsTy::child_end(I)) addRoot(I); // Prepopulate maps so that we don't get iterator invalidation issues // later. this->IDoms[I] = nullptr; this->DomTreeNodes[I] = nullptr; } Calculate<FT, Inverse<NodeT *>>(*this, F); } } }; // These two functions are declared out of line as a workaround for building // with old (< r147295) versions of clang because of pr11642. template <class NodeT> bool DominatorTreeBase<NodeT>::dominates(const NodeT *A, const NodeT *B) const { if (A == B) return true; // Cast away the const qualifiers here. This is ok since // this function doesn't actually return the values returned // from getNode. return dominates(getNode(const_cast<NodeT *>(A)), getNode(const_cast<NodeT *>(B))); } template <class NodeT> bool DominatorTreeBase<NodeT>::properlyDominates(const NodeT *A, const NodeT *B) const { if (A == B) return false; // Cast away the const qualifiers here. This is ok since // this function doesn't actually return the values returned // from getNode. return dominates(getNode(const_cast<NodeT *>(A)), getNode(const_cast<NodeT *>(B))); } } #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/FileUtilities.h
//===- llvm/Support/FileUtilities.h - File System Utilities -----*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines a family of utility functions which are useful for doing // various things with files. // //===----------------------------------------------------------------------===// #ifndef LLVM_SUPPORT_FILEUTILITIES_H #define LLVM_SUPPORT_FILEUTILITIES_H #include "llvm/Support/FileSystem.h" #include "llvm/Support/Path.h" namespace llvm { /// DiffFilesWithTolerance - Compare the two files specified, returning 0 if /// the files match, 1 if they are different, and 2 if there is a file error. /// This function allows you to specify an absolute and relative FP error that /// is allowed to exist. If you specify a string to fill in for the error /// option, it will set the string to an error message if an error occurs, or /// if the files are different. /// int DiffFilesWithTolerance(StringRef FileA, StringRef FileB, double AbsTol, double RelTol, std::string *Error = nullptr); /// FileRemover - This class is a simple object meant to be stack allocated. /// If an exception is thrown from a region, the object removes the filename /// specified (if deleteIt is true). /// class FileRemover { SmallString<128> Filename; bool DeleteIt; public: FileRemover() : DeleteIt(false) {} explicit FileRemover(const Twine& filename, bool deleteIt = true) : DeleteIt(deleteIt) { filename.toVector(Filename); } ~FileRemover() { if (DeleteIt) { // Ignore problems deleting the file. sys::fs::remove(Filename); } } /// setFile - Give ownership of the file to the FileRemover so it will /// be removed when the object is destroyed. If the FileRemover already /// had ownership of a file, remove it first. 
void setFile(const Twine& filename, bool deleteIt = true) { if (DeleteIt) { // Ignore problems deleting the file. sys::fs::remove(Filename); } Filename.clear(); filename.toVector(Filename); DeleteIt = deleteIt; } /// releaseFile - Take ownership of the file away from the FileRemover so it /// will not be removed when the object is destroyed. void releaseFile() { DeleteIt = false; } }; } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/MSFileSystem.h
/////////////////////////////////////////////////////////////////////////////// // // // MSFileSystem.h // // Copyright (C) Microsoft Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // Provides error code values for the DirectX compiler. // // // /////////////////////////////////////////////////////////////////////////////// #ifndef LLVM_SUPPORT_MSFILESYSTEM_H #define LLVM_SUPPORT_MSFILESYSTEM_H /////////////////////////////////////////////////////////////////////////////////////////////////// // MSFileSystem interface. struct stat; namespace llvm { namespace sys { namespace fs { class MSFileSystem { public: virtual ~MSFileSystem(){}; virtual BOOL FindNextFileW(HANDLE hFindFile, LPWIN32_FIND_DATAW lpFindFileData) throw() = 0; virtual HANDLE FindFirstFileW(LPCWSTR lpFileName, LPWIN32_FIND_DATAW lpFindFileData) throw() = 0; virtual void FindClose(HANDLE findHandle) throw() = 0; virtual HANDLE CreateFileW(LPCWSTR lpFileName, DWORD dwDesiredAccess, DWORD dwShareMode, DWORD dwCreationDisposition, DWORD dwFlagsAndAttributes) throw() = 0; virtual BOOL SetFileTime(HANDLE hFile, const FILETIME *lpCreationTime, const FILETIME *lpLastAccessTime, const FILETIME *lpLastWriteTime) throw() = 0; virtual BOOL GetFileInformationByHandle( HANDLE hFile, LPBY_HANDLE_FILE_INFORMATION lpFileInformation) throw() = 0; virtual DWORD GetFileType(HANDLE hFile) throw() = 0; virtual BOOL CreateHardLinkW(LPCWSTR lpFileName, LPCWSTR lpExistingFileName) throw() = 0; virtual BOOL MoveFileExW(LPCWSTR lpExistingFileName, LPCWSTR lpNewFileName, DWORD dwFlags) throw() = 0; virtual DWORD GetFileAttributesW(LPCWSTR lpFileName) throw() = 0; virtual BOOL CloseHandle(HANDLE hObject) throw() = 0; virtual BOOL DeleteFileW(LPCWSTR lpFileName) throw() = 0; virtual BOOL RemoveDirectoryW(LPCWSTR lpFileName) throw() = 0; virtual BOOL CreateDirectoryW(LPCWSTR lpPathName) throw() = 0; virtual DWORD 
GetCurrentDirectoryW(DWORD nBufferLength, LPWSTR lpBuffer) throw() = 0; virtual DWORD GetMainModuleFileNameW(LPWSTR lpFilename, DWORD nSize) throw() = 0; virtual DWORD GetTempPathW(DWORD nBufferLength, LPWSTR lpBuffer) throw() = 0; virtual BOOLEAN CreateSymbolicLinkW(LPCWSTR lpSymlinkFileName, LPCWSTR lpTargetFileName, DWORD dwFlags) throw() = 0; virtual bool SupportsCreateSymbolicLink() throw() = 0; virtual BOOL ReadFile(HANDLE hFile, LPVOID lpBuffer, DWORD nNumberOfBytesToRead, LPDWORD lpNumberOfBytesRead) throw() = 0; virtual HANDLE CreateFileMappingW(HANDLE hFile, DWORD flProtect, DWORD dwMaximumSizeHigh, DWORD dwMaximumSizeLow) throw() = 0; virtual LPVOID MapViewOfFile(HANDLE hFileMappingObject, DWORD dwDesiredAccess, DWORD dwFileOffsetHigh, DWORD dwFileOffsetLow, SIZE_T dwNumberOfBytesToMap) throw() = 0; virtual BOOL UnmapViewOfFile(LPCVOID lpBaseAddress) throw() = 0; // Console APIs. virtual bool FileDescriptorIsDisplayed(int fd) throw() = 0; virtual unsigned GetColumnCount(DWORD nStdHandle) throw() = 0; virtual unsigned GetConsoleOutputTextAttributes() throw() = 0; virtual void SetConsoleOutputTextAttributes(unsigned) throw() = 0; virtual void ResetConsoleOutputTextAttributes() throw() = 0; // CRT APIs. 
virtual int open_osfhandle(intptr_t osfhandle, int flags) throw() = 0; virtual intptr_t get_osfhandle(int fd) throw() = 0; virtual int close(int fd) throw() = 0; virtual long lseek(int fd, long offset, int origin) throw() = 0; virtual int setmode(int fd, int mode) throw() = 0; virtual errno_t resize_file(LPCWSTR path, uint64_t size) throw() = 0; virtual int Read(int fd, void *buffer, unsigned int count) throw() = 0; virtual int Write(int fd, const void *buffer, unsigned int count) throw() = 0; // Unix interface #ifndef _WIN32 virtual int Open(const char *lpFileName, int flags, mode_t mode = 0) throw() = 0; virtual int Stat(const char *lpFileName, struct stat *Status) throw() = 0; virtual int Fstat(int FD, struct stat *Status) throw() = 0; #endif }; } // end namespace fs } // end namespace sys } // end namespace llvm /// <summary>Creates a Win32/CRT-based implementation with full fidelity for a /// console program.</summary> <remarks>This requires the LLVM MS Support /// library to be linked in.</remarks> HRESULT CreateMSFileSystemForDisk(::llvm::sys::fs::MSFileSystem **pResult) throw(); struct IUnknown; /// <summary>Creates an implementation based on IDxcSystemAccess.</summary> HRESULT CreateMSFileSystemForIface(IUnknown *pService, ::llvm::sys::fs::MSFileSystem **pResult) throw(); #endif // LLVM_SUPPORT_MSFILESYSTEM_H
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/Recycler.h
//==- llvm/Support/Recycler.h - Recycling Allocator --------------*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Recycler class template.  See the doxygen comment for
// Recycler for more details.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_RECYCLER_H
#define LLVM_SUPPORT_RECYCLER_H

#include "llvm/ADT/ilist.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>

namespace llvm {

/// PrintRecyclingAllocatorStats - Helper for RecyclingAllocator for
/// printing statistics.
///
void PrintRecyclerStats(size_t Size, size_t Align, size_t FreeListSize);

/// RecyclerStruct - Implementation detail for Recycler.  This is a
/// class that the recycler imposes on free'd memory to carve out
/// next/prev pointers.
struct RecyclerStruct {
  RecyclerStruct *Prev, *Next;
};

// ilist support for RecyclerStruct.  The free list threads its links through
// the recycled memory itself, so these traits read and write the embedded
// Prev/Next fields directly and keep the list sentinel inside the traits
// object rather than allocating one.
template<>
struct ilist_traits<RecyclerStruct> :
    public ilist_default_traits<RecyclerStruct> {
  static RecyclerStruct *getPrev(const RecyclerStruct *t) { return t->Prev; }
  static RecyclerStruct *getNext(const RecyclerStruct *t) { return t->Next; }
  static void setPrev(RecyclerStruct *t, RecyclerStruct *p) { t->Prev = p; }
  static void setNext(RecyclerStruct *t, RecyclerStruct *n) { t->Next = n; }

  mutable RecyclerStruct Sentinel;
  RecyclerStruct *createSentinel() const { return &Sentinel; }
  static void destroySentinel(RecyclerStruct *) {}

  RecyclerStruct *provideInitialHead() const { return createSentinel(); }
  RecyclerStruct *ensureHead(RecyclerStruct*) const { return createSentinel(); }
  static void noteHead(RecyclerStruct*, RecyclerStruct*) {}

  // The nodes are owned by whichever allocator handed them out; the list
  // itself must never try to delete one.
  static void deleteNode(RecyclerStruct *) {
    llvm_unreachable("Recycler's ilist_traits shouldn't see a deleteNode call!");
  }
};

/// Recycler - This class manages a linked-list of deallocated nodes
/// and facilitates reusing deallocated memory in place of allocating
/// new memory.
///
template<class T, size_t Size = sizeof(T), size_t Align = AlignOf<T>::Alignment>
class Recycler {
  /// FreeList - Doubly-linked list of nodes that have deleted contents and
  /// are not in active use.
  ///
  iplist<RecyclerStruct> FreeList;

public:
  ~Recycler() {
    // If this fails, either the callee has lost track of some allocation,
    // or the callee isn't tracking allocations and should just call
    // clear() before deleting the Recycler.
    assert(FreeList.empty() && "Non-empty recycler deleted!");
  }

  /// clear - Release all the tracked allocations to the allocator. The
  /// recycler must be free of any tracked allocations before being
  /// deleted; calling clear is one way to ensure this.
  template<class AllocatorType>
  void clear(AllocatorType &Allocator) {
    while (!FreeList.empty()) {
      T *t = reinterpret_cast<T *>(FreeList.remove(FreeList.begin()));
      Allocator.Deallocate(t);
    }
  }

  /// Special case for BumpPtrAllocator which has an empty Deallocate()
  /// function.
  ///
  /// There is no need to traverse the free list, pulling all the objects into
  /// cache.
  void clear(BumpPtrAllocator&) { FreeList.clearAndLeakNodesUnsafely(); }

  /// Allocate - Return a recycled block if one is available, otherwise
  /// allocate a fresh one of this Recycler's fixed Size/Align from the
  /// given allocator.
  template<class SubClass, class AllocatorType>
  SubClass *Allocate(AllocatorType &Allocator) {
    // Every block, recycled or fresh, is carved with the Recycler-wide
    // Size/Align, so any SubClass served from it must fit within both.
    static_assert(AlignOf<SubClass>::Alignment <= Align,
                  "Recycler allocation alignment is less than object align!");
    static_assert(sizeof(SubClass) <= Size,
                  "Recycler allocation size is less than object size!");
    return !FreeList.empty() ?
           reinterpret_cast<SubClass *>(FreeList.remove(FreeList.begin())) :
           static_cast<SubClass *>(Allocator.Allocate(Size, Align));
  }

  template<class AllocatorType>
  T *Allocate(AllocatorType &Allocator) {
    return Allocate<T>(Allocator);
  }

  /// Deallocate - Thread the block onto the free list for later reuse.
  /// The underlying allocator is intentionally not notified, so the
  /// memory stays reserved for this Recycler.
  template<class SubClass, class AllocatorType>
  void Deallocate(AllocatorType & /*Allocator*/, SubClass* Element) {
    FreeList.push_front(reinterpret_cast<RecyclerStruct *>(Element));
  }

  void PrintStats() {
    PrintRecyclerStats(Size, Align, FreeList.size());
  }
};

}

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/circular_raw_ostream.h
//===-- llvm/Support/circular_raw_ostream.h - Buffered streams --*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains raw_ostream implementations for streams to do circular // buffering of their output. // //===----------------------------------------------------------------------===// #ifndef LLVM_SUPPORT_CIRCULAR_RAW_OSTREAM_H #define LLVM_SUPPORT_CIRCULAR_RAW_OSTREAM_H #include "llvm/Support/raw_ostream.h" namespace llvm { /// circular_raw_ostream - A raw_ostream which *can* save its data /// to a circular buffer, or can pass it through directly to an /// underlying stream if specified with a buffer of zero. /// class circular_raw_ostream : public raw_ostream { public: /// TAKE_OWNERSHIP - Tell this stream that it owns the underlying /// stream and is responsible for cleanup, memory management /// issues, etc. /// static const bool TAKE_OWNERSHIP = true; /// REFERENCE_ONLY - Tell this stream it should not manage the /// held stream. /// static const bool REFERENCE_ONLY = false; private: /// TheStream - The real stream we output to. We set it to be /// unbuffered, since we're already doing our own buffering. /// raw_ostream *TheStream; /// OwnsStream - Are we responsible for managing the underlying /// stream? /// bool OwnsStream; /// BufferSize - The size of the buffer in bytes. /// size_t BufferSize; /// BufferArray - The actual buffer storage. /// char *BufferArray; /// Cur - Pointer to the current output point in BufferArray. /// char *Cur; /// Filled - Indicate whether the buffer has been completely /// filled. This helps avoid garbage output. /// bool Filled; /// Banner - A pointer to a banner to print before dumping the /// log. /// const char *Banner; /// flushBuffer - Dump the contents of the buffer to Stream. 
/// void flushBuffer() { if (Filled) // Write the older portion of the buffer. TheStream->write(Cur, BufferArray + BufferSize - Cur); // Write the newer portion of the buffer. TheStream->write(BufferArray, Cur - BufferArray); Cur = BufferArray; Filled = false; } void write_impl(const char *Ptr, size_t Size) override; /// current_pos - Return the current position within the stream, /// not counting the bytes currently in the buffer. /// uint64_t current_pos() const override { // This has the same effect as calling TheStream.current_pos(), // but that interface is private. return TheStream->tell() - TheStream->GetNumBytesInBuffer(); } public: /// circular_raw_ostream - Construct an optionally /// circular-buffered stream, handing it an underlying stream to /// do the "real" output. /// /// As a side effect, if BuffSize is nonzero, the given Stream is /// set to be Unbuffered. This is because circular_raw_ostream /// does its own buffering, so it doesn't want another layer of /// buffering to be happening underneath it. /// /// "Owns" tells the circular_raw_ostream whether it is /// responsible for managing the held stream, doing memory /// management of it, etc. /// circular_raw_ostream(raw_ostream &Stream, const char *Header, size_t BuffSize = 0, bool Owns = REFERENCE_ONLY) : raw_ostream(/*unbuffered*/ true), TheStream(nullptr), OwnsStream(Owns), BufferSize(BuffSize), BufferArray(nullptr), Filled(false), Banner(Header) { if (BufferSize != 0) BufferArray = new char[BufferSize]; Cur = BufferArray; setStream(Stream, Owns); } ~circular_raw_ostream() override { flush(); flushBufferWithBanner(); releaseStream(); delete[] BufferArray; } /// setStream - Tell the circular_raw_ostream to output a /// different stream. "Owns" tells circular_raw_ostream whether /// it should take responsibility for managing the underlying /// stream. 
/// void setStream(raw_ostream &Stream, bool Owns = REFERENCE_ONLY) { releaseStream(); TheStream = &Stream; OwnsStream = Owns; } /// flushBufferWithBanner - Force output of the buffer along with /// a small header. /// void flushBufferWithBanner(); private: /// releaseStream - Delete the held stream if needed. Otherwise, /// transfer the buffer settings from this circular_raw_ostream /// back to the underlying stream. /// void releaseStream() { if (!TheStream) return; if (OwnsStream) delete TheStream; } }; } // end llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/Casting.h
//===-- llvm/Support/Casting.h - Allow flexible, checked, casts -*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the isa<X>(), cast<X>(), dyn_cast<X>(), cast_or_null<X>(),
// and dyn_cast_or_null<X>() templates.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_CASTING_H
#define LLVM_SUPPORT_CASTING_H

#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/type_traits.h"
#include <cassert>

namespace llvm {

//===----------------------------------------------------------------------===//
//                          isa<x> Support Templates
//===----------------------------------------------------------------------===//

// Define a template that can be specialized by smart pointers to reflect the
// fact that they are automatically dereferenced, and are not involved with the
// template selection process...  the default implementation is a noop.
//
template<typename From> struct simplify_type {
  typedef From SimpleType;        // The real type this represents...

  // An accessor to get the real value...
  static SimpleType &getSimplifiedValue(From &Val) { return Val; }
};

// Const variant: simplify the non-const type, then re-apply constness
// (pushing it past a pointer where necessary) so const-correctness survives
// the simplification.
template<typename From> struct simplify_type<const From> {
  typedef typename simplify_type<From>::SimpleType NonConstSimpleType;
  typedef typename add_const_past_pointer<NonConstSimpleType>::type
    SimpleType;
  typedef typename add_lvalue_reference_if_not_pointer<SimpleType>::type
    RetType;
  static RetType getSimplifiedValue(const From& Val) {
    return simplify_type<From>::getSimplifiedValue(const_cast<From&>(Val));
  }
};

// The core of the implementation of isa<X> is here; To and From should be
// the names of classes.  This template can be specialized to customize the
// implementation of isa<> without rewriting it from scratch.
template <typename To, typename From, typename Enabler = void>
struct isa_impl {
  static inline bool doit(const From &Val) {
    return To::classof(&Val);
  }
};

/// \brief Always allow upcasts, and perform no dynamic check for them.
template <typename To, typename From>
struct isa_impl<
    To, From, typename std::enable_if<std::is_base_of<To, From>::value>::type> {
  static inline bool doit(const From &) { return true; }
};

// isa_impl_cl strips const/pointer qualifiers off the From type so that
// isa_impl above always sees a plain reference.  The pointer variants also
// assert against null, since isa<> on null is a caller bug.
template <typename To, typename From> struct isa_impl_cl {
  static inline bool doit(const From &Val) {
    return isa_impl<To, From>::doit(Val);
  }
};

template <typename To, typename From> struct isa_impl_cl<To, const From> {
  static inline bool doit(const From &Val) {
    return isa_impl<To, From>::doit(Val);
  }
};

template <typename To, typename From> struct isa_impl_cl<To, From*> {
  static inline bool doit(const From *Val) {
    assert(Val && "isa<> used on a null pointer");
    return isa_impl<To, From>::doit(*Val);
  }
};

template <typename To, typename From> struct isa_impl_cl<To, From*const> {
  static inline bool doit(const From *Val) {
    assert(Val && "isa<> used on a null pointer");
    return isa_impl<To, From>::doit(*Val);
  }
};

template <typename To, typename From> struct isa_impl_cl<To, const From*> {
  static inline bool doit(const From *Val) {
    assert(Val && "isa<> used on a null pointer");
    return isa_impl<To, From>::doit(*Val);
  }
};

template <typename To, typename From> struct isa_impl_cl<To, const From*const> {
  static inline bool doit(const From *Val) {
    assert(Val && "isa<> used on a null pointer");
    return isa_impl<To, From>::doit(*Val);
  }
};

template<typename To, typename From, typename SimpleFrom>
struct isa_impl_wrap {
  // When From != SimplifiedType, we can simplify the type some more by using
  // the simplify_type template.  Recurses until From reaches its fixpoint.
  static bool doit(const From &Val) {
    return isa_impl_wrap<To, SimpleFrom,
      typename simplify_type<SimpleFrom>::SimpleType>::doit(
                          simplify_type<const From>::getSimplifiedValue(Val));
  }
};

template<typename To, typename FromTy>
struct isa_impl_wrap<To, FromTy, FromTy> {
  // When From == SimpleType, we are as simple as we are going to get.
  static bool doit(const FromTy &Val) {
    return isa_impl_cl<To,FromTy>::doit(Val);
  }
};

// isa<X> - Return true if the parameter to the template is an instance of the
// template type argument.  Used like this:
//
//  if (isa<Type>(myVal)) { ... }
//
template <class X, class Y> LLVM_ATTRIBUTE_UNUSED_RESULT inline bool isa(const Y &Val) {
  return isa_impl_wrap<X, const Y,
                       typename simplify_type<const Y>::SimpleType>::doit(Val);
}

//===----------------------------------------------------------------------===//
//                          cast<x> Support Templates
//===----------------------------------------------------------------------===//

template<class To, class From> struct cast_retty;

// Calculate what type the 'cast' function should return, based on a requested
// type of To and a source type of From.  The pointer/const specializations
// keep the qualifiers of the source on the result.
template<class To, class From> struct cast_retty_impl {
  typedef To& ret_type;         // Normal case, return Ty&
};
template<class To, class From> struct cast_retty_impl<To, const From> {
  typedef const To &ret_type;   // Normal case, return Ty&
};

template<class To, class From> struct cast_retty_impl<To, From*> {
  typedef To* ret_type;         // Pointer arg case, return Ty*
};

template<class To, class From> struct cast_retty_impl<To, const From*> {
  typedef const To* ret_type;   // Constant pointer arg case, return const Ty*
};

template<class To, class From> struct cast_retty_impl<To, const From*const> {
  typedef const To* ret_type;   // Constant pointer arg case, return const Ty*
};

template<class To, class From, class SimpleFrom>
struct cast_retty_wrap {
  // When the simplified type and the from type are not the same, use the type
  // simplifier to reduce the type, then reuse cast_retty_impl to get the
  // resultant type.
  typedef typename cast_retty<To, SimpleFrom>::ret_type ret_type;
};

template<class To, class FromTy>
struct cast_retty_wrap<To, FromTy, FromTy> {
  // When the simplified type is equal to the from type, use it directly.
  typedef typename cast_retty_impl<To,FromTy>::ret_type ret_type;
};

template<class To, class From>
struct cast_retty {
  typedef typename cast_retty_wrap<To, From,
                   typename simplify_type<From>::SimpleType>::ret_type ret_type;
};

// Ensure the non-simple values are converted using the simplify_type template
// that may be specialized by smart pointers...
//
template<class To, class From, class SimpleFrom> struct cast_convert_val {
  // This is not a simple type, use the template to simplify it...
  static typename cast_retty<To, From>::ret_type doit(From &Val) {
    return cast_convert_val<To, SimpleFrom,
      typename simplify_type<SimpleFrom>::SimpleType>::doit(
                          simplify_type<From>::getSimplifiedValue(Val));
  }
};

template<class To, class FromTy> struct cast_convert_val<To,FromTy,FromTy> {
  // This _is_ a simple type, just cast it.
  static typename cast_retty<To, FromTy>::ret_type doit(const FromTy &Val) {
    typename cast_retty<To, FromTy>::ret_type Res2
     = (typename cast_retty<To, FromTy>::ret_type)const_cast<FromTy&>(Val);
    return Res2;
  }
};

template <class X> struct is_simple_type {
  static const bool value =
      std::is_same<X, typename simplify_type<X>::SimpleType>::value;
};

// cast<X> - Return the argument parameter cast to the specified type.  This
// casting operator asserts that the type is correct, so it does not return null
// on failure.  It does not allow a null argument (use cast_or_null for that).
// It is typically used like this:
//
//  cast<Instruction>(myVal)->getParent()
//
template <class X, class Y>
inline typename std::enable_if<!is_simple_type<Y>::value,
                               typename cast_retty<X, const Y>::ret_type>::type
cast(const Y &Val) {
  llvm_cast_assert(X, Val); // HLSL change
  return cast_convert_val<
      X, const Y, typename simplify_type<const Y>::SimpleType>::doit(Val);
}

template <class X, class Y>
inline typename cast_retty<X, Y>::ret_type cast(Y &Val) {
  llvm_cast_assert(X, Val); // HLSL change
  return cast_convert_val<X, Y,
                          typename simplify_type<Y>::SimpleType>::doit(Val);
}

template <class X, class Y>
inline typename cast_retty<X, Y *>::ret_type cast(Y *Val) {
  llvm_cast_assert(X, Val); // HLSL change
  return cast_convert_val<X, Y*,
                          typename simplify_type<Y*>::SimpleType>::doit(Val);
}

// cast_or_null<X> - Functionally identical to cast, except that a null value is
// accepted.
//
template <class X, class Y>
LLVM_ATTRIBUTE_UNUSED_RESULT inline typename std::enable_if<
    !is_simple_type<Y>::value, typename cast_retty<X, const Y>::ret_type>::type
cast_or_null(const Y &Val) {
  if (!Val)
    return nullptr;
  llvm_cast_assert(X, Val); // HLSL change
  return cast<X>(Val);
}

template <class X, class Y>
LLVM_ATTRIBUTE_UNUSED_RESULT inline typename std::enable_if<
    !is_simple_type<Y>::value, typename cast_retty<X, Y>::ret_type>::type
cast_or_null(Y &Val) {
  if (!Val)
    return nullptr;
  llvm_cast_assert(X, Val); // HLSL change
  return cast<X>(Val);
}

template <class X, class Y>
LLVM_ATTRIBUTE_UNUSED_RESULT inline typename cast_retty<X, Y *>::ret_type
cast_or_null(Y *Val) {
  if (!Val) return nullptr;
  llvm_cast_assert(X, Val); // HLSL change
  return cast<X>(Val);
}

// dyn_cast<X> - Return the argument parameter cast to the specified type.  This
// casting operator returns null if the argument is of the wrong type, so it can
// be used to test for a type as well as cast if successful.  This should be
// used in the context of an if statement like this:
//
//  if (const Instruction *I = dyn_cast<Instruction>(myVal)) { ... }
//
template <class X, class Y>
LLVM_ATTRIBUTE_UNUSED_RESULT inline typename std::enable_if<
    !is_simple_type<Y>::value, typename cast_retty<X, const Y>::ret_type>::type
dyn_cast(const Y &Val) {
  return isa<X>(Val) ? cast<X>(Val) : nullptr;
}

template <class X, class Y>
LLVM_ATTRIBUTE_UNUSED_RESULT inline typename cast_retty<X, Y>::ret_type
dyn_cast(Y &Val) {
  return isa<X>(Val) ? cast<X>(Val) : nullptr;
}

template <class X, class Y>
LLVM_ATTRIBUTE_UNUSED_RESULT inline typename cast_retty<X, Y *>::ret_type
dyn_cast(Y *Val) {
  return isa<X>(Val) ? cast<X>(Val) : nullptr;
}

// dyn_cast_or_null<X> - Functionally identical to dyn_cast, except that a null
// value is accepted.
//
template <class X, class Y>
LLVM_ATTRIBUTE_UNUSED_RESULT inline typename std::enable_if<
    !is_simple_type<Y>::value, typename cast_retty<X, const Y>::ret_type>::type
dyn_cast_or_null(const Y &Val) {
  return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
}

template <class X, class Y>
LLVM_ATTRIBUTE_UNUSED_RESULT inline typename std::enable_if<
    !is_simple_type<Y>::value, typename cast_retty<X, Y>::ret_type>::type
dyn_cast_or_null(Y &Val) {
  return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
}

template <class X, class Y>
LLVM_ATTRIBUTE_UNUSED_RESULT inline typename cast_retty<X, Y *>::ret_type
dyn_cast_or_null(Y *Val) {
  return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
}

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/Solaris.h
/*===- llvm/Support/Solaris.h ------------------------------------*- C++ -*-===*
 *
 *                     The LLVM Compiler Infrastructure
 *
 * This file is distributed under the University of Illinois Open Source
 * License. See LICENSE.TXT for details.
 *
 *===----------------------------------------------------------------------===*
 *
 * This file contains portability fixes for Solaris hosts.
 *
 *===----------------------------------------------------------------------===*/

#ifndef LLVM_SUPPORT_SOLARIS_H
#define LLVM_SUPPORT_SOLARIS_H

#include <sys/types.h>
#include <sys/regset.h>

/* Solaris doesn't have endian.h. SPARC is the only supported big-endian ISA. */
#define BIG_ENDIAN 4321
#define LITTLE_ENDIAN 1234
#if defined(__sparc) || defined(__sparc__)
#define BYTE_ORDER BIG_ENDIAN
#else
#define BYTE_ORDER LITTLE_ENDIAN
#endif

/* <sys/regset.h> defines x86 register names (CS, EAX, ...) as bare macros,
 * which collides with identifiers used elsewhere in LLVM; remove them. */
#undef CS
#undef DS
#undef ES
#undef FS
#undef GS
#undef SS
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#undef UESP
#undef EFL
#undef ERR
#undef TRAPNO

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/Format.h
//===- Format.h - Efficient printf-style formatting for streams -*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the format() function, which can be used with other
// LLVM subsystems to provide printf-style formatting.  This gives all the power
// and risk of printf.  This can be used like this (with raw_ostreams as an
// example):
//
//    OS << "mynumber: " << format("%4.5f", 1234.412) << '\n';
//
// Or if you prefer:
//
//  OS << format("mynumber: %4.5f\n", 1234.412);
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_FORMAT_H
#define LLVM_SUPPORT_FORMAT_H

#include "dxc/WinAdapter.h" // HLSL Change
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataTypes.h"
#include <cassert>
#include <cstdio>
#include <tuple>

namespace llvm {

/// This is a helper class used for handling formatted output.  It is the
/// abstract base class of a templated derived class.
class format_object_base {
protected:
  const char *Fmt;
  ~format_object_base() = default; // Disallow polymorphic deletion.
  format_object_base(const format_object_base &) = default;
  virtual void home(); // Out of line virtual method.

  /// Call snprintf() for this object, on the given buffer and size.
  virtual int snprint(char *Buffer, unsigned BufferSize) const = 0;

public:
  format_object_base(const char *fmt) : Fmt(fmt) {}

  /// Format the object into the specified buffer.  On success, this returns
  /// the length of the formatted string.  If the buffer is too small, this
  /// returns a length to retry with, which will be larger than BufferSize.
  unsigned print(char *Buffer, unsigned BufferSize) const {
    assert(BufferSize && "Invalid buffer size!");

    // Print the string, leaving room for the terminating null.
    int N = snprint(Buffer, BufferSize);

    // VC++ and old GlibC return negative on overflow, just double the size.
    if (N < 0)
      return BufferSize * 2;

    // Other implementations yield number of bytes needed, not including the
    // final '\0'.
    if (unsigned(N) >= BufferSize)
      return N + 1;

    // Otherwise N is the length of output (not including the final '\0').
    return N;
  }
};

/// These are templated helper classes used by the format function that
/// capture the object to be formated and the format string. When actually
/// printed, this synthesizes the string into a temporary buffer provided and
/// returns whether or not it is big enough.
template <typename... Ts>
class format_object final : public format_object_base {
  std::tuple<Ts...> Vals;

  // Expands the captured tuple into a variadic snprintf call using the
  // index_sequence trick.
  template <std::size_t... Is>
  int snprint_tuple(char *Buffer, unsigned BufferSize,
                    index_sequence<Is...>) const {
#ifdef _MSC_VER
    // Use _TRUNCATE as the buffer size; truncation will still return -1 as
    // a result, thereby triggering the 'double on VC++' behavior in
    // caller, for example llvm::format_object_base::print(char * Buffer, unsigned int BufferSize)
    return _snprintf_s(Buffer, BufferSize, _TRUNCATE, Fmt, std::get<Is>(Vals)...);
#else
    return snprintf(Buffer, BufferSize, Fmt, std::get<Is>(Vals)...);
#endif
  }

public:
  format_object(const char *fmt, const Ts &... vals)
      : format_object_base(fmt), Vals(vals...) {}

  int snprint(char *Buffer, unsigned BufferSize) const override {
    return snprint_tuple(Buffer, BufferSize, index_sequence_for<Ts...>());
  }
};

/// These are helper functions used to produce formatted output.  They use
/// template type deduction to construct the appropriate instance of the
/// format_object class to simplify their construction.
///
/// This is typically used like:
/// \code
///   OS << format("%0.4f", myfloat) << '\n';
/// \endcode
template <typename... Ts>
inline format_object<Ts...> format(const char *Fmt, const Ts &... Vals) {
  return format_object<Ts...>(Fmt, Vals...);
}

/// This is a helper class used for left_justify() and right_justify().
/// Members are private; raw_ostream reads them when rendering.
class FormattedString {
  StringRef Str;
  unsigned Width;
  bool RightJustify;
  friend class raw_ostream;

public:
  FormattedString(StringRef S, unsigned W, bool R)
      : Str(S), Width(W), RightJustify(R) { }
};

/// left_justify - append spaces after string so total output is
/// \p Width characters.  If \p Str is larger that \p Width, full string
/// is written with no padding.
inline FormattedString left_justify(StringRef Str, unsigned Width) {
  return FormattedString(Str, Width, false);
}

/// right_justify - add spaces before string so total output is
/// \p Width characters.  If \p Str is larger that \p Width, full string
/// is written with no padding.
inline FormattedString right_justify(StringRef Str, unsigned Width) {
  return FormattedString(Str, Width, true);
}

/// This is a helper class used for format_hex() and format_decimal().
/// Carries both a hex and a decimal payload; the Hex flag selects which
/// one raw_ostream renders.
class FormattedNumber {
  uint64_t HexValue;
  int64_t DecValue;
  unsigned Width;
  bool Hex;
  bool Upper;
  bool HexPrefix;
  friend class raw_ostream;

public:
  FormattedNumber(uint64_t HV, int64_t DV, unsigned W, bool H, bool U,
                  bool Prefix)
      : HexValue(HV), DecValue(DV), Width(W), Hex(H), Upper(U),
        HexPrefix(Prefix) {}
};

/// format_hex - Output \p N as a fixed width hexadecimal. If number will not
/// fit in width, full number is still printed.  Examples:
///   OS << format_hex(255, 4)              => 0xff
///   OS << format_hex(255, 4, true)        => 0xFF
///   OS << format_hex(255, 6)              => 0x00ff
///   OS << format_hex(255, 2)              => 0xff
inline FormattedNumber format_hex(uint64_t N, unsigned Width,
                                  bool Upper = false) {
  // 18 = "0x" prefix plus the 16 digits of a full 64-bit value.
  assert(Width <= 18 && "hex width must be <= 18");
  return FormattedNumber(N, 0, Width, true, Upper, true);
}

/// format_hex_no_prefix - Output \p N as a fixed width hexadecimal. Does not
/// prepend '0x' to the outputted string.  If number will not fit in width,
/// full number is still printed.  Examples:
///   OS << format_hex_no_prefix(255, 4)              => ff
///   OS << format_hex_no_prefix(255, 4, true)        => FF
///   OS << format_hex_no_prefix(255, 6)              => 00ff
///   OS << format_hex_no_prefix(255, 2)              => ff
inline FormattedNumber format_hex_no_prefix(uint64_t N, unsigned Width,
                                            bool Upper = false) {
  assert(Width <= 18 && "hex width must be <= 18");
  return FormattedNumber(N, 0, Width, true, Upper, false);
}

/// format_decimal - Output \p N as a right justified, fixed-width decimal. If
/// number will not fit in width, full number is still printed.  Examples:
///   OS << format_decimal(0, 5)     => "    0"
///   OS << format_decimal(255, 5)   => "  255"
///   OS << format_decimal(-1, 3)    => " -1"
///   OS << format_decimal(12345, 3) => "12345"
inline FormattedNumber format_decimal(int64_t N, unsigned Width) {
  return FormattedNumber(0, N, Width, false, false, false);
}

} // end namespace llvm

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/MD5.h
/*
 * This code is derived from (original license follows):
 *
 * This is an OpenSSL-compatible implementation of the RSA Data Security, Inc.
 * MD5 Message-Digest Algorithm (RFC 1321).
 *
 * Homepage:
 * http://openwall.info/wiki/people/solar/software/public-domain-source-code/md5
 *
 * Author:
 * Alexander Peslyak, better known as Solar Designer <solar at openwall.com>
 *
 * This software was written by Alexander Peslyak in 2001.  No copyright is
 * claimed, and the software is hereby placed in the public domain.
 * In case this attempt to disclaim copyright and place the software in the
 * public domain is deemed null and void, then the software is
 * Copyright (c) 2001 Alexander Peslyak and it is hereby released to the
 * general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * There's ABSOLUTELY NO WARRANTY, express or implied.
 *
 * See md5.c for more information.
 */

#ifndef LLVM_SUPPORT_MD5_H
#define LLVM_SUPPORT_MD5_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/DataTypes.h"

namespace llvm {

/// Incremental MD5 hasher: call update() any number of times, then final()
/// once to retrieve the 16-byte digest.
class MD5 {
  // Any 32-bit or wider unsigned integer data type will do.
  typedef uint32_t MD5_u32plus;

  // a..d: the four MD5 state words; hi/lo: 64-bit message length counter.
  MD5_u32plus a, b, c, d;
  MD5_u32plus hi, lo;
  // buffer: holds a partial (<64-byte) input chunk between update() calls.
  uint8_t buffer[64];
  MD5_u32plus block[16];

public:
  typedef uint8_t MD5Result[16];

  MD5();

  /// \brief Updates the hash for the byte stream provided.
  void update(ArrayRef<uint8_t> Data);

  /// \brief Updates the hash for the StringRef provided.
  void update(StringRef Str);

  /// \brief Finishes off the hash and puts the result in result.
  void final(MD5Result &Result);

  /// \brief Translates the bytes in \p Res to a hex string that is
  /// deposited into \p Str. The result will be of length 32.
  static void stringifyResult(MD5Result &Result, SmallString<32> &Str);

private:
  // Core transform: consumes whole 64-byte blocks from Data, returns a
  // pointer past the last block consumed.
  const uint8_t *body(ArrayRef<uint8_t> Data);
};

}

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/Errc.h
//===- llvm/Support/Errc.h - Defines the llvm::errc enum --------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// std::error_code is portable enough for our purposes, but std::errc has a
// few rough edges that this enumeration smooths over:
//
//  * In some standard libraries std::errc is a namespace rather than an enum
//    class, so argument-dependent lookup cannot find make_error_code; callers
//    there must write std::make_error_code, or in templates:
//        using std::make_error_code;
//        make_error_code(...);
//    With llvm::errc, a bare make_error_code(...) always works.
//
//  * Implementations disagree on exactly which names they provide.  This
//    header restricts itself to the intersection supported by all the ones
//    we build against.
//
//  * std::errc is registered only as an error *condition* enum, so a common
//    comparison such as AnErrorCode == errc::no_such_file_or_directory costs
//    four virtual calls instead of two plain comparisons.  llvm::errc is
//    registered as an error *code* enum below, avoiding that.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ERRC_H
#define LLVM_SUPPORT_ERRC_H

#include <system_error>

namespace llvm {

// Every enumerator reuses the numeric value of its std::errc counterpart, so
// the two enums stay freely comparable through std::error_code.
enum class errc {
  argument_list_too_long = int(std::errc::argument_list_too_long),
  argument_out_of_domain = int(std::errc::argument_out_of_domain),
  bad_address = int(std::errc::bad_address),
  bad_file_descriptor = int(std::errc::bad_file_descriptor),
  broken_pipe = int(std::errc::broken_pipe),
  device_or_resource_busy = int(std::errc::device_or_resource_busy),
  directory_not_empty = int(std::errc::directory_not_empty),
  executable_format_error = int(std::errc::executable_format_error),
  file_exists = int(std::errc::file_exists),
  file_too_large = int(std::errc::file_too_large),
  filename_too_long = int(std::errc::filename_too_long),
  function_not_supported = int(std::errc::function_not_supported),
  illegal_byte_sequence = int(std::errc::illegal_byte_sequence),
  inappropriate_io_control_operation =
      int(std::errc::inappropriate_io_control_operation),
  interrupted = int(std::errc::interrupted),
  invalid_argument = int(std::errc::invalid_argument),
  invalid_seek = int(std::errc::invalid_seek),
  io_error = int(std::errc::io_error),
  is_a_directory = int(std::errc::is_a_directory),
  no_child_process = int(std::errc::no_child_process),
  no_lock_available = int(std::errc::no_lock_available),
  no_space_on_device = int(std::errc::no_space_on_device),
  no_such_device_or_address = int(std::errc::no_such_device_or_address),
  no_such_device = int(std::errc::no_such_device),
  no_such_file_or_directory = int(std::errc::no_such_file_or_directory),
  no_such_process = int(std::errc::no_such_process),
  not_a_directory = int(std::errc::not_a_directory),
  not_enough_memory = int(std::errc::not_enough_memory),
  operation_not_permitted = int(std::errc::operation_not_permitted),
  permission_denied = int(std::errc::permission_denied),
  read_only_file_system = int(std::errc::read_only_file_system),
  resource_deadlock_would_occur = int(std::errc::resource_deadlock_would_occur),
  resource_unavailable_try_again =
      int(std::errc::resource_unavailable_try_again),
  result_out_of_range = int(std::errc::result_out_of_range),
  too_many_files_open_in_system = int(std::errc::too_many_files_open_in_system),
  too_many_files_open = int(std::errc::too_many_files_open),
  too_many_links = int(std::errc::too_many_links)
};

/// Wrap an llvm::errc value in a std::error_code over the generic category.
/// Found by ADL, so callers never need to qualify the call.
inline std::error_code make_error_code(errc E) {
  return std::error_code(static_cast<int>(E), std::generic_category());
}

} // namespace llvm

namespace std {
// Registering llvm::errc as an error-code enum enables implicit conversion to
// std::error_code and cheap comparisons against error codes.
template <> struct is_error_code_enum<llvm::errc> : std::true_type {};
} // namespace std

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/DOTGraphTraits.h
//===-- llvm/Support/DotGraphTraits.h - Customize .dot output ---*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a template class used to customize the dot graphs that
// GraphWriter.h emits.  Left unspecialized it produces a plain but valid
// graph; specializing DOTGraphTraits for a graph type lets clients override
// any subset of the hooks below.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_DOTGRAPHTRAITS_H
#define LLVM_SUPPORT_DOTGRAPHTRAITS_H

#include <string>

namespace llvm {

/// DefaultDOTGraphTraits - Supplies the default behavior for every
/// DOTGraphTraits hook.  Specializations that only need to override a few
/// hooks should inherit from this class to pick up the rest.
///
struct DefaultDOTGraphTraits {
private:
  bool IsSimple;

protected:
  // True when the writer asked for a simplified (less detailed) graph.
  bool isSimple() { return IsSimple; }

public:
  explicit DefaultDOTGraphTraits(bool simple = false) : IsSimple(simple) {}

  /// getGraphName - Return the label for the graph as a whole.  Printed at
  /// the top of the graph.
  ///
  template <typename GraphType>
  static std::string getGraphName(const GraphType &) {
    return "";
  }

  /// getGraphProperties - Return any custom properties that should be
  /// included in the top level graph structure for dot.
  ///
  template <typename GraphType>
  static std::string getGraphProperties(const GraphType &) {
    return "";
  }

  /// renderGraphFromBottomUp - If this function returns true, the graph is
  /// emitted bottom-up instead of top-down.  This requires graphviz 2.0 to
  /// work though.
  static bool renderGraphFromBottomUp() { return false; }

  /// isNodeHidden - If the function returns true, the given node is not
  /// displayed in the graph.
  static bool isNodeHidden(const void *) { return false; }

  /// getNodeLabel - Given a node and a pointer to the top level graph,
  /// return the label to print in the node.
  template <typename GraphType>
  std::string getNodeLabel(const void *, const GraphType &) {
    return "";
  }

  /// hasNodeAddressLabel - If this method returns true, the address of the
  /// node is added to the label of the node.
  template <typename GraphType>
  static bool hasNodeAddressLabel(const void *, const GraphType &) {
    return false;
  }

  template <typename GraphType>
  static std::string getNodeDescription(const void *, const GraphType &) {
    return "";
  }

  /// If you want to specify custom node attributes, this is the place to do
  /// so.
  ///
  template <typename GraphType>
  static std::string getNodeAttributes(const void *, const GraphType &) {
    return "";
  }

  /// If you want to override the dot attributes printed for a particular
  /// edge, override this method.
  template <typename EdgeIter, typename GraphType>
  static std::string getEdgeAttributes(const void *, EdgeIter,
                                       const GraphType &) {
    return "";
  }

  /// getEdgeSourceLabel - If you want to label the edge source itself,
  /// implement this method.
  template <typename EdgeIter>
  static std::string getEdgeSourceLabel(const void *, EdgeIter) {
    return "";
  }

  /// edgeTargetsEdgeSource - This method returns true if this outgoing edge
  /// should actually target another edge source, not a node.  If this method
  /// is implemented, getEdgeTarget should be implemented.
  template <typename EdgeIter>
  static bool edgeTargetsEdgeSource(const void *, EdgeIter) {
    return false;
  }

  /// getEdgeTarget - If edgeTargetsEdgeSource returns true, this method is
  /// called to determine which outgoing edge of Node is the target of this
  /// edge.
  template <typename EdgeIter>
  static EdgeIter getEdgeTarget(const void *, EdgeIter I) {
    return I;
  }

  /// hasEdgeDestLabels - If this function returns true, the graph is able
  /// to provide labels for edge destinations.
  static bool hasEdgeDestLabels() { return false; }

  /// numEdgeDestLabels - If hasEdgeDestLabels, this function returns the
  /// number of incoming edge labels the given node has.
  static unsigned numEdgeDestLabels(const void *) { return 0; }

  /// getEdgeDestLabel - If hasEdgeDestLabels, this function returns the
  /// incoming edge label with the given index in the given node.
  static std::string getEdgeDestLabel(const void *, unsigned) { return ""; }

  /// addCustomGraphFeatures - If a graph is made up of more than just
  /// straight-forward nodes and edges, this is the place to put all of the
  /// custom stuff necessary.  The GraphWriter object, instantiated with your
  /// GraphType is passed in as an argument.  You may call arbitrary methods
  /// on it to add things to the output graph.
  ///
  template <typename GraphType, typename GraphWriter>
  static void addCustomGraphFeatures(const GraphType &, GraphWriter &) {}
};

/// DOTGraphTraits - Template class that can be specialized to customize how
/// graphs are converted to 'dot' graphs.  When specializing, you may inherit
/// from DefaultDOTGraphTraits if you don't need to override everything.
///
template <typename Ty>
struct DOTGraphTraits : public DefaultDOTGraphTraits {
  DOTGraphTraits(bool simple = false) : DefaultDOTGraphTraits(simple) {}
};

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/Mutex.h
//===- llvm/Support/Mutex.h - Mutex Operating System Concept -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the llvm::sys::Mutex class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_MUTEX_H
#define LLVM_SUPPORT_MUTEX_H

#include "llvm/Support/Compiler.h"
#include "llvm/Support/Threading.h"
#include <cassert>

namespace llvm
{
  namespace sys
  {
    /// @brief Platform agnostic Mutex class.
    class MutexImpl
    {
    /// @name Constructors
    /// @{
    public:

      /// Initializes the lock but doesn't acquire it. if \p recursive is set
      /// to false, the lock will not be recursive which makes it cheaper but
      /// also more likely to deadlock (same thread can't acquire more than
      /// once).
      /// @brief Default Constructor.
      explicit MutexImpl(bool recursive = true);

      /// Releases and removes the lock
      /// @brief Destructor
      ~MutexImpl();

    /// @}
    /// @name Methods
    /// @{
    public:

      /// Attempts to unconditionally acquire the lock. If the lock is held by
      /// another thread, this method will wait until it can acquire the lock.
      /// @returns false if any kind of error occurs, true otherwise.
      /// @brief Unconditionally acquire the lock.
      bool acquire();

      /// Attempts to release the lock. If the lock is held by the current
      /// thread, the lock is released allowing other threads to acquire the
      /// lock.
      /// @returns false if any kind of error occurs, true otherwise.
      /// @brief Unconditionally release the lock.
      bool release();

      /// Attempts to acquire the lock without blocking. If the lock is not
      /// available, this function returns false quickly (without blocking). If
      /// the lock is available, it is acquired.
      /// @returns false if any kind of error occurs or the lock is not
      /// available, true otherwise.
      /// @brief Try to acquire the lock.
      bool tryacquire();

    //@}
    /// @name Platform Dependent Data
    /// @{
    private:
#if defined(LLVM_ENABLE_THREADS) && LLVM_ENABLE_THREADS != 0
#if LLVM_ON_WIN32 // HLSL Change
      // Inline storage for a Win32 CRITICAL_SECTION (40 bytes on x64,
      // 24 bytes on x86) to avoid pulling windows.h into this header.
      char data_[sizeof(void*) == 8 ? 40 : 24]; // C_ASSERT this is CRITICAL_SECTION-sized
#else
      void* data_; ///< We don't know what the data will be
#endif // HLSL Change
#endif

    /// @}
    /// @name Do Not Implement
    /// @{
    private:
      MutexImpl(const MutexImpl &) = delete;
      void operator=(const MutexImpl &) = delete;
    /// @}
    };

    /// SmartMutex - A mutex with a compile time constant parameter that
    /// indicates whether this mutex should become a no-op when we're not
    /// running in multithreaded mode.
    template<bool mt_only>
    class SmartMutex {
      MutexImpl impl;
      unsigned acquired;  // Single-threaded-mode acquisition count (see lock()).
      bool recursive;
    public:
      explicit SmartMutex(bool rec = true) :
        impl(rec), acquired(0), recursive(rec) { }

      bool lock() {
        if (!mt_only || llvm_is_multithreaded()) {
          return impl.acquire();
        } else {
          // Single-threaded debugging code.  This would be racy in
          // multithreaded mode, but provides some sanity checking in single
          // threaded mode.
          assert((recursive || acquired == 0) && "Lock already acquired!!");
          ++acquired;
          return true;
        }
      }

      bool unlock() {
        if (!mt_only || llvm_is_multithreaded()) {
          return impl.release();
        } else {
          // Single-threaded debugging code.  This would be racy in
          // multithreaded mode, but provides some sanity checking in single
          // threaded mode.
          assert(((recursive && acquired) || (acquired == 1)) &&
                 "Lock not acquired before release!");
          --acquired;
          return true;
        }
      }

      bool try_lock() {
        if (!mt_only || llvm_is_multithreaded())
          return impl.tryacquire();
        else return true;
      }

      private:
        SmartMutex(const SmartMutex<mt_only> & original);
        void operator=(const SmartMutex<mt_only> &);
    };

    /// Mutex - A standard, always enforced mutex.
    typedef SmartMutex<false> Mutex;

    /// SmartScopedLock - RAII wrapper: acquires the given SmartMutex on
    /// construction and releases it on destruction.
    template<bool mt_only>
    class SmartScopedLock {
      SmartMutex<mt_only>& mtx;

    public:
      SmartScopedLock(SmartMutex<mt_only>& m) : mtx(m) {
        mtx.lock();
      }

      ~SmartScopedLock() {
        mtx.unlock();
      }
    };

    typedef SmartScopedLock<false> ScopedLock;
  }
}

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/Signals.h
//===- llvm/Support/Signals.h - Signal Handling support ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines some helpful functions for dealing with the possibility of
// unix signals occurring while your program is running.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_SIGNALS_H
#define LLVM_SUPPORT_SIGNALS_H

#include <string>
#include <llvm/ADT/StringRef.h> // HLSL Change - StringRef is, in fact, referenced directly

namespace llvm {
class StringRef;
class raw_ostream;

namespace sys {

  /// This function runs all the registered interrupt handlers, including the
  /// removal of files registered by RemoveFileOnSignal.
  void RunInterruptHandlers();

  /// This function registers signal handlers to ensure that if a signal gets
  /// delivered that the named file is removed.
  /// @brief Remove a file if a fatal signal occurs.
  bool RemoveFileOnSignal(StringRef Filename, std::string* ErrMsg = nullptr);

  /// This function removes a file from the list of files to be removed on
  /// signal delivery.
  void DontRemoveFileOnSignal(StringRef Filename);

  /// When an error signal (such as SIGABRT or SIGSEGV) is delivered to the
  /// process, print a stack trace and then exit.
  /// @brief Print a stack trace if a fatal signal occurs.
  void PrintStackTraceOnErrorSignal(bool DisableCrashReporting = false);

  /// Disable all system dialog boxes that appear when the process crashes.
  void DisableSystemDialogsOnCrash();

  /// \brief Print the stack trace using the given \c raw_ostream object.
  void PrintStackTrace(raw_ostream &OS);

  /// AddSignalHandler - Add a function to be called when an abort/kill signal
  /// is delivered to the process.  The handler can have a cookie passed to it
  /// to identify what instance of the handler it is.
  void AddSignalHandler(void (*FnPtr)(void *), void *Cookie);

  /// This function registers a function to be called when the user "interrupts"
  /// the program (typically by pressing ctrl-c).  When the user interrupts the
  /// program, the specified interrupt function is called instead of the program
  /// being killed, and the interrupt function automatically disabled.  Note
  /// that interrupt functions are not allowed to call any non-reentrant
  /// functions.  A null interrupt function pointer disables the current
  /// installed function.  Note also that the handler may be executed on a
  /// different thread on some platforms.
  /// @brief Register a function to be called when ctrl-c is pressed.
  void SetInterruptFunction(void (*IF)());
} // End sys namespace
} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/GenericDomTreeConstruction.h
//===- GenericDomTreeConstruction.h - Dominator Calculation ------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Generic dominator tree construction - This file provides routines to
/// construct immediate dominator information for a flow-graph based on the
/// algorithm described in this document:
///
///   A Fast Algorithm for Finding Dominators in a Flowgraph
///   T. Lengauer & R. Tarjan, ACM TOPLAS July 1979, pgs 121-141.
///
/// This implements the O(n*log(n)) versions of EVAL and LINK, because it turns
/// out that the theoretically slower O(n*log(n)) implementation is actually
/// faster than the almost-linear O(n*alpha(n)) version, even for large CFGs.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_GENERICDOMTREECONSTRUCTION_H
#define LLVM_SUPPORT_GENERICDOMTREECONSTRUCTION_H

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/GenericDomTree.h"

namespace llvm {

/// DFSPass - Assign DFS (preorder) numbers starting at N+1 to every node
/// reachable from \p V, recording parent/semi/label info in DT.Info and the
/// preorder sequence in DT.Vertex.  Returns the highest DFS number assigned.
/// A nonzero incoming \p N means an artificial exit node (number 1) was
/// already created, and V's children are parented to it.
template<class GraphT>
unsigned DFSPass(DominatorTreeBase<typename GraphT::NodeType>& DT,
                 typename GraphT::NodeType* V, unsigned N) {
  // This is more understandable as a recursive algorithm, but we can't use the
  // recursive algorithm due to stack depth issues.  Keep it here for
  // documentation purposes.
#if 0
  InfoRec &VInfo = DT.Info[DT.Roots[i]];
  VInfo.DFSNum = VInfo.Semi = ++N;
  VInfo.Label = V;

  Vertex.push_back(V);        // Vertex[n] = V;

  for (succ_iterator SI = succ_begin(V), E = succ_end(V); SI != E; ++SI) {
    InfoRec &SuccVInfo = DT.Info[*SI];
    if (SuccVInfo.Semi == 0) {
      SuccVInfo.Parent = V;
      N = DTDFSPass(DT, *SI, N);
    }
  }
#else
  bool IsChildOfArtificialExit = (N != 0);

  // Explicit DFS worklist: each entry is a node plus the next child iterator
  // to visit, emulating the recursion above without deep native stacks.
  SmallVector<std::pair<typename GraphT::NodeType*,
                        typename GraphT::ChildIteratorType>, 32> Worklist;
  Worklist.push_back(std::make_pair(V, GraphT::child_begin(V)));
  while (!Worklist.empty()) {
    typename GraphT::NodeType* BB = Worklist.back().first;
    typename GraphT::ChildIteratorType NextSucc = Worklist.back().second;

    typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &BBInfo =
                                                                    DT.Info[BB];

    // First time we visited this BB?
    if (NextSucc == GraphT::child_begin(BB)) {
      BBInfo.DFSNum = BBInfo.Semi = ++N;
      BBInfo.Label = BB;

      DT.Vertex.push_back(BB);       // Vertex[n] = V;

      if (IsChildOfArtificialExit)
        BBInfo.Parent = 1;

      IsChildOfArtificialExit = false;
    }

    // store the DFS number of the current BB - the reference to BBInfo might
    // get invalidated when processing the successors.
    unsigned BBDFSNum = BBInfo.DFSNum;

    // If we are done with this block, remove it from the worklist.
    if (NextSucc == GraphT::child_end(BB)) {
      Worklist.pop_back();
      continue;
    }

    // Increment the successor number for the next time we get to it.
    ++Worklist.back().second;

    // Visit the successor next, if it isn't already visited.
    typename GraphT::NodeType* Succ = *NextSucc;

    // For clang, CFG successors can be optimized-out nullptrs. Skip those.
    if (!Succ)
      continue;

    typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &SuccVInfo =
                                                                  DT.Info[Succ];
    if (SuccVInfo.Semi == 0) {
      SuccVInfo.Parent = BBDFSNum;
      Worklist.push_back(std::make_pair(Succ, GraphT::child_begin(Succ)));
    }
  }
#endif
    return N;
}

/// Eval - The EVAL function of Lengauer-Tarjan with path compression:
/// returns the vertex with minimal-semi label on the forest path from
/// \p VIn toward its tree root, considering only vertices whose DFS number
/// is >= \p LastLinked (those already LINKed in the implicit forest).
/// Compresses DT.Info[...].Parent/Label along the walked path as it goes.
template<class GraphT>
typename GraphT::NodeType*
Eval(DominatorTreeBase<typename GraphT::NodeType>& DT,
     typename GraphT::NodeType *VIn, unsigned LastLinked) {
  typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &VInInfo =
                                                                  DT.Info[VIn];
  if (VInInfo.DFSNum < LastLinked)
    return VIn;

  SmallVector<typename GraphT::NodeType*, 32> Work;
  SmallPtrSet<typename GraphT::NodeType*, 32> Visited;

  if (VInInfo.Parent >= LastLinked)
    Work.push_back(VIn);

  while (!Work.empty()) {
    typename GraphT::NodeType* V = Work.back();
    typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &VInfo =
                                                                     DT.Info[V];
    typename GraphT::NodeType* VAncestor = DT.Vertex[VInfo.Parent];

    // Process Ancestor first
    if (Visited.insert(VAncestor).second && VInfo.Parent >= LastLinked) {
      Work.push_back(VAncestor);
      continue;
    }
    Work.pop_back();

    // Update VInfo based on Ancestor info
    if (VInfo.Parent < LastLinked)
      continue;

    typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &VAInfo =
                                                              DT.Info[VAncestor];
    typename GraphT::NodeType* VAncestorLabel = VAInfo.Label;
    typename GraphT::NodeType* VLabel = VInfo.Label;
    if (DT.Info[VAncestorLabel].Semi < DT.Info[VLabel].Semi)
      VInfo.Label = VAncestorLabel;
    VInfo.Parent = VAInfo.Parent;
  }

  return VInInfo.Label;
}

/// Calculate - Build the (post)dominator tree for function \p F into \p DT
/// using the four-step Lengauer-Tarjan algorithm: DFS numbering, implicit
/// idom definition via buckets, semidominator computation, and explicit idom
/// resolution.  Finally materializes the DomTreeNode hierarchy and clears the
/// temporary per-vertex state.
template<class FuncT, class NodeT>
void Calculate(DominatorTreeBase<typename GraphTraits<NodeT>::NodeType>& DT,
               FuncT& F) {
  typedef GraphTraits<NodeT> GraphT;

  unsigned N = 0;
  bool MultipleRoots = (DT.Roots.size() > 1);
  if (MultipleRoots) {
    // Multiple roots (post-dominators): create a virtual exit node numbered 1
    // (represented by a null NodeType*) that all real roots hang from.
    typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &BBInfo =
        DT.Info[nullptr];
    BBInfo.DFSNum = BBInfo.Semi = ++N;
    BBInfo.Label = nullptr;

    DT.Vertex.push_back(nullptr);       // Vertex[n] = V;
  }

  // Step #1: Number blocks in depth-first order and initialize variables used
  // in later stages of the algorithm.
  for (unsigned i = 0, e = static_cast<unsigned>(DT.Roots.size());
       i != e; ++i)
    N = DFSPass<GraphT>(DT, DT.Roots[i], N);

  // it might be that some blocks did not get a DFS number (e.g., blocks of
  // infinite loops). In these cases an artificial exit node is required.
  MultipleRoots |= (DT.isPostDominator() && N != GraphTraits<FuncT*>::size(&F));

  // When naively implemented, the Lengauer-Tarjan algorithm requires a separate
  // bucket for each vertex. However, this is unnecessary, because each vertex
  // is only placed into a single bucket (that of its semidominator), and each
  // vertex's bucket is processed before it is added to any bucket itself.
  //
  // Instead of using a bucket per vertex, we use a single array Buckets that
  // has two purposes. Before the vertex V with preorder number i is processed,
  // Buckets[i] stores the index of the first element in V's bucket. After V's
  // bucket is processed, Buckets[i] stores the index of the next element in the
  // bucket containing V, if any.
  SmallVector<unsigned, 32> Buckets;
  Buckets.resize(N + 1);
  for (unsigned i = 1; i <= N; ++i)
    Buckets[i] = i;

  // Main loop: visit vertices in reverse preorder (steps #2 and #3).
  for (unsigned i = N; i >= 2; --i) {
    typename GraphT::NodeType* W = DT.Vertex[i];
    typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &WInfo =
                                                                     DT.Info[W];

    // Step #2: Implicitly define the immediate dominator of vertices
    for (unsigned j = i; Buckets[j] != i; j = Buckets[j]) {
      typename GraphT::NodeType* V = DT.Vertex[Buckets[j]];
      typename GraphT::NodeType* U = Eval<GraphT>(DT, V, i + 1);
      DT.IDoms[V] = DT.Info[U].Semi < i ? U : W;
    }

    // Step #3: Calculate the semidominators of all vertices

    // initialize the semi dominator to point to the parent node
    WInfo.Semi = WInfo.Parent;
    typedef GraphTraits<Inverse<NodeT> > InvTraits;
    for (typename InvTraits::ChildIteratorType CI =
         InvTraits::child_begin(W),
         E = InvTraits::child_end(W); CI != E; ++CI) {
      typename InvTraits::NodeType *N = *CI;
      if (DT.Info.count(N)) {  // Only if this predecessor is reachable!
        unsigned SemiU = DT.Info[Eval<GraphT>(DT, N, i + 1)].Semi;
        if (SemiU < WInfo.Semi)
          WInfo.Semi = SemiU;
      }
    }

    // If V is a non-root vertex and sdom(V) = parent(V), then idom(V) is
    // necessarily parent(V). In this case, set idom(V) here and avoid placing
    // V into a bucket.
    if (WInfo.Semi == WInfo.Parent) {
      DT.IDoms[W] = DT.Vertex[WInfo.Parent];
    } else {
      Buckets[i] = Buckets[WInfo.Semi];
      Buckets[WInfo.Semi] = i;
    }
  }

  // Everything still left in the root's bucket is immediately dominated by
  // the root.
  if (N >= 1) {
    typename GraphT::NodeType* Root = DT.Vertex[1];
    for (unsigned j = 1; Buckets[j] != 1; j = Buckets[j]) {
      typename GraphT::NodeType* V = DT.Vertex[Buckets[j]];
      DT.IDoms[V] = Root;
    }
  }

  // Step #4: Explicitly define the immediate dominator of each vertex
  for (unsigned i = 2; i <= N; ++i) {
    typename GraphT::NodeType* W = DT.Vertex[i];
    typename GraphT::NodeType*& WIDom = DT.IDoms[W];
    if (WIDom != DT.Vertex[DT.Info[W].Semi])
      WIDom = DT.IDoms[WIDom];
  }

  if (DT.Roots.empty()) return;

  // Add a node for the root.  This node might be the actual root, if there is
  // one exit block, or it may be the virtual exit (denoted by (BasicBlock *)0)
  // which postdominates all real exits if there are multiple exit blocks, or
  // an infinite loop.
  typename GraphT::NodeType* Root = !MultipleRoots ? DT.Roots[0] : nullptr;

  DT.RootNode =
      (DT.DomTreeNodes[Root] =
           llvm::make_unique<DomTreeNodeBase<typename GraphT::NodeType>>(
               Root, nullptr)).get();

  // Loop over all of the reachable blocks in the function...
  for (unsigned i = 2; i <= N; ++i) {
    typename GraphT::NodeType* W = DT.Vertex[i];

    // Don't replace this with 'count', the insertion side effect is important
    if (DT.DomTreeNodes[W])
      continue; // Haven't calculated this node yet?

    typename GraphT::NodeType* ImmDom = DT.getIDom(W);

    assert(ImmDom || DT.DomTreeNodes[nullptr]);

    // Get or calculate the node for the immediate dominator
    DomTreeNodeBase<typename GraphT::NodeType> *IDomNode =
                                                     DT.getNodeForBlock(ImmDom);

    // Add a new tree node for this BasicBlock, and link it as a child of
    // IDomNode
    DT.DomTreeNodes[W] = IDomNode->addChild(
        llvm::make_unique<DomTreeNodeBase<typename GraphT::NodeType>>(
            W, IDomNode));
  }

  // Free temporary memory used to construct idom's
  DT.IDoms.clear();
  DT.Info.clear();
  DT.Vertex.clear();
  DT.Vertex.shrink_to_fit();

  DT.updateDFSNumbers();
}

}

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/MutexGuard.h
//===-- Support/MutexGuard.h - Acquire/Release Mutex In Scope ---*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines a guard for a block of code that ensures a Mutex is locked // upon construction and released upon destruction. // //===----------------------------------------------------------------------===// #ifndef LLVM_SUPPORT_MUTEXGUARD_H #define LLVM_SUPPORT_MUTEXGUARD_H #include "llvm/Support/Mutex.h" namespace llvm { /// Instances of this class acquire a given Mutex Lock when constructed and /// hold that lock until destruction. The intention is to instantiate one of /// these on the stack at the top of some scope to be assured that C++ /// destruction of the object will always release the Mutex and thus avoid /// a host of nasty multi-threading problems in the face of exceptions, etc. /// @brief Guard a section of code with a Mutex. class MutexGuard { sys::Mutex &M; MutexGuard(const MutexGuard &) = delete; void operator=(const MutexGuard &) = delete; public: MutexGuard(sys::Mutex &m) : M(m) { M.lock(); } ~MutexGuard() { M.unlock(); } /// holds - Returns true if this locker instance holds the specified lock. /// This is mostly used in assertions to validate that the correct mutex /// is held. bool holds(const sys::Mutex& lock) const { return &M == &lock; } }; } #endif // LLVM_SUPPORT_MUTEXGUARD_H
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/CommandLine.h
//===- llvm/Support/CommandLine.h - Command line handler --------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This class implements a command line argument processor that is useful when // creating a tool. It provides a simple, minimalistic interface that is easily // extensible and supports nonlocal (library) command line options. // // Note that rather than trying to figure out what this code does, you should // read the library documentation located in docs/CommandLine.html or looks at // the many example usages in tools/*/*.cpp // //===----------------------------------------------------------------------===// #ifndef LLVM_SUPPORT_COMMANDLINE_H #define LLVM_SUPPORT_COMMANDLINE_H #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/Twine.h" #include "llvm/Support/Compiler.h" #include <cassert> #include <climits> #include <cstdarg> #include <utility> #include <vector> namespace llvm { class BumpPtrStringSaver; class StringSaver; /// cl Namespace - This namespace contains all of the command line option /// processing machinery. It is intentionally a short name to make qualified /// usage concise. namespace cl { //===----------------------------------------------------------------------===// // ParseCommandLineOptions - Command line option processing entry point. // void ParseCommandLineOptions(int argc, const char *const *argv, const char *Overview = nullptr); //===----------------------------------------------------------------------===// // ParseEnvironmentOptions - Environment variable option processing alternate // entry point. 
// void ParseEnvironmentOptions(const char *progName, const char *envvar, const char *Overview = nullptr); ///===---------------------------------------------------------------------===// /// SetVersionPrinter - Override the default (LLVM specific) version printer /// used to print out the version when --version is given /// on the command line. This allows other systems using the /// CommandLine utilities to print their own version string. void SetVersionPrinter(void (*func)()); ///===---------------------------------------------------------------------===// /// AddExtraVersionPrinter - Add an extra printer to use in addition to the /// default one. This can be called multiple times, /// and each time it adds a new function to the list /// which will be called after the basic LLVM version /// printing is complete. Each can then add additional /// information specific to the tool. void AddExtraVersionPrinter(void (*func)()); // PrintOptionValues - Print option values. // With -print-options print the difference between option values and defaults. // With -print-all-options print all option values. // (Currently not perfect, but best-effort.) void PrintOptionValues(); // Forward declaration - AddLiteralOption needs to be up here to make gcc happy. class Option; /// \brief Adds a new option for parsing and provides the option it refers to. /// /// \param O pointer to the option /// \param Name the string name for the option to handle during parsing /// /// Literal options are used by some parsers to register special option values. /// This is how the PassNameParser registers pass names for opt. 
void AddLiteralOption(Option &O, const char *Name); //===----------------------------------------------------------------------===// // Flags permitted to be passed to command line arguments // enum NumOccurrencesFlag { // Flags for the number of occurrences allowed Optional = 0x00, // Zero or One occurrence ZeroOrMore = 0x01, // Zero or more occurrences allowed Required = 0x02, // One occurrence required OneOrMore = 0x03, // One or more occurrences required // ConsumeAfter - Indicates that this option is fed anything that follows the // last positional argument required by the application (it is an error if // there are zero positional arguments, and a ConsumeAfter option is used). // Thus, for example, all arguments to LLI are processed until a filename is // found. Once a filename is found, all of the succeeding arguments are // passed, unprocessed, to the ConsumeAfter option. // ConsumeAfter = 0x04 }; enum ValueExpected { // Is a value required for the option? // zero reserved for the unspecified value ValueOptional = 0x01, // The value can appear... or not ValueRequired = 0x02, // The value is required to appear! ValueDisallowed = 0x03 // A value may not be specified (for flags) }; enum OptionHidden { // Control whether -help shows this option NotHidden = 0x00, // Option included in -help & -help-hidden Hidden = 0x01, // -help doesn't, but -help-hidden does ReallyHidden = 0x02 // Neither -help nor -help-hidden show this arg }; // Formatting flags - This controls special features that the option might have // that cause it to be parsed differently... // // Prefix - This option allows arguments that are otherwise unrecognized to be // matched by options that are a prefix of the actual value. This is useful for // cases like a linker, where options are typically of the form '-lfoo' or // '-L../../include' where -l or -L are the actual flags. When prefix is // enabled, and used, the value for the flag comes from the suffix of the // argument. 
// // Grouping - With this option enabled, multiple letter options are allowed to // bunch together with only a single hyphen for the whole group. This allows // emulation of the behavior that ls uses for example: ls -la === ls -l -a // enum FormattingFlags { NormalFormatting = 0x00, // Nothing special Positional = 0x01, // Is a positional argument, no '-' required Prefix = 0x02, // Can this option directly prefix its value? Grouping = 0x03 // Can this option group with other options? }; enum MiscFlags { // Miscellaneous flags to adjust argument CommaSeparated = 0x01, // Should this cl::list split between commas? PositionalEatsArgs = 0x02, // Should this positional cl::list eat -args? Sink = 0x04 // Should this cl::list eat all unknown options? }; //===----------------------------------------------------------------------===// // Option Category class // class OptionCategory { private: const char *const Name; const char *const Description; void registerCategory(); public: OptionCategory(const char *const Name, const char *const Description = nullptr) : Name(Name), Description(Description) { registerCategory(); } const char *getName() const { return Name; } const char *getDescription() const { return Description; } }; // The general Option Category (used as default category). extern OptionCategory *GeneralCategory; // HLSL Change - GeneralCategory is now a pointer //===----------------------------------------------------------------------===// // Option Base class // class alias; class Option { friend class alias; // handleOccurrences - Overriden by subclasses to handle the value passed into // an argument. Should return true if there was an error processing the // argument and the program should exit. // virtual bool handleOccurrence(unsigned pos, StringRef ArgName, StringRef Arg) = 0; virtual enum ValueExpected getValueExpectedFlagDefault() const { return ValueOptional; } // Out of line virtual function to provide home for the class. 
virtual void anchor();

  int NumOccurrences; // The number of times specified
  // Occurrences, HiddenFlag, and Formatting are all enum types but to avoid
  // problems with signed enums in bitfields.
  unsigned Occurrences : 3; // enum NumOccurrencesFlag
  // not using the enum type for 'Value' because zero is an implementation
  // detail representing the non-value
  unsigned Value : 2;
  unsigned HiddenFlag : 2; // enum OptionHidden
  unsigned Formatting : 2; // enum FormattingFlags
  unsigned Misc : 3;
  unsigned Position;       // Position of last occurrence of the option
  unsigned AdditionalVals; // Greater than 0 for multi-valued option.

public:
  const char *ArgStr;   // The argument string itself (ex: "help", "o")
  const char *HelpStr;  // The descriptive text message for -help
  const char *ValueStr; // String describing what the value of this option is
  OptionCategory *Category; // The Category this option belongs to
  bool FullyInitialized;    // Has addArgument been called?

  inline enum NumOccurrencesFlag getNumOccurrencesFlag() const {
    return (enum NumOccurrencesFlag)Occurrences;
  }

  // Returns the explicitly-set value-expectation if one was given; otherwise
  // falls back to the parser-provided default (Value == 0 means "unset").
  inline enum ValueExpected getValueExpectedFlag() const {
    return Value ? ((enum ValueExpected)Value) : getValueExpectedFlagDefault();
  }

  inline enum OptionHidden getOptionHiddenFlag() const {
    return (enum OptionHidden)HiddenFlag;
  }

  inline enum FormattingFlags getFormattingFlag() const {
    return (enum FormattingFlags)Formatting;
  }

  inline unsigned getMiscFlags() const { return Misc; }
  inline unsigned getPosition() const { return Position; }
  inline unsigned getNumAdditionalVals() const { return AdditionalVals; }

  // hasArgStr - Return true if the argstr != ""
  bool hasArgStr() const { return ArgStr[0] != 0; }

  //-------------------------------------------------------------------------===
  // Accessor functions set by OptionModifiers
  //
  void setArgStr(const char *S);
  void setDescription(const char *S) { HelpStr = S; }
  void setValueStr(const char *S) { ValueStr = S; }
  void setNumOccurrencesFlag(enum NumOccurrencesFlag Val) { Occurrences = Val; }
  void setValueExpectedFlag(enum ValueExpected Val) { Value = Val; }
  void setHiddenFlag(enum OptionHidden Val) { HiddenFlag = Val; }
  void setFormattingFlag(enum FormattingFlags V) { Formatting = V; }
  // Misc flags are a bitmask, so multiple modifiers accumulate.
  void setMiscFlag(enum MiscFlags M) { Misc |= M; }
  void setPosition(unsigned pos) { Position = pos; }
  void setCategory(OptionCategory &C) { Category = &C; }

protected:
  explicit Option(enum NumOccurrencesFlag OccurrencesFlag,
                  enum OptionHidden Hidden)
      : NumOccurrences(0), Occurrences(OccurrencesFlag), Value(0),
        HiddenFlag(Hidden), Formatting(NormalFormatting), Misc(0), Position(0),
        AdditionalVals(0), ArgStr(""), HelpStr(""), ValueStr(""),
        Category(GeneralCategory),
        FullyInitialized(false) {} // HLSL Change - not GeneralCategory

  inline void setNumAdditionalVals(unsigned n) { AdditionalVals = n; }

public:
  // addArgument - Register this argument with the commandline system.
  //
  void addArgument();

  /// Unregisters this option from the CommandLine system.
  ///
  /// This option must have been the last option registered.
  /// For testing purposes only.
  void removeArgument();

  // Return the width of the option tag for printing...
  virtual size_t getOptionWidth() const = 0;

  // printOptionInfo - Print out information about this option.  The
  // to-be-maintained width is specified.
  //
  virtual void printOptionInfo(size_t GlobalWidth) const = 0;

  virtual void printOptionValue(size_t GlobalWidth, bool Force) const = 0;

  virtual void getExtraOptionNames(SmallVectorImpl<const char *> &) {}

  // addOccurrence - Wrapper around handleOccurrence that enforces Flags.
  //
  virtual bool addOccurrence(unsigned pos, StringRef ArgName, StringRef Value,
                             bool MultiArg = false);

  // Prints option name followed by message.  Always returns true.
  bool error(const Twine &Message, StringRef ArgName = StringRef());

public:
  inline int getNumOccurrences() const { return NumOccurrences; }
  virtual ~Option() {}
};

//===----------------------------------------------------------------------===//
// Command line option modifiers that can be used to modify the behavior of
// command line option parsers...
//

// desc - Modifier to set the description shown in the -help output...
struct desc {
  const char *Desc;

  desc(const char *Str) : Desc(Str) {}

  void apply(Option &O) const { O.setDescription(Desc); }
};

// value_desc - Modifier to set the value description shown in the -help
// output...
struct value_desc {
  const char *Desc;

  value_desc(const char *Str) : Desc(Str) {}

  void apply(Option &O) const { O.setValueStr(Desc); }
};

// init - Specify a default (initial) value for the command line argument, if
// the default constructor for the argument type does not give you what you
// want.  This is only valid on "opt" arguments, not on "list" arguments.
//
// Modifier object produced by cl::init(); forwards the value to the option's
// setInitialValue() when applied.
template <class Ty> struct initializer {
  const Ty &Init;
  initializer(const Ty &Val) : Init(Val) {}

  template <class Opt> void apply(Opt &O) const { O.setInitialValue(Init); }
};

template <class Ty> initializer<Ty> init(const Ty &Val) {
  return initializer<Ty>(Val);
}

// location - Allow the user to specify which external variable they want to
// store the results of the command line argument processing into, if they don't
// want to store it in the option itself.
//
template <class Ty> struct LocationClass {
  Ty &Loc;
  LocationClass(Ty &L) : Loc(L) {}

  template <class Opt> void apply(Opt &O) const { O.setLocation(O, Loc); }
};

template <class Ty> LocationClass<Ty> location(Ty &L) {
  return LocationClass<Ty>(L);
}

// cat - Specifiy the Option category for the command line argument to belong
// to.
struct cat {
  OptionCategory &Category;
  cat(OptionCategory &c) : Category(c) {}

  template <class Opt> void apply(Opt &O) const { O.setCategory(Category); }
};

//===----------------------------------------------------------------------===//
// OptionValue class

// Support value comparison outside the template.
struct GenericOptionValue {
  virtual bool compare(const GenericOptionValue &V) const = 0;

protected:
  // Non-virtual protected destructor: this base is never deleted
  // polymorphically.
  ~GenericOptionValue() = default;
  GenericOptionValue() = default;
  GenericOptionValue(const GenericOptionValue &) = default;
  GenericOptionValue &operator=(const GenericOptionValue &) = default;

private:
  virtual void anchor();
};

template <class DataType> struct OptionValue;

// The default value safely does nothing. Option value printing is only
// best-effort.
template <class DataType, bool isClass>
struct OptionValueBase : public GenericOptionValue {
  // Temporary storage for argument passing.
  typedef OptionValue<DataType> WrapperType;

  bool hasValue() const { return false; }

  const DataType &getValue() const { llvm_unreachable("no default value"); }

  // Some options may take their value from a different data type.
  template <class DT> void setValue(const DT & /*V*/) {}

  bool compare(const DataType & /*V*/) const { return false; }

  bool compare(const GenericOptionValue & /*V*/) const override {
    return false;
  }

protected:
  ~OptionValueBase() = default;
};

// Simple copy of the option value.
template <class DataType> class OptionValueCopy : public GenericOptionValue {
  DataType Value;
  bool Valid;

protected:
  ~OptionValueCopy() = default;
  OptionValueCopy(const OptionValueCopy &) = default;
  OptionValueCopy &operator=(const OptionValueCopy &) = default;

public:
  OptionValueCopy() : Valid(false) {}

  bool hasValue() const { return Valid; }

  const DataType &getValue() const {
    assert(Valid && "invalid option value");
    return Value;
  }

  void setValue(const DataType &V) {
    Valid = true;
    Value = V;
  }

  // Note: returns true when the values *differ* (and this one is valid);
  // callers use it to decide whether a non-default value should be printed.
  bool compare(const DataType &V) const { return Valid && (Value != V); }

  bool compare(const GenericOptionValue &V) const override {
    const OptionValueCopy<DataType> &VC =
        static_cast<const OptionValueCopy<DataType> &>(V);
    if (!VC.hasValue())
      return false;
    return compare(VC.getValue());
  }
};

// Non-class option values.
template <class DataType>
struct OptionValueBase<DataType, false> : OptionValueCopy<DataType> {
  typedef DataType WrapperType;

protected:
  ~OptionValueBase() = default;
  OptionValueBase() = default;
  OptionValueBase(const OptionValueBase &) = default;
  OptionValueBase &operator=(const OptionValueBase &) = default;
};

// Top-level option class.
template <class DataType>
struct OptionValue final
    : OptionValueBase<DataType, std::is_class<DataType>::value> {
  OptionValue() = default;

  OptionValue(const DataType &V) { this->setValue(V); }

  // Some options may take their value from a different data type.
  template <class DT> OptionValue<DataType> &operator=(const DT &V) {
    this->setValue(V);
    return *this;
  }
};

// Other safe-to-copy-by-value common option types.
enum boolOrDefault { BOU_UNSET, BOU_TRUE, BOU_FALSE };

template <>
struct OptionValue<cl::boolOrDefault> final
    : OptionValueCopy<cl::boolOrDefault> {
  typedef cl::boolOrDefault WrapperType;

  OptionValue() {}

  OptionValue(const cl::boolOrDefault &V) { this->setValue(V); }

  OptionValue<cl::boolOrDefault> &operator=(const cl::boolOrDefault &V) {
    setValue(V);
    return *this;
  }

private:
  void anchor() override;
};

template <> struct OptionValue<std::string> final : OptionValueCopy<std::string> {
  typedef StringRef WrapperType;

  OptionValue() {}

  OptionValue(const std::string &V) { this->setValue(V); }

  OptionValue<std::string> &operator=(const std::string &V) {
    setValue(V);
    return *this;
  }

private:
  void anchor() override;
};

//===----------------------------------------------------------------------===//
// Enum valued command line option
//
#define clEnumVal(ENUMVAL, DESC) #ENUMVAL, int(ENUMVAL), DESC
#define clEnumValN(ENUMVAL, FLAGNAME, DESC) FLAGNAME, int(ENUMVAL), DESC
#define clEnumValEnd (reinterpret_cast<void *>(0))

// values - For custom data types, allow specifying a group of values together
// as the values that go into the mapping that the option handler uses.  Note
// that the values list must always have a 0 at the end of the list to indicate
// that the list has ended.
//
template <class DataType> class ValuesClass {
  // Use a vector instead of a map, because the lists should be short,
  // the overhead is less, and most importantly, it keeps them in the order
  // inserted so we can print our option out nicely.
  SmallVector<std::pair<const char *, std::pair<int, const char *>>, 4> Values;
  void processValues(va_list Vals);

public:
  // Consumes (name, value, description) triples from ValueArgs until a null
  // name terminator is read (hence the clEnumValEnd requirement above).
  ValuesClass(const char *EnumName, DataType Val, const char *Desc,
              va_list ValueArgs) {
    // Insert the first value, which is required.
    Values.push_back(std::make_pair(EnumName, std::make_pair(Val, Desc)));

    // Process the varargs portion of the values...
    while (const char *enumName = va_arg(ValueArgs, const char *)) {
      DataType EnumVal = static_cast<DataType>(va_arg(ValueArgs, int));
      const char *EnumDesc = va_arg(ValueArgs, const char *);
      Values.push_back(std::make_pair(enumName, // Add value to value map
                                      std::make_pair(EnumVal, EnumDesc)));
    }
  }

  // Register every collected literal with the option's parser.
  template <class Opt> void apply(Opt &O) const {
    for (size_t i = 0, e = Values.size(); i != e; ++i)
      O.getParser().addLiteralOption(Values[i].first, Values[i].second.first,
                                     Values[i].second.second);
  }
};

template <class DataType>
ValuesClass<DataType> LLVM_END_WITH_NULL
values(const char *Arg, DataType Val, const char *Desc, ...) {
  va_list ValueArgs;
  va_start(ValueArgs, Desc);
  ValuesClass<DataType> Vals(Arg, Val, Desc, ValueArgs);
  va_end(ValueArgs);
  return Vals;
}

//===----------------------------------------------------------------------===//
// parser class - Parameterizable parser for different data types.  By default,
// known data types (string, int, bool) have specialized parsers, that do what
// you would expect.  The default parser, used for data types that are not
// built-in, uses a mapping table to map specific options to values, which is
// used, among other things, to handle enum types.

//--------------------------------------------------
// generic_parser_base - This class holds all the non-generic code that we do
// not need replicated for every instance of the generic parser.  This also
// allows us to put stuff into CommandLine.cpp
//
class generic_parser_base {
protected:
  class GenericOptionInfo {
  public:
    GenericOptionInfo(const char *name, const char *helpStr)
        : Name(name), HelpStr(helpStr) {}
    const char *Name;
    const char *HelpStr;
  };

public:
  generic_parser_base(Option &O) : Owner(O) {}

  virtual ~generic_parser_base() {} // Base class should have virtual-dtor

  // getNumOptions - Virtual function implemented by generic subclass to
  // indicate how many entries are in Values.
  //
  virtual unsigned getNumOptions() const = 0;

  // getOption - Return option name N.
  virtual const char *getOption(unsigned N) const = 0;

  // getDescription - Return description N
  virtual const char *getDescription(unsigned N) const = 0;

  // Return the width of the option tag for printing...
  virtual size_t getOptionWidth(const Option &O) const;

  virtual const GenericOptionValue &getOptionValue(unsigned N) const = 0;

  // printOptionInfo - Print out information about this option.  The
  // to-be-maintained width is specified.
  //
  virtual void printOptionInfo(const Option &O, size_t GlobalWidth) const;

  void printGenericOptionDiff(const Option &O, const GenericOptionValue &V,
                              const GenericOptionValue &Default,
                              size_t GlobalWidth) const;

  // printOptionDiff - print the value of an option and its default.
  //
  // Template definition ensures that the option and default have the same
  // DataType (via the same AnyOptionValue).
  template <class AnyOptionValue>
  void printOptionDiff(const Option &O, const AnyOptionValue &V,
                       const AnyOptionValue &Default,
                       size_t GlobalWidth) const {
    printGenericOptionDiff(O, V, Default, GlobalWidth);
  }

  void initialize() {}

  void getExtraOptionNames(SmallVectorImpl<const char *> &OptionNames) {
    // If there has been no argstr specified, that means that we need to add an
    // argument for every possible option.  This ensures that our options are
    // vectored to us.
    if (!Owner.hasArgStr())
      for (unsigned i = 0, e = getNumOptions(); i != e; ++i)
        OptionNames.push_back(getOption(i));
  }

  enum ValueExpected getValueExpectedFlagDefault() const {
    // If there is an ArgStr specified, then we are of the form:
    //
    //    -opt=O2   or   -opt O2  or  -optO2
    //
    // In which case, the value is required.  Otherwise if an arg str has not
    // been specified, we are of the form:
    //
    //    -O2 or O2 or -la (where -l and -a are separate options)
    //
    // If this is the case, we cannot allow a value.
    //
    if (Owner.hasArgStr())
      return ValueRequired;
    else
      return ValueDisallowed;
  }

  // findOption - Return the option number corresponding to the specified
  // argument string.  If the option is not found, getNumOptions() is returned.
  //
  unsigned findOption(const char *Name);

protected:
  Option &Owner;
};

// Default parser implementation - This implementation depends on having a
// mapping of recognized options to values of some sort.  In addition to this,
// each entry in the mapping also tracks a help message that is printed with the
// command line option for -help.  Because this is a simple mapping parser, the
// data type can be any unsupported type.
//
template <class DataType> class parser : public generic_parser_base {
protected:
  class OptionInfo : public GenericOptionInfo {
  public:
    OptionInfo(const char *name, DataType v, const char *helpStr)
        : GenericOptionInfo(name, helpStr), V(v) {}
    OptionValue<DataType> V;
  };
  SmallVector<OptionInfo, 8> Values;

public:
  parser(Option &O) : generic_parser_base(O) {}
  typedef DataType parser_data_type;

  // Implement virtual functions needed by generic_parser_base
  unsigned getNumOptions() const override { return unsigned(Values.size()); }
  const char *getOption(unsigned N) const override { return Values[N].Name; }
  const char *getDescription(unsigned N) const override {
    return Values[N].HelpStr;
  }

  // getOptionValue - Return the value of option name N.
  const GenericOptionValue &getOptionValue(unsigned N) const override {
    return Values[N].V;
  }

  // parse - Return true on error.
  bool parse(Option &O, StringRef ArgName, StringRef Arg, DataType &V) {
    // With an ArgStr the literal comes from the value ("-opt=foo"); without
    // one, the option name itself is the literal ("-foo").
    StringRef ArgVal;
    if (Owner.hasArgStr())
      ArgVal = Arg;
    else
      ArgVal = ArgName;

    for (size_t i = 0, e = Values.size(); i != e; ++i)
      if (Values[i].Name == ArgVal) {
        V = Values[i].V.getValue();
        return false;
      }

    return O.error("Cannot find option named '" + ArgVal + "'!");
  }

  /// addLiteralOption - Add an entry to the mapping table.
  ///
  template <class DT>
  void addLiteralOption(const char *Name, const DT &V, const char *HelpStr) {
    assert(findOption(Name) == Values.size() && "Option already exists!");
    OptionInfo X(Name, static_cast<DataType>(V), HelpStr);
    Values.push_back(X);
    AddLiteralOption(Owner, Name);
  }

  /// removeLiteralOption - Remove the specified option.
  ///
  void removeLiteralOption(const char *Name) {
    unsigned N = findOption(Name);
    assert(N != Values.size() && "Option not found!");
    Values.erase(Values.begin() + N);
  }
};

//--------------------------------------------------
// basic_parser - Super class of parsers to provide boilerplate code
//
class basic_parser_impl { // non-template implementation of basic_parser<t>
public:
  basic_parser_impl(Option &O) {}

  enum ValueExpected getValueExpectedFlagDefault() const {
    return ValueRequired;
  }

  void getExtraOptionNames(SmallVectorImpl<const char *> &) {}

  void initialize() {}

  // Return the width of the option tag for printing...
  size_t getOptionWidth(const Option &O) const;

  // printOptionInfo - Print out information about this option.  The
  // to-be-maintained width is specified.
  //
  void printOptionInfo(const Option &O, size_t GlobalWidth) const;

  // printOptionNoValue - Print a placeholder for options that don't yet support
  // printOptionDiff().
  void printOptionNoValue(const Option &O, size_t GlobalWidth) const;

  // getValueName - Overload in subclass to provide a better default value.
  virtual const char *getValueName() const { return "value"; }

  // An out-of-line virtual method to provide a 'home' for this class.
  virtual void anchor();

protected:
  ~basic_parser_impl() = default;

  // A helper for basic_parser::printOptionDiff.
  void printOptionName(const Option &O, size_t GlobalWidth) const;
};

// basic_parser - The real basic parser is just a template wrapper that provides
// a typedef for the provided data type.
//
template <class DataType> class basic_parser : public basic_parser_impl {
public:
  basic_parser(Option &O) : basic_parser_impl(O) {}
  typedef DataType parser_data_type;
  typedef OptionValue<DataType> OptVal;

protected:
  // Workaround Clang PR22793
  ~basic_parser() {}
};

//--------------------------------------------------
// parser<bool>
//
template <> class parser<bool> final : public basic_parser<bool> {
public:
  parser(Option &O) : basic_parser(O) {}

  // parse - Return true on error.
  bool parse(Option &O, StringRef ArgName, StringRef Arg, bool &Val);

  void initialize() {}

  // Boolean flags may appear bare ("-flag") or with a value ("-flag=true").
  enum ValueExpected getValueExpectedFlagDefault() const {
    return ValueOptional;
  }

  // getValueName - Do not print =<value> at all.
  const char *getValueName() const override { return nullptr; }

  void printOptionDiff(const Option &O, bool V, OptVal Default,
                       size_t GlobalWidth) const;

  // An out-of-line virtual method to provide a 'home' for this class.
  void anchor() override;
};

extern template class basic_parser<bool>;

//--------------------------------------------------
// parser<boolOrDefault>
template <>
class parser<boolOrDefault> final : public basic_parser<boolOrDefault> {
public:
  parser(Option &O) : basic_parser(O) {}

  // parse - Return true on error.
  bool parse(Option &O, StringRef ArgName, StringRef Arg, boolOrDefault &Val);

  enum ValueExpected getValueExpectedFlagDefault() const {
    return ValueOptional;
  }

  // getValueName - Do not print =<value> at all.
  const char *getValueName() const override { return nullptr; }

  void printOptionDiff(const Option &O, boolOrDefault V, OptVal Default,
                       size_t GlobalWidth) const;

  // An out-of-line virtual method to provide a 'home' for this class.
  void anchor() override;
};

extern template class basic_parser<boolOrDefault>;

//--------------------------------------------------
// parser<int>
//
template <> class parser<int> final : public basic_parser<int> {
public:
  parser(Option &O) : basic_parser(O) {}

  // parse - Return true on error.
  bool parse(Option &O, StringRef ArgName, StringRef Arg, int &Val);

  // getValueName - Overload in subclass to provide a better default value.
  const char *getValueName() const override { return "int"; }

  void printOptionDiff(const Option &O, int V, OptVal Default,
                       size_t GlobalWidth) const;

  // An out-of-line virtual method to provide a 'home' for this class.
  void anchor() override;
};

extern template class basic_parser<int>;

//--------------------------------------------------
// parser<unsigned>
//
template <> class parser<unsigned> final : public basic_parser<unsigned> {
public:
  parser(Option &O) : basic_parser(O) {}

  // parse - Return true on error.
  bool parse(Option &O, StringRef ArgName, StringRef Arg, unsigned &Val);

  // getValueName - Overload in subclass to provide a better default value.
  const char *getValueName() const override { return "uint"; }

  void printOptionDiff(const Option &O, unsigned V, OptVal Default,
                       size_t GlobalWidth) const;

  // An out-of-line virtual method to provide a 'home' for this class.
  void anchor() override;
};

extern template class basic_parser<unsigned>;

//--------------------------------------------------
// parser<unsigned long long>
//
template <>
class parser<unsigned long long> final
    : public basic_parser<unsigned long long> {
public:
  parser(Option &O) : basic_parser(O) {}

  // parse - Return true on error.
  bool parse(Option &O, StringRef ArgName, StringRef Arg,
             unsigned long long &Val);

  // getValueName - Overload in subclass to provide a better default value.
  const char *getValueName() const override { return "uint"; }

  void printOptionDiff(const Option &O, unsigned long long V, OptVal Default,
                       size_t GlobalWidth) const;

  // An out-of-line virtual method to provide a 'home' for this class.
  void anchor() override;
};

extern template class basic_parser<unsigned long long>;

//--------------------------------------------------
// parser<double>
//
template <> class parser<double> final : public basic_parser<double> {
public:
  parser(Option &O) : basic_parser(O) {}

  // parse - Return true on error.
  bool parse(Option &O, StringRef ArgName, StringRef Arg, double &Val);

  // getValueName - Overload in subclass to provide a better default value.
  const char *getValueName() const override { return "number"; }

  void printOptionDiff(const Option &O, double V, OptVal Default,
                       size_t GlobalWidth) const;

  // An out-of-line virtual method to provide a 'home' for this class.
  void anchor() override;
};

extern template class basic_parser<double>;

//--------------------------------------------------
// parser<float>
//
template <> class parser<float> final : public basic_parser<float> {
public:
  parser(Option &O) : basic_parser(O) {}

  // parse - Return true on error.
  bool parse(Option &O, StringRef ArgName, StringRef Arg, float &Val);

  // getValueName - Overload in subclass to provide a better default value.
  const char *getValueName() const override { return "number"; }

  void printOptionDiff(const Option &O, float V, OptVal Default,
                       size_t GlobalWidth) const;

  // An out-of-line virtual method to provide a 'home' for this class.
  void anchor() override;
};

extern template class basic_parser<float>;

//--------------------------------------------------
// parser<std::string>
//
template <> class parser<std::string> final : public basic_parser<std::string> {
public:
  parser(Option &O) : basic_parser(O) {}

  // parse - Return true on error.  Strings are accepted verbatim.
  bool parse(Option &, StringRef, StringRef Arg, std::string &Value) {
    Value = Arg.str();
    return false;
  }

  // getValueName - Overload in subclass to provide a better default value.
  const char *getValueName() const override { return "string"; }

  void printOptionDiff(const Option &O, StringRef V, OptVal Default,
                       size_t GlobalWidth) const;

  // An out-of-line virtual method to provide a 'home' for this class.
  void anchor() override;
};

extern template class basic_parser<std::string>;

//--------------------------------------------------
// parser<char>
//
template <> class parser<char> final : public basic_parser<char> {
public:
  parser(Option &O) : basic_parser(O) {}

  // parse - Return true on error.  Takes the first character of the argument.
  // NOTE(review): Arg[0] on an empty argument is out-of-range for StringRef —
  // presumably callers never pass an empty value here; verify upstream.
  bool parse(Option &, StringRef, StringRef Arg, char &Value) {
    Value = Arg[0];
    return false;
  }

  // getValueName - Overload in subclass to provide a better default value.
  const char *getValueName() const override { return "char"; }

  void printOptionDiff(const Option &O, char V, OptVal Default,
                       size_t GlobalWidth) const;

  // An out-of-line virtual method to provide a 'home' for this class.
  void anchor() override;
};

extern template class basic_parser<char>;

//--------------------------------------------------
// PrintOptionDiff
//
// This collection of wrappers is the intermediary between class opt and class
// parser to handle all the template nastiness.

// This overloaded function is selected by the generic parser.
template <class ParserClass, class DT>
void printOptionDiff(const Option &O, const generic_parser_base &P, const DT &V,
                     const OptionValue<DT> &Default, size_t GlobalWidth) {
  OptionValue<DT> OV = V;
  P.printOptionDiff(O, OV, Default, GlobalWidth);
}

// This is instantiated for basic parsers when the parsed value has a different
// type than the option value. e.g. HelpPrinter.
template <class ParserDT, class ValDT> struct OptionDiffPrinter {
  void print(const Option &O, const parser<ParserDT> &P, const ValDT & /*V*/,
             const OptionValue<ValDT> & /*Default*/, size_t GlobalWidth) {
    P.printOptionNoValue(O, GlobalWidth);
  }
};

// This is instantiated for basic parsers when the parsed value has the same
// type as the option value.
template <class DT> struct OptionDiffPrinter<DT, DT> {
  void print(const Option &O, const parser<DT> &P, const DT &V,
             const OptionValue<DT> &Default, size_t GlobalWidth) {
    P.printOptionDiff(O, V, Default, GlobalWidth);
  }
};

// This overloaded function is selected by the basic parser, which may parse a
// different type than the option type.
template <class ParserClass, class ValDT>
void printOptionDiff(
    const Option &O,
    const basic_parser<typename ParserClass::parser_data_type> &P,
    const ValDT &V, const OptionValue<ValDT> &Default, size_t GlobalWidth) {
  // Dispatch on whether parser and option data types match; the mismatched
  // case can only print a placeholder (see OptionDiffPrinter above).
  OptionDiffPrinter<typename ParserClass::parser_data_type, ValDT> printer;
  printer.print(O, static_cast<const ParserClass &>(P), V, Default,
                GlobalWidth);
}

//===----------------------------------------------------------------------===//
// applicator class - This class is used because we must use partial
// specialization to handle literal string arguments specially (const char* does
// not correctly respond to the apply method).  Because the syntax to use this
// is a pain, we have the 'apply' method below to handle the nastiness...
//
template <class Mod> struct applicator {
  template <class Opt> static void opt(const Mod &M, Opt &O) { M.apply(O); }
};

// Handle const char* as a special case...
template <unsigned n> struct applicator<char[n]> { template <class Opt> static void opt(const char *Str, Opt &O) { O.setArgStr(Str); } }; template <unsigned n> struct applicator<const char[n]> { template <class Opt> static void opt(const char *Str, Opt &O) { O.setArgStr(Str); } }; template <> struct applicator<const char *> { template <class Opt> static void opt(const char *Str, Opt &O) { O.setArgStr(Str); } }; template <> struct applicator<NumOccurrencesFlag> { static void opt(NumOccurrencesFlag N, Option &O) { O.setNumOccurrencesFlag(N); } }; template <> struct applicator<ValueExpected> { static void opt(ValueExpected VE, Option &O) { O.setValueExpectedFlag(VE); } }; template <> struct applicator<OptionHidden> { static void opt(OptionHidden OH, Option &O) { O.setHiddenFlag(OH); } }; template <> struct applicator<FormattingFlags> { static void opt(FormattingFlags FF, Option &O) { O.setFormattingFlag(FF); } }; template <> struct applicator<MiscFlags> { static void opt(MiscFlags MF, Option &O) { O.setMiscFlag(MF); } }; // apply method - Apply modifiers to an option in a type safe way. template <class Opt, class Mod, class... Mods> void apply(Opt *O, const Mod &M, const Mods &... Ms) { applicator<Mod>::opt(M, *O); apply(O, Ms...); } template <class Opt, class Mod> void apply(Opt *O, const Mod &M) { applicator<Mod>::opt(M, *O); } //===----------------------------------------------------------------------===// // opt_storage class // Default storage class definition: external storage. This implementation // assumes the user will specify a variable to store the data into with the // cl::location(x) modifier. // template <class DataType, bool ExternalStorage, bool isClass> class opt_storage { DataType *Location; // Where to store the object... OptionValue<DataType> Default; void check_location() const { assert(Location && "cl::location(...) 
not specified for a command " "line option with external storage, " "or cl::init specified before cl::location()!!"); } public: opt_storage() : Location(nullptr) {} bool setLocation(Option &O, DataType &L) { if (Location) return O.error("cl::location(x) specified more than once!"); Location = &L; Default = L; return false; } template <class T> void setValue(const T &V, bool initial = false) { check_location(); *Location = V; if (initial) Default = V; } DataType &getValue() { check_location(); return *Location; } const DataType &getValue() const { check_location(); return *Location; } operator DataType() const { return this->getValue(); } const OptionValue<DataType> &getDefault() const { return Default; } }; // Define how to hold a class type object, such as a string. Since we can // inherit from a class, we do so. This makes us exactly compatible with the // object in all cases that it is used. // template <class DataType> class opt_storage<DataType, false, true> : public DataType { public: OptionValue<DataType> Default; template <class T> void setValue(const T &V, bool initial = false) { DataType::operator=(V); if (initial) Default = V; } DataType &getValue() { return *this; } const DataType &getValue() const { return *this; } const OptionValue<DataType> &getDefault() const { return Default; } }; // Define a partial specialization to handle things we cannot inherit from. In // this case, we store an instance through containment, and overload operators // to get at the value. // template <class DataType> class opt_storage<DataType, false, false> { public: DataType Value; OptionValue<DataType> Default; // Make sure we initialize the value with the default constructor for the // type. 
opt_storage() : Value(DataType()), Default(DataType()) {} template <class T> void setValue(const T &V, bool initial = false) { Value = V; if (initial) Default = V; } DataType &getValue() { return Value; } DataType getValue() const { return Value; } const OptionValue<DataType> &getDefault() const { return Default; } operator DataType() const { return getValue(); } // If the datatype is a pointer, support -> on it. DataType operator->() const { return Value; } }; //===----------------------------------------------------------------------===// // opt - A scalar command line option. // template <class DataType, bool ExternalStorage = false, class ParserClass = parser<DataType>> class opt : public Option, public opt_storage<DataType, ExternalStorage, std::is_class<DataType>::value> { ParserClass Parser; bool handleOccurrence(unsigned pos, StringRef ArgName, StringRef Arg) override { typename ParserClass::parser_data_type Val = typename ParserClass::parser_data_type(); if (Parser.parse(*this, ArgName, Arg, Val)) return true; // Parse error! this->setValue(Val); this->setPosition(pos); return false; } enum ValueExpected getValueExpectedFlagDefault() const override { return Parser.getValueExpectedFlagDefault(); } void getExtraOptionNames(SmallVectorImpl<const char *> &OptionNames) override { return Parser.getExtraOptionNames(OptionNames); } // Forward printing stuff to the parser... 
size_t getOptionWidth() const override { return Parser.getOptionWidth(*this); } void printOptionInfo(size_t GlobalWidth) const override { Parser.printOptionInfo(*this, GlobalWidth); } void printOptionValue(size_t GlobalWidth, bool Force) const override { if (Force || this->getDefault().compare(this->getValue())) { cl::printOptionDiff<ParserClass>(*this, Parser, this->getValue(), this->getDefault(), GlobalWidth); } } void done() { addArgument(); Parser.initialize(); } // Command line options should not be copyable opt(const opt &) = delete; opt &operator=(const opt &) = delete; public: // setInitialValue - Used by the cl::init modifier... void setInitialValue(const DataType &V) { this->setValue(V, true); } ParserClass &getParser() { return Parser; } template <class T> DataType &operator=(const T &Val) { this->setValue(Val); return this->getValue(); } template <class... Mods> explicit opt(const Mods &... Ms) : Option(Optional, NotHidden), Parser(*this) { apply(this, Ms...); done(); } }; extern template class opt<unsigned>; extern template class opt<int>; extern template class opt<std::string>; extern template class opt<char>; extern template class opt<bool>; //===----------------------------------------------------------------------===// // list_storage class // Default storage class definition: external storage. This implementation // assumes the user will specify a variable to store the data into with the // cl::location(x) modifier. // template <class DataType, class StorageClass> class list_storage { StorageClass *Location; // Where to store the object... public: list_storage() : Location(0) {} bool setLocation(Option &O, StorageClass &L) { if (Location) return O.error("cl::location(x) specified more than once!"); Location = &L; return false; } template <class T> void addValue(const T &V) { assert(Location != 0 && "cl::location(...) 
not specified for a command " "line option with external storage!"); Location->push_back(V); } }; // Define how to hold a class type object, such as a string. // Originally this code inherited from std::vector. In transitioning to a new // API for command line options we should change this. The new implementation // of this list_storage specialization implements the minimum subset of the // std::vector API required for all the current clients. // // FIXME: Reduce this API to a more narrow subset of std::vector // template <class DataType> class list_storage<DataType, bool> { std::vector<DataType> Storage; public: typedef typename std::vector<DataType>::iterator iterator; iterator begin() { return Storage.begin(); } iterator end() { return Storage.end(); } typedef typename std::vector<DataType>::const_iterator const_iterator; const_iterator begin() const { return Storage.begin(); } const_iterator end() const { return Storage.end(); } typedef typename std::vector<DataType>::size_type size_type; size_type size() const { return Storage.size(); } bool empty() const { return Storage.empty(); } void push_back(const DataType &value) { Storage.push_back(value); } void push_back(DataType &&value) { Storage.push_back(value); } typedef typename std::vector<DataType>::reference reference; typedef typename std::vector<DataType>::const_reference const_reference; reference operator[](size_type pos) { return Storage[pos]; } const_reference operator[](size_type pos) const { return Storage[pos]; } iterator erase(const_iterator pos) { return Storage.erase(pos); } iterator erase(const_iterator first, const_iterator last) { return Storage.erase(first, last); } iterator erase(iterator pos) { return Storage.erase(pos); } iterator erase(iterator first, iterator last) { return Storage.erase(first, last); } iterator insert(const_iterator pos, const DataType &value) { return Storage.insert(pos, value); } iterator insert(const_iterator pos, DataType &&value) { return Storage.insert(pos, 
value); } iterator insert(iterator pos, const DataType &value) { return Storage.insert(pos, value); } iterator insert(iterator pos, DataType &&value) { return Storage.insert(pos, value); } reference front() { return Storage.front(); } const_reference front() const { return Storage.front(); } operator std::vector<DataType>&() { return Storage; } operator ArrayRef<DataType>() { return Storage; } std::vector<DataType> *operator&() { return &Storage; } const std::vector<DataType> *operator&() const { return &Storage; } template <class T> void addValue(const T &V) { Storage.push_back(V); } }; //===----------------------------------------------------------------------===// // list - A list of command line options. // template <class DataType, class StorageClass = bool, class ParserClass = parser<DataType>> class list : public Option, public list_storage<DataType, StorageClass> { std::vector<unsigned> Positions; ParserClass Parser; enum ValueExpected getValueExpectedFlagDefault() const override { return Parser.getValueExpectedFlagDefault(); } void getExtraOptionNames(SmallVectorImpl<const char *> &OptionNames) override { return Parser.getExtraOptionNames(OptionNames); } bool handleOccurrence(unsigned pos, StringRef ArgName, StringRef Arg) override { typename ParserClass::parser_data_type Val = typename ParserClass::parser_data_type(); if (Parser.parse(*this, ArgName, Arg, Val)) return true; // Parse Error! list_storage<DataType, StorageClass>::addValue(Val); setPosition(pos); Positions.push_back(pos); return false; } // Forward printing stuff to the parser... size_t getOptionWidth() const override { return Parser.getOptionWidth(*this); } void printOptionInfo(size_t GlobalWidth) const override { Parser.printOptionInfo(*this, GlobalWidth); } // Unimplemented: list options don't currently store their default value. 
void printOptionValue(size_t /*GlobalWidth*/, bool /*Force*/) const override { } void done() { addArgument(); Parser.initialize(); } // Command line options should not be copyable list(const list &) = delete; list &operator=(const list &) = delete; public: ParserClass &getParser() { return Parser; } unsigned getPosition(unsigned optnum) const { assert(optnum < this->size() && "Invalid option index"); return Positions[optnum]; } void setNumAdditionalVals(unsigned n) { Option::setNumAdditionalVals(n); } template <class... Mods> explicit list(const Mods &... Ms) : Option(ZeroOrMore, NotHidden), Parser(*this) { apply(this, Ms...); done(); } }; // multi_val - Modifier to set the number of additional values. struct multi_val { unsigned AdditionalVals; explicit multi_val(unsigned N) : AdditionalVals(N) {} template <typename D, typename S, typename P> void apply(list<D, S, P> &L) const { L.setNumAdditionalVals(AdditionalVals); } }; //===----------------------------------------------------------------------===// // bits_storage class // Default storage class definition: external storage. This implementation // assumes the user will specify a variable to store the data into with the // cl::location(x) modifier. // template <class DataType, class StorageClass> class bits_storage { unsigned *Location; // Where to store the bits... template <class T> static unsigned Bit(const T &V) { unsigned BitPos = reinterpret_cast<unsigned>(V); assert(BitPos < sizeof(unsigned) * CHAR_BIT && "enum exceeds width of bit vector!"); return 1 << BitPos; } public: bits_storage() : Location(nullptr) {} bool setLocation(Option &O, unsigned &L) { if (Location) return O.error("cl::location(x) specified more than once!"); Location = &L; return false; } template <class T> void addValue(const T &V) { assert(Location != 0 && "cl::location(...) 
not specified for a command " "line option with external storage!"); *Location |= Bit(V); } unsigned getBits() { return *Location; } template <class T> bool isSet(const T &V) { return (*Location & Bit(V)) != 0; } }; // Define how to hold bits. Since we can inherit from a class, we do so. // This makes us exactly compatible with the bits in all cases that it is used. // template <class DataType> class bits_storage<DataType, bool> { unsigned Bits; // Where to store the bits... template <class T> static unsigned Bit(const T &V) { unsigned BitPos = (unsigned)V; assert(BitPos < sizeof(unsigned) * CHAR_BIT && "enum exceeds width of bit vector!"); return 1 << BitPos; } public: template <class T> void addValue(const T &V) { Bits |= Bit(V); } unsigned getBits() { return Bits; } template <class T> bool isSet(const T &V) { return (Bits & Bit(V)) != 0; } }; //===----------------------------------------------------------------------===// // bits - A bit vector of command options. // template <class DataType, class Storage = bool, class ParserClass = parser<DataType>> class bits : public Option, public bits_storage<DataType, Storage> { std::vector<unsigned> Positions; ParserClass Parser; enum ValueExpected getValueExpectedFlagDefault() const override { return Parser.getValueExpectedFlagDefault(); } void getExtraOptionNames(SmallVectorImpl<const char *> &OptionNames) override { return Parser.getExtraOptionNames(OptionNames); } bool handleOccurrence(unsigned pos, StringRef ArgName, StringRef Arg) override { typename ParserClass::parser_data_type Val = typename ParserClass::parser_data_type(); if (Parser.parse(*this, ArgName, Arg, Val)) return true; // Parse Error! this->addValue(Val); setPosition(pos); Positions.push_back(pos); return false; } // Forward printing stuff to the parser... 
size_t getOptionWidth() const override { return Parser.getOptionWidth(*this); } void printOptionInfo(size_t GlobalWidth) const override { Parser.printOptionInfo(*this, GlobalWidth); } // Unimplemented: bits options don't currently store their default values. void printOptionValue(size_t /*GlobalWidth*/, bool /*Force*/) const override { } void done() { addArgument(); Parser.initialize(); } // Command line options should not be copyable bits(const bits &) = delete; bits &operator=(const bits &) = delete; public: ParserClass &getParser() { return Parser; } unsigned getPosition(unsigned optnum) const { assert(optnum < this->size() && "Invalid option index"); return Positions[optnum]; } template <class... Mods> explicit bits(const Mods &... Ms) : Option(ZeroOrMore, NotHidden), Parser(*this) { apply(this, Ms...); done(); } }; //===----------------------------------------------------------------------===// // Aliased command line option (alias this name to a preexisting name) // class alias : public Option { Option *AliasFor; bool handleOccurrence(unsigned pos, StringRef /*ArgName*/, StringRef Arg) override { return AliasFor->handleOccurrence(pos, AliasFor->ArgStr, Arg); } bool addOccurrence(unsigned pos, StringRef /*ArgName*/, StringRef Value, bool MultiArg = false) override { return AliasFor->addOccurrence(pos, AliasFor->ArgStr, Value, MultiArg); } // Handle printing stuff... size_t getOptionWidth() const override; void printOptionInfo(size_t GlobalWidth) const override; // Aliases do not need to print their values. 
void printOptionValue(size_t /*GlobalWidth*/, bool /*Force*/) const override { } ValueExpected getValueExpectedFlagDefault() const override { return AliasFor->getValueExpectedFlag(); } void done() { if (!hasArgStr()) error("cl::alias must have argument name specified!"); if (!AliasFor) error("cl::alias must have an cl::aliasopt(option) specified!"); addArgument(); } // Command line options should not be copyable alias(const alias &) = delete; alias &operator=(const alias &) = delete; public: void setAliasFor(Option &O) { if (AliasFor) error("cl::alias must only have one cl::aliasopt(...) specified!"); AliasFor = &O; } template <class... Mods> explicit alias(const Mods &... Ms) : Option(Optional, Hidden), AliasFor(nullptr) { apply(this, Ms...); done(); } }; // aliasfor - Modifier to set the option an alias aliases. struct aliasopt { Option &Opt; explicit aliasopt(Option &O) : Opt(O) {} void apply(alias &A) const { A.setAliasFor(Opt); } }; // extrahelp - provide additional help at the end of the normal help // output. All occurrences of cl::extrahelp will be accumulated and // printed to stderr at the end of the regular help, just before // exit is called. struct extrahelp { const char *morehelp; explicit extrahelp(const char *help); }; void PrintVersionMessage(); /// This function just prints the help message, exactly the same way as if the /// -help or -help-hidden option had been given on the command line. /// /// NOTE: THIS FUNCTION TERMINATES THE PROGRAM! /// /// \param Hidden if true will print hidden options /// \param Categorized if true print options in categories void PrintHelpMessage(bool Hidden = false, bool Categorized = false); //===----------------------------------------------------------------------===// // Public interface for accessing registered options. // /// \brief Use this to get a StringMap to all registered named options /// (e.g. -help). Note \p Map Should be an empty StringMap. 
///
/// \return A reference to the StringMap used by the cl APIs to parse options.
///
/// Access to unnamed arguments (i.e. positional) are not provided because
/// it is expected that the client already has access to these.
///
/// Typical usage:
/// \code
/// main(int argc,char* argv[]) {
/// StringMap<llvm::cl::Option*> &opts = llvm::cl::getRegisteredOptions();
/// assert(opts.count("help") == 1)
/// opts["help"]->setDescription("Show alphabetical help information")
/// // More code
/// llvm::cl::ParseCommandLineOptions(argc,argv);
/// //More code
/// }
/// \endcode
///
/// This interface is useful for modifying options in libraries that are out of
/// the control of the client. The options should be modified before calling
/// llvm::cl::ParseCommandLineOptions().
///
/// Hopefully this API can be deprecated soon. Any situation where options need
/// to be modified by tools or libraries should be handled by sane APIs rather
/// than just handing around a global list.
StringMap<Option *> &getRegisteredOptions();

//
//
///////////////////////////////////////////////////////////////////////////////
// Standalone command line processing utilities.
//

/// \brief Tokenizes a command line that can contain escapes and quotes.
//
/// The quoting rules match those used by GCC and other tools that use
/// libiberty's buildargv() or expandargv() utilities, and do not match bash.
/// They differ from buildargv() on treatment of backslashes that do not escape
/// a special character to make it possible to accept most Windows file paths.
///
/// \param [in] Source The string to be split on whitespace with quotes.
/// \param [in] Saver Delegates back to the caller for saving parsed strings.
/// \param [in] MarkEOLs true if tokenizing a response file and you want end of
/// lines and end of the response file to be marked with a nullptr string.
/// \param [out] NewArgv All parsed strings are appended to NewArgv.
void TokenizeGNUCommandLine(StringRef Source, StringSaver &Saver,
                            SmallVectorImpl<const char *> &NewArgv,
                            bool MarkEOLs = false);

/// \brief Tokenizes a Windows command line which may contain quotes and
/// escaped quotes.
///
/// See MSDN docs for CommandLineToArgvW for information on the quoting rules.
/// http://msdn.microsoft.com/en-us/library/windows/desktop/17w5ykft(v=vs.85).aspx
///
/// \param [in] Source The string to be split on whitespace with quotes.
/// \param [in] Saver Delegates back to the caller for saving parsed strings.
/// \param [in] MarkEOLs true if tokenizing a response file and you want end of
/// lines and end of the response file to be marked with a nullptr string.
/// \param [out] NewArgv All parsed strings are appended to NewArgv.
void TokenizeWindowsCommandLine(StringRef Source, StringSaver &Saver,
                                SmallVectorImpl<const char *> &NewArgv,
                                bool MarkEOLs = false);

/// \brief String tokenization function type.  Should be compatible with
/// either Windows or Unix command line tokenizers.
typedef void (*TokenizerCallback)(StringRef Source, StringSaver &Saver,
                                  SmallVectorImpl<const char *> &NewArgv,
                                  bool MarkEOLs);

/// \brief Expand response files on a command line recursively using the given
/// StringSaver and tokenization strategy. Argv should contain the command line
/// before expansion and will be modified in place. If requested, Argv will
/// also be populated with nullptrs indicating where each response file line
/// ends, which is useful for the "/link" argument that needs to consume all
/// remaining arguments only until the next end of line, when in a response
/// file.
///
/// \param [in] Saver Delegates back to the caller for saving parsed strings.
/// \param [in] Tokenizer Tokenization strategy. Typically Unix or Windows.
/// \param [in,out] Argv Command line into which to expand response files.
/// \param [in] MarkEOLs Mark end of lines and the end of the response file
/// with nullptrs in the Argv vector.
/// \return true if all @files were expanded successfully or there were none.
bool ExpandResponseFiles(StringSaver &Saver, TokenizerCallback Tokenizer,
                         SmallVectorImpl<const char *> &Argv,
                         bool MarkEOLs = false);

/// \brief Mark all options not part of this category as cl::ReallyHidden.
///
/// \param Category the category of options to keep displaying
///
/// Some tools (like clang-format) like to be able to hide all options that are
/// not specific to the tool. This function allows a tool to specify a single
/// option category to display in the -help output.
void HideUnrelatedOptions(cl::OptionCategory &Category);

/// \brief Mark all options not part of the categories as cl::ReallyHidden.
///
/// \param Categories the categories of options to keep displaying.
///
/// Some tools (like clang-format) like to be able to hide all options that are
/// not specific to the tool. This overload allows a tool to specify several
/// option categories to display in the -help output.
void HideUnrelatedOptions(ArrayRef<const cl::OptionCategory *> Categories);

} // End namespace cl
} // End namespace llvm

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/GCOV.h
//===- GCOV.h - LLVM coverage tool ----------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This header provides the interface to read and write coverage files that // use 'gcov' format. // //===----------------------------------------------------------------------===// #ifndef LLVM_SUPPORT_GCOV_H #define LLVM_SUPPORT_GCOV_H #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/iterator.h" #include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/raw_ostream.h" namespace llvm { class GCOVFunction; class GCOVBlock; class FileInfo; namespace GCOV { enum GCOVVersion { V402, V404 }; } // end GCOV namespace /// GCOVOptions - A struct for passing gcov options between functions. struct GCOVOptions { GCOVOptions(bool A, bool B, bool C, bool F, bool P, bool U, bool L, bool N) : AllBlocks(A), BranchInfo(B), BranchCount(C), FuncCoverage(F), PreservePaths(P), UncondBranch(U), LongFileNames(L), NoOutput(N) {} bool AllBlocks; bool BranchInfo; bool BranchCount; bool FuncCoverage; bool PreservePaths; bool UncondBranch; bool LongFileNames; bool NoOutput; }; /// GCOVBuffer - A wrapper around MemoryBuffer to provide GCOV specific /// read operations. class GCOVBuffer { public: GCOVBuffer(MemoryBuffer *B) : Buffer(B), Cursor(0) {} /// readGCNOFormat - Check GCNO signature is valid at the beginning of buffer. bool readGCNOFormat() { StringRef File = Buffer->getBuffer().slice(0, 4); if (File != "oncg") { errs() << "Unexpected file type: " << File << ".\n"; return false; } Cursor = 4; return true; } /// readGCDAFormat - Check GCDA signature is valid at the beginning of buffer. 
bool readGCDAFormat() { StringRef File = Buffer->getBuffer().slice(0, 4); if (File != "adcg") { errs() << "Unexpected file type: " << File << ".\n"; return false; } Cursor = 4; return true; } /// readGCOVVersion - Read GCOV version. bool readGCOVVersion(GCOV::GCOVVersion &Version) { StringRef VersionStr = Buffer->getBuffer().slice(Cursor, Cursor + 4); if (VersionStr == "*204") { Cursor += 4; Version = GCOV::V402; return true; } if (VersionStr == "*404") { Cursor += 4; Version = GCOV::V404; return true; } errs() << "Unexpected version: " << VersionStr << ".\n"; return false; } /// readFunctionTag - If cursor points to a function tag then increment the /// cursor and return true otherwise return false. bool readFunctionTag() { StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4); if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\0' || Tag[3] != '\1') { return false; } Cursor += 4; return true; } /// readBlockTag - If cursor points to a block tag then increment the /// cursor and return true otherwise return false. bool readBlockTag() { StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4); if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\x41' || Tag[3] != '\x01') { return false; } Cursor += 4; return true; } /// readEdgeTag - If cursor points to an edge tag then increment the /// cursor and return true otherwise return false. bool readEdgeTag() { StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4); if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\x43' || Tag[3] != '\x01') { return false; } Cursor += 4; return true; } /// readLineTag - If cursor points to a line tag then increment the /// cursor and return true otherwise return false. 
bool readLineTag() { StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4); if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\x45' || Tag[3] != '\x01') { return false; } Cursor += 4; return true; } /// readArcTag - If cursor points to an gcda arc tag then increment the /// cursor and return true otherwise return false. bool readArcTag() { StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4); if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\xa1' || Tag[3] != '\1') { return false; } Cursor += 4; return true; } /// readObjectTag - If cursor points to an object summary tag then increment /// the cursor and return true otherwise return false. bool readObjectTag() { StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4); if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\0' || Tag[3] != '\xa1') { return false; } Cursor += 4; return true; } /// readProgramTag - If cursor points to a program summary tag then increment /// the cursor and return true otherwise return false. bool readProgramTag() { StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4); if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\0' || Tag[3] != '\xa3') { return false; } Cursor += 4; return true; } bool readInt(uint32_t &Val) { if (Buffer->getBuffer().size() < Cursor + 4) { errs() << "Unexpected end of memory buffer: " << Cursor + 4 << ".\n"; return false; } StringRef Str = Buffer->getBuffer().slice(Cursor, Cursor + 4); Cursor += 4; Val = *(const uint32_t *)(Str.data()); return true; } bool readInt64(uint64_t &Val) { uint32_t Lo, Hi; if (!readInt(Lo) || !readInt(Hi)) return false; Val = ((uint64_t)Hi << 32) | Lo; return true; } bool readString(StringRef &Str) { uint32_t Len = 0; // Keep reading until we find a non-zero length. This emulates gcov's // behaviour, which appears to do the same. 
while (Len == 0) if (!readInt(Len)) return false; Len *= 4; if (Buffer->getBuffer().size() < Cursor + Len) { errs() << "Unexpected end of memory buffer: " << Cursor + Len << ".\n"; return false; } Str = Buffer->getBuffer().slice(Cursor, Cursor + Len).split('\0').first; Cursor += Len; return true; } uint64_t getCursor() const { return Cursor; } void advanceCursor(uint32_t n) { Cursor += n * 4; } private: MemoryBuffer *Buffer; uint64_t Cursor; }; /// GCOVFile - Collects coverage information for one pair of coverage file /// (.gcno and .gcda). class GCOVFile { public: GCOVFile() : GCNOInitialized(false), Checksum(0), Functions(), RunCount(0), ProgramCount(0) {} bool readGCNO(GCOVBuffer &Buffer); bool readGCDA(GCOVBuffer &Buffer); uint32_t getChecksum() const { return Checksum; } void dump() const; void collectLineCounts(FileInfo &FI); private: bool GCNOInitialized; GCOV::GCOVVersion Version; uint32_t Checksum; SmallVector<std::unique_ptr<GCOVFunction>, 16> Functions; uint32_t RunCount; uint32_t ProgramCount; }; /// GCOVEdge - Collects edge information. struct GCOVEdge { GCOVEdge(GCOVBlock &S, GCOVBlock &D) : Src(S), Dst(D), Count(0) {} GCOVBlock &Src; GCOVBlock &Dst; uint64_t Count; }; /// GCOVFunction - Collects function information. 
class GCOVFunction {
public:
  // Iterates over Blocks while dereferencing through the unique_ptr, so
  // callers see GCOVBlock rather than std::unique_ptr<GCOVBlock>.
  typedef pointee_iterator<SmallVectorImpl<
      std::unique_ptr<GCOVBlock>>::const_iterator> BlockIterator;

  GCOVFunction(GCOVFile &P) : Parent(P), Ident(0), LineNumber(0) {}

  // Populate this function from the notes (.gcno) / counts (.gcda) streams.
  // Defined out of line; return false on malformed input.
  bool readGCNO(GCOVBuffer &Buffer, GCOV::GCOVVersion Version);
  bool readGCDA(GCOVBuffer &Buffer, GCOV::GCOVVersion Version);

  StringRef getName() const { return Name; }
  StringRef getFilename() const { return Filename; }
  size_t getNumBlocks() const { return Blocks.size(); }
  uint64_t getEntryCount() const;
  uint64_t getExitCount() const;

  BlockIterator block_begin() const { return Blocks.begin(); }
  BlockIterator block_end() const { return Blocks.end(); }
  iterator_range<BlockIterator> blocks() const {
    return make_range(block_begin(), block_end());
  }

  void dump() const;
  void collectLineCounts(FileInfo &FI);

private:
  GCOVFile &Parent;
  uint32_t Ident;
  uint32_t Checksum; // NOTE(review): not initialized by the constructor;
                     // presumably set by readGCNO -- confirm before use.
  uint32_t LineNumber;
  StringRef Name;
  StringRef Filename;
  SmallVector<std::unique_ptr<GCOVBlock>, 16> Blocks;
  SmallVector<std::unique_ptr<GCOVEdge>, 16> Edges;
};

/// GCOVBlock - Collects block information.
class GCOVBlock { struct EdgeWeight { EdgeWeight(GCOVBlock *D) : Dst(D), Count(0) {} GCOVBlock *Dst; uint64_t Count; }; struct SortDstEdgesFunctor { bool operator()(const GCOVEdge *E1, const GCOVEdge *E2) { return E1->Dst.Number < E2->Dst.Number; } }; public: typedef SmallVectorImpl<GCOVEdge *>::const_iterator EdgeIterator; GCOVBlock(GCOVFunction &P, uint32_t N) : Parent(P), Number(N), Counter(0), DstEdgesAreSorted(true), SrcEdges(), DstEdges(), Lines() {} ~GCOVBlock(); const GCOVFunction &getParent() const { return Parent; } void addLine(uint32_t N) { Lines.push_back(N); } uint32_t getLastLine() const { return Lines.back(); } void addCount(size_t DstEdgeNo, uint64_t N); uint64_t getCount() const { return Counter; } void addSrcEdge(GCOVEdge *Edge) { assert(&Edge->Dst == this); // up to caller to ensure edge is valid SrcEdges.push_back(Edge); } void addDstEdge(GCOVEdge *Edge) { assert(&Edge->Src == this); // up to caller to ensure edge is valid // Check if adding this edge causes list to become unsorted. 
if (DstEdges.size() && DstEdges.back()->Dst.Number > Edge->Dst.Number) DstEdgesAreSorted = false; DstEdges.push_back(Edge); } size_t getNumSrcEdges() const { return SrcEdges.size(); } size_t getNumDstEdges() const { return DstEdges.size(); } void sortDstEdges(); EdgeIterator src_begin() const { return SrcEdges.begin(); } EdgeIterator src_end() const { return SrcEdges.end(); } iterator_range<EdgeIterator> srcs() const { return make_range(src_begin(), src_end()); } EdgeIterator dst_begin() const { return DstEdges.begin(); } EdgeIterator dst_end() const { return DstEdges.end(); } iterator_range<EdgeIterator> dsts() const { return make_range(dst_begin(), dst_end()); } void dump() const; void collectLineCounts(FileInfo &FI); private: GCOVFunction &Parent; uint32_t Number; uint64_t Counter; bool DstEdgesAreSorted; SmallVector<GCOVEdge *, 16> SrcEdges; SmallVector<GCOVEdge *, 16> DstEdges; SmallVector<uint32_t, 16> Lines; }; class FileInfo { // It is unlikely--but possible--for multiple functions to be on the same // line. // Therefore this typedef allows LineData.Functions to store multiple // functions // per instance. This is rare, however, so optimize for the common case. 
typedef SmallVector<const GCOVFunction *, 1> FunctionVector; typedef DenseMap<uint32_t, FunctionVector> FunctionLines; typedef SmallVector<const GCOVBlock *, 4> BlockVector; typedef DenseMap<uint32_t, BlockVector> BlockLines; struct LineData { LineData() : LastLine(0) {} BlockLines Blocks; FunctionLines Functions; uint32_t LastLine; }; struct GCOVCoverage { GCOVCoverage(StringRef Name) : Name(Name), LogicalLines(0), LinesExec(0), Branches(0), BranchesExec(0), BranchesTaken(0) {} StringRef Name; uint32_t LogicalLines; uint32_t LinesExec; uint32_t Branches; uint32_t BranchesExec; uint32_t BranchesTaken; }; public: FileInfo(const GCOVOptions &Options) : Options(Options), LineInfo(), RunCount(0), ProgramCount(0) {} void addBlockLine(StringRef Filename, uint32_t Line, const GCOVBlock *Block) { if (Line > LineInfo[Filename].LastLine) LineInfo[Filename].LastLine = Line; LineInfo[Filename].Blocks[Line - 1].push_back(Block); } void addFunctionLine(StringRef Filename, uint32_t Line, const GCOVFunction *Function) { if (Line > LineInfo[Filename].LastLine) LineInfo[Filename].LastLine = Line; LineInfo[Filename].Functions[Line - 1].push_back(Function); } void setRunCount(uint32_t Runs) { RunCount = Runs; } void setProgramCount(uint32_t Programs) { ProgramCount = Programs; } void print(raw_ostream &OS, StringRef MainFilename, StringRef GCNOFile, StringRef GCDAFile); private: std::string getCoveragePath(StringRef Filename, StringRef MainFilename); std::unique_ptr<raw_ostream> openCoveragePath(StringRef CoveragePath); void printFunctionSummary(raw_ostream &OS, const FunctionVector &Funcs) const; void printBlockInfo(raw_ostream &OS, const GCOVBlock &Block, uint32_t LineIndex, uint32_t &BlockNo) const; void printBranchInfo(raw_ostream &OS, const GCOVBlock &Block, GCOVCoverage &Coverage, uint32_t &EdgeNo); void printUncondBranchInfo(raw_ostream &OS, uint32_t &EdgeNo, uint64_t Count) const; void printCoverage(raw_ostream &OS, const GCOVCoverage &Coverage) const; void 
printFuncCoverage(raw_ostream &OS) const; void printFileCoverage(raw_ostream &OS) const; const GCOVOptions &Options; StringMap<LineData> LineInfo; uint32_t RunCount; uint32_t ProgramCount; typedef SmallVector<std::pair<std::string, GCOVCoverage>, 4> FileCoverageList; typedef MapVector<const GCOVFunction *, GCOVCoverage> FuncCoverageMap; FileCoverageList FileCoverages; FuncCoverageMap FuncCoverages; }; } #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/ThreadLocal.h
//===- llvm/Support/ThreadLocal.h - Thread Local Data ------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file declares the llvm::sys::ThreadLocal class. // //===----------------------------------------------------------------------===// #ifndef LLVM_SUPPORT_THREADLOCAL_H #define LLVM_SUPPORT_THREADLOCAL_H #include "llvm/Support/DataTypes.h" #include "llvm/Support/Threading.h" #include <cassert> namespace llvm { namespace sys { // ThreadLocalImpl - Common base class of all ThreadLocal instantiations. // YOU SHOULD NEVER USE THIS DIRECTLY. class ThreadLocalImpl { typedef uint64_t ThreadLocalDataTy; /// \brief Platform-specific thread local data. /// /// This is embedded in the class and we avoid malloc'ing/free'ing it, /// to make this class more safe for use along with CrashRecoveryContext. union { char data[sizeof(ThreadLocalDataTy)]; ThreadLocalDataTy align_data; }; public: ThreadLocalImpl(); virtual ~ThreadLocalImpl(); void setInstance(const void* d); void *getInstance(); void removeInstance(); }; /// ThreadLocal - A class used to abstract thread-local storage. It holds, /// for each thread, a pointer a single object of type T. template<class T> class ThreadLocal : public ThreadLocalImpl { public: ThreadLocal() : ThreadLocalImpl() { } /// get - Fetches a pointer to the object associated with the current /// thread. If no object has yet been associated, it returns NULL; T* get() { return static_cast<T*>(getInstance()); } // set - Associates a pointer to an object with the current thread. void set(T* d) { setInstance(d); } // erase - Removes the pointer associated with the current thread. void erase() { removeInstance(); } }; } } #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/SwapByteOrder.h
//===- SwapByteOrder.h - Generic and optimized byte swaps -------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file declares generic and optimized functions to swap the byte order of // an integral type. // //===----------------------------------------------------------------------===// #ifndef LLVM_SUPPORT_SWAPBYTEORDER_H #define LLVM_SUPPORT_SWAPBYTEORDER_H #include "llvm/Support/Compiler.h" #include "llvm/Support/DataTypes.h" #include <cstddef> #include <limits> namespace llvm { namespace sys { /// SwapByteOrder_16 - This function returns a byte-swapped representation of /// the 16-bit argument. inline uint16_t SwapByteOrder_16(uint16_t value) { #if defined(_MSC_VER) && !defined(_DEBUG) // The DLL version of the runtime lacks these functions (bug!?), but in a // release build they're replaced with BSWAP instructions anyway. return _byteswap_ushort(value); #else uint16_t Hi = value << 8; uint16_t Lo = value >> 8; return Hi | Lo; #endif } /// SwapByteOrder_32 - This function returns a byte-swapped representation of /// the 32-bit argument. inline uint32_t SwapByteOrder_32(uint32_t value) { #if defined(__llvm__) || (LLVM_GNUC_PREREQ(4, 3, 0) && !defined(__ICC)) return __builtin_bswap32(value); #elif defined(_MSC_VER) && !defined(_DEBUG) return _byteswap_ulong(value); #else uint32_t Byte0 = value & 0x000000FF; uint32_t Byte1 = value & 0x0000FF00; uint32_t Byte2 = value & 0x00FF0000; uint32_t Byte3 = value & 0xFF000000; return (Byte0 << 24) | (Byte1 << 8) | (Byte2 >> 8) | (Byte3 >> 24); #endif } /// SwapByteOrder_64 - This function returns a byte-swapped representation of /// the 64-bit argument. 
inline uint64_t SwapByteOrder_64(uint64_t value) { #if defined(__llvm__) || (LLVM_GNUC_PREREQ(4, 3, 0) && !defined(__ICC)) return __builtin_bswap64(value); #elif defined(_MSC_VER) && !defined(_DEBUG) return _byteswap_uint64(value); #else uint64_t Hi = SwapByteOrder_32(uint32_t(value)); uint32_t Lo = SwapByteOrder_32(uint32_t(value >> 32)); return (Hi << 32) | Lo; #endif } inline unsigned char getSwappedBytes(unsigned char C) { return C; } inline signed char getSwappedBytes(signed char C) { return C; } inline char getSwappedBytes(char C) { return C; } inline unsigned short getSwappedBytes(unsigned short C) { return SwapByteOrder_16(C); } inline signed short getSwappedBytes( signed short C) { return SwapByteOrder_16(C); } inline unsigned int getSwappedBytes(unsigned int C) { return SwapByteOrder_32(C); } inline signed int getSwappedBytes( signed int C) { return SwapByteOrder_32(C); } #if __LONG_MAX__ == __INT_MAX__ inline unsigned long getSwappedBytes(unsigned long C) { return SwapByteOrder_32(C); } inline signed long getSwappedBytes( signed long C) { return SwapByteOrder_32(C); } #elif __LONG_MAX__ == __LONG_LONG_MAX__ inline unsigned long getSwappedBytes(unsigned long C) { return SwapByteOrder_64(C); } inline signed long getSwappedBytes( signed long C) { return SwapByteOrder_64(C); } #else #error "Unknown long size!" #endif inline unsigned long long getSwappedBytes(unsigned long long C) { return SwapByteOrder_64(C); } inline signed long long getSwappedBytes(signed long long C) { return SwapByteOrder_64(C); } inline float getSwappedBytes(float C) { union { uint32_t i; float f; } in, out; in.f = C; out.i = SwapByteOrder_32(in.i); return out.f; } inline double getSwappedBytes(double C) { union { uint64_t i; double d; } in, out; in.d = C; out.i = SwapByteOrder_64(in.i); return out.d; } template<typename T> inline void swapByteOrder(T &Value) { Value = getSwappedBytes(Value); } } // end namespace sys } // end namespace llvm #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/FormattedStream.h
//===-- llvm/Support/FormattedStream.h - Formatted streams ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains raw_ostream implementations for streams to do
// things like pretty-print comments.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_FORMATTEDSTREAM_H
#define LLVM_SUPPORT_FORMATTEDSTREAM_H

#include "llvm/Support/raw_ostream.h"
#include <utility>

namespace llvm {

/// formatted_raw_ostream - A raw_ostream that wraps another one and keeps track
/// of line and column position, allowing padding out to specific column
/// boundaries and querying the number of lines written to the stream.
///
class formatted_raw_ostream : public raw_ostream {
  /// TheStream - The real stream we output to. We set it to be
  /// unbuffered, since we're already doing our own buffering.
  ///
  raw_ostream *TheStream;

  /// Position - The current output column and line of the data that's
  /// been flushed and the portion of the buffer that's been
  /// scanned. The line and column scheme is zero-based.
  ///
  std::pair<unsigned, unsigned> Position;

  /// Scanned - This points to one past the last character in the
  /// buffer we've scanned.  Everything before it has already been
  /// accounted for in Position; nullptr means "nothing scanned yet".
  ///
  const char *Scanned;

  // Overrides raw_ostream's output hook; defined out of line.
  void write_impl(const char *Ptr, size_t Size) override;

  /// current_pos - Return the current position within the stream,
  /// not counting the bytes currently in the buffer.
  uint64_t current_pos() const override {
    // Our current position in the stream is all the contents which have been
    // written to the underlying stream (*not* the current position of the
    // underlying stream).
    return TheStream->tell();
  }

  /// ComputePosition - Examine the given output buffer and figure out the new
  /// position after output.  Updates Position/Scanned; defined out of line.
  ///
  void ComputePosition(const char *Ptr, size_t size);

  // setStream - Adopt Stream as the wrapped output stream, taking over its
  // buffer settings so only one layer of buffering is active.
  void setStream(raw_ostream &Stream) {
    // Detach from any previously wrapped stream first, restoring its
    // buffering configuration.
    releaseStream();

    TheStream = &Stream;

    // This formatted_raw_ostream inherits from raw_ostream, so it'll do its
    // own buffering, and it doesn't need or want TheStream to do another
    // layer of buffering underneath. Resize the buffer to what TheStream
    // had been using, and tell TheStream not to do its own buffering.
    if (size_t BufferSize = TheStream->GetBufferSize())
      SetBufferSize(BufferSize);
    else
      SetUnbuffered();
    TheStream->SetUnbuffered();

    // Nothing of the new stream's output has been scanned yet.
    Scanned = nullptr;
  }

public:
  /// formatted_raw_ostream - Construct a formatted stream that wraps the
  /// given Stream; all output is forwarded to it while line/column
  /// positions are tracked.
  ///
  /// As a side effect, the given Stream is set to be Unbuffered.
  /// This is because formatted_raw_ostream does its own buffering,
  /// so it doesn't want another layer of buffering to be happening
  /// underneath it.
  ///
  formatted_raw_ostream(raw_ostream &Stream)
      : TheStream(nullptr), Position(0, 0) {
    setStream(Stream);
  }

  /// Construct a formatted stream with no underlying stream attached yet.
  explicit formatted_raw_ostream() : TheStream(nullptr), Position(0, 0) {
    Scanned = nullptr;
  }

  ~formatted_raw_ostream() override {
    // Push any pending bytes through before giving the buffer settings back.
    flush();
    releaseStream();
  }

  /// PadToColumn - Align the output to some column number. If the current
  /// column is already equal to or more than NewCol, PadToColumn inserts one
  /// space.
  ///
  /// \param NewCol - The column to move to.
  formatted_raw_ostream &PadToColumn(unsigned NewCol);

  /// getColumn - Return the column number
  unsigned getColumn() { return Position.first; }

  /// getLine - Return the line number
  unsigned getLine() { return Position.second; }

  // Color requests are delegated to the wrapped stream but return *this so
  // chained formatted output keeps position tracking.
  raw_ostream &resetColor() override {
    TheStream->resetColor();
    return *this;
  }

  raw_ostream &reverseColor() override {
    TheStream->reverseColor();
    return *this;
  }

  raw_ostream &changeColor(enum Colors Color, bool Bold, bool BG) override {
    TheStream->changeColor(Color, Bold, BG);
    return *this;
  }

  bool is_displayed() const override { return TheStream->is_displayed(); }

private:
  void releaseStream() {
    // Transfer the buffer settings from this raw_ostream back to the underlying
    // stream.
    if (!TheStream)
      return;
    if (size_t BufferSize = GetBufferSize())
      TheStream->SetBufferSize(BufferSize);
    else
      TheStream->SetUnbuffered();
  }
};

/// fouts() - This returns a reference to a formatted_raw_ostream for
/// standard output.  Use it like: fouts() << "foo" << "bar";
formatted_raw_ostream &fouts();

/// ferrs() - This returns a reference to a formatted_raw_ostream for
/// standard error.  Use it like: ferrs() << "foo" << "bar";
formatted_raw_ostream &ferrs();

/// fdbgs() - This returns a reference to a formatted_raw_ostream for
/// debug output.  Use it like: fdbgs() << "foo" << "bar";
formatted_raw_ostream &fdbgs();

} // end llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/COFF.h
//===-- llvm/Support/COFF.h -------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains an definitions used in Windows COFF Files. // // Structures and enums defined within this file where created using // information from Microsoft's publicly available PE/COFF format document: // // Microsoft Portable Executable and Common Object File Format Specification // Revision 8.1 - February 15, 2008 // // As of 5/2/2010, hosted by Microsoft at: // http://www.microsoft.com/whdc/system/platform/firmware/pecoff.mspx // //===----------------------------------------------------------------------===// #ifndef LLVM_SUPPORT_COFF_H #define LLVM_SUPPORT_COFF_H #include "llvm/Support/DataTypes.h" #include <cassert> #include <cstring> namespace llvm { namespace COFF { // The maximum number of sections that a COFF object can have (inclusive). const int32_t MaxNumberOfSections16 = 65279; // The PE signature bytes that follows the DOS stub header. static const char PEMagic[] = { 'P', 'E', '\0', '\0' }; static const char BigObjMagic[] = { '\xc7', '\xa1', '\xba', '\xd1', '\xee', '\xba', '\xa9', '\x4b', '\xaf', '\x20', '\xfa', '\xf6', '\x6a', '\xa4', '\xdc', '\xb8', }; // Sizes in bytes of various things in the COFF format. enum { Header16Size = 20, Header32Size = 56, NameSize = 8, Symbol16Size = 18, Symbol32Size = 20, SectionSize = 40, RelocationSize = 10 }; struct header { uint16_t Machine; int32_t NumberOfSections; uint32_t TimeDateStamp; uint32_t PointerToSymbolTable; uint32_t NumberOfSymbols; uint16_t SizeOfOptionalHeader; uint16_t Characteristics; }; struct BigObjHeader { enum : uint16_t { MinBigObjectVersion = 2 }; uint16_t Sig1; ///< Must be IMAGE_FILE_MACHINE_UNKNOWN (0). uint16_t Sig2; ///< Must be 0xFFFF. 
uint16_t Version; uint16_t Machine; uint32_t TimeDateStamp; uint8_t UUID[16]; uint32_t unused1; uint32_t unused2; uint32_t unused3; uint32_t unused4; uint32_t NumberOfSections; uint32_t PointerToSymbolTable; uint32_t NumberOfSymbols; }; enum MachineTypes { MT_Invalid = 0xffff, IMAGE_FILE_MACHINE_UNKNOWN = 0x0, IMAGE_FILE_MACHINE_AM33 = 0x13, IMAGE_FILE_MACHINE_AMD64 = 0x8664, IMAGE_FILE_MACHINE_ARM = 0x1C0, IMAGE_FILE_MACHINE_ARMNT = 0x1C4, IMAGE_FILE_MACHINE_EBC = 0xEBC, IMAGE_FILE_MACHINE_I386 = 0x14C, IMAGE_FILE_MACHINE_IA64 = 0x200, IMAGE_FILE_MACHINE_M32R = 0x9041, IMAGE_FILE_MACHINE_MIPS16 = 0x266, IMAGE_FILE_MACHINE_MIPSFPU = 0x366, IMAGE_FILE_MACHINE_MIPSFPU16 = 0x466, IMAGE_FILE_MACHINE_POWERPC = 0x1F0, IMAGE_FILE_MACHINE_POWERPCFP = 0x1F1, IMAGE_FILE_MACHINE_R4000 = 0x166, IMAGE_FILE_MACHINE_SH3 = 0x1A2, IMAGE_FILE_MACHINE_SH3DSP = 0x1A3, IMAGE_FILE_MACHINE_SH4 = 0x1A6, IMAGE_FILE_MACHINE_SH5 = 0x1A8, IMAGE_FILE_MACHINE_THUMB = 0x1C2, IMAGE_FILE_MACHINE_WCEMIPSV2 = 0x169 }; enum Characteristics { C_Invalid = 0, /// The file does not contain base relocations and must be loaded at its /// preferred base. If this cannot be done, the loader will error. IMAGE_FILE_RELOCS_STRIPPED = 0x0001, /// The file is valid and can be run. IMAGE_FILE_EXECUTABLE_IMAGE = 0x0002, /// COFF line numbers have been stripped. This is deprecated and should be /// 0. IMAGE_FILE_LINE_NUMS_STRIPPED = 0x0004, /// COFF symbol table entries for local symbols have been removed. This is /// deprecated and should be 0. IMAGE_FILE_LOCAL_SYMS_STRIPPED = 0x0008, /// Aggressively trim working set. This is deprecated and must be 0. IMAGE_FILE_AGGRESSIVE_WS_TRIM = 0x0010, /// Image can handle > 2GiB addresses. IMAGE_FILE_LARGE_ADDRESS_AWARE = 0x0020, /// Little endian: the LSB precedes the MSB in memory. This is deprecated /// and should be 0. IMAGE_FILE_BYTES_REVERSED_LO = 0x0080, /// Machine is based on a 32bit word architecture. 
IMAGE_FILE_32BIT_MACHINE = 0x0100, /// Debugging info has been removed. IMAGE_FILE_DEBUG_STRIPPED = 0x0200, /// If the image is on removable media, fully load it and copy it to swap. IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP = 0x0400, /// If the image is on network media, fully load it and copy it to swap. IMAGE_FILE_NET_RUN_FROM_SWAP = 0x0800, /// The image file is a system file, not a user program. IMAGE_FILE_SYSTEM = 0x1000, /// The image file is a DLL. IMAGE_FILE_DLL = 0x2000, /// This file should only be run on a uniprocessor machine. IMAGE_FILE_UP_SYSTEM_ONLY = 0x4000, /// Big endian: the MSB precedes the LSB in memory. This is deprecated /// and should be 0. IMAGE_FILE_BYTES_REVERSED_HI = 0x8000 }; struct symbol { char Name[NameSize]; uint32_t Value; int32_t SectionNumber; uint16_t Type; uint8_t StorageClass; uint8_t NumberOfAuxSymbols; }; enum SymbolSectionNumber : int32_t { IMAGE_SYM_DEBUG = -2, IMAGE_SYM_ABSOLUTE = -1, IMAGE_SYM_UNDEFINED = 0 }; /// Storage class tells where and what the symbol represents enum SymbolStorageClass { SSC_Invalid = 0xff, IMAGE_SYM_CLASS_END_OF_FUNCTION = -1, ///< Physical end of function IMAGE_SYM_CLASS_NULL = 0, ///< No symbol IMAGE_SYM_CLASS_AUTOMATIC = 1, ///< Stack variable IMAGE_SYM_CLASS_EXTERNAL = 2, ///< External symbol IMAGE_SYM_CLASS_STATIC = 3, ///< Static IMAGE_SYM_CLASS_REGISTER = 4, ///< Register variable IMAGE_SYM_CLASS_EXTERNAL_DEF = 5, ///< External definition IMAGE_SYM_CLASS_LABEL = 6, ///< Label IMAGE_SYM_CLASS_UNDEFINED_LABEL = 7, ///< Undefined label IMAGE_SYM_CLASS_MEMBER_OF_STRUCT = 8, ///< Member of structure IMAGE_SYM_CLASS_ARGUMENT = 9, ///< Function argument IMAGE_SYM_CLASS_STRUCT_TAG = 10, ///< Structure tag IMAGE_SYM_CLASS_MEMBER_OF_UNION = 11, ///< Member of union IMAGE_SYM_CLASS_UNION_TAG = 12, ///< Union tag IMAGE_SYM_CLASS_TYPE_DEFINITION = 13, ///< Type definition IMAGE_SYM_CLASS_UNDEFINED_STATIC = 14, ///< Undefined static IMAGE_SYM_CLASS_ENUM_TAG = 15, ///< Enumeration tag 
IMAGE_SYM_CLASS_MEMBER_OF_ENUM = 16, ///< Member of enumeration IMAGE_SYM_CLASS_REGISTER_PARAM = 17, ///< Register parameter IMAGE_SYM_CLASS_BIT_FIELD = 18, ///< Bit field /// ".bb" or ".eb" - beginning or end of block IMAGE_SYM_CLASS_BLOCK = 100, /// ".bf" or ".ef" - beginning or end of function IMAGE_SYM_CLASS_FUNCTION = 101, IMAGE_SYM_CLASS_END_OF_STRUCT = 102, ///< End of structure IMAGE_SYM_CLASS_FILE = 103, ///< File name /// Line number, reformatted as symbol IMAGE_SYM_CLASS_SECTION = 104, IMAGE_SYM_CLASS_WEAK_EXTERNAL = 105, ///< Duplicate tag /// External symbol in dmert public lib IMAGE_SYM_CLASS_CLR_TOKEN = 107 }; enum SymbolBaseType { IMAGE_SYM_TYPE_NULL = 0, ///< No type information or unknown base type. IMAGE_SYM_TYPE_VOID = 1, ///< Used with void pointers and functions. IMAGE_SYM_TYPE_CHAR = 2, ///< A character (signed byte). IMAGE_SYM_TYPE_SHORT = 3, ///< A 2-byte signed integer. IMAGE_SYM_TYPE_INT = 4, ///< A natural integer type on the target. IMAGE_SYM_TYPE_LONG = 5, ///< A 4-byte signed integer. IMAGE_SYM_TYPE_FLOAT = 6, ///< A 4-byte floating-point number. IMAGE_SYM_TYPE_DOUBLE = 7, ///< An 8-byte floating-point number. IMAGE_SYM_TYPE_STRUCT = 8, ///< A structure. IMAGE_SYM_TYPE_UNION = 9, ///< An union. IMAGE_SYM_TYPE_ENUM = 10, ///< An enumerated type. IMAGE_SYM_TYPE_MOE = 11, ///< A member of enumeration (a specific value). IMAGE_SYM_TYPE_BYTE = 12, ///< A byte; unsigned 1-byte integer. IMAGE_SYM_TYPE_WORD = 13, ///< A word; unsigned 2-byte integer. IMAGE_SYM_TYPE_UINT = 14, ///< An unsigned integer of natural size. IMAGE_SYM_TYPE_DWORD = 15 ///< An unsigned 4-byte integer. }; enum SymbolComplexType { IMAGE_SYM_DTYPE_NULL = 0, ///< No complex type; simple scalar variable. IMAGE_SYM_DTYPE_POINTER = 1, ///< A pointer to base type. IMAGE_SYM_DTYPE_FUNCTION = 2, ///< A function that returns a base type. IMAGE_SYM_DTYPE_ARRAY = 3, ///< An array of base type. 
/// Type is formed as (base + (derived << SCT_COMPLEX_TYPE_SHIFT)) SCT_COMPLEX_TYPE_SHIFT = 4 }; enum AuxSymbolType { IMAGE_AUX_SYMBOL_TYPE_TOKEN_DEF = 1 }; struct section { char Name[NameSize]; uint32_t VirtualSize; uint32_t VirtualAddress; uint32_t SizeOfRawData; uint32_t PointerToRawData; uint32_t PointerToRelocations; uint32_t PointerToLineNumbers; uint16_t NumberOfRelocations; uint16_t NumberOfLineNumbers; uint32_t Characteristics; }; enum SectionCharacteristics : uint32_t { SC_Invalid = 0xffffffff, IMAGE_SCN_TYPE_NO_PAD = 0x00000008, IMAGE_SCN_CNT_CODE = 0x00000020, IMAGE_SCN_CNT_INITIALIZED_DATA = 0x00000040, IMAGE_SCN_CNT_UNINITIALIZED_DATA = 0x00000080, IMAGE_SCN_LNK_OTHER = 0x00000100, IMAGE_SCN_LNK_INFO = 0x00000200, IMAGE_SCN_LNK_REMOVE = 0x00000800, IMAGE_SCN_LNK_COMDAT = 0x00001000, IMAGE_SCN_GPREL = 0x00008000, IMAGE_SCN_MEM_PURGEABLE = 0x00020000, IMAGE_SCN_MEM_16BIT = 0x00020000, IMAGE_SCN_MEM_LOCKED = 0x00040000, IMAGE_SCN_MEM_PRELOAD = 0x00080000, IMAGE_SCN_ALIGN_1BYTES = 0x00100000, IMAGE_SCN_ALIGN_2BYTES = 0x00200000, IMAGE_SCN_ALIGN_4BYTES = 0x00300000, IMAGE_SCN_ALIGN_8BYTES = 0x00400000, IMAGE_SCN_ALIGN_16BYTES = 0x00500000, IMAGE_SCN_ALIGN_32BYTES = 0x00600000, IMAGE_SCN_ALIGN_64BYTES = 0x00700000, IMAGE_SCN_ALIGN_128BYTES = 0x00800000, IMAGE_SCN_ALIGN_256BYTES = 0x00900000, IMAGE_SCN_ALIGN_512BYTES = 0x00A00000, IMAGE_SCN_ALIGN_1024BYTES = 0x00B00000, IMAGE_SCN_ALIGN_2048BYTES = 0x00C00000, IMAGE_SCN_ALIGN_4096BYTES = 0x00D00000, IMAGE_SCN_ALIGN_8192BYTES = 0x00E00000, IMAGE_SCN_LNK_NRELOC_OVFL = 0x01000000, IMAGE_SCN_MEM_DISCARDABLE = 0x02000000, IMAGE_SCN_MEM_NOT_CACHED = 0x04000000, IMAGE_SCN_MEM_NOT_PAGED = 0x08000000, IMAGE_SCN_MEM_SHARED = 0x10000000, IMAGE_SCN_MEM_EXECUTE = 0x20000000, IMAGE_SCN_MEM_READ = 0x40000000, IMAGE_SCN_MEM_WRITE = 0x80000000 }; struct relocation { uint32_t VirtualAddress; uint32_t SymbolTableIndex; uint16_t Type; }; enum RelocationTypeI386 { IMAGE_REL_I386_ABSOLUTE = 0x0000, IMAGE_REL_I386_DIR16 = 0x0001, 
IMAGE_REL_I386_REL16 = 0x0002, IMAGE_REL_I386_DIR32 = 0x0006, IMAGE_REL_I386_DIR32NB = 0x0007, IMAGE_REL_I386_SEG12 = 0x0009, IMAGE_REL_I386_SECTION = 0x000A, IMAGE_REL_I386_SECREL = 0x000B, IMAGE_REL_I386_TOKEN = 0x000C, IMAGE_REL_I386_SECREL7 = 0x000D, IMAGE_REL_I386_REL32 = 0x0014 }; enum RelocationTypeAMD64 { IMAGE_REL_AMD64_ABSOLUTE = 0x0000, IMAGE_REL_AMD64_ADDR64 = 0x0001, IMAGE_REL_AMD64_ADDR32 = 0x0002, IMAGE_REL_AMD64_ADDR32NB = 0x0003, IMAGE_REL_AMD64_REL32 = 0x0004, IMAGE_REL_AMD64_REL32_1 = 0x0005, IMAGE_REL_AMD64_REL32_2 = 0x0006, IMAGE_REL_AMD64_REL32_3 = 0x0007, IMAGE_REL_AMD64_REL32_4 = 0x0008, IMAGE_REL_AMD64_REL32_5 = 0x0009, IMAGE_REL_AMD64_SECTION = 0x000A, IMAGE_REL_AMD64_SECREL = 0x000B, IMAGE_REL_AMD64_SECREL7 = 0x000C, IMAGE_REL_AMD64_TOKEN = 0x000D, IMAGE_REL_AMD64_SREL32 = 0x000E, IMAGE_REL_AMD64_PAIR = 0x000F, IMAGE_REL_AMD64_SSPAN32 = 0x0010 }; enum RelocationTypesARM { IMAGE_REL_ARM_ABSOLUTE = 0x0000, IMAGE_REL_ARM_ADDR32 = 0x0001, IMAGE_REL_ARM_ADDR32NB = 0x0002, IMAGE_REL_ARM_BRANCH24 = 0x0003, IMAGE_REL_ARM_BRANCH11 = 0x0004, IMAGE_REL_ARM_TOKEN = 0x0005, IMAGE_REL_ARM_BLX24 = 0x0008, IMAGE_REL_ARM_BLX11 = 0x0009, IMAGE_REL_ARM_SECTION = 0x000E, IMAGE_REL_ARM_SECREL = 0x000F, IMAGE_REL_ARM_MOV32A = 0x0010, IMAGE_REL_ARM_MOV32T = 0x0011, IMAGE_REL_ARM_BRANCH20T = 0x0012, IMAGE_REL_ARM_BRANCH24T = 0x0014, IMAGE_REL_ARM_BLX23T = 0x0015 }; enum COMDATType { IMAGE_COMDAT_SELECT_NODUPLICATES = 1, IMAGE_COMDAT_SELECT_ANY, IMAGE_COMDAT_SELECT_SAME_SIZE, IMAGE_COMDAT_SELECT_EXACT_MATCH, IMAGE_COMDAT_SELECT_ASSOCIATIVE, IMAGE_COMDAT_SELECT_LARGEST, IMAGE_COMDAT_SELECT_NEWEST }; // Auxiliary Symbol Formats struct AuxiliaryFunctionDefinition { uint32_t TagIndex; uint32_t TotalSize; uint32_t PointerToLinenumber; uint32_t PointerToNextFunction; char unused[2]; }; struct AuxiliarybfAndefSymbol { uint8_t unused1[4]; uint16_t Linenumber; uint8_t unused2[6]; uint32_t PointerToNextFunction; uint8_t unused3[2]; }; struct AuxiliaryWeakExternal { 
uint32_t TagIndex; uint32_t Characteristics; uint8_t unused[10]; }; /// These are not documented in the spec, but are located in WinNT.h. enum WeakExternalCharacteristics { IMAGE_WEAK_EXTERN_SEARCH_NOLIBRARY = 1, IMAGE_WEAK_EXTERN_SEARCH_LIBRARY = 2, IMAGE_WEAK_EXTERN_SEARCH_ALIAS = 3 }; struct AuxiliarySectionDefinition { uint32_t Length; uint16_t NumberOfRelocations; uint16_t NumberOfLinenumbers; uint32_t CheckSum; uint32_t Number; uint8_t Selection; char unused; }; struct AuxiliaryCLRToken { uint8_t AuxType; uint8_t unused1; uint32_t SymbolTableIndex; char unused2[12]; }; union Auxiliary { AuxiliaryFunctionDefinition FunctionDefinition; AuxiliarybfAndefSymbol bfAndefSymbol; AuxiliaryWeakExternal WeakExternal; AuxiliarySectionDefinition SectionDefinition; }; /// @brief The Import Directory Table. /// /// There is a single array of these and one entry per imported DLL. struct ImportDirectoryTableEntry { uint32_t ImportLookupTableRVA; uint32_t TimeDateStamp; uint32_t ForwarderChain; uint32_t NameRVA; uint32_t ImportAddressTableRVA; }; /// @brief The PE32 Import Lookup Table. /// /// There is an array of these for each imported DLL. It represents either /// the ordinal to import from the target DLL, or a name to lookup and import /// from the target DLL. /// /// This also happens to be the same format used by the Import Address Table /// when it is initially written out to the image. struct ImportLookupTableEntry32 { uint32_t data; /// @brief Is this entry specified by ordinal, or name? bool isOrdinal() const { return data & 0x80000000; } /// @brief Get the ordinal value of this entry. isOrdinal must be true. uint16_t getOrdinal() const { assert(isOrdinal() && "ILT entry is not an ordinal!"); return data & 0xFFFF; } /// @brief Set the ordinal value and set isOrdinal to true. void setOrdinal(uint16_t o) { data = o; data |= 0x80000000; } /// @brief Get the Hint/Name entry RVA. isOrdinal must be false. 
uint32_t getHintNameRVA() const { assert(!isOrdinal() && "ILT entry is not a Hint/Name RVA!"); return data; } /// @brief Set the Hint/Name entry RVA and set isOrdinal to false. void setHintNameRVA(uint32_t rva) { data = rva; } }; /// @brief The DOS compatible header at the front of all PEs. struct DOSHeader { uint16_t Magic; uint16_t UsedBytesInTheLastPage; uint16_t FileSizeInPages; uint16_t NumberOfRelocationItems; uint16_t HeaderSizeInParagraphs; uint16_t MinimumExtraParagraphs; uint16_t MaximumExtraParagraphs; uint16_t InitialRelativeSS; uint16_t InitialSP; uint16_t Checksum; uint16_t InitialIP; uint16_t InitialRelativeCS; uint16_t AddressOfRelocationTable; uint16_t OverlayNumber; uint16_t Reserved[4]; uint16_t OEMid; uint16_t OEMinfo; uint16_t Reserved2[10]; uint32_t AddressOfNewExeHeader; }; struct PE32Header { enum { PE32 = 0x10b, PE32_PLUS = 0x20b }; uint16_t Magic; uint8_t MajorLinkerVersion; uint8_t MinorLinkerVersion; uint32_t SizeOfCode; uint32_t SizeOfInitializedData; uint32_t SizeOfUninitializedData; uint32_t AddressOfEntryPoint; // RVA uint32_t BaseOfCode; // RVA uint32_t BaseOfData; // RVA uint32_t ImageBase; uint32_t SectionAlignment; uint32_t FileAlignment; uint16_t MajorOperatingSystemVersion; uint16_t MinorOperatingSystemVersion; uint16_t MajorImageVersion; uint16_t MinorImageVersion; uint16_t MajorSubsystemVersion; uint16_t MinorSubsystemVersion; uint32_t Win32VersionValue; uint32_t SizeOfImage; uint32_t SizeOfHeaders; uint32_t CheckSum; uint16_t Subsystem; // FIXME: This should be DllCharacteristics to match the COFF spec. uint16_t DLLCharacteristics; uint32_t SizeOfStackReserve; uint32_t SizeOfStackCommit; uint32_t SizeOfHeapReserve; uint32_t SizeOfHeapCommit; uint32_t LoaderFlags; // FIXME: This should be NumberOfRvaAndSizes to match the COFF spec. 
uint32_t NumberOfRvaAndSize; }; struct DataDirectory { uint32_t RelativeVirtualAddress; uint32_t Size; }; enum DataDirectoryIndex { EXPORT_TABLE = 0, IMPORT_TABLE, RESOURCE_TABLE, EXCEPTION_TABLE, CERTIFICATE_TABLE, BASE_RELOCATION_TABLE, DEBUG, ARCHITECTURE, GLOBAL_PTR, TLS_TABLE, LOAD_CONFIG_TABLE, BOUND_IMPORT, IAT, DELAY_IMPORT_DESCRIPTOR, CLR_RUNTIME_HEADER, NUM_DATA_DIRECTORIES }; enum WindowsSubsystem { IMAGE_SUBSYSTEM_UNKNOWN = 0, ///< An unknown subsystem. IMAGE_SUBSYSTEM_NATIVE = 1, ///< Device drivers and native Windows processes IMAGE_SUBSYSTEM_WINDOWS_GUI = 2, ///< The Windows GUI subsystem. IMAGE_SUBSYSTEM_WINDOWS_CUI = 3, ///< The Windows character subsystem. IMAGE_SUBSYSTEM_OS2_CUI = 5, ///< The OS/2 character subsytem. IMAGE_SUBSYSTEM_POSIX_CUI = 7, ///< The POSIX character subsystem. IMAGE_SUBSYSTEM_NATIVE_WINDOWS = 8, ///< Native Windows 9x driver. IMAGE_SUBSYSTEM_WINDOWS_CE_GUI = 9, ///< Windows CE. IMAGE_SUBSYSTEM_EFI_APPLICATION = 10, ///< An EFI application. IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER = 11, ///< An EFI driver with boot /// services. IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER = 12, ///< An EFI driver with run-time /// services. IMAGE_SUBSYSTEM_EFI_ROM = 13, ///< An EFI ROM image. IMAGE_SUBSYSTEM_XBOX = 14, ///< XBOX. IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION = 16 ///< A BCD application. }; enum DLLCharacteristics { /// ASLR with 64 bit address space. IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020, /// DLL can be relocated at load time. IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE = 0x0040, /// Code integrity checks are enforced. IMAGE_DLL_CHARACTERISTICS_FORCE_INTEGRITY = 0x0080, ///< Image is NX compatible. IMAGE_DLL_CHARACTERISTICS_NX_COMPAT = 0x0100, /// Isolation aware, but do not isolate the image. IMAGE_DLL_CHARACTERISTICS_NO_ISOLATION = 0x0200, /// Does not use structured exception handling (SEH). No SEH handler may be /// called in this image. IMAGE_DLL_CHARACTERISTICS_NO_SEH = 0x0400, /// Do not bind the image. 
IMAGE_DLL_CHARACTERISTICS_NO_BIND = 0x0800, ///< Image should execute in an AppContainer. IMAGE_DLL_CHARACTERISTICS_APPCONTAINER = 0x1000, ///< A WDM driver. IMAGE_DLL_CHARACTERISTICS_WDM_DRIVER = 0x2000, ///< Image supports Control Flow Guard. IMAGE_DLL_CHARACTERISTICS_GUARD_CF = 0x4000, /// Terminal Server aware. IMAGE_DLL_CHARACTERISTICS_TERMINAL_SERVER_AWARE = 0x8000 }; enum DebugType { IMAGE_DEBUG_TYPE_UNKNOWN = 0, IMAGE_DEBUG_TYPE_COFF = 1, IMAGE_DEBUG_TYPE_CODEVIEW = 2, IMAGE_DEBUG_TYPE_FPO = 3, IMAGE_DEBUG_TYPE_MISC = 4, IMAGE_DEBUG_TYPE_EXCEPTION = 5, IMAGE_DEBUG_TYPE_FIXUP = 6, IMAGE_DEBUG_TYPE_OMAP_TO_SRC = 7, IMAGE_DEBUG_TYPE_OMAP_FROM_SRC = 8, IMAGE_DEBUG_TYPE_BORLAND = 9, IMAGE_DEBUG_TYPE_CLSID = 11 }; enum BaseRelocationType { IMAGE_REL_BASED_ABSOLUTE = 0, IMAGE_REL_BASED_HIGH = 1, IMAGE_REL_BASED_LOW = 2, IMAGE_REL_BASED_HIGHLOW = 3, IMAGE_REL_BASED_HIGHADJ = 4, IMAGE_REL_BASED_MIPS_JMPADDR = 5, IMAGE_REL_BASED_ARM_MOV32A = 5, IMAGE_REL_BASED_ARM_MOV32T = 7, IMAGE_REL_BASED_MIPS_JMPADDR16 = 9, IMAGE_REL_BASED_DIR64 = 10 }; enum ImportType { IMPORT_CODE = 0, IMPORT_DATA = 1, IMPORT_CONST = 2 }; enum ImportNameType { /// Import is by ordinal. This indicates that the value in the Ordinal/Hint /// field of the import header is the import's ordinal. If this constant is /// not specified, then the Ordinal/Hint field should always be interpreted /// as the import's hint. IMPORT_ORDINAL = 0, /// The import name is identical to the public symbol name IMPORT_NAME = 1, /// The import name is the public symbol name, but skipping the leading ?, /// @, or optionally _. IMPORT_NAME_NOPREFIX = 2, /// The import name is the public symbol name, but skipping the leading ?, /// @, or optionally _, and truncating at the first @. IMPORT_NAME_UNDECORATE = 3 }; struct ImportHeader { uint16_t Sig1; ///< Must be IMAGE_FILE_MACHINE_UNKNOWN (0). uint16_t Sig2; ///< Must be 0xFFFF. 
uint16_t Version; uint16_t Machine; uint32_t TimeDateStamp; uint32_t SizeOfData; uint16_t OrdinalHint; uint16_t TypeInfo; ImportType getType() const { return static_cast<ImportType>(TypeInfo & 0x3); } ImportNameType getNameType() const { return static_cast<ImportNameType>((TypeInfo & 0x1C) >> 3); } }; enum CodeViewIdentifiers { DEBUG_LINE_TABLES_HAVE_COLUMN_RECORDS = 0x1, DEBUG_SECTION_MAGIC = 0x4, DEBUG_SYMBOL_SUBSECTION = 0xF1, DEBUG_LINE_TABLE_SUBSECTION = 0xF2, DEBUG_STRING_TABLE_SUBSECTION = 0xF3, DEBUG_INDEX_SUBSECTION = 0xF4, // Symbol subsections are split into records of different types. DEBUG_SYMBOL_TYPE_PROC_START = 0x1147, DEBUG_SYMBOL_TYPE_PROC_END = 0x114F }; inline bool isReservedSectionNumber(int32_t SectionNumber) { return SectionNumber <= 0; } } // End namespace COFF. } // End namespace llvm. #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/ArrayRecycler.h
//==- llvm/Support/ArrayRecycler.h - Recycling of Arrays ---------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ArrayRecycler class template which can recycle small
// arrays allocated from one of the allocators in Allocator.h
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ARRAYRECYCLER_H
#define LLVM_SUPPORT_ARRAYRECYCLER_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/MathExtras.h"

namespace llvm {

/// Recycle small arrays allocated from a BumpPtrAllocator.
///
/// Arrays are allocated in a small number of fixed sizes. For each supported
/// array size, the ArrayRecycler keeps a free list of available arrays.
///
template<class T, size_t Align = AlignOf<T>::Alignment>
class ArrayRecycler {
  // The free list for a given array size is a simple singly linked list.
  // We can't use iplist or Recycler here since those classes can't be copied.
  // The link is stored inside the recycled array's own memory, which is why
  // the static_asserts below require T to be at least pointer-sized and
  // suitably aligned.
  struct FreeList {
    FreeList *Next;
  };

  static_assert(Align >= AlignOf<FreeList>::Alignment, "Object underaligned");
  static_assert(sizeof(T) >= sizeof(FreeList), "Objects are too small");

  // Keep a free list for each array size.  Bucket[i] holds arrays of
  // capacity 2^i elements (see Capacity below).
  SmallVector<FreeList*, 8> Bucket;

  // Remove an entry from the free list in Bucket[Idx] and return it.
  // Return NULL if no entries are available.
  T *pop(unsigned Idx) {
    if (Idx >= Bucket.size())
      return nullptr;
    FreeList *Entry = Bucket[Idx];
    if (!Entry)
      return nullptr;
    // Unlink the head of the list and hand its storage back as a T array.
    Bucket[Idx] = Entry->Next;
    return reinterpret_cast<T*>(Entry);
  }

  // Add an entry to the free list at Bucket[Idx].
  void push(unsigned Idx, T *Ptr) {
    assert(Ptr && "Cannot recycle NULL pointer");
    // Reuse the array's own storage to hold the free-list link.
    FreeList *Entry = reinterpret_cast<FreeList*>(Ptr);
    if (Idx >= Bucket.size())
      Bucket.resize(size_t(Idx) + 1);
    Entry->Next = Bucket[Idx];
    Bucket[Idx] = Entry;
  }

public:
  /// The size of an allocated array is represented by a Capacity instance.
  ///
  /// This class is much smaller than a size_t, and it provides methods to work
  /// with the set of legal array capacities.
  class Capacity {
    // Log2 of the number of elements; doubles as the Bucket index.
    uint8_t Index;
    explicit Capacity(uint8_t idx) : Index(idx) {}

  public:
    Capacity() : Index(0) {}

    /// Get the capacity of an array that can hold at least N elements.
    static Capacity get(size_t N) {
      return Capacity(N ? Log2_64_Ceil(N) : 0);
    }

    /// Get the number of elements in an array with this capacity.
    size_t getSize() const { return size_t(1u) << Index; }

    /// Get the bucket number for this capacity.
    unsigned getBucket() const { return Index; }

    /// Get the next larger capacity. Large capacities grow exponentially, so
    /// this function can be used to reallocate incrementally growing vectors
    /// in amortized linear time.
    Capacity getNext() const { return Capacity(Index + 1); }
  };

  ~ArrayRecycler() {
    // The client should always call clear() so recycled arrays can be returned
    // to the allocator.
    assert(Bucket.empty() && "Non-empty ArrayRecycler deleted!");
  }

  /// Release all the tracked allocations to the allocator. The recycler must
  /// be free of any tracked allocations before being deleted.
  template<class AllocatorType>
  void clear(AllocatorType &Allocator) {
    // Drain each bucket's free list, returning every array to the allocator.
    for (; !Bucket.empty(); Bucket.pop_back())
      while (T *Ptr = pop(Bucket.size() - 1))
        Allocator.Deallocate(Ptr);
  }

  /// Special case for BumpPtrAllocator which has an empty Deallocate()
  /// function.
  ///
  /// There is no need to traverse the free lists, pulling all the objects into
  /// cache.
  void clear(BumpPtrAllocator&) {
    Bucket.clear();
  }

  /// Allocate an array of at least the requested capacity.
  ///
  /// Return an existing recycled array, or allocate one from Allocator if
  /// none are available for recycling.
  ///
  template<class AllocatorType>
  T *allocate(Capacity Cap, AllocatorType &Allocator) {
    // Try to recycle an existing array.
    if (T *Ptr = pop(Cap.getBucket()))
      return Ptr;
    // Nope, get more memory.
    return static_cast<T*>(Allocator.Allocate(sizeof(T)*Cap.getSize(), Align));
  }

  /// Deallocate an array with the specified Capacity.
  ///
  /// Cap must be the same capacity that was given to allocate().
  ///
  void deallocate(Capacity Cap, T *Ptr) {
    push(Cap.getBucket(), Ptr);
  }
};

} // end llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/Threading.h
//===-- llvm/Support/Threading.h - Control multithreading mode --*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file declares helper functions for running LLVM in a multi-threaded // environment. // //===----------------------------------------------------------------------===// #ifndef LLVM_SUPPORT_THREADING_H #define LLVM_SUPPORT_THREADING_H namespace llvm { /// Returns true if LLVM is compiled with support for multi-threading, and /// false otherwise. bool llvm_is_multithreaded(); /// llvm_execute_on_thread - Execute the given \p UserFn on a separate /// thread, passing it the provided \p UserData and waits for thread /// completion. /// /// This function does not guarantee that the code will actually be executed /// on a separate thread or honoring the requested stack size, but tries to do /// so where system support is available. /// /// \param UserFn - The callback to execute. /// \param UserData - An argument to pass to the callback function. /// \param RequestedStackSize - If non-zero, a requested size (in bytes) for /// the thread stack. void llvm_execute_on_thread(void (*UserFn)(void*), void *UserData, unsigned RequestedStackSize = 0); } #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/Compression.h
//===-- llvm/Support/Compression.h ---Compression----------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains basic functions for compression/uncompression. // //===----------------------------------------------------------------------===// #ifndef LLVM_SUPPORT_COMPRESSION_H #define LLVM_SUPPORT_COMPRESSION_H #include "llvm/Support/DataTypes.h" namespace llvm { template <typename T> class SmallVectorImpl; class StringRef; namespace zlib { enum CompressionLevel { NoCompression, DefaultCompression, BestSpeedCompression, BestSizeCompression }; enum Status { StatusOK, StatusUnsupported, // zlib is unavailable StatusOutOfMemory, // there was not enough memory StatusBufferTooShort, // there was not enough room in the output buffer StatusInvalidArg, // invalid input parameter StatusInvalidData // data was corrupted or incomplete }; bool isAvailable(); Status compress(StringRef InputBuffer, SmallVectorImpl<char> &CompressedBuffer, CompressionLevel Level = DefaultCompression); Status uncompress(StringRef InputBuffer, SmallVectorImpl<char> &UncompressedBuffer, size_t UncompressedSize); uint32_t crc32(StringRef Buffer); } // End of namespace zlib } // End of namespace llvm #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/OutputBuffer.h
//=== OutputBuffer.h - Output Buffer ----------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Methods to output values to a data buffer.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_OUTPUTBUFFER_H
#define LLVM_SUPPORT_OUTPUTBUFFER_H

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

namespace llvm {

  /// Appends and patches integer values of various widths into a byte
  /// vector, honoring a configurable endianness and pointer width.
  class OutputBuffer {
    /// Output buffer. Not owned; must outlive this object.
    std::vector<unsigned char> &Output;

    /// is64Bit/isLittleEndian - This information is inferred from the target
    /// machine directly, indicating what header values and flags to set.
    bool is64Bit, isLittleEndian;
  public:
    OutputBuffer(std::vector<unsigned char> &Out, bool is64bit, bool le)
      : Output(Out), is64Bit(is64bit), isLittleEndian(le) {}

    // align - Emit padding into the file until the current output position is
    // aligned to the specified power of two boundary.
    void align(unsigned Boundary) {
      assert(Boundary && (Boundary & (Boundary - 1)) == 0 &&
             "Must align to 2^k boundary");
      size_t Size = Output.size();

      if (Size & (Boundary - 1)) {
        // Add padding to get alignment to the correct place.
        size_t Pad = Boundary - (Size & (Boundary - 1));
        Output.resize(Size + Pad);
      }
    }

    //===------------------------------------------------------------------===//
    // Out Functions - Output the specified value to the data buffer.

    /// Append a single byte.
    void outbyte(unsigned char X) {
      Output.push_back(X);
    }

    /// Append a 16-bit value in the configured byte order.
    void outhalf(unsigned short X) {
      if (isLittleEndian) {
        Output.push_back(X & 255);
        Output.push_back(X >> 8);
      } else {
        Output.push_back(X >> 8);
        Output.push_back(X & 255);
      }
    }

    /// Append a 32-bit value in the configured byte order.
    void outword(unsigned X) {
      if (isLittleEndian) {
        Output.push_back((X >>  0) & 255);
        Output.push_back((X >>  8) & 255);
        Output.push_back((X >> 16) & 255);
        Output.push_back((X >> 24) & 255);
      } else {
        Output.push_back((X >> 24) & 255);
        Output.push_back((X >> 16) & 255);
        Output.push_back((X >>  8) & 255);
        Output.push_back((X >>  0) & 255);
      }
    }

    /// Append a 64-bit value in the configured byte order.
    void outxword(uint64_t X) {
      if (isLittleEndian) {
        Output.push_back(unsigned(X >>  0) & 255);
        Output.push_back(unsigned(X >>  8) & 255);
        Output.push_back(unsigned(X >> 16) & 255);
        Output.push_back(unsigned(X >> 24) & 255);
        Output.push_back(unsigned(X >> 32) & 255);
        Output.push_back(unsigned(X >> 40) & 255);
        Output.push_back(unsigned(X >> 48) & 255);
        Output.push_back(unsigned(X >> 56) & 255);
      } else {
        Output.push_back(unsigned(X >> 56) & 255);
        Output.push_back(unsigned(X >> 48) & 255);
        Output.push_back(unsigned(X >> 40) & 255);
        Output.push_back(unsigned(X >> 32) & 255);
        Output.push_back(unsigned(X >> 24) & 255);
        Output.push_back(unsigned(X >> 16) & 255);
        Output.push_back(unsigned(X >>  8) & 255);
        Output.push_back(unsigned(X >>  0) & 255);
      }
    }

    void outaddr32(unsigned X) {
      outword(X);
    }

    void outaddr64(uint64_t X) {
      outxword(X);
    }

    /// Append a pointer-sized value (4 or 8 bytes depending on is64Bit).
    void outaddr(uint64_t X) {
      if (!is64Bit)
        outword((unsigned)X);
      else
        outxword(X);
    }

    /// Append exactly \p Length bytes: the first min(S.length(), Length)
    /// bytes of \p S, zero-padded (or truncated) to \p Length.
    void outstring(const std::string &S, unsigned Length) {
      // Simplified from a pair of hand-written conditional expressions to
      // std::min; behavior is unchanged (always emits exactly Length bytes).
      unsigned len_to_copy =
          std::min(static_cast<unsigned>(S.length()), Length);

      for (unsigned i = 0; i < len_to_copy; ++i)
        outbyte(S[i]);

      for (unsigned i = len_to_copy; i < Length; ++i)
        outbyte(0);
    }

    //===------------------------------------------------------------------===//
    // Fix Functions - Replace an existing entry at an offset.

    /// Overwrite the 16-bit value previously emitted at \p Offset.
    void fixhalf(unsigned short X, unsigned Offset) {
      unsigned char *P = &Output[Offset];
      P[0] = (X >> (isLittleEndian ?  0 : 8)) & 255;
      P[1] = (X >> (isLittleEndian ?  8 : 0)) & 255;
    }

    /// Overwrite the 32-bit value previously emitted at \p Offset.
    void fixword(unsigned X, unsigned Offset) {
      unsigned char *P = &Output[Offset];
      P[0] = (X >> (isLittleEndian ?  0 : 24)) & 255;
      P[1] = (X >> (isLittleEndian ?  8 : 16)) & 255;
      P[2] = (X >> (isLittleEndian ? 16 :  8)) & 255;
      P[3] = (X >> (isLittleEndian ? 24 :  0)) & 255;
    }

    /// Overwrite the 64-bit value previously emitted at \p Offset.
    void fixxword(uint64_t X, unsigned Offset) {
      unsigned char *P = &Output[Offset];
      P[0] = (X >> (isLittleEndian ?  0 : 56)) & 255;
      P[1] = (X >> (isLittleEndian ?  8 : 48)) & 255;
      P[2] = (X >> (isLittleEndian ? 16 : 40)) & 255;
      P[3] = (X >> (isLittleEndian ? 24 : 32)) & 255;
      P[4] = (X >> (isLittleEndian ? 32 : 24)) & 255;
      P[5] = (X >> (isLittleEndian ? 40 : 16)) & 255;
      P[6] = (X >> (isLittleEndian ? 48 :  8)) & 255;
      P[7] = (X >> (isLittleEndian ? 56 :  0)) & 255;
    }

    /// Overwrite a pointer-sized value previously emitted at \p Offset.
    void fixaddr(uint64_t X, unsigned Offset) {
      if (!is64Bit)
        fixword((unsigned)X, Offset);
      else
        fixxword(X, Offset);
    }

    unsigned char &operator[](unsigned Index) {
      return Output[Index];
    }
    const unsigned char &operator[](unsigned Index) const {
      return Output[Index];
    }
  };

} // end llvm namespace

#endif // LLVM_SUPPORT_OUTPUTBUFFER_H
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/RegistryParser.h
//=== RegistryParser.h - Linker-supported plugin registries -----*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Defines a command-line parser for a registry. // //===----------------------------------------------------------------------===// #ifndef LLVM_SUPPORT_REGISTRYPARSER_H #define LLVM_SUPPORT_REGISTRYPARSER_H #include "llvm/Support/CommandLine.h" #include "llvm/Support/Registry.h" namespace llvm { /// A command-line parser for a registry. Use like such: /// /// static cl::opt<Registry<Collector>::entry, false, /// RegistryParser<Collector> > /// GCOpt("gc", cl::desc("Garbage collector to use."), /// cl::value_desc()); /// /// To make use of the value: /// /// Collector *TheCollector = GCOpt->instantiate(); /// template <typename T, typename U = RegistryTraits<T> > class RegistryParser : public cl::parser<const typename U::entry*>, public Registry<T, U>::listener { typedef U traits; typedef typename U::entry entry; typedef typename Registry<T, U>::listener listener; protected: void registered(const entry &E) { addLiteralOption(traits::nameof(E), &E, traits::descof(E)); } public: void initialize(cl::Option &O) { listener::init(); cl::parser<const typename U::entry*>::initialize(O); } }; } #endif // LLVM_SUPPORT_REGISTRYPARSER_H
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/PrettyStackTrace.h
//===- llvm/Support/PrettyStackTrace.h - Pretty Crash Handling --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the PrettyStackTraceEntry class, which is used to make
// crashes give more contextual information about what the program was doing
// when it crashed.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_PRETTYSTACKTRACE_H
#define LLVM_SUPPORT_PRETTYSTACKTRACE_H

#include "llvm/Support/Compiler.h"

namespace llvm {
  class raw_ostream;

  /// Turn on crash-time emission of the pretty stack trace.
  void EnablePrettyStackTrace();

  /// PrettyStackTraceEntry - This class is used to represent a frame of the
  /// "pretty" stack trace that is dumped when a program crashes. You can define
  /// subclasses of this and declare them on the program stack: when they are
  /// constructed and destructed, they will add their symbolic frames to a
  /// virtual stack trace. This gets dumped out if the program crashes.
  class PrettyStackTraceEntry {
    // Intrusive singly linked list of active entries; NextEntry is the entry
    // that was on top when this one was constructed.
    const PrettyStackTraceEntry *NextEntry;
    // Non-copyable: each entry's lifetime is tied to its stack frame.
    PrettyStackTraceEntry(const PrettyStackTraceEntry &) = delete;
    void operator=(const PrettyStackTraceEntry&) = delete;
  public:
    PrettyStackTraceEntry();
    virtual ~PrettyStackTraceEntry();

    /// print - Emit information about this stack frame to OS.
    virtual void print(raw_ostream &OS) const = 0;

    /// getNextEntry - Return the next entry in the list of frames.
    const PrettyStackTraceEntry *getNextEntry() const { return NextEntry; }
  };

  /// PrettyStackTraceString - This object prints a specified string (which
  /// should not contain newlines) to the stream as the stack trace when a crash
  /// occurs.
  class PrettyStackTraceString : public PrettyStackTraceEntry {
    const char *Str; // Stored, not copied: must outlive this entry.
  public:
    PrettyStackTraceString(const char *str) : Str(str) {}
    void print(raw_ostream &OS) const override;
  };

  /// PrettyStackTraceProgram - This object prints a specified program arguments
  /// to the stream as the stack trace when a crash occurs.
  class PrettyStackTraceProgram : public PrettyStackTraceEntry {
    int ArgC;
    const char *const *ArgV;
  public:
    // Constructing the program-level entry also enables pretty stack traces
    // globally as a side effect.
    PrettyStackTraceProgram(int argc, const char * const*argv)
      : ArgC(argc), ArgV(argv) {
      EnablePrettyStackTrace();
    }
    void print(raw_ostream &OS) const override;
  };

} // end namespace llvm

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/MathExtras.h
//===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains some functions that are useful for math stuff.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_MATHEXTRAS_H
#define LLVM_SUPPORT_MATHEXTRAS_H

#include "dxc/WinAdapter.h" // HLSL Change
#include "llvm/Support/Compiler.h"
#include "llvm/Support/SwapByteOrder.h"
#include <cassert>
#include <cstring>
#include <type_traits>

#ifdef _MSC_VER
#include <intrin.h>
#endif

#ifdef __ANDROID_NDK__
#include <android/api-level.h>
#endif

namespace llvm {
/// \brief The behavior an operation has on an input of 0.
enum ZeroBehavior {
  /// \brief The returned value is undefined.
  ZB_Undefined,
  /// \brief The returned value is numeric_limits<T>::max()
  ZB_Max,
  /// \brief The returned value is numeric_limits<T>::digits
  ZB_Width
};

namespace detail {
// Portable fallback for count-trailing-zeros: binary search that halves the
// candidate window each iteration. Used when no intrinsic specialization
// below applies.
template <typename T, std::size_t SizeOfT> struct TrailingZerosCounter {
  static std::size_t count(T Val, ZeroBehavior) {
    if (!Val)
      return std::numeric_limits<T>::digits;
    if (Val & 0x1)
      return 0;

    // Bisection method.
    std::size_t ZeroBits = 0;
    T Shift = std::numeric_limits<T>::digits >> 1;
    T Mask = std::numeric_limits<T>::max() >> Shift;
    while (Shift) {
      if ((Val & Mask) == 0) {
        Val >>= Shift;
        ZeroBits |= Shift;
      }
      Shift >>= 1;
      Mask >>= Shift;
    }
    return ZeroBits;
  }
};

#if __GNUC__ >= 4 || _MSC_VER
// 32-bit specialization using compiler intrinsics. The intrinsics are
// undefined for an input of 0, hence the explicit ZB check first.
template <typename T> struct TrailingZerosCounter<T, 4> {
  static std::size_t count(T Val, ZeroBehavior ZB) {
    if (ZB != ZB_Undefined && Val == 0)
      return 32;

#if __has_builtin(__builtin_ctz) || LLVM_GNUC_PREREQ(4, 0, 0)
    return __builtin_ctz(Val);
#elif _MSC_VER
    unsigned long Index;
    _BitScanForward(&Index, Val);
    return Index;
#endif
  }
};

#if !defined(_MSC_VER) || defined(_M_X64)
// 64-bit specialization; _BitScanForward64 is only available on x64 MSVC.
template <typename T> struct TrailingZerosCounter<T, 8> {
  static std::size_t count(T Val, ZeroBehavior ZB) {
    if (ZB != ZB_Undefined && Val == 0)
      return 64;

#if __has_builtin(__builtin_ctzll) || LLVM_GNUC_PREREQ(4, 0, 0)
    return __builtin_ctzll(Val);
#elif _MSC_VER
    unsigned long Index;
    _BitScanForward64(&Index, Val);
    return Index;
#endif
  }
};
#endif
#endif
} // namespace detail

/// \brief Count number of 0's from the least significant bit to the most
/// stopping at the first 1.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
/// valid arguments.
template <typename T>
std::size_t countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
  static_assert(std::numeric_limits<T>::is_integer &&
                    !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  return detail::TrailingZerosCounter<T, sizeof(T)>::count(Val, ZB);
}

namespace detail {
// Portable fallback for count-leading-zeros: repeatedly tests whether the
// upper half of the remaining window is non-zero, accumulating the count of
// discarded zero bits.
template <typename T, std::size_t SizeOfT> struct LeadingZerosCounter {
  static std::size_t count(T Val, ZeroBehavior) {
    if (!Val)
      return std::numeric_limits<T>::digits;

    // Bisection method.
    std::size_t ZeroBits = 0;
    for (T Shift = std::numeric_limits<T>::digits >> 1; Shift; Shift >>= 1) {
      T Tmp = Val >> Shift;
      if (Tmp)
        Val = Tmp;
      else
        ZeroBits |= Shift;
    }
    return ZeroBits;
  }
};

#if __GNUC__ >= 4 || _MSC_VER
// 32-bit specialization. _BitScanReverse yields the index of the highest set
// bit; XOR with 31 converts that index into a leading-zero count.
template <typename T> struct LeadingZerosCounter<T, 4> {
  static std::size_t count(T Val, ZeroBehavior ZB) {
    if (ZB != ZB_Undefined && Val == 0)
      return 32;

#if __has_builtin(__builtin_clz) || LLVM_GNUC_PREREQ(4, 0, 0)
    return __builtin_clz(Val);
#elif _MSC_VER
    unsigned long Index;
    _BitScanReverse(&Index, Val);
    return Index ^ 31;
#endif
  }
};

#if !defined(_MSC_VER) || defined(_M_X64)
// 64-bit specialization; XOR with 63 converts the bit index to a count.
template <typename T> struct LeadingZerosCounter<T, 8> {
  static std::size_t count(T Val, ZeroBehavior ZB) {
    if (ZB != ZB_Undefined && Val == 0)
      return 64;

#if __has_builtin(__builtin_clzll) || LLVM_GNUC_PREREQ(4, 0, 0)
    return __builtin_clzll(Val);
#elif _MSC_VER
    unsigned long Index;
    _BitScanReverse64(&Index, Val);
    return Index ^ 63;
#endif
  }
};
#endif
#endif
} // namespace detail

/// \brief Count number of 0's from the most significant bit to the least
/// stopping at the first 1.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
/// valid arguments.
template <typename T>
std::size_t countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
  static_assert(std::numeric_limits<T>::is_integer &&
                    !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  return detail::LeadingZerosCounter<T, sizeof(T)>::count(Val, ZB);
}

/// \brief Get the index of the first set bit starting from the least
/// significant bit.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
/// valid arguments.
template <typename T> T findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) {
  if (ZB == ZB_Max && Val == 0)
    return std::numeric_limits<T>::max();

  return countTrailingZeros(Val, ZB_Undefined);
}

/// \brief Get the index of the last set bit starting from the least
/// significant bit.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
/// valid arguments.
template <typename T> T findLastSet(T Val, ZeroBehavior ZB = ZB_Max) {
  if (ZB == ZB_Max && Val == 0)
    return std::numeric_limits<T>::max();

  // Use ^ instead of - because both gcc and llvm can remove the associated ^
  // in the __builtin_clz intrinsic on x86.
  return countLeadingZeros(Val, ZB_Undefined) ^
         (std::numeric_limits<T>::digits - 1);
}

/// \brief Macro compressed bit reversal table for 256 bits.
///
/// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
static const unsigned char BitReverseTable256[256] = {
  // R6 expands to four R4 groups, each R4 to four R2 groups, each R2 to four
  // literal entries — producing the bit-reversed value of every byte 0..255.
#define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64
#define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16)
#define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4)
  R6(0), R6(2), R6(1), R6(3)
#undef R2
#undef R4
#undef R6
};

/// \brief Reverse the bits in \p Val.
template <typename T>
T reverseBits(T Val) {
  // Reverse each byte through the lookup table while writing bytes in
  // reverse order; memcpy in/out avoids any aliasing or shift-width issues.
  unsigned char in[sizeof(Val)];
  unsigned char out[sizeof(Val)];
  std::memcpy(in, &Val, sizeof(Val));
  for (unsigned i = 0; i < sizeof(Val); ++i)
    out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]];
  std::memcpy(&Val, out, sizeof(Val));
  return Val;
}

// NOTE: The following support functions use the _32/_64 extensions instead of
// type overloading so that signed and unsigned integers can be used without
// ambiguity.

/// Hi_32 - This function returns the high 32 bits of a 64 bit value.
inline uint32_t Hi_32(uint64_t Value) {
  return static_cast<uint32_t>(Value >> 32);
}

/// Lo_32 - This function returns the low 32 bits of a 64 bit value.
inline uint32_t Lo_32(uint64_t Value) { return static_cast<uint32_t>(Value); } /// Make_64 - This functions makes a 64-bit integer from a high / low pair of /// 32-bit integers. inline uint64_t Make_64(uint32_t High, uint32_t Low) { return ((uint64_t)High << 32) | (uint64_t)Low; } /// isInt - Checks if an integer fits into the given bit width. template<unsigned N> inline bool isInt(int64_t x) { return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1))); } // Template specializations to get better code for common cases. template<> inline bool isInt<8>(int64_t x) { return static_cast<int8_t>(x) == x; } template<> inline bool isInt<16>(int64_t x) { return static_cast<int16_t>(x) == x; } template<> inline bool isInt<32>(int64_t x) { return static_cast<int32_t>(x) == x; } /// isShiftedInt<N,S> - Checks if a signed integer is an N bit number shifted /// left by S. template<unsigned N, unsigned S> inline bool isShiftedInt(int64_t x) { return isInt<N+S>(x) && (x % (1<<S) == 0); } /// isUInt - Checks if an unsigned integer fits into the given bit width. template<unsigned N> inline bool isUInt(uint64_t x) { return N >= 64 || x < (UINT64_C(1)<<(N)); } // Template specializations to get better code for common cases. template<> inline bool isUInt<8>(uint64_t x) { return static_cast<uint8_t>(x) == x; } template<> inline bool isUInt<16>(uint64_t x) { return static_cast<uint16_t>(x) == x; } template<> inline bool isUInt<32>(uint64_t x) { return static_cast<uint32_t>(x) == x; } /// isShiftedUInt<N,S> - Checks if a unsigned integer is an N bit number shifted /// left by S. template<unsigned N, unsigned S> inline bool isShiftedUInt(uint64_t x) { return isUInt<N+S>(x) && (x % (1<<S) == 0); } /// isUIntN - Checks if an unsigned integer fits into the given (dynamic) /// bit width. inline bool isUIntN(unsigned N, uint64_t x) { return x == (x & (~0ULL >> (64 - N))); } /// isIntN - Checks if an signed integer fits into the given (dynamic) /// bit width. 
inline bool isIntN(unsigned N, int64_t x) { return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1))); } /// isMask_32 - This function returns true if the argument is a non-empty /// sequence of ones starting at the least significant bit with the remainder /// zero (32 bit version). Ex. isMask_32(0x0000FFFFU) == true. inline bool isMask_32(uint32_t Value) { return Value && ((Value + 1) & Value) == 0; } /// isMask_64 - This function returns true if the argument is a non-empty /// sequence of ones starting at the least significant bit with the remainder /// zero (64 bit version). inline bool isMask_64(uint64_t Value) { return Value && ((Value + 1) & Value) == 0; } /// isShiftedMask_32 - This function returns true if the argument contains a /// non-empty sequence of ones with the remainder zero (32 bit version.) /// Ex. isShiftedMask_32(0x0000FF00U) == true. inline bool isShiftedMask_32(uint32_t Value) { return Value && isMask_32((Value - 1) | Value); } /// isShiftedMask_64 - This function returns true if the argument contains a /// non-empty sequence of ones with the remainder zero (64 bit version.) inline bool isShiftedMask_64(uint64_t Value) { return Value && isMask_64((Value - 1) | Value); } /// isPowerOf2_32 - This function returns true if the argument is a power of /// two > 0. Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.) inline bool isPowerOf2_32(uint32_t Value) { return Value && !(Value & (Value - 1)); } /// isPowerOf2_64 - This function returns true if the argument is a power of two /// > 0 (64 bit edition.) inline bool isPowerOf2_64(uint64_t Value) { return Value && !(Value & (Value - int64_t(1L))); } /// ByteSwap_16 - This function returns a byte-swapped representation of the /// 16-bit argument, Value. inline uint16_t ByteSwap_16(uint16_t Value) { return sys::SwapByteOrder_16(Value); } /// ByteSwap_32 - This function returns a byte-swapped representation of the /// 32-bit argument, Value. 
inline uint32_t ByteSwap_32(uint32_t Value) {
  return sys::SwapByteOrder_32(Value);
}

/// ByteSwap_64 - This function returns a byte-swapped representation of the
/// 64-bit argument, Value.
inline uint64_t ByteSwap_64(uint64_t Value) {
  return sys::SwapByteOrder_64(Value);
}

/// \brief Count the number of ones from the most significant bit to the first
/// zero bit.
///
/// Ex. CountLeadingOnes(0xFF0FFF00) == 8.
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of all ones. Only ZB_Width and
/// ZB_Undefined are valid arguments.
template <typename T>
std::size_t countLeadingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
  static_assert(std::numeric_limits<T>::is_integer &&
                    !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  // Leading ones of Value are exactly the leading zeros of ~Value.
  return countLeadingZeros(~Value, ZB);
}

/// \brief Count the number of ones from the least significant bit to the first
/// zero bit.
///
/// Ex. countTrailingOnes(0x00FF00FF) == 8.
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of all ones. Only ZB_Width and
/// ZB_Undefined are valid arguments.
template <typename T>
std::size_t countTrailingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
  static_assert(std::numeric_limits<T>::is_integer &&
                    !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  // Trailing ones of Value are exactly the trailing zeros of ~Value.
  return countTrailingZeros(~Value, ZB);
}

namespace detail {
// Population-count implementation selected by operand size. The generic
// version handles types up to 32 bits; an 8-byte specialization follows.
template <typename T, std::size_t SizeOfT> struct PopulationCounter {
  static unsigned count(T Value) {
    // Generic version, forward to 32 bits.
    static_assert(SizeOfT <= 4, "Not implemented!");
#if __GNUC__ >= 4
    return __builtin_popcount(Value);
#else
    // SWAR bit-count: sum bits in 2-bit, then 4-bit fields, then gather the
    // per-byte counts into the top byte via the 0x01010101 multiply.
    uint32_t v = Value;
    v = v - ((v >> 1) & 0x55555555);
    v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
    return ((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24;
#endif
  }
};

template <typename T> struct PopulationCounter<T, 8> {
  static unsigned count(T Value) {
#if __GNUC__ >= 4
    return __builtin_popcountll(Value);
#else
    // 64-bit SWAR variant of the algorithm above.
    uint64_t v = Value;
    v = v - ((v >> 1) & 0x5555555555555555ULL);
    v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
    v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
    return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56);
#endif
  }
};
} // namespace detail

/// \brief Count the number of set bits in a value.
/// Ex. countPopulation(0xF000F000) = 8
/// Returns 0 if the word is zero.
template <typename T>
inline unsigned countPopulation(T Value) {
  static_assert(std::numeric_limits<T>::is_integer &&
                    !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  return detail::PopulationCounter<T, sizeof(T)>::count(Value);
}

/// Log2 - This function returns the log base 2 of the specified value
inline double __cdecl Log2(double Value) { // HLSL Change - __cdecl
#if defined(__ANDROID_API__) && __ANDROID_API__ < 18
  // Old Android NDKs lack log2(); fall back to the change-of-base identity.
  return __builtin_log(Value) / __builtin_log(2.0);
#else
  return log2(Value);
#endif
}

/// Log2_32 - This function returns the floor log base 2 of the specified value,
/// -1 if the value is zero. (32 bit edition.)
/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
inline unsigned Log2_32(uint32_t Value) {
  return 31 - (unsigned)countLeadingZeros(Value); // HLSL Change (unsigned)
}

/// Log2_64 - This function returns the floor log base 2 of the specified value,
/// -1 if the value is zero. (64 bit edition.)
inline unsigned Log2_64(uint64_t Value) {
  return 63 - (unsigned)countLeadingZeros(Value); // HLSL Change (unsigned)
}

/// Log2_32_Ceil - This function returns the ceil log base 2 of the specified
/// value, 32 if the value is zero. (32 bit edition).
/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
inline unsigned Log2_32_Ceil(uint32_t Value) {
  return 32 - (unsigned)countLeadingZeros(Value - 1); // HLSL Change (unsigned)
}

/// Log2_64_Ceil - This function returns the ceil log base 2 of the specified
/// value, 64 if the value is zero. (64 bit edition.)
inline unsigned Log2_64_Ceil(uint64_t Value) {
  return 64 - (unsigned)countLeadingZeros(Value - 1); // HLSL Change (unsigned)
}

/// GreatestCommonDivisor64 - Return the greatest common divisor of the two
/// values using Euclid's algorithm.
inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) {
  while (B) {
    uint64_t T = B;
    B = A % B;
    A = T;
  }
  return A;
}

/// BitsToDouble - This function takes a 64-bit integer and returns the bit
/// equivalent double.
inline double BitsToDouble(uint64_t Bits) {
  // Fixed: reading a union member other than the one last written is
  // undefined behavior in C++. std::memcpy is well-defined and compiles to
  // the same code.
  double D;
  static_assert(sizeof(D) == sizeof(Bits), "Unexpected type sizes");
  std::memcpy(&D, &Bits, sizeof(Bits));
  return D;
}

/// BitsToFloat - This function takes a 32-bit integer and returns the bit
/// equivalent float.
inline float BitsToFloat(uint32_t Bits) {
  // Same memcpy-based (well-defined) bit cast as BitsToDouble.
  float F;
  static_assert(sizeof(F) == sizeof(Bits), "Unexpected type sizes");
  std::memcpy(&F, &Bits, sizeof(Bits));
  return F;
}

/// DoubleToBits - This function takes a double and returns the bit
/// equivalent 64-bit integer.  Note that copying doubles around
/// changes the bits of NaNs on some hosts, notably x86, so this
/// routine cannot be used if these bits are needed.
inline uint64_t DoubleToBits(double Double) {
  uint64_t Bits;
  static_assert(sizeof(Bits) == sizeof(Double), "Unexpected type sizes");
  std::memcpy(&Bits, &Double, sizeof(Double));
  return Bits;
}

/// FloatToBits - This function takes a float and returns the bit
/// equivalent 32-bit integer.  Note that copying floats around
/// changes the bits of NaNs on some hosts, notably x86, so this
/// routine cannot be used if these bits are needed.
inline uint32_t FloatToBits(float Float) { union { uint32_t I; float F; } T; T.F = Float; return T.I; } /// MinAlign - A and B are either alignments or offsets. Return the minimum /// alignment that may be assumed after adding the two together. inline uint64_t MinAlign(uint64_t A, uint64_t B) { // The largest power of 2 that divides both A and B. // // Replace "-Value" by "1+~Value" in the following commented code to avoid // MSVC warning C4146 // return (A | B) & -(A | B); return (A | B) & (1 + ~(A | B)); } /// \brief Aligns \c Addr to \c Alignment bytes, rounding up. /// /// Alignment should be a power of two. This method rounds up, so /// alignAddr(7, 4) == 8 and alignAddr(8, 4) == 8. inline uintptr_t alignAddr(const void *Addr, size_t Alignment) { assert(Alignment && isPowerOf2_64((uint64_t)Alignment) && "Alignment is not a power of two!"); assert((uintptr_t)Addr + Alignment - 1 >= (uintptr_t)Addr); return (((uintptr_t)Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1)); } /// \brief Returns the necessary adjustment for aligning \c Ptr to \c Alignment /// bytes, rounding up. inline size_t alignmentAdjustment(const void *Ptr, size_t Alignment) { return alignAddr(Ptr, Alignment) - (uintptr_t)Ptr; } /// NextPowerOf2 - Returns the next power of two (in 64-bits) /// that is strictly greater than A. Returns zero on overflow. inline uint64_t NextPowerOf2(uint64_t A) { A |= (A >> 1); A |= (A >> 2); A |= (A >> 4); A |= (A >> 8); A |= (A >> 16); A |= (A >> 32); return A + 1; } /// Returns the power of two which is less than or equal to the given value. /// Essentially, it is a floor operation across the domain of powers of two. inline uint64_t PowerOf2Floor(uint64_t A) { if (!A) return 0; return 1ull << (63 - countLeadingZeros(A, ZB_Undefined)); } /// Returns the next integer (mod 2**64) that is greater than or equal to /// \p Value and is a multiple of \p Align. \p Align must be non-zero. 
/// /// Examples: /// \code /// RoundUpToAlignment(5, 8) = 8 /// RoundUpToAlignment(17, 8) = 24 /// RoundUpToAlignment(~0LL, 8) = 0 /// RoundUpToAlignment(321, 255) = 510 /// \endcode inline uint64_t RoundUpToAlignment(uint64_t Value, uint64_t Align) { return (Value + Align - 1) / Align * Align; } /// Returns the offset to the next integer (mod 2**64) that is greater than /// or equal to \p Value and is a multiple of \p Align. \p Align must be /// non-zero. inline uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) { return RoundUpToAlignment(Value, Align) - Value; } /// SignExtend32 - Sign extend B-bit number x to 32-bit int. /// Usage int32_t r = SignExtend32<5>(x); template <unsigned B> inline int32_t SignExtend32(uint32_t x) { return int32_t(x << (32 - B)) >> (32 - B); } /// \brief Sign extend number in the bottom B bits of X to a 32-bit int. /// Requires 0 < B <= 32. inline int32_t SignExtend32(uint32_t X, unsigned B) { return int32_t(X << (32 - B)) >> (32 - B); } /// SignExtend64 - Sign extend B-bit number x to 64-bit int. /// Usage int64_t r = SignExtend64<5>(x); template <unsigned B> inline int64_t SignExtend64(uint64_t x) { return int64_t(x << (64 - B)) >> (64 - B); } /// \brief Sign extend number in the bottom B bits of X to a 64-bit int. /// Requires 0 < B <= 64. inline int64_t SignExtend64(uint64_t X, unsigned B) { return int64_t(X << (64 - B)) >> (64 - B); } extern const float huge_valf; } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/DynamicLibrary.h
//===-- llvm/Support/DynamicLibrary.h - Portable Dynamic Library -*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the sys::DynamicLibrary class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_DYNAMICLIBRARY_H
#define LLVM_SUPPORT_DYNAMICLIBRARY_H

#include <string>

namespace llvm {

class StringRef;

namespace sys {

  /// This class provides a portable interface to dynamic libraries which also
  /// might be known as shared libraries, shared objects, dynamic shared
  /// objects, or dynamic link libraries. Regardless of the terminology or the
  /// operating system interface, this class provides a portable interface that
  /// allows dynamic libraries to be loaded and searched for externally
  /// defined symbols. This is typically used to provide "plug-in" support.
  /// It also allows for symbols to be defined which don't live in any library,
  /// but rather the main program itself, useful on Windows where the main
  /// executable cannot be searched.
  ///
  /// Note: there is currently no interface for temporarily loading a library,
  /// or for unloading libraries when the LLVM library is unloaded.
  class DynamicLibrary {
    // Placeholder whose address represents an invalid library.
    // We use this instead of NULL or a pointer-int pair because the OS library
    // might define 0 or 1 to be "special" handles, such as "search all".
    static char Invalid;

    // Opaque data used to interface with OS-specific dynamic library handling.
    void *Data;

  public:
    explicit DynamicLibrary(void *data = &Invalid) : Data(data) {}

    /// Returns true if the object refers to a valid library.
    bool isValid() const { return Data != &Invalid; }

    /// Searches through the library for the symbol \p symbolName. If it is
    /// found, the address of that symbol is returned. If not, NULL is returned.
    /// Note that NULL will also be returned if the library failed to load.
    /// Use isValid() to distinguish these cases if it is important.
    /// Note that this will \e not search symbols explicitly registered by
    /// AddSymbol().
    void *getAddressOfSymbol(const char *symbolName);

    /// This function permanently loads the dynamic library at the given path.
    /// The library will only be unloaded when the program terminates.
    /// This returns a valid DynamicLibrary instance on success and an invalid
    /// instance on failure (see isValid()). \p *errMsg will only be modified
    /// if the library fails to load.
    ///
    /// It is safe to call this function multiple times for the same library.
    /// @brief Open a dynamic library permanently.
    static DynamicLibrary getPermanentLibrary(const char *filename,
                                              std::string *errMsg = nullptr);

    /// This function permanently loads the dynamic library at the given path.
    /// Use this instead of getPermanentLibrary() when you won't need to get
    /// symbols from the library itself.
    ///
    /// It is safe to call this function multiple times for the same library.
    /// Note the inverted convention: returns true on FAILURE to load.
    static bool LoadLibraryPermanently(const char *Filename,
                                       std::string *ErrMsg = nullptr) {
      return !getPermanentLibrary(Filename, ErrMsg).isValid();
    }

    /// This function will search through all previously loaded dynamic
    /// libraries for the symbol \p symbolName. If it is found, the address of
    /// that symbol is returned. If not, null is returned. Note that this will
    /// search permanently loaded libraries (getPermanentLibrary()) as well
    /// as explicitly registered symbols (AddSymbol()).
    /// @throws std::string on error.
    /// @brief Search through libraries for address of a symbol
    static void *SearchForAddressOfSymbol(const char *symbolName);

    /// @brief Convenience function for C++ophiles.
    static void *SearchForAddressOfSymbol(const std::string &symbolName) {
      return SearchForAddressOfSymbol(symbolName.c_str());
    }

    /// This functions permanently adds the symbol \p symbolName with the
    /// value \p symbolValue.  These symbols are searched before any
    /// libraries.
    /// @brief Add searchable symbol/value pair.
    static void AddSymbol(StringRef symbolName, void *symbolValue);
  };

} // End sys namespace
} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/ARMEHABI.h
//===--- ARMEHABI.h - ARM Exception Handling ABI ----------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the constants for the ARM unwind opcodes and exception
// handling table entry kinds.
//
// The enumerations and constants in this file reflect the ARM EHABI
// Specification as published by ARM.
//
// Exception Handling ABI for the ARM Architecture r2.09 - November 30, 2012
//
// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0038a/IHI0038A_ehabi.pdf
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ARMEHABI_H
#define LLVM_SUPPORT_ARMEHABI_H

namespace llvm {
namespace ARM {
namespace EHABI {
  /// ARM exception handling table entry kinds.
  /// The high bit of the first word of an exception table entry selects
  /// between a generic (pointer to personality routine) and a compact model.
  enum EHTEntryKind {
    EHT_GENERIC = 0x00,
    EHT_COMPACT = 0x80
  };

  enum {
    /// Special entry for the function never unwind
    EXIDX_CANTUNWIND = 0x1
  };

  /// ARM-defined frame unwinding opcodes.
  ///
  /// One- and two-byte opcodes are listed together; two-byte opcodes are
  /// encoded with their second byte folded into the constant (e.g. 0xb100),
  /// while single-byte opcodes hold only the leading byte (e.g. 0xb2, whose
  /// operand follows as a ULEB128).  "vsp" is the virtual stack pointer
  /// maintained by the personality routine during unwinding.
  enum UnwindOpcodes {
    // Format: 00xxxxxx
    // Purpose: vsp = vsp + ((x << 2) + 4)
    UNWIND_OPCODE_INC_VSP = 0x00,

    // Format: 01xxxxxx
    // Purpose: vsp = vsp - ((x << 2) + 4)
    UNWIND_OPCODE_DEC_VSP = 0x40,

    // Format: 10000000 00000000
    // Purpose: refuse to unwind
    // Note: this is the all-zero-mask case of POP_REG_MASK_R4 below, which is
    // why the two constants share the value 0x8000 (the spec reserves x == 0
    // to mean "refuse").
    UNWIND_OPCODE_REFUSE = 0x8000,

    // Format: 1000xxxx xxxxxxxx
    // Purpose: pop r[15:12], r[11:4]
    // Constraint: x != 0
    UNWIND_OPCODE_POP_REG_MASK_R4 = 0x8000,

    // Format: 1001xxxx
    // Purpose: vsp = r[x]
    // Constraint: x != 13 && x != 15
    UNWIND_OPCODE_SET_VSP = 0x90,

    // Format: 10100xxx
    // Purpose: pop r[(4+x):4]
    UNWIND_OPCODE_POP_REG_RANGE_R4 = 0xa0,

    // Format: 10101xxx
    // Purpose: pop r14, r[(4+x):4]
    UNWIND_OPCODE_POP_REG_RANGE_R4_R14 = 0xa8,

    // Format: 10110000
    // Purpose: finish
    UNWIND_OPCODE_FINISH = 0xb0,

    // Format: 10110001 0000xxxx
    // Purpose: pop r[3:0]
    // Constraint: x != 0
    UNWIND_OPCODE_POP_REG_MASK = 0xb100,

    // Format: 10110010 x(uleb128)
    // Purpose: vsp = vsp + ((x << 2) + 0x204)
    UNWIND_OPCODE_INC_VSP_ULEB128 = 0xb2,

    // Format: 10110011 xxxxyyyy
    // Purpose: pop d[(x+y):x]
    UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDX = 0xb300,

    // Format: 10111xxx
    // Purpose: pop d[(8+x):8]
    UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDX_D8 = 0xb8,

    // Format: 11000xxx
    // Purpose: pop wR[(10+x):10]
    UNWIND_OPCODE_POP_WIRELESS_MMX_REG_RANGE_WR10 = 0xc0,

    // Format: 11000110 xxxxyyyy
    // Purpose: pop wR[(x+y):x]
    UNWIND_OPCODE_POP_WIRELESS_MMX_REG_RANGE = 0xc600,

    // Format: 11000111 0000xxxx
    // Purpose: pop wCGR[3:0]
    // Constraint: x != 0
    UNWIND_OPCODE_POP_WIRELESS_MMX_REG_MASK = 0xc700,

    // Format: 11001000 xxxxyyyy
    // Purpose: pop d[(16+x+y):(16+x)]
    UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD_D16 = 0xc800,

    // Format: 11001001 xxxxyyyy
    // Purpose: pop d[(x+y):x]
    UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD = 0xc900,

    // Format: 11010xxx
    // Purpose: pop d[(8+x):8]
    UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD_D8 = 0xd0
  };

  /// ARM-defined Personality Routine Index
  enum PersonalityRoutineIndex {
    // To make the exception handling table become more compact, ARM defined
    // several personality routines in EHABI. There are 3 different
    // personality routines in ARM EHABI currently. It is possible to have 16
    // pre-defined personality routines at most.
    AEABI_UNWIND_CPP_PR0 = 0,
    AEABI_UNWIND_CPP_PR1 = 1,
    AEABI_UNWIND_CPP_PR2 = 2,

    NUM_PERSONALITY_INDEX
  };
}
}
}

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/LockFileManager.h
//===--- LockFileManager.h - File-level locking utility ---------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_LOCKFILEMANAGER_H
#define LLVM_SUPPORT_LOCKFILEMANAGER_H

#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include <system_error>
#include <utility> // for std::pair

namespace llvm {
/// \brief Class that manages the creation of a lock file to aid
/// implicit coordination between different processes.
///
/// The implicit coordination works by creating a ".lock" file alongside
/// the file that we're coordinating for, using the atomicity of the file
/// system to ensure that only a single process can create that ".lock" file.
/// When the lock file is removed, the owning process has finished the
/// operation.
class LockFileManager {
public:
  /// \brief Describes the state of a lock file.
  enum LockFileState {
    /// \brief The lock file has been created and is owned by this instance
    /// of the object.
    LFS_Owned,
    /// \brief The lock file already exists and is owned by some other
    /// instance.
    LFS_Shared,
    /// \brief An error occurred while trying to create or find the lock
    /// file.
    LFS_Error
  };

  /// \brief Describes the result of waiting for the owner to release the lock.
  enum WaitForUnlockResult {
    /// \brief The lock was released successfully.
    Res_Success,
    /// \brief Owner died while holding the lock.
    Res_OwnerDied,
    /// \brief Reached timeout while waiting for the owner to release the lock.
    Res_Timeout
  };

private:
  /// The file being coordinated on, and the names of the shared and
  /// per-process-unique lock files derived from it.
  SmallString<128> FileName;
  SmallString<128> LockFileName;
  SmallString<128> UniqueLockFileName;

  /// (hostname, PID) of the process currently holding the lock, if any.
  Optional<std::pair<std::string, int> > Owner;
  /// Deferred error from construction; reported via getState().
  Optional<std::error_code> Error;

  LockFileManager(const LockFileManager &) = delete;
  LockFileManager &operator=(const LockFileManager &) = delete;

  /// Parse \p LockFileName and return the (hostname, PID) recorded in it,
  /// or None if it cannot be read.
  static Optional<std::pair<std::string, int> >
  readLockFile(StringRef LockFileName);

  /// Return true if the process \p PID on \p Hostname still appears to be
  /// running (used to detect stale locks left by dead owners).
  static bool processStillExecuting(StringRef Hostname, int PID);

public:
  LockFileManager(StringRef FileName);
  ~LockFileManager();

  /// \brief Determine the state of the lock file.
  LockFileState getState() const;

  operator LockFileState() const { return getState(); }

  /// \brief For a shared lock, wait until the owner releases the lock.
  WaitForUnlockResult waitForUnlock();

  /// \brief Remove the lock file.  This may delete a different lock file than
  /// the one previously read if there is a race.
  std::error_code unsafeRemoveLockFile();
};

} // end namespace llvm

#endif // LLVM_SUPPORT_LOCKFILEMANAGER_H
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/MemoryObject.h
//===- MemoryObject.h - Abstract memory interface ---------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_MEMORYOBJECT_H
#define LLVM_SUPPORT_MEMORYOBJECT_H

#include "llvm/Support/DataTypes.h"

namespace llvm {

/// Interface to data which might be streamed. Streamability has 2 important
/// implications/restrictions. First, the data might not yet exist in memory
/// when the request is made. This just means that readByte/readBytes might have
/// to block or do some work to get it. More significantly, the exact size of
/// the object might not be known until it has all been fetched. This means that
/// to return the right result, getExtent must also wait for all the data to
/// arrive; therefore it should not be called on objects which are actually
/// streamed (this would defeat the purpose of streaming). Instead,
/// isValidAddress can be used to test addresses without knowing the exact size
/// of the stream. Finally, getPointer can be used instead of readBytes to avoid
/// extra copying.
class MemoryObject {
public:
  /// Out-of-line virtual destructor anchors the vtable (defined in the .cpp).
  virtual ~MemoryObject();

  /// Returns the size of the region in bytes.  (The region is contiguous, so
  /// the highest valid address of the region is getExtent() - 1).
  ///
  /// May block until all the data has arrived (see the class comment), so do
  /// not call this on truly streamed objects.
  ///
  /// @result      - The size of the region.
  virtual uint64_t getExtent() const = 0;

  /// Tries to read a contiguous range of bytes from the region, up to the end
  /// of the region.
  ///
  /// @param Buf      - A pointer to a buffer to be filled in.  Must be non-NULL
  ///                   and large enough to hold size bytes.
  /// @param Size     - The number of bytes to copy.
  /// @param Address  - The address of the first byte, in the same space as
  ///                   getBase().
  /// @result         - The number of bytes read.
  virtual uint64_t readBytes(uint8_t *Buf, uint64_t Size,
                             uint64_t Address) const = 0;

  /// Ensures that the requested data is in memory, and returns a pointer to it.
  /// More efficient than using readBytes if the data is already in memory. May
  /// block until (address - base + size) bytes have been read
  /// @param address - address of the byte, in the same space as getBase()
  /// @param size    - amount of data that must be available on return
  /// @result        - valid pointer to the requested data
  virtual const uint8_t *getPointer(uint64_t address, uint64_t size) const = 0;

  /// Returns true if the address is within the object (i.e. between base and
  /// base + extent - 1 inclusive). May block until (address - base) bytes have
  /// been read
  /// @param address - address of the byte, in the same space as getBase()
  /// @result        - true if the address may be read with readByte()
  virtual bool isValidAddress(uint64_t address) const = 0;
};

}

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/ErrorOr.h
//===- llvm/Support/ErrorOr.h - Error Smart Pointer -----------------------===//
//
//                     The LLVM Linker
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
///
/// Provides ErrorOr<T> smart pointer.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ERROROR_H
#define LLVM_SUPPORT_ERROROR_H

#include "llvm/ADT/PointerIntPair.h"
#include "llvm/Support/AlignOf.h"
#include <cassert>
#include <system_error>
#include <type_traits>

namespace llvm {
/// Move \p Val if (and only if) T can be constructed from an rvalue of V;
/// otherwise return an lvalue reference so the caller falls back to a copy.
/// The two overloads are distinguished purely by the enable_if condition.
template<class T, class V>
typename std::enable_if< std::is_constructible<T, V>::value
                       , typename std::remove_reference<V>::type>::type &&
 moveIfMoveConstructible(V &Val) {
  return std::move(Val);
}

template<class T, class V>
typename std::enable_if< !std::is_constructible<T, V>::value
                       , typename std::remove_reference<V>::type>::type &
moveIfMoveConstructible(V &Val) {
  return Val;
}

/// \brief Stores a reference that can be changed.
template <typename T>
class ReferenceStorage {
  T *Storage;

public:
  ReferenceStorage(T &Ref) : Storage(&Ref) {}

  operator T &() const { return *Storage; }
  T &get() const { return *Storage; }
};

/// \brief Represents either an error or a value T.
///
/// ErrorOr<T> is a pointer-like class that represents the result of an
/// operation. The result is either an error, or a value of type T. This is
/// designed to emulate the usage of returning a pointer where nullptr indicates
/// failure. However instead of just knowing that the operation failed, we also
/// have an error_code and optional user data that describes why it failed.
///
/// It is used like the following.
/// \code
///   ErrorOr<Buffer> getBuffer();
///
///   auto buffer = getBuffer();
///   if (error_code ec = buffer.getError())
///     return ec;
///   buffer->write("adena");
/// \endcode
///
///
/// Implicit conversion to bool returns true if there is a usable value. The
/// unary * and -> operators provide pointer like access to the value. Accessing
/// the value when there is an error has undefined behavior.
///
/// When T is a reference type the behaivor is slightly different. The reference
/// is held in a std::reference_wrapper<std::remove_reference<T>::type>, and
/// there is special handling to make operator -> work as if T was not a
/// reference.
///
/// T cannot be a rvalue reference.
template<class T>
class ErrorOr {
  template <class OtherT> friend class ErrorOr;
  static const bool isRef = std::is_reference<T>::value;
  typedef ReferenceStorage<typename std::remove_reference<T>::type> wrap;

public:
  // References are stored via a rebindable wrapper so they fit in the union.
  typedef typename std::conditional<isRef, wrap, T>::type storage_type;

private:
  typedef typename std::remove_reference<T>::type &reference;
  typedef const typename std::remove_reference<T>::type &const_reference;
  typedef typename std::remove_reference<T>::type *pointer;

public:
  /// Construct from an error-code enum (e.g. std::errc); only participates in
  /// overload resolution for types registered as error code/condition enums.
  template <class E>
  ErrorOr(E ErrorCode,
          typename std::enable_if<std::is_error_code_enum<E>::value ||
                                      std::is_error_condition_enum<E>::value,
                                  void *>::type = 0)
      : HasError(true) {
    new (getErrorStorage()) std::error_code(make_error_code(ErrorCode));
  }

  ErrorOr(std::error_code EC) : HasError(true) {
    new (getErrorStorage()) std::error_code(EC);
  }

  ErrorOr(T Val) : HasError(false) {
    // Moves Val into storage when storage_type is move-constructible from it,
    // copies otherwise.
    new (getStorage()) storage_type(moveIfMoveConstructible<storage_type>(Val));
  }

  ErrorOr(const ErrorOr &Other) {
    copyConstruct(Other);
  }

  // Implicit converting copy: allowed when OtherT converts to T.
  template <class OtherT>
  ErrorOr(
      const ErrorOr<OtherT> &Other,
      typename std::enable_if<std::is_convertible<OtherT, T>::value>::type * =
          nullptr) {
    copyConstruct(Other);
  }

  // Explicit converting copy: for types constructible-but-not-convertible.
  template <class OtherT>
  explicit ErrorOr(
      const ErrorOr<OtherT> &Other,
      typename std::enable_if<
          !std::is_convertible<OtherT, const T &>::value>::type * = nullptr) {
    copyConstruct(Other);
  }

  ErrorOr(ErrorOr &&Other) {
    moveConstruct(std::move(Other));
  }

  template <class OtherT>
  ErrorOr(
      ErrorOr<OtherT> &&Other,
      typename std::enable_if<std::is_convertible<OtherT, T>::value>::type * =
          nullptr) {
    moveConstruct(std::move(Other));
  }

  // This might eventually need SFINAE but it's more complex than is_convertible
  // & I'm too lazy to write it right now.
  template <class OtherT>
  explicit ErrorOr(
      ErrorOr<OtherT> &&Other,
      typename std::enable_if<!std::is_convertible<OtherT, T>::value>::type * =
          nullptr) {
    moveConstruct(std::move(Other));
  }

  ErrorOr &operator=(const ErrorOr &Other) {
    copyAssign(Other);
    return *this;
  }

  ErrorOr &operator=(ErrorOr &&Other) {
    moveAssign(std::move(Other));
    return *this;
  }

  ~ErrorOr() {
    // Only the value needs explicit destruction; std::error_code is trivially
    // destructible.
    if (!HasError)
      getStorage()->~storage_type();
  }

  /// \brief Return false if there is an error.
  explicit operator bool() const {
    return !HasError;
  }

  reference get() { return *getStorage(); }
  const_reference get() const { return const_cast<ErrorOr<T> *>(this)->get(); }

  std::error_code getError() const {
    return HasError ? *getErrorStorage() : std::error_code();
  }

  pointer operator ->() {
    return toPointer(getStorage());
  }

  reference operator *() {
    return *getStorage();
  }

private:
  template <class OtherT>
  void copyConstruct(const ErrorOr<OtherT> &Other) {
    if (!Other.HasError) {
      // Get the other value.
      HasError = false;
      new (getStorage()) storage_type(*Other.getStorage());
    } else {
      // Get other's error.
      HasError = true;
      new (getErrorStorage()) std::error_code(Other.getError());
    }
  }

  // Self-assignment check that also works across ErrorOr<T>/ErrorOr<OtherT>:
  // only same-type arguments can possibly be the same object.
  template <class T1>
  static bool compareThisIfSameType(const T1 &a, const T1 &b) {
    return &a == &b;
  }

  template <class T1, class T2>
  static bool compareThisIfSameType(const T1 &a, const T2 &b) {
    return false;
  }

  template <class OtherT>
  void copyAssign(const ErrorOr<OtherT> &Other) {
    if (compareThisIfSameType(*this, Other))
      return;

    // Destroy-and-placement-new: switches the active union member safely.
    this->~ErrorOr();
    new (this) ErrorOr(Other);
  }

  template <class OtherT>
  void moveConstruct(ErrorOr<OtherT> &&Other) {
    if (!Other.HasError) {
      // Get the other value.
      HasError = false;
      new (getStorage()) storage_type(std::move(*Other.getStorage()));
    } else {
      // Get other's error.
      HasError = true;
      new (getErrorStorage()) std::error_code(Other.getError());
    }
  }

  template <class OtherT>
  void moveAssign(ErrorOr<OtherT> &&Other) {
    if (compareThisIfSameType(*this, Other))
      return;

    this->~ErrorOr();
    new (this) ErrorOr(std::move(Other));
  }

  // operator-> support: unwrap ReferenceStorage when T is a reference type.
  pointer toPointer(pointer Val) {
    return Val;
  }

  pointer toPointer(wrap *Val) {
    return &Val->get();
  }

  storage_type *getStorage() {
    assert(!HasError && "Cannot get value when an error exists!");
    return reinterpret_cast<storage_type*>(TStorage.buffer);
  }

  const storage_type *getStorage() const {
    assert(!HasError && "Cannot get value when an error exists!");
    return reinterpret_cast<const storage_type*>(TStorage.buffer);
  }

  std::error_code *getErrorStorage() {
    assert(HasError && "Cannot get error when a value exists!");
    return reinterpret_cast<std::error_code *>(ErrorStorage.buffer);
  }

  const std::error_code *getErrorStorage() const {
    return const_cast<ErrorOr<T> *>(this)->getErrorStorage();
  }

  // Raw aligned storage; HasError selects which member is live.
  union {
    AlignedCharArrayUnion<storage_type> TStorage;
    AlignedCharArrayUnion<std::error_code> ErrorStorage;
  };
  bool HasError : 1;
};

/// Compare an ErrorOr's error against an error-code enum value.
template <class T, class E>
typename std::enable_if<std::is_error_code_enum<E>::value ||
                            std::is_error_condition_enum<E>::value,
                        bool>::type
operator==(const ErrorOr<T> &Err, E Code) {
  return Err.getError() == Code;
}
} // end namespace llvm

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/SourceMgr.h
//===- SourceMgr.h - Manager for Source Buffers & Diagnostics ---*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the SMDiagnostic and SourceMgr classes.  This
// provides a simple substrate for diagnostics, #include handling, and other low
// level things for simple parsers.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_SOURCEMGR_H
#define LLVM_SUPPORT_SOURCEMGR_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SMLoc.h"
#include <string>

namespace llvm {
  class SourceMgr;
  class SMDiagnostic;
  class SMFixIt;
  class Twine;
  class raw_ostream;

/// This owns the files read by a parser, handles include stacks,
/// and handles diagnostic wrangling.
class SourceMgr {
public:
  enum DiagKind {
    DK_Error,
    DK_Warning,
    DK_Note
  };

  /// Clients that want to handle their own diagnostics in a custom way can
  /// register a function pointer+context as a diagnostic handler.
  /// It gets called each time PrintMessage is invoked.
  typedef void (*DiagHandlerTy)(const SMDiagnostic &, void *Context);
private:
  struct SrcBuffer {
    /// The memory buffer for the file.
    std::unique_ptr<MemoryBuffer> Buffer;

    /// This is the location of the parent include, or null if at the top level.
    SMLoc IncludeLoc;

    SrcBuffer() {}

    // Move-only: MemoryBuffer ownership is unique.
    SrcBuffer(SrcBuffer &&O)
        : Buffer(std::move(O.Buffer)), IncludeLoc(O.IncludeLoc) {}
  };

  /// This is all of the buffers that we are reading from.
  std::vector<SrcBuffer> Buffers;

  // This is the list of directories we should search for include files in.
  std::vector<std::string> IncludeDirectories;

  /// This is a cache for line number queries, its implementation is really
  /// private to SourceMgr.cpp.
  mutable void *LineNoCache;

  DiagHandlerTy DiagHandler;
  void *DiagContext;

  // Buffer IDs are 1-based; 0 is reserved as "not found".
  bool isValidBufferID(unsigned i) const { return i && i <= Buffers.size(); }

  SourceMgr(const SourceMgr&) = delete;
  void operator=(const SourceMgr&) = delete;
public:
  SourceMgr()
    : LineNoCache(nullptr), DiagHandler(nullptr), DiagContext(nullptr) {}
  ~SourceMgr();

  void Reset(); // HLSL Change - add a Reset version to clean up explicitly

  void setIncludeDirs(const std::vector<std::string> &Dirs) {
    IncludeDirectories = Dirs;
  }

  /// Specify a diagnostic handler to be invoked every time PrintMessage is
  /// called. \p Ctx is passed into the handler when it is invoked.
  void setDiagHandler(DiagHandlerTy DH, void *Ctx = nullptr) {
    DiagHandler = DH;
    DiagContext = Ctx;
  }

  DiagHandlerTy getDiagHandler() const { return DiagHandler; }
  void *getDiagContext() const { return DiagContext; }

  const SrcBuffer &getBufferInfo(unsigned i) const {
    assert(isValidBufferID(i));
    return Buffers[i - 1];
  }

  const MemoryBuffer *getMemoryBuffer(unsigned i) const {
    assert(isValidBufferID(i));
    return Buffers[i - 1].Buffer.get();
  }

  unsigned getNumBuffers() const {
    return Buffers.size();
  }

  // The first buffer added (ID 1) is always considered the main file.
  unsigned getMainFileID() const {
    assert(getNumBuffers());
    return 1;
  }

  SMLoc getParentIncludeLoc(unsigned i) const {
    assert(isValidBufferID(i));
    return Buffers[i - 1].IncludeLoc;
  }

  /// Add a new source buffer to this source manager. This takes ownership of
  /// the memory buffer.
  /// \returns the 1-based ID of the newly added buffer.
  unsigned AddNewSourceBuffer(std::unique_ptr<MemoryBuffer> F,
                              SMLoc IncludeLoc) {
    SrcBuffer NB;
    NB.Buffer = std::move(F);
    NB.IncludeLoc = IncludeLoc;
    Buffers.push_back(std::move(NB));
    return Buffers.size();
  }

  /// Search for a file with the specified name in the current directory or in
  /// one of the IncludeDirs.
  ///
  /// If no file is found, this returns 0, otherwise it returns the buffer ID
  /// of the stacked file. The full path to the included file can be found in
  /// \p IncludedFile.
  unsigned AddIncludeFile(const std::string &Filename, SMLoc IncludeLoc,
                          std::string &IncludedFile);

  /// Return the ID of the buffer containing the specified location.
  ///
  /// 0 is returned if the buffer is not found.
  unsigned FindBufferContainingLoc(SMLoc Loc) const;

  /// Find the line number for the specified location in the specified file.
  /// This is not a fast method.
  unsigned FindLineNumber(SMLoc Loc, unsigned BufferID = 0) const {
    return getLineAndColumn(Loc, BufferID).first;
  }

  /// Find the line and column number for the specified location in the
  /// specified file. This is not a fast method.
  std::pair<unsigned, unsigned> getLineAndColumn(SMLoc Loc,
                                                 unsigned BufferID = 0) const;

  /// Emit a message about the specified location with the specified string.
  ///
  /// \param ShowColors Display colored messages if output is a terminal and
  /// the default error handler is used.
  void PrintMessage(raw_ostream &OS, SMLoc Loc, DiagKind Kind,
                    const Twine &Msg,
                    ArrayRef<SMRange> Ranges = None,
                    ArrayRef<SMFixIt> FixIts = None,
                    bool ShowColors = true) const;

  /// Emits a diagnostic to llvm::errs().
  void PrintMessage(SMLoc Loc, DiagKind Kind, const Twine &Msg,
                    ArrayRef<SMRange> Ranges = None,
                    ArrayRef<SMFixIt> FixIts = None,
                    bool ShowColors = true) const;

  /// Emits a manually-constructed diagnostic to the given output stream.
  ///
  /// \param ShowColors Display colored messages if output is a terminal and
  /// the default error handler is used.
  void PrintMessage(raw_ostream &OS, const SMDiagnostic &Diagnostic,
                    bool ShowColors = true) const;

  /// Return an SMDiagnostic at the specified location with the specified
  /// string.
  ///
  /// \param Msg If non-null, the kind of message (e.g., "error") which is
  /// prefixed to the message.
  SMDiagnostic GetMessage(SMLoc Loc, DiagKind Kind, const Twine &Msg,
                          ArrayRef<SMRange> Ranges = None,
                          ArrayRef<SMFixIt> FixIts = None) const;

  /// Prints the names of included files and the line of the file they were
  /// included from. A diagnostic handler can use this before printing its
  /// custom formatted message.
  ///
  /// \param IncludeLoc The location of the include.
  /// \param OS the raw_ostream to print on.
  void PrintIncludeStack(SMLoc IncludeLoc, raw_ostream &OS) const;
};


/// Represents a single fixit, a replacement of one range of text with another.
class SMFixIt {
  SMRange Range;

  std::string Text;

public:
  // FIXME: Twine.str() is not very efficient.
  SMFixIt(SMLoc Loc, const Twine &Insertion)
    : Range(Loc, Loc), Text(Insertion.str()) {
    assert(Loc.isValid());
  }

  // FIXME: Twine.str() is not very efficient.
  SMFixIt(SMRange R, const Twine &Replacement)
    : Range(R), Text(Replacement.str()) {
    assert(R.isValid());
  }

  StringRef getText() const { return Text; }
  SMRange getRange() const { return Range; }

  // Ordering for sorting fixits by position: start, then end, then text.
  bool operator<(const SMFixIt &Other) const {
    if (Range.Start.getPointer() != Other.Range.Start.getPointer())
      return Range.Start.getPointer() < Other.Range.Start.getPointer();
    if (Range.End.getPointer() != Other.Range.End.getPointer())
      return Range.End.getPointer() < Other.Range.End.getPointer();
    return Text < Other.Text;
  }
};


/// Instances of this class encapsulate one diagnostic report, allowing
/// printing to a raw_ostream as a caret diagnostic.
class SMDiagnostic {
  const SourceMgr *SM;
  SMLoc Loc;
  std::string Filename;
  // Line/column are -1 for diagnostics without a source location.
  int LineNo, ColumnNo;
  SourceMgr::DiagKind Kind;
  std::string Message, LineContents;
  // Column ranges within LineContents to underline.
  std::vector<std::pair<unsigned, unsigned> > Ranges;
  SmallVector<SMFixIt, 4> FixIts;

public:
  // Null diagnostic.
  SMDiagnostic()
    : SM(nullptr), LineNo(0), ColumnNo(0), Kind(SourceMgr::DK_Error) {}
  // Diagnostic with no location (e.g. file not found, command line arg error).
  SMDiagnostic(StringRef filename, SourceMgr::DiagKind Knd, StringRef Msg)
    : SM(nullptr), Filename(filename), LineNo(-1), ColumnNo(-1), Kind(Knd),
      Message(Msg) {}

  // Diagnostic with a location.
  SMDiagnostic(const SourceMgr &sm, SMLoc L, StringRef FN,
               int Line, int Col, SourceMgr::DiagKind Kind,
               StringRef Msg, StringRef LineStr,
               ArrayRef<std::pair<unsigned,unsigned> > Ranges,
               ArrayRef<SMFixIt> FixIts = None);

  const SourceMgr *getSourceMgr() const { return SM; }
  SMLoc getLoc() const { return Loc; }
  StringRef getFilename() const { return Filename; }
  int getLineNo() const { return LineNo; }
  int getColumnNo() const { return ColumnNo; }
  SourceMgr::DiagKind getKind() const { return Kind; }
  StringRef getMessage() const { return Message; }
  StringRef getLineContents() const { return LineContents; }
  ArrayRef<std::pair<unsigned, unsigned> > getRanges() const {
    return Ranges;
  }

  void addFixIt(const SMFixIt &Hint) {
    FixIts.push_back(Hint);
  }

  ArrayRef<SMFixIt> getFixIts() const {
    return FixIts;
  }

  void print(const char *ProgName, raw_ostream &S,
             bool ShowColors = true, bool ShowKindLabel = true) const;
};

}  // end llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/GraphWriter.h
//===-- llvm/Support/GraphWriter.h - Write graph to a .dot file -*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a simple interface that can be used to print out generic
// LLVM graphs to ".dot" files.  "dot" is a tool that is part of the AT&T
// graphviz package (http://www.research.att.com/sw/tools/graphviz/) which can
// be used to turn the files output by this interface into a variety of
// different graphics formats.
//
// Graphs do not need to implement any interface past what is already required
// by the GraphTraits template, but they can choose to implement specializations
// of the DOTGraphTraits template if they want to customize the graphs output in
// any way.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_GRAPHWRITER_H
#define LLVM_SUPPORT_GRAPHWRITER_H

#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/raw_ostream.h"
#include <vector>

namespace llvm {

namespace DOT {  // Private functions...

std::string EscapeString(const std::string &Label);

/// \brief Get a color string for this node number. Simply round-robin selects
/// from a reasonable number of colors.
StringRef getColorString(unsigned NodeNumber);

}

namespace GraphProgram {

enum Name {
  DOT,
  FDP,
  NEATO,
  TWOPI,
  CIRCO
};

}

bool DisplayGraph(StringRef Filename, bool wait = true,
                  GraphProgram::Name program = GraphProgram::DOT);

/// Emits a graph as a Graphviz "record"-shaped DOT file.  Nodes are emitted
/// with at most 64 distinct edge-source ports; edges beyond that share a
/// single "truncated" port (port index 64) — see getEdgeSourceLabels,
/// writeNode, writeEdge and emitEdge, which all cooperate on that limit.
template<typename GraphType>
class GraphWriter {
  raw_ostream &O;
  const GraphType &G;

  typedef DOTGraphTraits<GraphType>           DOTTraits;
  typedef GraphTraits<GraphType>              GTraits;
  typedef typename GTraits::NodeType          NodeType;
  typedef typename GTraits::nodes_iterator    node_iterator;
  typedef typename GTraits::ChildIteratorType child_iterator;
  DOTTraits DTraits;

  // Writes the edge labels of the node to O and returns true if there are any
  // edge labels not equal to the empty string "".
  bool getEdgeSourceLabels(raw_ostream &O, NodeType *Node) {
    child_iterator EI = GTraits::child_begin(Node);
    child_iterator EE = GTraits::child_end(Node);
    bool hasEdgeSourceLabels = false;

    // Only the first 64 edges get their own source port (<s0>..<s63>).
    for (unsigned i = 0; EI != EE && i != 64; ++EI, ++i) {
      std::string label = DTraits.getEdgeSourceLabel(Node, EI);

      if (label.empty())
        continue;

      hasEdgeSourceLabels = true;

      if (i)
        O << "|";

      O << "<s" << i << ">" << DOT::EscapeString(label);
    }

    if (EI != EE && hasEdgeSourceLabels)
      O << "|<s64>truncated...";

    return hasEdgeSourceLabels;
  }

public:
  GraphWriter(raw_ostream &o, const GraphType &g, bool SN) : O(o), G(g) {
    DTraits = DOTTraits(SN);
  }

  void writeGraph(const std::string &Title = "") {
    // Output the header for the graph...
    writeHeader(Title);

    // Emit all of the nodes in the graph...
    writeNodes();

    // Output any customizations on the graph
    DOTGraphTraits<GraphType>::addCustomGraphFeatures(G, *this);

    // Output the end of the graph
    writeFooter();
  }

  void writeHeader(const std::string &Title) {
    std::string GraphName = DTraits.getGraphName(G);

    // Title takes precedence over the traits-provided graph name.
    if (!Title.empty())
      O << "digraph \"" << DOT::EscapeString(Title) << "\" {\n";
    else if (!GraphName.empty())
      O << "digraph \"" << DOT::EscapeString(GraphName) << "\" {\n";
    else
      O << "digraph unnamed {\n";

    if (DTraits.renderGraphFromBottomUp())
      O << "\trankdir=\"BT\";\n";

    if (!Title.empty())
      O << "\tlabel=\"" << DOT::EscapeString(Title) << "\";\n";
    else if (!GraphName.empty())
      O << "\tlabel=\"" << DOT::EscapeString(GraphName) << "\";\n";
    O << DTraits.getGraphProperties(G);
    O << "\n";
  }

  void writeFooter() {
    // Finish off the graph
    O << "}\n";
  }

  void writeNodes() {
    // Loop over the graph, printing it out...
    for (node_iterator I = GTraits::nodes_begin(G), E = GTraits::nodes_end(G);
         I != E; ++I)
      if (!isNodeHidden(*I))
        writeNode(*I);
  }

  bool isNodeHidden(NodeType &Node) {
    return isNodeHidden(&Node);
  }

  // Overload for graphs whose nodes_iterator dereferences to NodeType*const*.
  bool isNodeHidden(NodeType *const *Node) {
    return isNodeHidden(*Node);
  }

  bool isNodeHidden(NodeType *Node) {
    return DTraits.isNodeHidden(Node);
  }

  void writeNode(NodeType& Node) {
    writeNode(&Node);
  }

  // Overload for graphs whose nodes_iterator dereferences to NodeType*const*.
  void writeNode(NodeType *const *Node) {
    writeNode(*Node);
  }

  void writeNode(NodeType *Node) {
    std::string NodeAttributes = DTraits.getNodeAttributes(Node, G);

    // The node's address doubles as its unique DOT identifier.
    O << "\tNode" << static_cast<const void*>(Node) << " [shape=record,";
    if (!NodeAttributes.empty()) O << NodeAttributes << ",";
    O << "label=\"{";

    if (!DTraits.renderGraphFromBottomUp()) {
      O << DOT::EscapeString(DTraits.getNodeLabel(Node, G));

      // If we should include the address of the node in the label, do so now.
      if (DTraits.hasNodeAddressLabel(Node, G))
        O << "|" << static_cast<const void*>(Node);

      std::string NodeDesc = DTraits.getNodeDescription(Node, G);
      if (!NodeDesc.empty())
        O << "|" << DOT::EscapeString(NodeDesc);
    }

    std::string edgeSourceLabels;
    raw_string_ostream EdgeSourceLabels(edgeSourceLabels);
    bool hasEdgeSourceLabels = getEdgeSourceLabels(EdgeSourceLabels, Node);

    if (hasEdgeSourceLabels) {
      if (!DTraits.renderGraphFromBottomUp()) O << "|";

      O << "{" << EdgeSourceLabels.str() << "}";

      if (DTraits.renderGraphFromBottomUp()) O << "|";
    }

    if (DTraits.renderGraphFromBottomUp()) {
      O << DOT::EscapeString(DTraits.getNodeLabel(Node, G));

      // If we should include the address of the node in the label, do so now.
      if (DTraits.hasNodeAddressLabel(Node, G))
        O << "|" << static_cast<const void*>(Node);

      std::string NodeDesc = DTraits.getNodeDescription(Node, G);
      if (!NodeDesc.empty())
        O << "|" << DOT::EscapeString(NodeDesc);
    }

    if (DTraits.hasEdgeDestLabels()) {
      O << "|{";

      // Destination ports (<d0>..<d63>) are capped at 64, same as sources.
      unsigned i = 0, e = DTraits.numEdgeDestLabels(Node);
      for (; i != e && i != 64; ++i) {
        if (i) O << "|";
        O << "<d" << i << ">"
          << DOT::EscapeString(DTraits.getEdgeDestLabel(Node, i));
      }

      if (i != e)
        O << "|<d64>truncated...";
      O << "}";
    }

    O << "}\"];\n";   // Finish printing the "node" line

    // Output all of the edges now
    child_iterator EI = GTraits::child_begin(Node);
    child_iterator EE = GTraits::child_end(Node);
    for (unsigned i = 0; EI != EE && i != 64; ++EI, ++i)
      if (!DTraits.isNodeHidden(*EI))
        writeEdge(Node, i, EI);
    // Remaining edges all leave from the shared "truncated" port 64.
    for (; EI != EE; ++EI)
      if (!DTraits.isNodeHidden(*EI))
        writeEdge(Node, 64, EI);
  }

  void writeEdge(NodeType *Node, unsigned edgeidx, child_iterator EI) {
    if (NodeType *TargetNode = *EI) {
      int DestPort = -1;
      if (DTraits.edgeTargetsEdgeSource(Node, EI)) {
        child_iterator TargetIt = DTraits.getEdgeTarget(Node, EI);

        // Figure out which edge this targets...
        unsigned Offset =
          (unsigned)std::distance(GTraits::child_begin(TargetNode), TargetIt);
        DestPort = static_cast<int>(Offset);
      }

      // -1 (as unsigned here, narrowed back to int in emitEdge) means
      // "no source port": edges with no label attach to the whole node.
      if (DTraits.getEdgeSourceLabel(Node, EI).empty())
        edgeidx = -1;

      emitEdge(static_cast<const void*>(Node), edgeidx,
               static_cast<const void*>(TargetNode), DestPort,
               DTraits.getEdgeAttributes(Node, EI, G));
    }
  }

  /// emitSimpleNode - Outputs a simple (non-record) node
  void emitSimpleNode(const void *ID, const std::string &Attr,
                   const std::string &Label, unsigned NumEdgeSources = 0,
                   const std::vector<std::string> *EdgeSourceLabels = nullptr) {
    O << "\tNode" << ID << "[ ";
    if (!Attr.empty())
      O << Attr << ",";
    O << " label =\"";
    if (NumEdgeSources) O << "{";
    O << DOT::EscapeString(Label);
    if (NumEdgeSources) {
      O << "|{";

      for (unsigned i = 0; i != NumEdgeSources; ++i) {
        if (i) O << "|";
        O << "<s" << i << ">";
        if (EdgeSourceLabels) O << DOT::EscapeString((*EdgeSourceLabels)[i]);
      }
      O << "}}";
    }
    O << "\"];\n";
  }

  /// emitEdge - Output an edge from a simple node into the graph...
  void emitEdge(const void *SrcNodeID, int SrcNodePort,
                const void *DestNodeID, int DestNodePort,
                const std::string &Attrs) {
    if (SrcNodePort  > 64) return;             // Eminating from truncated part?
    if (DestNodePort > 64) DestNodePort = 64;  // Targeting the truncated part?

    O << "\tNode" << SrcNodeID;
    if (SrcNodePort >= 0)
      O << ":s" << SrcNodePort;
    O << " -> Node" << DestNodeID;
    if (DestNodePort >= 0 && DTraits.hasEdgeDestLabels())
      O << ":d" << DestNodePort;

    if (!Attrs.empty())
      O << "[" << Attrs << "]";
    O << ";\n";
  }

  /// getOStream - Get the raw output stream into the graph file. Useful to
  /// write fancy things using addCustomGraphFeatures().
  raw_ostream &getOStream() {
    return O;
  }
};

template<typename GraphType>
raw_ostream &WriteGraph(raw_ostream &O, const GraphType &G,
                        bool ShortNames = false,
                        const Twine &Title = "") {
  // Start the graph emission process...
  GraphWriter<GraphType> W(O, G, ShortNames);

  // Emit the graph.
  W.writeGraph(Title.str());

  return O;
}

std::string createGraphFilename(const Twine &Name, int &FD);

/// Writes graph \p G into a temporary .dot file and returns the filename,
/// or the empty string on failure.
template <typename GraphType>
std::string WriteGraph(const GraphType &G, const Twine &Name,
                       bool ShortNames = false, const Twine &Title = "") {
  int FD;
  // Windows can't always handle long paths, so limit the length of the name.
  std::string N = Name.str();
  N = N.substr(0, std::min<std::size_t>(N.size(), 140));
  std::string Filename = createGraphFilename(N, FD);
  // NOTE(review): the stream is constructed before FD is checked; looks like
  // raw_fd_ostream tolerates FD == -1 here — confirm before reordering.
  raw_fd_ostream O(FD, /*shouldClose=*/ true);

  if (FD == -1) {
    errs() << "error opening file '" << Filename << "' for writing!\n";
    return "";
  }

  llvm::WriteGraph(O, G, ShortNames, Title);
  errs() << " done. \n";

  return Filename;
}

/// ViewGraph - Emit a dot graph, run 'dot', run gv on the postscript file,
/// then cleanup.  For use from the debugger.
///
template<typename GraphType>
void ViewGraph(const GraphType &G, const Twine &Name,
               bool ShortNames = false, const Twine &Title = "",
               GraphProgram::Name Program = GraphProgram::DOT) {
  std::string Filename = llvm::WriteGraph(G, Name, ShortNames, Title);

  if (Filename.empty())
    return;

  DisplayGraph(Filename, false, Program);
}

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/COM.h
//===- llvm/Support/COM.h ---------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// \file /// /// Provides a library for accessing COM functionality of the Host OS. /// //===----------------------------------------------------------------------===// #ifndef LLVM_SUPPORT_COM_H #define LLVM_SUPPORT_COM_H namespace llvm { namespace sys { enum class COMThreadingMode { SingleThreaded, MultiThreaded }; class InitializeCOMRAII { public: explicit InitializeCOMRAII(COMThreadingMode Threading, bool SpeedOverMemory = false); ~InitializeCOMRAII(); private: InitializeCOMRAII(const InitializeCOMRAII &) = delete; void operator=(const InitializeCOMRAII &) = delete; }; } } #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/CodeGen.h
//===-- llvm/Support/CodeGen.h - CodeGen Concepts ---------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file define some types which define code generation concepts. For // example, relocation model. // //===----------------------------------------------------------------------===// #ifndef LLVM_SUPPORT_CODEGEN_H #define LLVM_SUPPORT_CODEGEN_H #include "llvm-c/TargetMachine.h" #include "llvm/Support/ErrorHandling.h" namespace llvm { // Relocation model types. namespace Reloc { enum Model { Default, Static, PIC_, DynamicNoPIC }; } // Code model types. namespace CodeModel { enum Model { Default, JITDefault, Small, Kernel, Medium, Large }; } namespace PICLevel { enum Level { Default=0, Small=1, Large=2 }; } // TLS models. namespace TLSModel { enum Model { GeneralDynamic, LocalDynamic, InitialExec, LocalExec }; } // Code generation optimization level. namespace CodeGenOpt { enum Level { None, // -O0 Less, // -O1 Default, // -O2, -Os Aggressive // -O3 }; } // Create wrappers for C Binding types (see CBindingWrapping.h). 
/// Convert an LLVMCodeModel value from the C API into the C++
/// CodeModel::Model enum.
/// Out-of-range values arriving from C callers fall back to
/// CodeModel::Default rather than asserting, since the input is external
/// and not trusted.
inline CodeModel::Model unwrap(LLVMCodeModel Model) {
  switch (Model) {
    case LLVMCodeModelDefault:
      return CodeModel::Default;
    case LLVMCodeModelJITDefault:
      return CodeModel::JITDefault;
    case LLVMCodeModelSmall:
      return CodeModel::Small;
    case LLVMCodeModelKernel:
      return CodeModel::Kernel;
    case LLVMCodeModelMedium:
      return CodeModel::Medium;
    case LLVMCodeModelLarge:
      return CodeModel::Large;
  }
  // Defensive fallback for invalid enum values passed through the C API.
  return CodeModel::Default;
}

/// Convert the C++ CodeModel::Model enum into the C API's LLVMCodeModel.
/// The input originates inside LLVM, so an unknown value is a programming
/// error and hits llvm_unreachable (unlike unwrap(), which tolerates it).
inline LLVMCodeModel wrap(CodeModel::Model Model) {
  switch (Model) {
    case CodeModel::Default:
      return LLVMCodeModelDefault;
    case CodeModel::JITDefault:
      return LLVMCodeModelJITDefault;
    case CodeModel::Small:
      return LLVMCodeModelSmall;
    case CodeModel::Kernel:
      return LLVMCodeModelKernel;
    case CodeModel::Medium:
      return LLVMCodeModelMedium;
    case CodeModel::Large:
      return LLVMCodeModelLarge;
  }
  llvm_unreachable("Bad CodeModel!");
}
} // end llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/Atomic.h
//===- llvm/Support/Atomic.h - Atomic Operations -----------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file declares the llvm::sys atomic operations. // //===----------------------------------------------------------------------===// #ifndef LLVM_SUPPORT_ATOMIC_H #define LLVM_SUPPORT_ATOMIC_H #include "llvm/Support/DataTypes.h" namespace llvm { namespace sys { void MemoryFence(); #ifdef _MSC_VER typedef long cas_flag; #else typedef uint32_t cas_flag; #endif cas_flag CompareAndSwap(volatile cas_flag* ptr, cas_flag new_value, cas_flag old_value); cas_flag AtomicIncrement(volatile cas_flag* ptr); cas_flag AtomicDecrement(volatile cas_flag* ptr); cas_flag AtomicAdd(volatile cas_flag* ptr, cas_flag val); cas_flag AtomicMul(volatile cas_flag* ptr, cas_flag val); cas_flag AtomicDiv(volatile cas_flag* ptr, cas_flag val); } } #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/Program.h
//===- llvm/Support/Program.h ------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the llvm::sys::Program class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_PROGRAM_H
#define LLVM_SUPPORT_PROGRAM_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/ErrorOr.h"
#include <system_error>

namespace llvm {
class StringRef;

namespace sys {

  /// This is the OS-specific separator for PATH-like environment variables:
  /// a colon on Unix or a semicolon on Windows.
#if defined(LLVM_ON_UNIX)
  const char EnvPathSeparator = ':';
#elif defined (LLVM_ON_WIN32)
  const char EnvPathSeparator = ';';
#endif

/// @brief This struct encapsulates information about a process.
struct ProcessInfo {
#if defined(LLVM_ON_UNIX)
  typedef pid_t ProcessId;
#elif defined(LLVM_ON_WIN32)
  typedef unsigned long ProcessId; // Must match the type of DWORD on Windows.
  typedef void * HANDLE; // Must match the type of HANDLE on Windows.
  /// The handle to the process (available on Windows only).
  HANDLE ProcessHandle;
#else
#error "ProcessInfo is not defined for this platform!"
#endif

  // Sentinel: no process. Pid 0 is never a valid child on either platform.
  enum : ProcessId { InvalidPid = 0 };

  /// The process identifier.
  ProcessId Pid;

  /// The return code, set after execution.
  int ReturnCode;

  ProcessInfo();
};

  /// \brief Find the first executable file \p Name in \p Paths.
  ///
  /// This does not perform hashing as a shell would but instead stats each
  /// PATH entry individually so should generally be avoided. Core LLVM
  /// library functions and options should instead require fully specified
  /// paths.
  ///
  /// \param Name name of the executable to find. If it contains any system
  ///   slashes, it will be returned as is.
  /// \param Paths optional list of paths to search for \p Name. If empty it
  ///   will use the system PATH environment instead.
  /// \returns The fully qualified path to the first \p Name in \p Paths if it
  ///   exists. \p Name if \p Name has slashes in it. Otherwise an error.
  ErrorOr<std::string>
  findProgramByName(StringRef Name,
                    ArrayRef<StringRef> Paths = ArrayRef<StringRef>());

  // These functions change the specified standard stream (stdin or stdout)
  // to binary mode. They return errc::success if the specified stream was
  // changed. Otherwise a platform dependent error is returned.
  std::error_code ChangeStdinToBinary();
  std::error_code ChangeStdoutToBinary();

  /// This function executes the program using the arguments provided. The
  /// invoked program will inherit the stdin, stdout, and stderr file
  /// descriptors, the environment and other configuration settings of the
  /// invoking program.
  /// This function waits for the program to finish, so should be avoided in
  /// library functions that aren't expected to block. Consider using
  /// ExecuteNoWait() instead.
  /// @returns an integer result code indicating the status of the program.
  /// A zero or positive value indicates the result code of the program.
  /// -1 indicates failure to execute
  /// -2 indicates a crash during execution or timeout
  int ExecuteAndWait(
      StringRef Program, ///< Path of the program to be executed. It is
      ///< presumed this is the result of the findProgramByName method.
      const char **args, ///< A vector of strings that are passed to the
      ///< program. The first element should be the name of the program.
      ///< The list *must* be terminated by a null char* entry.
      const char **env = nullptr, ///< An optional vector of strings to use
      ///< for the program's environment. If not provided, the current
      ///< program's environment will be used.
      const StringRef **redirects = nullptr, ///< An optional array of
      ///< pointers to paths. If the array is null, no redirection is done.
      ///< The array should have a size of at least three. The inferior
      ///< process's stdin(0), stdout(1), and stderr(2) will be redirected to
      ///< the corresponding paths. When an empty path is passed in, the
      ///< corresponding file descriptor will be disconnected (ie,
      ///< /dev/null'd) in a portable way.
      unsigned secondsToWait = 0, ///< If non-zero, this specifies the amount
      ///< of time to wait for the child process to exit. If the time
      ///< expires, the child is killed and this call returns. If zero, this
      ///< function will wait until the child finishes or forever if it
      ///< doesn't.
      unsigned memoryLimit = 0, ///< If non-zero, this specifies max. amount
      ///< of memory can be allocated by process. If memory usage will be
      ///< higher limit, the child is killed and this call returns. If zero
      ///< - no memory limit.
      std::string *ErrMsg = nullptr, ///< If non-zero, provides a pointer to
      ///< a string instance in which error messages will be returned. If the
      ///< string is non-empty upon return an error occurred while invoking
      ///< the program.
      bool *ExecutionFailed = nullptr);

  /// Similar to ExecuteAndWait, but returns immediately.
  /// @returns The \see ProcessInfo of the newly launced process.
  /// \note On Microsoft Windows systems, users will need to either call \see
  /// Wait until the process finished execution or win32 CloseHandle() API on
  /// ProcessInfo.ProcessHandle to avoid memory leaks.
  ProcessInfo ExecuteNoWait(StringRef Program, const char **args,
                            const char **env = nullptr,
                            const StringRef **redirects = nullptr,
                            unsigned memoryLimit = 0,
                            std::string *ErrMsg = nullptr,
                            bool *ExecutionFailed = nullptr);

  /// Return true if the given arguments fit within system-specific
  /// argument length limits.
  bool argumentsFitWithinSystemLimits(ArrayRef<const char*> Args);

  /// File encoding options when writing contents that a non-UTF8 tool will
  /// read (on Windows systems). For UNIX, we always use UTF-8.
  enum WindowsEncodingMethod {
    /// UTF-8 is the LLVM native encoding, being the same as "do not perform
    /// encoding conversion".
    WEM_UTF8,
    WEM_CurrentCodePage,
    WEM_UTF16
  };

  /// Saves the UTF8-encoded \p contents string into the file \p FileName
  /// using a specific encoding.
  ///
  /// This write file function adds the possibility to choose which encoding
  /// to use when writing a text file. On Windows, this is important when
  /// writing files with internationalization support with an encoding that
  /// is different from the one used in LLVM (UTF-8). We use this when
  /// writing response files, since GCC tools on MinGW only understand legacy
  /// code pages, and VisualStudio tools only understand UTF-16.
  /// For UNIX, using different encodings is silently ignored, since all
  /// tools work well with UTF-8.
  /// This function assumes that you only use UTF-8 *text* data and will
  /// convert it to your desired encoding before writing to the file.
  ///
  /// FIXME: We use EM_CurrentCodePage to write response files for GNU tools
  /// in a MinGW/MinGW-w64 environment, which has serious flaws but currently
  /// is our best shot to make gcc/ld understand international characters.
  /// This should be changed as soon as binutils fix this to support UTF16 on
  /// mingw.
  ///
  /// \returns non-zero error_code if failed
  std::error_code
  writeFileWithEncoding(StringRef FileName, StringRef Contents,
                        WindowsEncodingMethod Encoding = WEM_UTF8);

  /// This function waits for the process specified by \p PI to finish.
  /// \returns A \see ProcessInfo struct with Pid set to:
  /// \li The process id of the child process if the child process has
  /// changed state.
  /// \li 0 if the child process has not changed state.
  /// \note Users of this function should always check the ReturnCode member
  /// of the \see ProcessInfo returned from this function.
  ProcessInfo Wait(
      const ProcessInfo &PI, ///< The child process that should be waited on.
      unsigned SecondsToWait, ///< If non-zero, this specifies the amount of
      ///< time to wait for the child process to exit. If the time expires,
      ///< the child is killed and this function returns. If zero, this
      ///< function will perform a non-blocking wait on the child process.
      bool WaitUntilTerminates, ///< If true, ignores \p SecondsToWait and
      ///< waits until child has terminated.
      std::string *ErrMsg = nullptr ///< If non-zero, provides a pointer to a
      ///< string instance in which error messages will be returned. If the
      ///< string is non-empty upon return an error occurred while invoking
      ///< the program.
      );
  }
}

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/SystemUtils.h
//===- SystemUtils.h - Utilities to do low-level system stuff ---*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains functions used to do a variety of low-level, often // system-specific, tasks. // //===----------------------------------------------------------------------===// #ifndef LLVM_SUPPORT_SYSTEMUTILS_H #define LLVM_SUPPORT_SYSTEMUTILS_H namespace llvm { class raw_ostream; /// Determine if the raw_ostream provided is connected to a terminal. If so, /// generate a warning message to errs() advising against display of bitcode /// and return true. Otherwise just return false. /// @brief Check for output written to a console bool CheckBitcodeOutputToConsole( raw_ostream &stream_to_check, ///< The stream to be checked bool print_warning = true ///< Control whether warnings are printed ); } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/Regex.h
//===-- Regex.h - Regular Expression matcher implementation -*- C++ -*-----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a POSIX regular expression matcher.  Both Basic and
// Extended POSIX regular expressions (ERE) are supported.  EREs were extended
// to support backreferences in matches.
// This implementation also supports matching strings with embedded NUL chars.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_REGEX_H
#define LLVM_SUPPORT_REGEX_H

#include <string>
#include <utility>

struct llvm_regex;

namespace llvm {
  class StringRef;
  template<typename T> class SmallVectorImpl;

  /// Owning wrapper around a compiled POSIX regular expression.
  /// Move-only: the compiled pattern (preg) is a heap resource released in
  /// the destructor.
  class Regex {
  public:
    enum {
      NoFlags=0,
      /// Compile for matching that ignores upper/lower case distinctions.
      IgnoreCase=1,
      /// Compile for newline-sensitive matching. With this flag '[^' bracket
      /// expressions and '.' never match newline. A ^ anchor matches the
      /// null string after any newline in the string in addition to its
      /// normal function, and the $ anchor matches the null string before
      /// any newline in the string in addition to its normal function.
      Newline=2,
      /// By default, the POSIX extended regular expression (ERE) syntax is
      /// assumed. Pass this flag to turn on basic regular expressions (BRE)
      /// instead.
      BasicRegex=4
    };

    /// Compiles the given regular expression \p Regex.
    Regex(StringRef Regex, unsigned Flags = NoFlags);
    Regex(const Regex &) = delete;

    /// Move assignment.
    ///
    /// FIX: previously declared as `Regex &operator=(Regex regex)`
    /// (pass-by-value). With the copy constructor deleted, that overload was
    /// only callable with rvalues and worked by move-constructing the
    /// parameter -- an explicit rvalue-reference overload accepts exactly
    /// the same set of calls and states the intent directly. The local
    /// temporary preserves the original's behavior of releasing this
    /// object's old compiled pattern before the call returns.
    Regex &operator=(Regex &&regex) {
      Regex tmp(std::move(regex));
      std::swap(preg, tmp.preg);
      std::swap(error, tmp.error);
      return *this;
    }

    /// Move construction: steals the compiled pattern; the source is left
    /// with a null pattern and is safe to destroy or assign to.
    Regex(Regex &&regex) {
      preg = regex.preg;
      error = regex.error;
      regex.preg = nullptr;
    }

    ~Regex();

    /// isValid - returns the error encountered during regex compilation, or
    /// matching, if any.
    bool isValid(std::string &Error);

    /// getNumMatches - In a valid regex, return the number of parenthesized
    /// matches it contains.  The number filled in by match will include this
    /// many entries plus one for the whole regex (as element 0).
    unsigned getNumMatches() const;

    /// matches - Match the regex against a given \p String.
    ///
    /// \param Matches - If given, on a successful match this will be filled
    /// in with references to the matched group expressions (inside \p
    /// String), the first group is always the entire pattern.
    ///
    /// This returns true on a successful match.
    bool match(StringRef String, SmallVectorImpl<StringRef> *Matches = nullptr);

    /// sub - Return the result of replacing the first match of the regex in
    /// \p String with the \p Repl string. Backreferences like "\0" in the
    /// replacement string are replaced with the appropriate match substring.
    ///
    /// Note that the replacement string has backslash escaping performed on
    /// it. Invalid backreferences are ignored (replaced by empty strings).
    ///
    /// \param Error If non-null, any errors in the substitution (invalid
    /// backreferences, trailing backslashes) will be recorded as a non-empty
    /// string.
    std::string sub(StringRef Repl, StringRef String,
                    std::string *Error = nullptr);

    /// \brief If this function returns true, ^Str$ is an extended regular
    /// expression that matches Str and only Str.
    static bool isLiteralERE(StringRef Str);

    /// \brief Turn String into a regex by escaping its special characters.
    static std::string escape(StringRef String);

  private:
    struct llvm_regex *preg; // Compiled pattern; owned, freed in ~Regex().
    int error;               // Last regcomp/regexec status code.
  };
}

#endif // LLVM_SUPPORT_REGEX_H
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/StringSaver.h
//===- llvm/Support/StringSaver.h -------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #ifndef LLVM_SUPPORT_STRINGSAVER_H #define LLVM_SUPPORT_STRINGSAVER_H #include "llvm/ADT/StringRef.h" #include "llvm/ADT/Twine.h" #include "llvm/Support/Allocator.h" namespace llvm { /// \brief Saves strings in the inheritor's stable storage and returns a stable /// raw character pointer. class StringSaver { protected: ~StringSaver() {} virtual const char *saveImpl(StringRef S); public: StringSaver(BumpPtrAllocator &Alloc) : Alloc(Alloc) {} const char *save(const char *S) { return save(StringRef(S)); } const char *save(StringRef S) { return saveImpl(S); } const char *save(const Twine &S) { return save(StringRef(S.str())); } const char *save(std::string &S) { return save(StringRef(S)); } private: BumpPtrAllocator &Alloc; }; class BumpPtrStringSaver final : public StringSaver { public: BumpPtrStringSaver(BumpPtrAllocator &Alloc) : StringSaver(Alloc) {} }; } #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/Valgrind.h
//===- llvm/Support/Valgrind.h - Communication with Valgrind -----*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Methods for communicating with a valgrind instance this program is running // under. These are all no-ops unless LLVM was configured on a system with the // valgrind headers installed and valgrind is controlling this process. // //===----------------------------------------------------------------------===// #ifndef LLVM_SUPPORT_VALGRIND_H #define LLVM_SUPPORT_VALGRIND_H #include "llvm/Config/llvm-config.h" #include "llvm/Support/Compiler.h" #include <stddef.h> #if LLVM_ENABLE_THREADS != 0 && !defined(NDEBUG) // tsan (Thread Sanitizer) is a valgrind-based tool that detects these exact // functions by name. extern "C" { void AnnotateHappensAfter(const char *file, int line, const volatile void *cv); void AnnotateHappensBefore(const char *file, int line, const volatile void *cv); void AnnotateIgnoreWritesBegin(const char *file, int line); void AnnotateIgnoreWritesEnd(const char *file, int line); } #endif namespace llvm { namespace sys { // True if Valgrind is controlling this process. bool RunningOnValgrind(); // Discard valgrind's translation of code in the range [Addr .. Addr + Len). // Otherwise valgrind may continue to execute the old version of the code. void ValgrindDiscardTranslations(const void *Addr, size_t Len); #if LLVM_ENABLE_THREADS != 0 && !defined(NDEBUG) // Thread Sanitizer is a valgrind tool that finds races in code. // See http://code.google.com/p/data-race-test/wiki/DynamicAnnotations . // This marker is used to define a happens-before arc. The race detector will // infer an arc from the begin to the end when they share the same pointer // argument. 
#define TsanHappensBefore(cv) \ AnnotateHappensBefore(__FILE__, __LINE__, cv) // This marker defines the destination of a happens-before arc. #define TsanHappensAfter(cv) \ AnnotateHappensAfter(__FILE__, __LINE__, cv) // Ignore any races on writes between here and the next TsanIgnoreWritesEnd. #define TsanIgnoreWritesBegin() \ AnnotateIgnoreWritesBegin(__FILE__, __LINE__) // Resume checking for racy writes. #define TsanIgnoreWritesEnd() \ AnnotateIgnoreWritesEnd(__FILE__, __LINE__) #else #define TsanHappensBefore(cv) #define TsanHappensAfter(cv) #define TsanIgnoreWritesBegin() #define TsanIgnoreWritesEnd() #endif } } #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/Support/ARMBuildAttributes.h
//===-- ARMBuildAttributes.h - ARM Build Attributes -------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains enumerations and support routines for ARM build
// attributes as defined in ARM ABI addenda document (ABI release 2.08).
//
// ELF for the ARM Architecture r2.09 - November 30, 2012
//
// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044e/IHI0044E_aaelf.pdf
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ARMBUILDATTRIBUTES_H
#define LLVM_SUPPORT_ARMBUILDATTRIBUTES_H

namespace llvm {
class StringRef;

namespace ARMBuildAttrs {

enum SpecialAttr {
  // This is for the .cpu asm attr. It translates into one or more
  // AttrType (below) entries in the .ARM.attributes section in the ELF.
  SEL_CPU
};

// Tag numbers of the .ARM.attributes entries, as assigned by the ARM ABI.
enum AttrType {
  // Rest correspond to ELF/.ARM.attributes
  File                      = 1,
  CPU_raw_name              = 4,
  CPU_name                  = 5,
  CPU_arch                  = 6,
  CPU_arch_profile          = 7,
  ARM_ISA_use               = 8,
  THUMB_ISA_use             = 9,
  FP_arch                   = 10,
  WMMX_arch                 = 11,
  Advanced_SIMD_arch        = 12,
  PCS_config                = 13,
  ABI_PCS_R9_use            = 14,
  ABI_PCS_RW_data           = 15,
  ABI_PCS_RO_data           = 16,
  ABI_PCS_GOT_use           = 17,
  ABI_PCS_wchar_t           = 18,
  ABI_FP_rounding           = 19,
  ABI_FP_denormal           = 20,
  ABI_FP_exceptions         = 21,
  ABI_FP_user_exceptions    = 22,
  ABI_FP_number_model       = 23,
  ABI_align_needed          = 24,
  ABI_align_preserved       = 25,
  ABI_enum_size             = 26,
  ABI_HardFP_use            = 27,
  ABI_VFP_args              = 28,
  ABI_WMMX_args             = 29,
  ABI_optimization_goals    = 30,
  ABI_FP_optimization_goals = 31,
  compatibility             = 32,
  CPU_unaligned_access      = 34,
  FP_HP_extension           = 36,
  ABI_FP_16bit_format       = 38,
  MPextension_use           = 42, // recoded from 70 (ABI r2.08)
  DIV_use                   = 44,
  also_compatible_with      = 65,
  conformance               = 67,
  Virtualization_use        = 68,

  /// Legacy Tags
  Section              = 2,  // deprecated (ABI r2.09)
  Symbol               = 3,  // deprecated (ABI r2.09)
  ABI_align8_needed    = 24, // renamed to ABI_align_needed (ABI r2.09)
  ABI_align8_preserved = 25, // renamed to ABI_align_preserved (ABI r2.09)
  nodefaults           = 64, // deprecated (ABI r2.09)
  T2EE_use             = 66, // deprecated (ABI r2.09)
  MPextension_use_old  = 70  // recoded to MPextension_use (ABI r2.08)
};

// Name/number conversions for the tags above.
StringRef AttrTypeAsString(unsigned Attr, bool HasTagPrefix = true);
StringRef AttrTypeAsString(AttrType Attr, bool HasTagPrefix = true);
int AttrTypeFromString(StringRef Tag);

// Magic numbers for .ARM.attributes
enum AttrMagic {
  Format_Version = 0x41
};

// Legal Values for CPU_arch, (=6), uleb128
enum CPUArch {
  Pre_v4 = 0,
  v4     = 1,  // e.g. SA110
  v4T    = 2,  // e.g. ARM7TDMI
  v5T    = 3,  // e.g. ARM9TDMI
  v5TE   = 4,  // e.g. ARM946E_S
  v5TEJ  = 5,  // e.g. ARM926EJ_S
  v6     = 6,  // e.g. ARM1136J_S
  v6KZ   = 7,  // e.g. ARM1176JZ_S
  v6T2   = 8,  // e.g. ARM1156T2_S
  v6K    = 9,  // e.g. ARM1176JZ_S
  v7     = 10, // e.g. Cortex A8, Cortex M3
  v6_M   = 11, // e.g. Cortex M1
  v6S_M  = 12, // v6_M with the System extensions
  v7E_M  = 13, // v7_M with DSP extensions
  v8     = 14, // v8,v8.1a AArch32
};

enum CPUArchProfile {               // (=7), uleb128
  Not_Applicable          = 0,      // pre v7, or cross-profile code
  ApplicationProfile      = (0x41), // 'A' (e.g. for Cortex A8)
  RealTimeProfile         = (0x52), // 'R' (e.g. for Cortex R4)
  MicroControllerProfile  = (0x4D), // 'M' (e.g. for Cortex M3)
  SystemProfile           = (0x53)  // 'S' Application or real-time profile
};

// The following have a lot of common use cases
// (anonymous enum: legal values for several distinct tags; numerically
// overlapping constants belong to different tags, as the comments note).
enum {
  Not_Allowed = 0,
  Allowed     = 1,

  // Tag_ARM_ISA_use (=8), uleb128

  // Tag_THUMB_ISA_use, (=9), uleb128
  AllowThumb32 = 2, // 32-bit Thumb (implies 16-bit instructions)

  // Tag_FP_arch (=10), uleb128 (formerly Tag_VFP_arch = 10)
  AllowFPv2  = 2,     // v2 FP ISA permitted (implies use of the v1 FP ISA)
  AllowFPv3A = 3,     // v3 FP ISA permitted (implies use of the v2 FP ISA)
  AllowFPv3B = 4,     // v3 FP ISA permitted, but only D0-D15, S0-S31
  AllowFPv4A = 5,     // v4 FP ISA permitted (implies use of v3 FP ISA)
  AllowFPv4B = 6,     // v4 FP ISA was permitted, but only D0-D15, S0-S31
  AllowFPARMv8A = 7,  // Use of the ARM v8-A FP ISA was permitted
  AllowFPARMv8B = 8,  // Use of the ARM v8-A FP ISA was permitted, but only
                      // D0-D15, S0-S31

  // Tag_WMMX_arch, (=11), uleb128
  AllowWMMXv1 = 1, // The user permitted this entity to use WMMX v1
  AllowWMMXv2 = 2, // The user permitted this entity to use WMMX v2

  // Tag_Advanced_SIMD_arch, (=12), uleb128
  AllowNeon = 1,         // SIMDv1 was permitted
  AllowNeon2 = 2,        // SIMDv2 was permitted (Half-precision FP, MAC
                         // operations)
  AllowNeonARMv8 = 3,    // ARM v8-A SIMD was permitted
  AllowNeonARMv8_1a = 4, // ARM v8.1-A SIMD was permitted (RDMA)

  // Tag_ABI_PCS_R9_use, (=14), uleb128
  R9IsGPR        = 0, // R9 used as v6 (just another callee-saved register)
  R9IsSB         = 1, // R9 used as a global static base rgister
  R9IsTLSPointer = 2, // R9 used as a thread local storage pointer
  R9Reserved     = 3, // R9 not used by code associated with attributed entity

  // Tag_ABI_PCS_RW_data, (=15), uleb128
  AddressRWPCRel = 1, // Address RW static data PC-relative
  AddressRWSBRel = 2, // Address RW static data SB-relative
  AddressRWNone  = 3, // No RW static data permitted

  // Tag_ABI_PCS_RO_data, (=14), uleb128
  AddressROPCRel = 1, // Address RO static data PC-relative
  AddressRONone  = 2, // No RO static data permitted

  // Tag_ABI_PCS_GOT_use, (=17), uleb128
  AddressDirect = 1, // Address imported data directly
  AddressGOT    = 2, // Address imported data indirectly (via GOT)

  // Tag_ABI_PCS_wchar_t, (=18), uleb128
  WCharProhibited  = 0, // wchar_t is not used
  WCharWidth2Bytes = 2, // sizeof(wchar_t) == 2
  WCharWidth4Bytes = 4, // sizeof(wchar_t) == 4

  // Tag_ABI_FP_denormal, (=20), uleb128
  PositiveZero   = 0,
  IEEEDenormals  = 1,
  PreserveFPSign = 2, // sign when flushed-to-zero is preserved

  // Tag_ABI_FP_number_model, (=23), uleb128
  AllowRTABI  = 2, // numbers, infinities, and one quiet NaN (see [RTABI])
  AllowIEE754 = 3, // this code to use all the IEEE 754-defined FP encodings

  // Tag_ABI_enum_size, (=26), uleb128
  EnumProhibited = 0, // The user prohibited the use of enums when building
                      // this entity.
  EnumSmallest   = 1, // Enum is smallest container big enough to hold all
                      // values.
  Enum32Bit      = 2, // Enum is at least 32 bits.
  Enum32BitABI   = 3, // Every enumeration visible across an ABI-complying
                      // interface contains a value needing 32 bits to encode
                      // it; other enums can be containerized.

  // Tag_ABI_HardFP_use, (=27), uleb128
  HardFPImplied         = 0, // FP use should be implied by Tag_FP_arch
  HardFPSinglePrecision = 1, // Single-precision only

  // Tag_ABI_VFP_args, (=28), uleb128
  BaseAAPCS  = 0,
  HardFPAAPCS = 1,

  // Tag_FP_HP_extension, (=36), uleb128
  AllowHPFP = 1, // Allow use of Half Precision FP

  // Tag_FP_16bit_format, (=38), uleb128
  FP16FormatIEEE = 1,

  // Tag_MPextension_use, (=42), uleb128
  AllowMP = 1, // Allow use of MP extensions

  // Tag_DIV_use, (=44), uleb128
  // Note: AllowDIVExt must be emitted if and only if the permission to use
  // hardware divide cannot be conveyed using AllowDIVIfExists or DisallowDIV
  AllowDIVIfExists = 0, // Allow hardware divide if available in arch, or no
                        // info exists.
  DisallowDIV      = 1, // Hardware divide explicitly disallowed.
  AllowDIVExt      = 2, // Allow hardware divide as optional architecture
                        // extension above the base arch specified by
                        // Tag_CPU_arch and Tag_CPU_arch_profile.

  // Tag_Virtualization_use, (=68), uleb128
  AllowTZ              = 1,
  AllowVirtualization  = 2,
  AllowTZVirtualization = 3
};

} // namespace ARMBuildAttrs
} // namespace llvm

#endif
0
repos/DirectXShaderCompiler/include/llvm/Support
repos/DirectXShaderCompiler/include/llvm/Support/ELFRelocs/Hexagon.def
#ifndef ELF_RELOC #error "ELF_RELOC must be defined" #endif // Release 5 ABI ELF_RELOC(R_HEX_NONE, 0) ELF_RELOC(R_HEX_B22_PCREL, 1) ELF_RELOC(R_HEX_B15_PCREL, 2) ELF_RELOC(R_HEX_B7_PCREL, 3) ELF_RELOC(R_HEX_LO16, 4) ELF_RELOC(R_HEX_HI16, 5) ELF_RELOC(R_HEX_32, 6) ELF_RELOC(R_HEX_16, 7) ELF_RELOC(R_HEX_8, 8) ELF_RELOC(R_HEX_GPREL16_0, 9) ELF_RELOC(R_HEX_GPREL16_1, 10) ELF_RELOC(R_HEX_GPREL16_2, 11) ELF_RELOC(R_HEX_GPREL16_3, 12) ELF_RELOC(R_HEX_HL16, 13) ELF_RELOC(R_HEX_B13_PCREL, 14) ELF_RELOC(R_HEX_B9_PCREL, 15) ELF_RELOC(R_HEX_B32_PCREL_X, 16) ELF_RELOC(R_HEX_32_6_X, 17) ELF_RELOC(R_HEX_B22_PCREL_X, 18) ELF_RELOC(R_HEX_B15_PCREL_X, 19) ELF_RELOC(R_HEX_B13_PCREL_X, 20) ELF_RELOC(R_HEX_B9_PCREL_X, 21) ELF_RELOC(R_HEX_B7_PCREL_X, 22) ELF_RELOC(R_HEX_16_X, 23) ELF_RELOC(R_HEX_12_X, 24) ELF_RELOC(R_HEX_11_X, 25) ELF_RELOC(R_HEX_10_X, 26) ELF_RELOC(R_HEX_9_X, 27) ELF_RELOC(R_HEX_8_X, 28) ELF_RELOC(R_HEX_7_X, 29) ELF_RELOC(R_HEX_6_X, 30) ELF_RELOC(R_HEX_32_PCREL, 31) ELF_RELOC(R_HEX_COPY, 32) ELF_RELOC(R_HEX_GLOB_DAT, 33) ELF_RELOC(R_HEX_JMP_SLOT, 34) ELF_RELOC(R_HEX_RELATIVE, 35) ELF_RELOC(R_HEX_PLT_B22_PCREL, 36) ELF_RELOC(R_HEX_GOTREL_LO16, 37) ELF_RELOC(R_HEX_GOTREL_HI16, 38) ELF_RELOC(R_HEX_GOTREL_32, 39) ELF_RELOC(R_HEX_GOT_LO16, 40) ELF_RELOC(R_HEX_GOT_HI16, 41) ELF_RELOC(R_HEX_GOT_32, 42) ELF_RELOC(R_HEX_GOT_16, 43) ELF_RELOC(R_HEX_DTPMOD_32, 44) ELF_RELOC(R_HEX_DTPREL_LO16, 45) ELF_RELOC(R_HEX_DTPREL_HI16, 46) ELF_RELOC(R_HEX_DTPREL_32, 47) ELF_RELOC(R_HEX_DTPREL_16, 48) ELF_RELOC(R_HEX_GD_PLT_B22_PCREL, 49) ELF_RELOC(R_HEX_GD_GOT_LO16, 50) ELF_RELOC(R_HEX_GD_GOT_HI16, 51) ELF_RELOC(R_HEX_GD_GOT_32, 52) ELF_RELOC(R_HEX_GD_GOT_16, 53) ELF_RELOC(R_HEX_IE_LO16, 54) ELF_RELOC(R_HEX_IE_HI16, 55) ELF_RELOC(R_HEX_IE_32, 56) ELF_RELOC(R_HEX_IE_GOT_LO16, 57) ELF_RELOC(R_HEX_IE_GOT_HI16, 58) ELF_RELOC(R_HEX_IE_GOT_32, 59) ELF_RELOC(R_HEX_IE_GOT_16, 60) ELF_RELOC(R_HEX_TPREL_LO16, 61) ELF_RELOC(R_HEX_TPREL_HI16, 62) ELF_RELOC(R_HEX_TPREL_32, 63) ELF_RELOC(R_HEX_TPREL_16, 
64) ELF_RELOC(R_HEX_6_PCREL_X, 65) ELF_RELOC(R_HEX_GOTREL_32_6_X, 66) ELF_RELOC(R_HEX_GOTREL_16_X, 67) ELF_RELOC(R_HEX_GOTREL_11_X, 68) ELF_RELOC(R_HEX_GOT_32_6_X, 69) ELF_RELOC(R_HEX_GOT_16_X, 70) ELF_RELOC(R_HEX_GOT_11_X, 71) ELF_RELOC(R_HEX_DTPREL_32_6_X, 72) ELF_RELOC(R_HEX_DTPREL_16_X, 73) ELF_RELOC(R_HEX_DTPREL_11_X, 74) ELF_RELOC(R_HEX_GD_GOT_32_6_X, 75) ELF_RELOC(R_HEX_GD_GOT_16_X, 76) ELF_RELOC(R_HEX_GD_GOT_11_X, 77) ELF_RELOC(R_HEX_IE_32_6_X, 78) ELF_RELOC(R_HEX_IE_16_X, 79) ELF_RELOC(R_HEX_IE_GOT_32_6_X, 80) ELF_RELOC(R_HEX_IE_GOT_16_X, 81) ELF_RELOC(R_HEX_IE_GOT_11_X, 82) ELF_RELOC(R_HEX_TPREL_32_6_X, 83) ELF_RELOC(R_HEX_TPREL_16_X, 84) ELF_RELOC(R_HEX_TPREL_11_X, 85) ELF_RELOC(R_HEX_LD_PLT_B22_PCREL, 86) ELF_RELOC(R_HEX_LD_GOT_LO16, 87) ELF_RELOC(R_HEX_LD_GOT_HI16, 88) ELF_RELOC(R_HEX_LD_GOT_32, 89) ELF_RELOC(R_HEX_LD_GOT_16, 90) ELF_RELOC(R_HEX_LD_GOT_32_6_X, 91) ELF_RELOC(R_HEX_LD_GOT_16_X, 92) ELF_RELOC(R_HEX_LD_GOT_11_X, 93)
0
repos/DirectXShaderCompiler/include/llvm/Support
repos/DirectXShaderCompiler/include/llvm/Support/ELFRelocs/Mips.def
#ifndef ELF_RELOC #error "ELF_RELOC must be defined" #endif ELF_RELOC(R_MIPS_NONE, 0) ELF_RELOC(R_MIPS_16, 1) ELF_RELOC(R_MIPS_32, 2) ELF_RELOC(R_MIPS_REL32, 3) ELF_RELOC(R_MIPS_26, 4) ELF_RELOC(R_MIPS_HI16, 5) ELF_RELOC(R_MIPS_LO16, 6) ELF_RELOC(R_MIPS_GPREL16, 7) ELF_RELOC(R_MIPS_LITERAL, 8) ELF_RELOC(R_MIPS_GOT16, 9) ELF_RELOC(R_MIPS_PC16, 10) ELF_RELOC(R_MIPS_CALL16, 11) ELF_RELOC(R_MIPS_GPREL32, 12) ELF_RELOC(R_MIPS_UNUSED1, 13) ELF_RELOC(R_MIPS_UNUSED2, 14) ELF_RELOC(R_MIPS_UNUSED3, 15) ELF_RELOC(R_MIPS_SHIFT5, 16) ELF_RELOC(R_MIPS_SHIFT6, 17) ELF_RELOC(R_MIPS_64, 18) ELF_RELOC(R_MIPS_GOT_DISP, 19) ELF_RELOC(R_MIPS_GOT_PAGE, 20) ELF_RELOC(R_MIPS_GOT_OFST, 21) ELF_RELOC(R_MIPS_GOT_HI16, 22) ELF_RELOC(R_MIPS_GOT_LO16, 23) ELF_RELOC(R_MIPS_SUB, 24) ELF_RELOC(R_MIPS_INSERT_A, 25) ELF_RELOC(R_MIPS_INSERT_B, 26) ELF_RELOC(R_MIPS_DELETE, 27) ELF_RELOC(R_MIPS_HIGHER, 28) ELF_RELOC(R_MIPS_HIGHEST, 29) ELF_RELOC(R_MIPS_CALL_HI16, 30) ELF_RELOC(R_MIPS_CALL_LO16, 31) ELF_RELOC(R_MIPS_SCN_DISP, 32) ELF_RELOC(R_MIPS_REL16, 33) ELF_RELOC(R_MIPS_ADD_IMMEDIATE, 34) ELF_RELOC(R_MIPS_PJUMP, 35) ELF_RELOC(R_MIPS_RELGOT, 36) ELF_RELOC(R_MIPS_JALR, 37) ELF_RELOC(R_MIPS_TLS_DTPMOD32, 38) ELF_RELOC(R_MIPS_TLS_DTPREL32, 39) ELF_RELOC(R_MIPS_TLS_DTPMOD64, 40) ELF_RELOC(R_MIPS_TLS_DTPREL64, 41) ELF_RELOC(R_MIPS_TLS_GD, 42) ELF_RELOC(R_MIPS_TLS_LDM, 43) ELF_RELOC(R_MIPS_TLS_DTPREL_HI16, 44) ELF_RELOC(R_MIPS_TLS_DTPREL_LO16, 45) ELF_RELOC(R_MIPS_TLS_GOTTPREL, 46) ELF_RELOC(R_MIPS_TLS_TPREL32, 47) ELF_RELOC(R_MIPS_TLS_TPREL64, 48) ELF_RELOC(R_MIPS_TLS_TPREL_HI16, 49) ELF_RELOC(R_MIPS_TLS_TPREL_LO16, 50) ELF_RELOC(R_MIPS_GLOB_DAT, 51) ELF_RELOC(R_MIPS_PC21_S2, 60) ELF_RELOC(R_MIPS_PC26_S2, 61) ELF_RELOC(R_MIPS_PC18_S3, 62) ELF_RELOC(R_MIPS_PC19_S2, 63) ELF_RELOC(R_MIPS_PCHI16, 64) ELF_RELOC(R_MIPS_PCLO16, 65) ELF_RELOC(R_MIPS16_26, 100) ELF_RELOC(R_MIPS16_GPREL, 101) ELF_RELOC(R_MIPS16_GOT16, 102) ELF_RELOC(R_MIPS16_CALL16, 103) ELF_RELOC(R_MIPS16_HI16, 104) ELF_RELOC(R_MIPS16_LO16, 105) 
ELF_RELOC(R_MIPS16_TLS_GD, 106) ELF_RELOC(R_MIPS16_TLS_LDM, 107) ELF_RELOC(R_MIPS16_TLS_DTPREL_HI16, 108) ELF_RELOC(R_MIPS16_TLS_DTPREL_LO16, 109) ELF_RELOC(R_MIPS16_TLS_GOTTPREL, 110) ELF_RELOC(R_MIPS16_TLS_TPREL_HI16, 111) ELF_RELOC(R_MIPS16_TLS_TPREL_LO16, 112) ELF_RELOC(R_MIPS_COPY, 126) ELF_RELOC(R_MIPS_JUMP_SLOT, 127) ELF_RELOC(R_MICROMIPS_26_S1, 133) ELF_RELOC(R_MICROMIPS_HI16, 134) ELF_RELOC(R_MICROMIPS_LO16, 135) ELF_RELOC(R_MICROMIPS_GPREL16, 136) ELF_RELOC(R_MICROMIPS_LITERAL, 137) ELF_RELOC(R_MICROMIPS_GOT16, 138) ELF_RELOC(R_MICROMIPS_PC7_S1, 139) ELF_RELOC(R_MICROMIPS_PC10_S1, 140) ELF_RELOC(R_MICROMIPS_PC16_S1, 141) ELF_RELOC(R_MICROMIPS_CALL16, 142) ELF_RELOC(R_MICROMIPS_GOT_DISP, 145) ELF_RELOC(R_MICROMIPS_GOT_PAGE, 146) ELF_RELOC(R_MICROMIPS_GOT_OFST, 147) ELF_RELOC(R_MICROMIPS_GOT_HI16, 148) ELF_RELOC(R_MICROMIPS_GOT_LO16, 149) ELF_RELOC(R_MICROMIPS_SUB, 150) ELF_RELOC(R_MICROMIPS_HIGHER, 151) ELF_RELOC(R_MICROMIPS_HIGHEST, 152) ELF_RELOC(R_MICROMIPS_CALL_HI16, 153) ELF_RELOC(R_MICROMIPS_CALL_LO16, 154) ELF_RELOC(R_MICROMIPS_SCN_DISP, 155) ELF_RELOC(R_MICROMIPS_JALR, 156) ELF_RELOC(R_MICROMIPS_HI0_LO16, 157) ELF_RELOC(R_MICROMIPS_TLS_GD, 162) ELF_RELOC(R_MICROMIPS_TLS_LDM, 163) ELF_RELOC(R_MICROMIPS_TLS_DTPREL_HI16, 164) ELF_RELOC(R_MICROMIPS_TLS_DTPREL_LO16, 165) ELF_RELOC(R_MICROMIPS_TLS_GOTTPREL, 166) ELF_RELOC(R_MICROMIPS_TLS_TPREL_HI16, 169) ELF_RELOC(R_MICROMIPS_TLS_TPREL_LO16, 170) ELF_RELOC(R_MICROMIPS_GPREL7_S2, 172) ELF_RELOC(R_MICROMIPS_PC23_S2, 173) ELF_RELOC(R_MICROMIPS_PC21_S2, 174) ELF_RELOC(R_MICROMIPS_PC26_S2, 175) ELF_RELOC(R_MICROMIPS_PC18_S3, 176) ELF_RELOC(R_MICROMIPS_PC19_S2, 177) ELF_RELOC(R_MIPS_NUM, 218) ELF_RELOC(R_MIPS_PC32, 248) ELF_RELOC(R_MIPS_EH, 249)
0
repos/DirectXShaderCompiler/include/llvm/Support
repos/DirectXShaderCompiler/include/llvm/Support/ELFRelocs/x86_64.def
#ifndef ELF_RELOC #error "ELF_RELOC must be defined" #endif ELF_RELOC(R_X86_64_NONE, 0) ELF_RELOC(R_X86_64_64, 1) ELF_RELOC(R_X86_64_PC32, 2) ELF_RELOC(R_X86_64_GOT32, 3) ELF_RELOC(R_X86_64_PLT32, 4) ELF_RELOC(R_X86_64_COPY, 5) ELF_RELOC(R_X86_64_GLOB_DAT, 6) ELF_RELOC(R_X86_64_JUMP_SLOT, 7) ELF_RELOC(R_X86_64_RELATIVE, 8) ELF_RELOC(R_X86_64_GOTPCREL, 9) ELF_RELOC(R_X86_64_32, 10) ELF_RELOC(R_X86_64_32S, 11) ELF_RELOC(R_X86_64_16, 12) ELF_RELOC(R_X86_64_PC16, 13) ELF_RELOC(R_X86_64_8, 14) ELF_RELOC(R_X86_64_PC8, 15) ELF_RELOC(R_X86_64_DTPMOD64, 16) ELF_RELOC(R_X86_64_DTPOFF64, 17) ELF_RELOC(R_X86_64_TPOFF64, 18) ELF_RELOC(R_X86_64_TLSGD, 19) ELF_RELOC(R_X86_64_TLSLD, 20) ELF_RELOC(R_X86_64_DTPOFF32, 21) ELF_RELOC(R_X86_64_GOTTPOFF, 22) ELF_RELOC(R_X86_64_TPOFF32, 23) ELF_RELOC(R_X86_64_PC64, 24) ELF_RELOC(R_X86_64_GOTOFF64, 25) ELF_RELOC(R_X86_64_GOTPC32, 26) ELF_RELOC(R_X86_64_GOT64, 27) ELF_RELOC(R_X86_64_GOTPCREL64, 28) ELF_RELOC(R_X86_64_GOTPC64, 29) ELF_RELOC(R_X86_64_GOTPLT64, 30) ELF_RELOC(R_X86_64_PLTOFF64, 31) ELF_RELOC(R_X86_64_SIZE32, 32) ELF_RELOC(R_X86_64_SIZE64, 33) ELF_RELOC(R_X86_64_GOTPC32_TLSDESC, 34) ELF_RELOC(R_X86_64_TLSDESC_CALL, 35) ELF_RELOC(R_X86_64_TLSDESC, 36) ELF_RELOC(R_X86_64_IRELATIVE, 37)
0
repos/DirectXShaderCompiler/include/llvm/Support
repos/DirectXShaderCompiler/include/llvm/Support/ELFRelocs/Sparc.def
#ifndef ELF_RELOC #error "ELF_RELOC must be defined" #endif ELF_RELOC(R_SPARC_NONE, 0) ELF_RELOC(R_SPARC_8, 1) ELF_RELOC(R_SPARC_16, 2) ELF_RELOC(R_SPARC_32, 3) ELF_RELOC(R_SPARC_DISP8, 4) ELF_RELOC(R_SPARC_DISP16, 5) ELF_RELOC(R_SPARC_DISP32, 6) ELF_RELOC(R_SPARC_WDISP30, 7) ELF_RELOC(R_SPARC_WDISP22, 8) ELF_RELOC(R_SPARC_HI22, 9) ELF_RELOC(R_SPARC_22, 10) ELF_RELOC(R_SPARC_13, 11) ELF_RELOC(R_SPARC_LO10, 12) ELF_RELOC(R_SPARC_GOT10, 13) ELF_RELOC(R_SPARC_GOT13, 14) ELF_RELOC(R_SPARC_GOT22, 15) ELF_RELOC(R_SPARC_PC10, 16) ELF_RELOC(R_SPARC_PC22, 17) ELF_RELOC(R_SPARC_WPLT30, 18) ELF_RELOC(R_SPARC_COPY, 19) ELF_RELOC(R_SPARC_GLOB_DAT, 20) ELF_RELOC(R_SPARC_JMP_SLOT, 21) ELF_RELOC(R_SPARC_RELATIVE, 22) ELF_RELOC(R_SPARC_UA32, 23) ELF_RELOC(R_SPARC_PLT32, 24) ELF_RELOC(R_SPARC_HIPLT22, 25) ELF_RELOC(R_SPARC_LOPLT10, 26) ELF_RELOC(R_SPARC_PCPLT32, 27) ELF_RELOC(R_SPARC_PCPLT22, 28) ELF_RELOC(R_SPARC_PCPLT10, 29) ELF_RELOC(R_SPARC_10, 30) ELF_RELOC(R_SPARC_11, 31) ELF_RELOC(R_SPARC_64, 32) ELF_RELOC(R_SPARC_OLO10, 33) ELF_RELOC(R_SPARC_HH22, 34) ELF_RELOC(R_SPARC_HM10, 35) ELF_RELOC(R_SPARC_LM22, 36) ELF_RELOC(R_SPARC_PC_HH22, 37) ELF_RELOC(R_SPARC_PC_HM10, 38) ELF_RELOC(R_SPARC_PC_LM22, 39) ELF_RELOC(R_SPARC_WDISP16, 40) ELF_RELOC(R_SPARC_WDISP19, 41) ELF_RELOC(R_SPARC_7, 43) ELF_RELOC(R_SPARC_5, 44) ELF_RELOC(R_SPARC_6, 45) ELF_RELOC(R_SPARC_DISP64, 46) ELF_RELOC(R_SPARC_PLT64, 47) ELF_RELOC(R_SPARC_HIX22, 48) ELF_RELOC(R_SPARC_LOX10, 49) ELF_RELOC(R_SPARC_H44, 50) ELF_RELOC(R_SPARC_M44, 51) ELF_RELOC(R_SPARC_L44, 52) ELF_RELOC(R_SPARC_REGISTER, 53) ELF_RELOC(R_SPARC_UA64, 54) ELF_RELOC(R_SPARC_UA16, 55) ELF_RELOC(R_SPARC_TLS_GD_HI22, 56) ELF_RELOC(R_SPARC_TLS_GD_LO10, 57) ELF_RELOC(R_SPARC_TLS_GD_ADD, 58) ELF_RELOC(R_SPARC_TLS_GD_CALL, 59) ELF_RELOC(R_SPARC_TLS_LDM_HI22, 60) ELF_RELOC(R_SPARC_TLS_LDM_LO10, 61) ELF_RELOC(R_SPARC_TLS_LDM_ADD, 62) ELF_RELOC(R_SPARC_TLS_LDM_CALL, 63) ELF_RELOC(R_SPARC_TLS_LDO_HIX22, 64) ELF_RELOC(R_SPARC_TLS_LDO_LOX10, 65) 
ELF_RELOC(R_SPARC_TLS_LDO_ADD, 66) ELF_RELOC(R_SPARC_TLS_IE_HI22, 67) ELF_RELOC(R_SPARC_TLS_IE_LO10, 68) ELF_RELOC(R_SPARC_TLS_IE_LD, 69) ELF_RELOC(R_SPARC_TLS_IE_LDX, 70) ELF_RELOC(R_SPARC_TLS_IE_ADD, 71) ELF_RELOC(R_SPARC_TLS_LE_HIX22, 72) ELF_RELOC(R_SPARC_TLS_LE_LOX10, 73) ELF_RELOC(R_SPARC_TLS_DTPMOD32, 74) ELF_RELOC(R_SPARC_TLS_DTPMOD64, 75) ELF_RELOC(R_SPARC_TLS_DTPOFF32, 76) ELF_RELOC(R_SPARC_TLS_DTPOFF64, 77) ELF_RELOC(R_SPARC_TLS_TPOFF32, 78) ELF_RELOC(R_SPARC_TLS_TPOFF64, 79) ELF_RELOC(R_SPARC_GOTDATA_HIX22, 80) ELF_RELOC(R_SPARC_GOTDATA_LOX10, 81) ELF_RELOC(R_SPARC_GOTDATA_OP_HIX22, 82) ELF_RELOC(R_SPARC_GOTDATA_OP_LOX10, 83) ELF_RELOC(R_SPARC_GOTDATA_OP, 84)
0
repos/DirectXShaderCompiler/include/llvm/Support
repos/DirectXShaderCompiler/include/llvm/Support/ELFRelocs/i386.def
#ifndef ELF_RELOC #error "ELF_RELOC must be defined" #endif // TODO: this is just a subset ELF_RELOC(R_386_NONE, 0) ELF_RELOC(R_386_32, 1) ELF_RELOC(R_386_PC32, 2) ELF_RELOC(R_386_GOT32, 3) ELF_RELOC(R_386_PLT32, 4) ELF_RELOC(R_386_COPY, 5) ELF_RELOC(R_386_GLOB_DAT, 6) ELF_RELOC(R_386_JUMP_SLOT, 7) ELF_RELOC(R_386_RELATIVE, 8) ELF_RELOC(R_386_GOTOFF, 9) ELF_RELOC(R_386_GOTPC, 10) ELF_RELOC(R_386_32PLT, 11) ELF_RELOC(R_386_TLS_TPOFF, 14) ELF_RELOC(R_386_TLS_IE, 15) ELF_RELOC(R_386_TLS_GOTIE, 16) ELF_RELOC(R_386_TLS_LE, 17) ELF_RELOC(R_386_TLS_GD, 18) ELF_RELOC(R_386_TLS_LDM, 19) ELF_RELOC(R_386_16, 20) ELF_RELOC(R_386_PC16, 21) ELF_RELOC(R_386_8, 22) ELF_RELOC(R_386_PC8, 23) ELF_RELOC(R_386_TLS_GD_32, 24) ELF_RELOC(R_386_TLS_GD_PUSH, 25) ELF_RELOC(R_386_TLS_GD_CALL, 26) ELF_RELOC(R_386_TLS_GD_POP, 27) ELF_RELOC(R_386_TLS_LDM_32, 28) ELF_RELOC(R_386_TLS_LDM_PUSH, 29) ELF_RELOC(R_386_TLS_LDM_CALL, 30) ELF_RELOC(R_386_TLS_LDM_POP, 31) ELF_RELOC(R_386_TLS_LDO_32, 32) ELF_RELOC(R_386_TLS_IE_32, 33) ELF_RELOC(R_386_TLS_LE_32, 34) ELF_RELOC(R_386_TLS_DTPMOD32, 35) ELF_RELOC(R_386_TLS_DTPOFF32, 36) ELF_RELOC(R_386_TLS_TPOFF32, 37) ELF_RELOC(R_386_TLS_GOTDESC, 39) ELF_RELOC(R_386_TLS_DESC_CALL, 40) ELF_RELOC(R_386_TLS_DESC, 41) ELF_RELOC(R_386_IRELATIVE, 42) ELF_RELOC(R_386_NUM, 43)
0
repos/DirectXShaderCompiler/include/llvm/Support
repos/DirectXShaderCompiler/include/llvm/Support/ELFRelocs/AArch64.def
#ifndef ELF_RELOC #error "ELF_RELOC must be defined" #endif // ABI release 1.0 ELF_RELOC(R_AARCH64_NONE, 0) ELF_RELOC(R_AARCH64_ABS64, 0x101) ELF_RELOC(R_AARCH64_ABS32, 0x102) ELF_RELOC(R_AARCH64_ABS16, 0x103) ELF_RELOC(R_AARCH64_PREL64, 0x104) ELF_RELOC(R_AARCH64_PREL32, 0x105) ELF_RELOC(R_AARCH64_PREL16, 0x106) ELF_RELOC(R_AARCH64_MOVW_UABS_G0, 0x107) ELF_RELOC(R_AARCH64_MOVW_UABS_G0_NC, 0x108) ELF_RELOC(R_AARCH64_MOVW_UABS_G1, 0x109) ELF_RELOC(R_AARCH64_MOVW_UABS_G1_NC, 0x10a) ELF_RELOC(R_AARCH64_MOVW_UABS_G2, 0x10b) ELF_RELOC(R_AARCH64_MOVW_UABS_G2_NC, 0x10c) ELF_RELOC(R_AARCH64_MOVW_UABS_G3, 0x10d) ELF_RELOC(R_AARCH64_MOVW_SABS_G0, 0x10e) ELF_RELOC(R_AARCH64_MOVW_SABS_G1, 0x10f) ELF_RELOC(R_AARCH64_MOVW_SABS_G2, 0x110) ELF_RELOC(R_AARCH64_LD_PREL_LO19, 0x111) ELF_RELOC(R_AARCH64_ADR_PREL_LO21, 0x112) ELF_RELOC(R_AARCH64_ADR_PREL_PG_HI21, 0x113) ELF_RELOC(R_AARCH64_ADR_PREL_PG_HI21_NC, 0x114) ELF_RELOC(R_AARCH64_ADD_ABS_LO12_NC, 0x115) ELF_RELOC(R_AARCH64_LDST8_ABS_LO12_NC, 0x116) ELF_RELOC(R_AARCH64_TSTBR14, 0x117) ELF_RELOC(R_AARCH64_CONDBR19, 0x118) ELF_RELOC(R_AARCH64_JUMP26, 0x11a) ELF_RELOC(R_AARCH64_CALL26, 0x11b) ELF_RELOC(R_AARCH64_LDST16_ABS_LO12_NC, 0x11c) ELF_RELOC(R_AARCH64_LDST32_ABS_LO12_NC, 0x11d) ELF_RELOC(R_AARCH64_LDST64_ABS_LO12_NC, 0x11e) ELF_RELOC(R_AARCH64_MOVW_PREL_G0, 0x11f) ELF_RELOC(R_AARCH64_MOVW_PREL_G0_NC, 0x120) ELF_RELOC(R_AARCH64_MOVW_PREL_G1, 0x121) ELF_RELOC(R_AARCH64_MOVW_PREL_G1_NC, 0x122) ELF_RELOC(R_AARCH64_MOVW_PREL_G2, 0x123) ELF_RELOC(R_AARCH64_MOVW_PREL_G2_NC, 0x124) ELF_RELOC(R_AARCH64_MOVW_PREL_G3, 0x125) ELF_RELOC(R_AARCH64_LDST128_ABS_LO12_NC, 0x12b) ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G0, 0x12c) ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G0_NC, 0x12d) ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G1, 0x12e) ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G1_NC, 0x12f) ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G2, 0x130) ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G2_NC, 0x131) ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G3, 0x132) ELF_RELOC(R_AARCH64_GOTREL64, 0x133) 
ELF_RELOC(R_AARCH64_GOTREL32, 0x134) ELF_RELOC(R_AARCH64_GOT_LD_PREL19, 0x135) ELF_RELOC(R_AARCH64_LD64_GOTOFF_LO15, 0x136) ELF_RELOC(R_AARCH64_ADR_GOT_PAGE, 0x137) ELF_RELOC(R_AARCH64_LD64_GOT_LO12_NC, 0x138) ELF_RELOC(R_AARCH64_LD64_GOTPAGE_LO15, 0x139) ELF_RELOC(R_AARCH64_TLSGD_ADR_PREL21, 0x200) ELF_RELOC(R_AARCH64_TLSGD_ADR_PAGE21, 0x201) ELF_RELOC(R_AARCH64_TLSGD_ADD_LO12_NC, 0x202) ELF_RELOC(R_AARCH64_TLSGD_MOVW_G1, 0x203) ELF_RELOC(R_AARCH64_TLSGD_MOVW_G0_NC, 0x204) ELF_RELOC(R_AARCH64_TLSLD_ADR_PREL21, 0x205) ELF_RELOC(R_AARCH64_TLSLD_ADR_PAGE21, 0x206) ELF_RELOC(R_AARCH64_TLSLD_ADD_LO12_NC, 0x207) ELF_RELOC(R_AARCH64_TLSLD_MOVW_G1, 0x208) ELF_RELOC(R_AARCH64_TLSLD_MOVW_G0_NC, 0x209) ELF_RELOC(R_AARCH64_TLSLD_LD_PREL19, 0x20a) ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G2, 0x20b) ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G1, 0x20c) ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC, 0x20d) ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G0, 0x20e) ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC, 0x20f) ELF_RELOC(R_AARCH64_TLSLD_ADD_DTPREL_HI12, 0x210) ELF_RELOC(R_AARCH64_TLSLD_ADD_DTPREL_LO12, 0x211) ELF_RELOC(R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC, 0x212) ELF_RELOC(R_AARCH64_TLSLD_LDST8_DTPREL_LO12, 0x213) ELF_RELOC(R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC, 0x214) ELF_RELOC(R_AARCH64_TLSLD_LDST16_DTPREL_LO12, 0x215) ELF_RELOC(R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC, 0x216) ELF_RELOC(R_AARCH64_TLSLD_LDST32_DTPREL_LO12, 0x217) ELF_RELOC(R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC, 0x218) ELF_RELOC(R_AARCH64_TLSLD_LDST64_DTPREL_LO12, 0x219) ELF_RELOC(R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC, 0x21a) ELF_RELOC(R_AARCH64_TLSIE_MOVW_GOTTPREL_G1, 0x21b) ELF_RELOC(R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC, 0x21c) ELF_RELOC(R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, 0x21d) ELF_RELOC(R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, 0x21e) ELF_RELOC(R_AARCH64_TLSIE_LD_GOTTPREL_PREL19, 0x21f) ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G2, 0x220) ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G1, 0x221) ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G1_NC, 0x222) 
ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G0, 0x223) ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G0_NC, 0x224) ELF_RELOC(R_AARCH64_TLSLE_ADD_TPREL_HI12, 0x225) ELF_RELOC(R_AARCH64_TLSLE_ADD_TPREL_LO12, 0x226) ELF_RELOC(R_AARCH64_TLSLE_ADD_TPREL_LO12_NC, 0x227) ELF_RELOC(R_AARCH64_TLSLE_LDST8_TPREL_LO12, 0x228) ELF_RELOC(R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC, 0x229) ELF_RELOC(R_AARCH64_TLSLE_LDST16_TPREL_LO12, 0x22a) ELF_RELOC(R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC, 0x22b) ELF_RELOC(R_AARCH64_TLSLE_LDST32_TPREL_LO12, 0x22c) ELF_RELOC(R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC, 0x22d) ELF_RELOC(R_AARCH64_TLSLE_LDST64_TPREL_LO12, 0x22e) ELF_RELOC(R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC, 0x22f) ELF_RELOC(R_AARCH64_TLSDESC_LD_PREL19, 0x230) ELF_RELOC(R_AARCH64_TLSDESC_ADR_PREL21, 0x231) ELF_RELOC(R_AARCH64_TLSDESC_ADR_PAGE21, 0x232) ELF_RELOC(R_AARCH64_TLSDESC_LD64_LO12_NC, 0x233) ELF_RELOC(R_AARCH64_TLSDESC_ADD_LO12_NC, 0x234) ELF_RELOC(R_AARCH64_TLSDESC_OFF_G1, 0x235) ELF_RELOC(R_AARCH64_TLSDESC_OFF_G0_NC, 0x236) ELF_RELOC(R_AARCH64_TLSDESC_LDR, 0x237) ELF_RELOC(R_AARCH64_TLSDESC_ADD, 0x238) ELF_RELOC(R_AARCH64_TLSDESC_CALL, 0x239) ELF_RELOC(R_AARCH64_TLSLE_LDST128_TPREL_LO12, 0x23a) ELF_RELOC(R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC, 0x23b) ELF_RELOC(R_AARCH64_TLSLD_LDST128_DTPREL_LO12, 0x23c) ELF_RELOC(R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC, 0x23d) ELF_RELOC(R_AARCH64_COPY, 0x400) ELF_RELOC(R_AARCH64_GLOB_DAT, 0x401) ELF_RELOC(R_AARCH64_JUMP_SLOT, 0x402) ELF_RELOC(R_AARCH64_RELATIVE, 0x403) ELF_RELOC(R_AARCH64_TLS_DTPREL64, 0x404) ELF_RELOC(R_AARCH64_TLS_DTPMOD64, 0x405) ELF_RELOC(R_AARCH64_TLS_TPREL64, 0x406) ELF_RELOC(R_AARCH64_TLSDESC, 0x407) ELF_RELOC(R_AARCH64_IRELATIVE, 0x408)
0
repos/DirectXShaderCompiler/include/llvm/Support
repos/DirectXShaderCompiler/include/llvm/Support/ELFRelocs/ARM.def
#ifndef ELF_RELOC #error "ELF_RELOC must be defined" #endif // Meets 2.09 ABI Specs. ELF_RELOC(R_ARM_NONE, 0x00) ELF_RELOC(R_ARM_PC24, 0x01) ELF_RELOC(R_ARM_ABS32, 0x02) ELF_RELOC(R_ARM_REL32, 0x03) ELF_RELOC(R_ARM_LDR_PC_G0, 0x04) ELF_RELOC(R_ARM_ABS16, 0x05) ELF_RELOC(R_ARM_ABS12, 0x06) ELF_RELOC(R_ARM_THM_ABS5, 0x07) ELF_RELOC(R_ARM_ABS8, 0x08) ELF_RELOC(R_ARM_SBREL32, 0x09) ELF_RELOC(R_ARM_THM_CALL, 0x0a) ELF_RELOC(R_ARM_THM_PC8, 0x0b) ELF_RELOC(R_ARM_BREL_ADJ, 0x0c) ELF_RELOC(R_ARM_TLS_DESC, 0x0d) ELF_RELOC(R_ARM_THM_SWI8, 0x0e) ELF_RELOC(R_ARM_XPC25, 0x0f) ELF_RELOC(R_ARM_THM_XPC22, 0x10) ELF_RELOC(R_ARM_TLS_DTPMOD32, 0x11) ELF_RELOC(R_ARM_TLS_DTPOFF32, 0x12) ELF_RELOC(R_ARM_TLS_TPOFF32, 0x13) ELF_RELOC(R_ARM_COPY, 0x14) ELF_RELOC(R_ARM_GLOB_DAT, 0x15) ELF_RELOC(R_ARM_JUMP_SLOT, 0x16) ELF_RELOC(R_ARM_RELATIVE, 0x17) ELF_RELOC(R_ARM_GOTOFF32, 0x18) ELF_RELOC(R_ARM_BASE_PREL, 0x19) ELF_RELOC(R_ARM_GOT_BREL, 0x1a) ELF_RELOC(R_ARM_PLT32, 0x1b) ELF_RELOC(R_ARM_CALL, 0x1c) ELF_RELOC(R_ARM_JUMP24, 0x1d) ELF_RELOC(R_ARM_THM_JUMP24, 0x1e) ELF_RELOC(R_ARM_BASE_ABS, 0x1f) ELF_RELOC(R_ARM_ALU_PCREL_7_0, 0x20) ELF_RELOC(R_ARM_ALU_PCREL_15_8, 0x21) ELF_RELOC(R_ARM_ALU_PCREL_23_15, 0x22) ELF_RELOC(R_ARM_LDR_SBREL_11_0_NC, 0x23) ELF_RELOC(R_ARM_ALU_SBREL_19_12_NC, 0x24) ELF_RELOC(R_ARM_ALU_SBREL_27_20_CK, 0x25) ELF_RELOC(R_ARM_TARGET1, 0x26) ELF_RELOC(R_ARM_SBREL31, 0x27) ELF_RELOC(R_ARM_V4BX, 0x28) ELF_RELOC(R_ARM_TARGET2, 0x29) ELF_RELOC(R_ARM_PREL31, 0x2a) ELF_RELOC(R_ARM_MOVW_ABS_NC, 0x2b) ELF_RELOC(R_ARM_MOVT_ABS, 0x2c) ELF_RELOC(R_ARM_MOVW_PREL_NC, 0x2d) ELF_RELOC(R_ARM_MOVT_PREL, 0x2e) ELF_RELOC(R_ARM_THM_MOVW_ABS_NC, 0x2f) ELF_RELOC(R_ARM_THM_MOVT_ABS, 0x30) ELF_RELOC(R_ARM_THM_MOVW_PREL_NC, 0x31) ELF_RELOC(R_ARM_THM_MOVT_PREL, 0x32) ELF_RELOC(R_ARM_THM_JUMP19, 0x33) ELF_RELOC(R_ARM_THM_JUMP6, 0x34) ELF_RELOC(R_ARM_THM_ALU_PREL_11_0, 0x35) ELF_RELOC(R_ARM_THM_PC12, 0x36) ELF_RELOC(R_ARM_ABS32_NOI, 0x37) ELF_RELOC(R_ARM_REL32_NOI, 0x38) ELF_RELOC(R_ARM_ALU_PC_G0_NC, 
0x39) ELF_RELOC(R_ARM_ALU_PC_G0, 0x3a) ELF_RELOC(R_ARM_ALU_PC_G1_NC, 0x3b) ELF_RELOC(R_ARM_ALU_PC_G1, 0x3c) ELF_RELOC(R_ARM_ALU_PC_G2, 0x3d) ELF_RELOC(R_ARM_LDR_PC_G1, 0x3e) ELF_RELOC(R_ARM_LDR_PC_G2, 0x3f) ELF_RELOC(R_ARM_LDRS_PC_G0, 0x40) ELF_RELOC(R_ARM_LDRS_PC_G1, 0x41) ELF_RELOC(R_ARM_LDRS_PC_G2, 0x42) ELF_RELOC(R_ARM_LDC_PC_G0, 0x43) ELF_RELOC(R_ARM_LDC_PC_G1, 0x44) ELF_RELOC(R_ARM_LDC_PC_G2, 0x45) ELF_RELOC(R_ARM_ALU_SB_G0_NC, 0x46) ELF_RELOC(R_ARM_ALU_SB_G0, 0x47) ELF_RELOC(R_ARM_ALU_SB_G1_NC, 0x48) ELF_RELOC(R_ARM_ALU_SB_G1, 0x49) ELF_RELOC(R_ARM_ALU_SB_G2, 0x4a) ELF_RELOC(R_ARM_LDR_SB_G0, 0x4b) ELF_RELOC(R_ARM_LDR_SB_G1, 0x4c) ELF_RELOC(R_ARM_LDR_SB_G2, 0x4d) ELF_RELOC(R_ARM_LDRS_SB_G0, 0x4e) ELF_RELOC(R_ARM_LDRS_SB_G1, 0x4f) ELF_RELOC(R_ARM_LDRS_SB_G2, 0x50) ELF_RELOC(R_ARM_LDC_SB_G0, 0x51) ELF_RELOC(R_ARM_LDC_SB_G1, 0x52) ELF_RELOC(R_ARM_LDC_SB_G2, 0x53) ELF_RELOC(R_ARM_MOVW_BREL_NC, 0x54) ELF_RELOC(R_ARM_MOVT_BREL, 0x55) ELF_RELOC(R_ARM_MOVW_BREL, 0x56) ELF_RELOC(R_ARM_THM_MOVW_BREL_NC, 0x57) ELF_RELOC(R_ARM_THM_MOVT_BREL, 0x58) ELF_RELOC(R_ARM_THM_MOVW_BREL, 0x59) ELF_RELOC(R_ARM_TLS_GOTDESC, 0x5a) ELF_RELOC(R_ARM_TLS_CALL, 0x5b) ELF_RELOC(R_ARM_TLS_DESCSEQ, 0x5c) ELF_RELOC(R_ARM_THM_TLS_CALL, 0x5d) ELF_RELOC(R_ARM_PLT32_ABS, 0x5e) ELF_RELOC(R_ARM_GOT_ABS, 0x5f) ELF_RELOC(R_ARM_GOT_PREL, 0x60) ELF_RELOC(R_ARM_GOT_BREL12, 0x61) ELF_RELOC(R_ARM_GOTOFF12, 0x62) ELF_RELOC(R_ARM_GOTRELAX, 0x63) ELF_RELOC(R_ARM_GNU_VTENTRY, 0x64) ELF_RELOC(R_ARM_GNU_VTINHERIT, 0x65) ELF_RELOC(R_ARM_THM_JUMP11, 0x66) ELF_RELOC(R_ARM_THM_JUMP8, 0x67) ELF_RELOC(R_ARM_TLS_GD32, 0x68) ELF_RELOC(R_ARM_TLS_LDM32, 0x69) ELF_RELOC(R_ARM_TLS_LDO32, 0x6a) ELF_RELOC(R_ARM_TLS_IE32, 0x6b) ELF_RELOC(R_ARM_TLS_LE32, 0x6c) ELF_RELOC(R_ARM_TLS_LDO12, 0x6d) ELF_RELOC(R_ARM_TLS_LE12, 0x6e) ELF_RELOC(R_ARM_TLS_IE12GP, 0x6f) ELF_RELOC(R_ARM_PRIVATE_0, 0x70) ELF_RELOC(R_ARM_PRIVATE_1, 0x71) ELF_RELOC(R_ARM_PRIVATE_2, 0x72) ELF_RELOC(R_ARM_PRIVATE_3, 0x73) ELF_RELOC(R_ARM_PRIVATE_4, 0x74) 
ELF_RELOC(R_ARM_PRIVATE_5, 0x75) ELF_RELOC(R_ARM_PRIVATE_6, 0x76) ELF_RELOC(R_ARM_PRIVATE_7, 0x77) ELF_RELOC(R_ARM_PRIVATE_8, 0x78) ELF_RELOC(R_ARM_PRIVATE_9, 0x79) ELF_RELOC(R_ARM_PRIVATE_10, 0x7a) ELF_RELOC(R_ARM_PRIVATE_11, 0x7b) ELF_RELOC(R_ARM_PRIVATE_12, 0x7c) ELF_RELOC(R_ARM_PRIVATE_13, 0x7d) ELF_RELOC(R_ARM_PRIVATE_14, 0x7e) ELF_RELOC(R_ARM_PRIVATE_15, 0x7f) ELF_RELOC(R_ARM_ME_TOO, 0x80) ELF_RELOC(R_ARM_THM_TLS_DESCSEQ16, 0x81) ELF_RELOC(R_ARM_THM_TLS_DESCSEQ32, 0x82) ELF_RELOC(R_ARM_IRELATIVE, 0xa0)
0
repos/DirectXShaderCompiler/include/llvm/Support
repos/DirectXShaderCompiler/include/llvm/Support/ELFRelocs/PowerPC64.def
#ifndef ELF_RELOC #error "ELF_RELOC must be defined" #endif ELF_RELOC(R_PPC64_NONE, 0) ELF_RELOC(R_PPC64_ADDR32, 1) ELF_RELOC(R_PPC64_ADDR24, 2) ELF_RELOC(R_PPC64_ADDR16, 3) ELF_RELOC(R_PPC64_ADDR16_LO, 4) ELF_RELOC(R_PPC64_ADDR16_HI, 5) ELF_RELOC(R_PPC64_ADDR16_HA, 6) ELF_RELOC(R_PPC64_ADDR14, 7) ELF_RELOC(R_PPC64_ADDR14_BRTAKEN, 8) ELF_RELOC(R_PPC64_ADDR14_BRNTAKEN, 9) ELF_RELOC(R_PPC64_REL24, 10) ELF_RELOC(R_PPC64_REL14, 11) ELF_RELOC(R_PPC64_REL14_BRTAKEN, 12) ELF_RELOC(R_PPC64_REL14_BRNTAKEN, 13) ELF_RELOC(R_PPC64_GOT16, 14) ELF_RELOC(R_PPC64_GOT16_LO, 15) ELF_RELOC(R_PPC64_GOT16_HI, 16) ELF_RELOC(R_PPC64_GOT16_HA, 17) ELF_RELOC(R_PPC64_JMP_SLOT, 21) ELF_RELOC(R_PPC64_REL32, 26) ELF_RELOC(R_PPC64_ADDR64, 38) ELF_RELOC(R_PPC64_ADDR16_HIGHER, 39) ELF_RELOC(R_PPC64_ADDR16_HIGHERA, 40) ELF_RELOC(R_PPC64_ADDR16_HIGHEST, 41) ELF_RELOC(R_PPC64_ADDR16_HIGHESTA, 42) ELF_RELOC(R_PPC64_REL64, 44) ELF_RELOC(R_PPC64_TOC16, 47) ELF_RELOC(R_PPC64_TOC16_LO, 48) ELF_RELOC(R_PPC64_TOC16_HI, 49) ELF_RELOC(R_PPC64_TOC16_HA, 50) ELF_RELOC(R_PPC64_TOC, 51) ELF_RELOC(R_PPC64_ADDR16_DS, 56) ELF_RELOC(R_PPC64_ADDR16_LO_DS, 57) ELF_RELOC(R_PPC64_GOT16_DS, 58) ELF_RELOC(R_PPC64_GOT16_LO_DS, 59) ELF_RELOC(R_PPC64_TOC16_DS, 63) ELF_RELOC(R_PPC64_TOC16_LO_DS, 64) ELF_RELOC(R_PPC64_TLS, 67) ELF_RELOC(R_PPC64_DTPMOD64, 68) ELF_RELOC(R_PPC64_TPREL16, 69) ELF_RELOC(R_PPC64_TPREL16_LO, 70) ELF_RELOC(R_PPC64_TPREL16_HI, 71) ELF_RELOC(R_PPC64_TPREL16_HA, 72) ELF_RELOC(R_PPC64_TPREL64, 73) ELF_RELOC(R_PPC64_DTPREL16, 74) ELF_RELOC(R_PPC64_DTPREL16_LO, 75) ELF_RELOC(R_PPC64_DTPREL16_HI, 76) ELF_RELOC(R_PPC64_DTPREL16_HA, 77) ELF_RELOC(R_PPC64_DTPREL64, 78) ELF_RELOC(R_PPC64_GOT_TLSGD16, 79) ELF_RELOC(R_PPC64_GOT_TLSGD16_LO, 80) ELF_RELOC(R_PPC64_GOT_TLSGD16_HI, 81) ELF_RELOC(R_PPC64_GOT_TLSGD16_HA, 82) ELF_RELOC(R_PPC64_GOT_TLSLD16, 83) ELF_RELOC(R_PPC64_GOT_TLSLD16_LO, 84) ELF_RELOC(R_PPC64_GOT_TLSLD16_HI, 85) ELF_RELOC(R_PPC64_GOT_TLSLD16_HA, 86) ELF_RELOC(R_PPC64_GOT_TPREL16_DS, 87) 
ELF_RELOC(R_PPC64_GOT_TPREL16_LO_DS, 88) ELF_RELOC(R_PPC64_GOT_TPREL16_HI, 89) ELF_RELOC(R_PPC64_GOT_TPREL16_HA, 90) ELF_RELOC(R_PPC64_GOT_DTPREL16_DS, 91) ELF_RELOC(R_PPC64_GOT_DTPREL16_LO_DS, 92) ELF_RELOC(R_PPC64_GOT_DTPREL16_HI, 93) ELF_RELOC(R_PPC64_GOT_DTPREL16_HA, 94) ELF_RELOC(R_PPC64_TPREL16_DS, 95) ELF_RELOC(R_PPC64_TPREL16_LO_DS, 96) ELF_RELOC(R_PPC64_TPREL16_HIGHER, 97) ELF_RELOC(R_PPC64_TPREL16_HIGHERA, 98) ELF_RELOC(R_PPC64_TPREL16_HIGHEST, 99) ELF_RELOC(R_PPC64_TPREL16_HIGHESTA, 100) ELF_RELOC(R_PPC64_DTPREL16_DS, 101) ELF_RELOC(R_PPC64_DTPREL16_LO_DS, 102) ELF_RELOC(R_PPC64_DTPREL16_HIGHER, 103) ELF_RELOC(R_PPC64_DTPREL16_HIGHERA, 104) ELF_RELOC(R_PPC64_DTPREL16_HIGHEST, 105) ELF_RELOC(R_PPC64_DTPREL16_HIGHESTA, 106) ELF_RELOC(R_PPC64_TLSGD, 107) ELF_RELOC(R_PPC64_TLSLD, 108) ELF_RELOC(R_PPC64_REL16, 249) ELF_RELOC(R_PPC64_REL16_LO, 250) ELF_RELOC(R_PPC64_REL16_HI, 251) ELF_RELOC(R_PPC64_REL16_HA, 252)
0
repos/DirectXShaderCompiler/include/llvm/Support
repos/DirectXShaderCompiler/include/llvm/Support/ELFRelocs/PowerPC.def
#ifndef ELF_RELOC #error "ELF_RELOC must be defined" #endif ELF_RELOC(R_PPC_NONE, 0) /* No relocation. */ ELF_RELOC(R_PPC_ADDR32, 1) ELF_RELOC(R_PPC_ADDR24, 2) ELF_RELOC(R_PPC_ADDR16, 3) ELF_RELOC(R_PPC_ADDR16_LO, 4) ELF_RELOC(R_PPC_ADDR16_HI, 5) ELF_RELOC(R_PPC_ADDR16_HA, 6) ELF_RELOC(R_PPC_ADDR14, 7) ELF_RELOC(R_PPC_ADDR14_BRTAKEN, 8) ELF_RELOC(R_PPC_ADDR14_BRNTAKEN, 9) ELF_RELOC(R_PPC_REL24, 10) ELF_RELOC(R_PPC_REL14, 11) ELF_RELOC(R_PPC_REL14_BRTAKEN, 12) ELF_RELOC(R_PPC_REL14_BRNTAKEN, 13) ELF_RELOC(R_PPC_GOT16, 14) ELF_RELOC(R_PPC_GOT16_LO, 15) ELF_RELOC(R_PPC_GOT16_HI, 16) ELF_RELOC(R_PPC_GOT16_HA, 17) ELF_RELOC(R_PPC_PLTREL24, 18) ELF_RELOC(R_PPC_JMP_SLOT, 21) ELF_RELOC(R_PPC_LOCAL24PC, 23) ELF_RELOC(R_PPC_REL32, 26) ELF_RELOC(R_PPC_TLS, 67) ELF_RELOC(R_PPC_DTPMOD32, 68) ELF_RELOC(R_PPC_TPREL16, 69) ELF_RELOC(R_PPC_TPREL16_LO, 70) ELF_RELOC(R_PPC_TPREL16_HI, 71) ELF_RELOC(R_PPC_TPREL16_HA, 72) ELF_RELOC(R_PPC_TPREL32, 73) ELF_RELOC(R_PPC_DTPREL16, 74) ELF_RELOC(R_PPC_DTPREL16_LO, 75) ELF_RELOC(R_PPC_DTPREL16_HI, 76) ELF_RELOC(R_PPC_DTPREL16_HA, 77) ELF_RELOC(R_PPC_DTPREL32, 78) ELF_RELOC(R_PPC_GOT_TLSGD16, 79) ELF_RELOC(R_PPC_GOT_TLSGD16_LO, 80) ELF_RELOC(R_PPC_GOT_TLSGD16_HI, 81) ELF_RELOC(R_PPC_GOT_TLSGD16_HA, 82) ELF_RELOC(R_PPC_GOT_TLSLD16, 83) ELF_RELOC(R_PPC_GOT_TLSLD16_LO, 84) ELF_RELOC(R_PPC_GOT_TLSLD16_HI, 85) ELF_RELOC(R_PPC_GOT_TLSLD16_HA, 86) ELF_RELOC(R_PPC_GOT_TPREL16, 87) ELF_RELOC(R_PPC_GOT_TPREL16_LO, 88) ELF_RELOC(R_PPC_GOT_TPREL16_HI, 89) ELF_RELOC(R_PPC_GOT_TPREL16_HA, 90) ELF_RELOC(R_PPC_GOT_DTPREL16, 91) ELF_RELOC(R_PPC_GOT_DTPREL16_LO, 92) ELF_RELOC(R_PPC_GOT_DTPREL16_HI, 93) ELF_RELOC(R_PPC_GOT_DTPREL16_HA, 94) ELF_RELOC(R_PPC_TLSGD, 95) ELF_RELOC(R_PPC_TLSLD, 96) ELF_RELOC(R_PPC_REL16, 249) ELF_RELOC(R_PPC_REL16_LO, 250) ELF_RELOC(R_PPC_REL16_HI, 251) ELF_RELOC(R_PPC_REL16_HA, 252)
0
repos/DirectXShaderCompiler/include/llvm/Support
repos/DirectXShaderCompiler/include/llvm/Support/ELFRelocs/SystemZ.def
#ifndef ELF_RELOC #error "ELF_RELOC must be defined" #endif ELF_RELOC(R_390_NONE, 0) ELF_RELOC(R_390_8, 1) ELF_RELOC(R_390_12, 2) ELF_RELOC(R_390_16, 3) ELF_RELOC(R_390_32, 4) ELF_RELOC(R_390_PC32, 5) ELF_RELOC(R_390_GOT12, 6) ELF_RELOC(R_390_GOT32, 7) ELF_RELOC(R_390_PLT32, 8) ELF_RELOC(R_390_COPY, 9) ELF_RELOC(R_390_GLOB_DAT, 10) ELF_RELOC(R_390_JMP_SLOT, 11) ELF_RELOC(R_390_RELATIVE, 12) ELF_RELOC(R_390_GOTOFF, 13) ELF_RELOC(R_390_GOTPC, 14) ELF_RELOC(R_390_GOT16, 15) ELF_RELOC(R_390_PC16, 16) ELF_RELOC(R_390_PC16DBL, 17) ELF_RELOC(R_390_PLT16DBL, 18) ELF_RELOC(R_390_PC32DBL, 19) ELF_RELOC(R_390_PLT32DBL, 20) ELF_RELOC(R_390_GOTPCDBL, 21) ELF_RELOC(R_390_64, 22) ELF_RELOC(R_390_PC64, 23) ELF_RELOC(R_390_GOT64, 24) ELF_RELOC(R_390_PLT64, 25) ELF_RELOC(R_390_GOTENT, 26) ELF_RELOC(R_390_GOTOFF16, 27) ELF_RELOC(R_390_GOTOFF64, 28) ELF_RELOC(R_390_GOTPLT12, 29) ELF_RELOC(R_390_GOTPLT16, 30) ELF_RELOC(R_390_GOTPLT32, 31) ELF_RELOC(R_390_GOTPLT64, 32) ELF_RELOC(R_390_GOTPLTENT, 33) ELF_RELOC(R_390_PLTOFF16, 34) ELF_RELOC(R_390_PLTOFF32, 35) ELF_RELOC(R_390_PLTOFF64, 36) ELF_RELOC(R_390_TLS_LOAD, 37) ELF_RELOC(R_390_TLS_GDCALL, 38) ELF_RELOC(R_390_TLS_LDCALL, 39) ELF_RELOC(R_390_TLS_GD32, 40) ELF_RELOC(R_390_TLS_GD64, 41) ELF_RELOC(R_390_TLS_GOTIE12, 42) ELF_RELOC(R_390_TLS_GOTIE32, 43) ELF_RELOC(R_390_TLS_GOTIE64, 44) ELF_RELOC(R_390_TLS_LDM32, 45) ELF_RELOC(R_390_TLS_LDM64, 46) ELF_RELOC(R_390_TLS_IE32, 47) ELF_RELOC(R_390_TLS_IE64, 48) ELF_RELOC(R_390_TLS_IEENT, 49) ELF_RELOC(R_390_TLS_LE32, 50) ELF_RELOC(R_390_TLS_LE64, 51) ELF_RELOC(R_390_TLS_LDO32, 52) ELF_RELOC(R_390_TLS_LDO64, 53) ELF_RELOC(R_390_TLS_DTPMOD, 54) ELF_RELOC(R_390_TLS_DTPOFF, 55) ELF_RELOC(R_390_TLS_TPOFF, 56) ELF_RELOC(R_390_20, 57) ELF_RELOC(R_390_GOT20, 58) ELF_RELOC(R_390_GOTPLT20, 59) ELF_RELOC(R_390_TLS_GOTIE20, 60) ELF_RELOC(R_390_IRELATIVE, 61)
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/PassPrinters/PassPrinters.h
//===- PassPrinters.h - Utilities to print analysis info for passes -------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// /// \file /// \brief Utilities to print analysis info for various kinds of passes. /// //===----------------------------------------------------------------------===// #ifndef LLVM_TOOLS_OPT_PASSPRINTERS_H #define LLVM_TOOLS_OPT_PASSPRINTERS_H #include "llvm/Analysis/CallGraphSCCPass.h" #include "llvm/Analysis/LoopPass.h" #include "llvm/Analysis/RegionPass.h" namespace llvm { class BasicBlockPass; class CallGraphSCCPass; class FunctionPass; class ModulePass; class LoopPass; class PassInfo; class RegionPass; class raw_ostream; FunctionPass *createFunctionPassPrinter(const PassInfo *PI, raw_ostream &out, bool Quiet); CallGraphSCCPass *createCallGraphPassPrinter(const PassInfo *PI, raw_ostream &out, bool Quiet); ModulePass *createModulePassPrinter(const PassInfo *PI, raw_ostream &out, bool Quiet); LoopPass *createLoopPassPrinter(const PassInfo *PI, raw_ostream &out, bool Quiet); RegionPass *createRegionPassPrinter(const PassInfo *PI, raw_ostream &out, bool Quiet); BasicBlockPass *createBasicBlockPassPrinter(const PassInfo *PI, raw_ostream &out, bool Quiet); } // namespace llvm #endif // LLVM_TOOLS_OPT_PASSPRINTERS_H
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/IndexedMap.h
//===- llvm/ADT/IndexedMap.h - An index map implementation ------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements an indexed map. The index map template takes two // types. The first is the mapped type and the second is a functor // that maps its argument to a size_t. On instantiation a "null" value // can be provided to be used as a "does not exist" indicator in the // map. A member function grow() is provided that given the value of // the maximally indexed key (the argument of the functor) makes sure // the map has enough space for it. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_INDEXEDMAP_H #define LLVM_ADT_INDEXEDMAP_H #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include <cassert> #include <functional> namespace llvm { template <typename T, typename ToIndexT = llvm::identity<unsigned> > class IndexedMap { typedef typename ToIndexT::argument_type IndexT; // Prefer SmallVector with zero inline storage over std::vector. IndexedMaps // can grow very large and SmallVector grows more efficiently as long as T // is trivially copyable. 
typedef SmallVector<T, 0> StorageT; StorageT storage_; T nullVal_; ToIndexT toIndex_; public: IndexedMap() : nullVal_(T()) { } explicit IndexedMap(const T& val) : nullVal_(val) { } typename StorageT::reference operator[](IndexT n) { assert(toIndex_(n) < storage_.size() && "index out of bounds!"); return storage_[toIndex_(n)]; } typename StorageT::const_reference operator[](IndexT n) const { assert(toIndex_(n) < storage_.size() && "index out of bounds!"); return storage_[toIndex_(n)]; } void reserve(typename StorageT::size_type s) { storage_.reserve(s); } void resize(typename StorageT::size_type s) { storage_.resize(s, nullVal_); } void clear() { storage_.clear(); } void grow(IndexT n) { unsigned NewSize = toIndex_(n) + 1; if (NewSize > storage_.size()) resize(NewSize); } bool inBounds(IndexT n) const { return toIndex_(n) < storage_.size(); } typename StorageT::size_type size() const { return storage_.size(); } }; } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/IntEqClasses.h
//===-- llvm/ADT/IntEqClasses.h - Equiv. Classes of Integers ----*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Equivalence classes for small integers. This is a mapping of the integers // 0 .. N-1 into M equivalence classes numbered 0 .. M-1. // // Initially each integer has its own equivalence class. Classes are joined by // passing a representative member of each class to join(). // // Once the classes are built, compress() will number them 0 .. M-1 and prevent // further changes. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_INTEQCLASSES_H #define LLVM_ADT_INTEQCLASSES_H #include "llvm/ADT/SmallVector.h" namespace llvm { class IntEqClasses { /// EC - When uncompressed, map each integer to a smaller member of its /// equivalence class. The class leader is the smallest member and maps to /// itself. /// /// When compressed, EC[i] is the equivalence class of i. SmallVector<unsigned, 8> EC; /// NumClasses - The number of equivalence classes when compressed, or 0 when /// uncompressed. unsigned NumClasses; public: /// IntEqClasses - Create an equivalence class mapping for 0 .. N-1. IntEqClasses(unsigned N = 0) : NumClasses(0) { grow(N); } /// grow - Increase capacity to hold 0 .. N-1, putting new integers in unique /// equivalence classes. /// This requires an uncompressed map. void grow(unsigned N); /// clear - Clear all classes so that grow() will assign a unique class to /// every integer. void clear() { EC.clear(); NumClasses = 0; } /// join - Join the equivalence classes of a and b. After joining classes, /// findLeader(a) == findLeader(b). /// This requires an uncompressed map. void join(unsigned a, unsigned b); /// findLeader - Compute the leader of a's equivalence class. 
This is the /// smallest member of the class. /// This requires an uncompressed map. unsigned findLeader(unsigned a) const; /// compress - Compress equivalence classes by numbering them 0 .. M. /// This makes the equivalence class map immutable. void compress(); /// getNumClasses - Return the number of equivalence classes after compress() /// was called. unsigned getNumClasses() const { return NumClasses; } /// operator[] - Return a's equivalence class number, 0 .. getNumClasses()-1. /// This requires a compressed map. unsigned operator[](unsigned a) const { assert(NumClasses && "operator[] called before compress()"); return EC[a]; } /// uncompress - Change back to the uncompressed representation that allows /// editing. void uncompress(); }; } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/EquivalenceClasses.h
//===-- llvm/ADT/EquivalenceClasses.h - Generic Equiv. Classes --*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Generic implementation of equivalence classes through the use Tarjan's // efficient union-find algorithm. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_EQUIVALENCECLASSES_H #define LLVM_ADT_EQUIVALENCECLASSES_H #include "llvm/Support/DataTypes.h" #include <cassert> #include <cstddef> #include <set> namespace llvm { /// EquivalenceClasses - This represents a collection of equivalence classes and /// supports three efficient operations: insert an element into a class of its /// own, union two classes, and find the class for a given element. In /// addition to these modification methods, it is possible to iterate over all /// of the equivalence classes and all of the elements in a class. /// /// This implementation is an efficient implementation that only stores one copy /// of the element being indexed per entry in the set, and allows any arbitrary /// type to be indexed (as long as it can be ordered with operator<). /// /// Here is a simple example using integers: /// /// \code /// EquivalenceClasses<int> EC; /// EC.unionSets(1, 2); // insert 1, 2 into the same set /// EC.insert(4); EC.insert(5); // insert 4, 5 into own sets /// EC.unionSets(5, 1); // merge the set for 1 with 5's set. /// /// for (EquivalenceClasses<int>::iterator I = EC.begin(), E = EC.end(); /// I != E; ++I) { // Iterate over all of the equivalence sets. /// if (!I->isLeader()) continue; // Ignore non-leader sets. /// for (EquivalenceClasses<int>::member_iterator MI = EC.member_begin(I); /// MI != EC.member_end(); ++MI) // Loop over members in this set. /// cerr << *MI << " "; // Print member. 
/// cerr << "\n"; // Finish set. /// } /// \endcode /// /// This example prints: /// 4 /// 5 1 2 /// template <class ElemTy> class EquivalenceClasses { /// ECValue - The EquivalenceClasses data structure is just a set of these. /// Each of these represents a relation for a value. First it stores the /// value itself, which provides the ordering that the set queries. Next, it /// provides a "next pointer", which is used to enumerate all of the elements /// in the unioned set. Finally, it defines either a "end of list pointer" or /// "leader pointer" depending on whether the value itself is a leader. A /// "leader pointer" points to the node that is the leader for this element, /// if the node is not a leader. A "end of list pointer" points to the last /// node in the list of members of this list. Whether or not a node is a /// leader is determined by a bit stolen from one of the pointers. class ECValue { friend class EquivalenceClasses; mutable const ECValue *Leader, *Next; ElemTy Data; // ECValue ctor - Start out with EndOfList pointing to this node, Next is // Null, isLeader = true. ECValue(const ElemTy &Elt) : Leader(this), Next((ECValue*)(intptr_t)1), Data(Elt) {} const ECValue *getLeader() const { if (isLeader()) return this; if (Leader->isLeader()) return Leader; // Path compression. return Leader = Leader->getLeader(); } const ECValue *getEndOfList() const { assert(isLeader() && "Cannot get the end of a list for a non-leader!"); return Leader; } void setNext(const ECValue *NewNext) const { assert(getNext() == nullptr && "Already has a next pointer!"); Next = (const ECValue*)((intptr_t)NewNext | (intptr_t)isLeader()); } public: ECValue(const ECValue &RHS) : Leader(this), Next((ECValue*)(intptr_t)1), Data(RHS.Data) { // Only support copying of singleton nodes. 
assert(RHS.isLeader() && RHS.getNext() == nullptr && "Not a singleton!"); } bool operator<(const ECValue &UFN) const { return Data < UFN.Data; } bool isLeader() const { return (intptr_t)Next & 1; } const ElemTy &getData() const { return Data; } const ECValue *getNext() const { return (ECValue*)((intptr_t)Next & ~(intptr_t)1); } template<typename T> bool operator<(const T &Val) const { return Data < Val; } }; /// TheMapping - This implicitly provides a mapping from ElemTy values to the /// ECValues, it just keeps the key as part of the value. std::set<ECValue> TheMapping; public: EquivalenceClasses() {} EquivalenceClasses(const EquivalenceClasses &RHS) { operator=(RHS); } const EquivalenceClasses &operator=(const EquivalenceClasses &RHS) { TheMapping.clear(); for (iterator I = RHS.begin(), E = RHS.end(); I != E; ++I) if (I->isLeader()) { member_iterator MI = RHS.member_begin(I); member_iterator LeaderIt = member_begin(insert(*MI)); for (++MI; MI != member_end(); ++MI) unionSets(LeaderIt, member_begin(insert(*MI))); } return *this; } //===--------------------------------------------------------------------===// // Inspection methods // /// iterator* - Provides a way to iterate over all values in the set. typedef typename std::set<ECValue>::const_iterator iterator; iterator begin() const { return TheMapping.begin(); } iterator end() const { return TheMapping.end(); } bool empty() const { return TheMapping.empty(); } /// member_* Iterate over the members of an equivalence class. /// class member_iterator; member_iterator member_begin(iterator I) const { // Only leaders provide anything to iterate over. return member_iterator(I->isLeader() ? &*I : nullptr); } member_iterator member_end() const { return member_iterator(nullptr); } /// findValue - Return an iterator to the specified value. If it does not /// exist, end() is returned. 
iterator findValue(const ElemTy &V) const { return TheMapping.find(V); } /// getLeaderValue - Return the leader for the specified value that is in the /// set. It is an error to call this method for a value that is not yet in /// the set. For that, call getOrInsertLeaderValue(V). const ElemTy &getLeaderValue(const ElemTy &V) const { member_iterator MI = findLeader(V); assert(MI != member_end() && "Value is not in the set!"); return *MI; } /// getOrInsertLeaderValue - Return the leader for the specified value that is /// in the set. If the member is not in the set, it is inserted, then /// returned. const ElemTy &getOrInsertLeaderValue(const ElemTy &V) { member_iterator MI = findLeader(insert(V)); assert(MI != member_end() && "Value is not in the set!"); return *MI; } /// getNumClasses - Return the number of equivalence classes in this set. /// Note that this is a linear time operation. unsigned getNumClasses() const { unsigned NC = 0; for (iterator I = begin(), E = end(); I != E; ++I) if (I->isLeader()) ++NC; return NC; } //===--------------------------------------------------------------------===// // Mutation methods /// insert - Insert a new value into the union/find set, ignoring the request /// if the value already exists. iterator insert(const ElemTy &Data) { return TheMapping.insert(ECValue(Data)).first; } /// findLeader - Given a value in the set, return a member iterator for the /// equivalence class it is in. This does the path-compression part that /// makes union-find "union findy". This returns an end iterator if the value /// is not in the equivalence class. /// member_iterator findLeader(iterator I) const { if (I == TheMapping.end()) return member_end(); return member_iterator(I->getLeader()); } member_iterator findLeader(const ElemTy &V) const { return findLeader(TheMapping.find(V)); } /// union - Merge the two equivalence sets for the specified values, inserting /// them if they do not already exist in the equivalence set. 
member_iterator unionSets(const ElemTy &V1, const ElemTy &V2) { iterator V1I = insert(V1), V2I = insert(V2); return unionSets(findLeader(V1I), findLeader(V2I)); } member_iterator unionSets(member_iterator L1, member_iterator L2) { assert(L1 != member_end() && L2 != member_end() && "Illegal inputs!"); if (L1 == L2) return L1; // Unifying the same two sets, noop. // Otherwise, this is a real union operation. Set the end of the L1 list to // point to the L2 leader node. const ECValue &L1LV = *L1.Node, &L2LV = *L2.Node; L1LV.getEndOfList()->setNext(&L2LV); // Update L1LV's end of list pointer. L1LV.Leader = L2LV.getEndOfList(); // Clear L2's leader flag: L2LV.Next = L2LV.getNext(); // L2's leader is now L1. L2LV.Leader = &L1LV; return L1; } class member_iterator { const ECValue *Node; friend class EquivalenceClasses; public: using iterator_category = std::forward_iterator_tag; using value_type = const ElemTy; using size_type = std::size_t; using difference_type = std::ptrdiff_t; using pointer = value_type *; using reference = value_type &; explicit member_iterator() {} explicit member_iterator(const ECValue *N) : Node(N) {} reference operator*() const { assert(Node != nullptr && "Dereferencing end()!"); return Node->getData(); } pointer operator->() const { return &operator*(); } member_iterator &operator++() { assert(Node != nullptr && "++'d off the end of the list!"); Node = Node->getNext(); return *this; } member_iterator operator++(int) { // postincrement operators. member_iterator tmp = *this; ++*this; return tmp; } bool operator==(const member_iterator &RHS) const { return Node == RHS.Node; } bool operator!=(const member_iterator &RHS) const { return Node != RHS.Node; } }; }; } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/Optional.h
//===-- Optional.h - Simple variant for passing optional values ---*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  This file provides Optional, a template class modeled in the spirit of
//  OCaml's 'opt' variant.  The idea is to strongly type whether or not
//  a value can be optional.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_OPTIONAL_H
#define LLVM_ADT_OPTIONAL_H

#include "llvm/ADT/None.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Compiler.h"
#include <cassert>
#include <new>
#include <utility>

namespace llvm {

template<typename T>
class Optional {
  // Raw, suitably-aligned storage for a T; the contained value is created
  // and destroyed manually with placement new / explicit destructor calls.
  AlignedCharArrayUnion<T> storage;
  bool hasVal;  // True iff storage currently holds a constructed T.
public:
  typedef T value_type;

  // Construct an empty Optional from the None sentinel.
  Optional(NoneType) : hasVal(false) {}
  explicit Optional() : hasVal(false) {}
  // Copy-construct the contained value into our storage.
  Optional(const T &y) : hasVal(true) {
    new (storage.buffer) T(y);
  }
  Optional(const Optional &O) : hasVal(O.hasVal) {
    if (hasVal)
      new (storage.buffer) T(*O);
  }

  Optional(T &&y) : hasVal(true) {
    new (storage.buffer) T(std::forward<T>(y));
  }
  // Move construction leaves the source Optional empty (O.reset()).
  Optional(Optional<T> &&O) : hasVal(O) {
    if (O) {
      new (storage.buffer) T(std::move(*O));
      O.reset();
    }
  }
  Optional &operator=(T &&y) {
    if (hasVal)
      **this = std::move(y);
    else {
      new (storage.buffer) T(std::move(y));
      hasVal = true;
    }
    return *this;
  }
  Optional &operator=(Optional &&O) {
    if (!O)
      reset();
    else {
      *this = std::move(*O);
      O.reset();
    }
    return *this;
  }

  /// Create a new object by constructing it in place with the given arguments.
  template<typename ...ArgTypes>
  void emplace(ArgTypes &&...Args) {
    reset();
    hasVal = true;
    new (storage.buffer) T(std::forward<ArgTypes>(Args)...);
  }

  // Build an Optional from a possibly-null pointer: empty when y is null.
  static inline Optional create(const T* y) {
    return y ? Optional(*y) : Optional();
  }

  // FIXME: these assignments (& the equivalent const T&/const Optional& ctors)
  // could be made more efficient by passing by value, possibly unifying them
  // with the rvalue versions above - but this could place a different set of
  // requirements (notably: the existence of a default ctor) when implemented
  // in that way. Careful SFINAE to avoid such pitfalls would be required.
  Optional &operator=(const T &y) {
    if (hasVal)
      **this = y;
    else {
      new (storage.buffer) T(y);
      hasVal = true;
    }
    return *this;
  }

  Optional &operator=(const Optional &O) {
    if (!O)
      reset();
    else
      *this = *O;
    return *this;
  }

  // Destroy the contained value (if any) and become empty.
  void reset() {
    if (hasVal) {
      (**this).~T();
      hasVal = false;
    }
  }

  ~Optional() {
    reset();
  }

  const T* getPointer() const { assert(hasVal); return reinterpret_cast<const T*>(storage.buffer); }
  T* getPointer() { assert(hasVal); return reinterpret_cast<T*>(storage.buffer); }
  const T& getValue() const LLVM_LVALUE_FUNCTION { assert(hasVal); return *getPointer(); }
  T& getValue() LLVM_LVALUE_FUNCTION { assert(hasVal); return *getPointer(); }

  explicit operator bool() const { return hasVal; }
  bool hasValue() const { return hasVal; }
  const T* operator->() const { return getPointer(); }
  T* operator->() { return getPointer(); }
  const T& operator*() const LLVM_LVALUE_FUNCTION { assert(hasVal); return *getPointer(); }
  T& operator*() LLVM_LVALUE_FUNCTION { assert(hasVal); return *getPointer(); }

  // Return the contained value, or `value` when empty.
  template <typename U>
  LLVM_CONSTEXPR T getValueOr(U &&value) const LLVM_LVALUE_FUNCTION {
    return hasValue() ? getValue() : std::forward<U>(value);
  }

#if LLVM_HAS_RVALUE_REFERENCE_THIS
  // Rvalue-qualified accessors: move the value out of a temporary Optional.
  T&& getValue() && { assert(hasVal); return std::move(*getPointer()); }
  T&& operator*() && { assert(hasVal); return std::move(*getPointer()); }

  template <typename U>
  T getValueOr(U &&value) && {
    return hasValue() ? std::move(getValue()) : std::forward<U>(value);
  }
#endif
};

template <typename T> struct isPodLike;
template <typename T> struct isPodLike<Optional<T> > {
  // An Optional<T> is pod-like if T is.
  static const bool value = isPodLike<T>::value;
};

/// \brief Poison comparison between two \c Optional objects. Clients needs to
/// explicitly compare the underlying values and account for empty \c Optional
/// objects.
///
/// This routine will never be defined. It returns \c void to help diagnose
/// errors at compile time.
template<typename T, typename U>
void operator==(const Optional<T> &X, const Optional<U> &Y);

/// \brief Poison comparison between two \c Optional objects. Clients needs to
/// explicitly compare the underlying values and account for empty \c Optional
/// objects.
///
/// This routine will never be defined. It returns \c void to help diagnose
/// errors at compile time.
template<typename T, typename U>
void operator!=(const Optional<T> &X, const Optional<U> &Y);

/// \brief Poison comparison between two \c Optional objects. Clients needs to
/// explicitly compare the underlying values and account for empty \c Optional
/// objects.
///
/// This routine will never be defined. It returns \c void to help diagnose
/// errors at compile time.
template<typename T, typename U>
void operator<(const Optional<T> &X, const Optional<U> &Y);

/// \brief Poison comparison between two \c Optional objects. Clients needs to
/// explicitly compare the underlying values and account for empty \c Optional
/// objects.
///
/// This routine will never be defined. It returns \c void to help diagnose
/// errors at compile time.
template<typename T, typename U>
void operator<=(const Optional<T> &X, const Optional<U> &Y);

/// \brief Poison comparison between two \c Optional objects. Clients needs to
/// explicitly compare the underlying values and account for empty \c Optional
/// objects.
///
/// This routine will never be defined. It returns \c void to help diagnose
/// errors at compile time.
template<typename T, typename U>
void operator>=(const Optional<T> &X, const Optional<U> &Y);

/// \brief Poison comparison between two \c Optional objects. Clients needs to
/// explicitly compare the underlying values and account for empty \c Optional
/// objects.
///
/// This routine will never be defined. It returns \c void to help diagnose
/// errors at compile time.
template<typename T, typename U>
void operator>(const Optional<T> &X, const Optional<U> &Y);

} // end llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/APFloat.h
//===- llvm/ADT/APFloat.h - Arbitrary Precision Floating Point ---*- C++ -*-==// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// /// \file /// \brief /// This file declares a class to represent arbitrary precision floating point /// values and provide a variety of arithmetic operations on them. /// //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_APFLOAT_H #define LLVM_ADT_APFLOAT_H #include "llvm/ADT/APInt.h" namespace llvm { struct fltSemantics; class APSInt; class StringRef; /// Enum that represents what fraction of the LSB truncated bits of an fp number /// represent. /// /// This essentially combines the roles of guard and sticky bits. enum lostFraction { // Example of truncated bits: lfExactlyZero, // 000000 lfLessThanHalf, // 0xxxxx x's not all zero lfExactlyHalf, // 100000 lfMoreThanHalf // 1xxxxx x's not all zero }; /// \brief A self-contained host- and target-independent arbitrary-precision /// floating-point software implementation. /// /// APFloat uses bignum integer arithmetic as provided by static functions in /// the APInt class. The library will work with bignum integers whose parts are /// any unsigned type at least 16 bits wide, but 64 bits is recommended. /// /// Written for clarity rather than speed, in particular with a view to use in /// the front-end of a cross compiler so that target arithmetic can be correctly /// performed on the host. Performance should nonetheless be reasonable, /// particularly for its intended use. It may be useful as a base /// implementation for a run-time library during development of a faster /// target-specific one. /// /// All 5 rounding modes in the IEEE-754R draft are handled correctly for all /// implemented operations. 
Currently implemented operations are add, subtract, /// multiply, divide, fused-multiply-add, conversion-to-float, /// conversion-to-integer and conversion-from-integer. New rounding modes /// (e.g. away from zero) can be added with three or four lines of code. /// /// Four formats are built-in: IEEE single precision, double precision, /// quadruple precision, and x87 80-bit extended double (when operating with /// full extended precision). Adding a new format that obeys IEEE semantics /// only requires adding two lines of code: a declaration and definition of the /// format. /// /// All operations return the status of that operation as an exception bit-mask, /// so multiple operations can be done consecutively with their results or-ed /// together. The returned status can be useful for compiler diagnostics; e.g., /// inexact, underflow and overflow can be easily diagnosed on constant folding, /// and compiler optimizers can determine what exceptions would be raised by /// folding operations and optimize, or perhaps not optimize, accordingly. /// /// At present, underflow tininess is detected after rounding; it should be /// straight forward to add support for the before-rounding case too. /// /// The library reads hexadecimal floating point numbers as per C99, and /// correctly rounds if necessary according to the specified rounding mode. /// Syntax is required to have been validated by the caller. It also converts /// floating point numbers to hexadecimal text as per the C99 %a and %A /// conversions. The output precision (or alternatively the natural minimal /// precision) can be specified; if the requested precision is less than the /// natural precision the output is correctly rounded for the specified rounding /// mode. /// /// It also reads decimal floating point numbers and correctly rounds according /// to the specified rounding mode. /// /// Conversion to decimal text is not currently implemented. 
/// /// Non-zero finite numbers are represented internally as a sign bit, a 16-bit /// signed exponent, and the significand as an array of integer parts. After /// normalization of a number of precision P the exponent is within the range of /// the format, and if the number is not denormal the P-th bit of the /// significand is set as an explicit integer bit. For denormals the most /// significant bit is shifted right so that the exponent is maintained at the /// format's minimum, so that the smallest denormal has just the least /// significant bit of the significand set. The sign of zeroes and infinities /// is significant; the exponent and significand of such numbers is not stored, /// but has a known implicit (deterministic) value: 0 for the significands, 0 /// for zero exponent, all 1 bits for infinity exponent. For NaNs the sign and /// significand are deterministic, although not really meaningful, and preserved /// in non-conversion operations. The exponent is implicitly all 1 bits. /// /// APFloat does not provide any exception handling beyond default exception /// handling. We represent Signaling NaNs via IEEE-754R 2008 6.2.1 should clause /// by encoding Signaling NaNs with the first bit of its trailing significand as /// 0. /// /// TODO /// ==== /// /// Some features that may or may not be worth adding: /// /// Binary to decimal conversion (hard). /// /// Optional ability to detect underflow tininess before rounding. /// /// New formats: x87 in single and double precision mode (IEEE apart from /// extended exponent range) (hard). /// /// New operations: sqrt, IEEE remainder, C90 fmod, nexttoward. /// class APFloat { public: /// A signed type to represent a floating point numbers unbiased exponent. typedef signed short ExponentType; /// \name Floating Point Semantics. 
/// @{ static const fltSemantics IEEEhalf; static const fltSemantics IEEEsingle; static const fltSemantics IEEEdouble; static const fltSemantics IEEEquad; static const fltSemantics PPCDoubleDouble; static const fltSemantics x87DoubleExtended; /// A Pseudo fltsemantic used to construct APFloats that cannot conflict with /// anything real. static const fltSemantics Bogus; /// @} static unsigned int semanticsPrecision(const fltSemantics &); /// IEEE-754R 5.11: Floating Point Comparison Relations. enum cmpResult { cmpLessThan, cmpEqual, cmpGreaterThan, cmpUnordered }; /// IEEE-754R 4.3: Rounding-direction attributes. enum roundingMode { rmNearestTiesToEven, rmTowardPositive, rmTowardNegative, rmTowardZero, rmNearestTiesToAway }; /// IEEE-754R 7: Default exception handling. /// /// opUnderflow or opOverflow are always returned or-ed with opInexact. enum opStatus { opOK = 0x00, opInvalidOp = 0x01, opDivByZero = 0x02, opOverflow = 0x04, opUnderflow = 0x08, opInexact = 0x10 }; /// Category of internally-represented number. enum fltCategory { fcInfinity, fcNaN, fcNormal, fcZero }; /// Convenience enum used to construct an uninitialized APFloat. enum uninitializedTag { uninitialized }; /// \name Constructors /// @{ APFloat(const fltSemantics &); // Default construct to 0.0 APFloat(const fltSemantics &, StringRef); APFloat(const fltSemantics &, integerPart); APFloat(const fltSemantics &, uninitializedTag); APFloat(const fltSemantics &, const APInt &); explicit APFloat(double d); explicit APFloat(float f); APFloat(const APFloat &); APFloat(APFloat &&); ~APFloat(); /// @} /// \brief Returns whether this instance allocated memory. bool needsCleanup() const { return partCount() > 1; } /// \name Convenience "constructors" /// @{ /// Factory for Positive and Negative Zero. /// /// \param Negative True iff the number should be negative. 
static APFloat getZero(const fltSemantics &Sem, bool Negative = false) { APFloat Val(Sem, uninitialized); Val.makeZero(Negative); return Val; } /// Factory for Positive and Negative Infinity. /// /// \param Negative True iff the number should be negative. static APFloat getInf(const fltSemantics &Sem, bool Negative = false) { APFloat Val(Sem, uninitialized); Val.makeInf(Negative); return Val; } /// Factory for QNaN values. /// /// \param Negative - True iff the NaN generated should be negative. /// \param type - The unspecified fill bits for creating the NaN, 0 by /// default. The value is truncated as necessary. static APFloat getNaN(const fltSemantics &Sem, bool Negative = false, unsigned type = 0) { if (type) { APInt fill(64, type); return getQNaN(Sem, Negative, &fill); } else { return getQNaN(Sem, Negative, nullptr); } } /// Factory for QNaN values. static APFloat getQNaN(const fltSemantics &Sem, bool Negative = false, const APInt *payload = nullptr) { return makeNaN(Sem, false, Negative, payload); } /// Factory for SNaN values. static APFloat getSNaN(const fltSemantics &Sem, bool Negative = false, const APInt *payload = nullptr) { return makeNaN(Sem, true, Negative, payload); } /// Returns the largest finite number in the given semantics. /// /// \param Negative - True iff the number should be negative static APFloat getLargest(const fltSemantics &Sem, bool Negative = false); /// Returns the smallest (by magnitude) finite number in the given semantics. /// Might be denormalized, which implies a relative loss of precision. /// /// \param Negative - True iff the number should be negative static APFloat getSmallest(const fltSemantics &Sem, bool Negative = false); /// Returns the smallest (by magnitude) normalized finite number in the given /// semantics. 
/// /// \param Negative - True iff the number should be negative static APFloat getSmallestNormalized(const fltSemantics &Sem, bool Negative = false); /// Returns a float which is bitcasted from an all one value int. /// /// \param BitWidth - Select float type /// \param isIEEE - If 128 bit number, select between PPC and IEEE static APFloat getAllOnesValue(unsigned BitWidth, bool isIEEE = false); /// Returns the size of the floating point number (in bits) in the given /// semantics. static unsigned getSizeInBits(const fltSemantics &Sem); /// @} /// Used to insert APFloat objects, or objects that contain APFloat objects, /// into FoldingSets. void Profile(FoldingSetNodeID &NID) const; /// \name Arithmetic /// @{ opStatus add(const APFloat &, roundingMode); opStatus subtract(const APFloat &, roundingMode); opStatus multiply(const APFloat &, roundingMode); opStatus divide(const APFloat &, roundingMode); /// IEEE remainder. opStatus remainder(const APFloat &); /// C fmod, or llvm frem. opStatus mod(const APFloat &, roundingMode); opStatus fusedMultiplyAdd(const APFloat &, const APFloat &, roundingMode); opStatus roundToIntegral(roundingMode); /// IEEE-754R 5.3.1: nextUp/nextDown. opStatus next(bool nextDown); /// \brief Operator+ overload which provides the default /// \c nmNearestTiesToEven rounding mode and *no* error checking. APFloat operator+(const APFloat &RHS) const { APFloat Result = *this; Result.add(RHS, rmNearestTiesToEven); return Result; } /// \brief Operator- overload which provides the default /// \c nmNearestTiesToEven rounding mode and *no* error checking. APFloat operator-(const APFloat &RHS) const { APFloat Result = *this; Result.subtract(RHS, rmNearestTiesToEven); return Result; } /// \brief Operator* overload which provides the default /// \c nmNearestTiesToEven rounding mode and *no* error checking. 
APFloat operator*(const APFloat &RHS) const { APFloat Result = *this; Result.multiply(RHS, rmNearestTiesToEven); return Result; } /// \brief Operator/ overload which provides the default /// \c nmNearestTiesToEven rounding mode and *no* error checking. APFloat operator/(const APFloat &RHS) const { APFloat Result = *this; Result.divide(RHS, rmNearestTiesToEven); return Result; } /// @} /// \name Sign operations. /// @{ void changeSign(); void clearSign(); void copySign(const APFloat &); /// \brief A static helper to produce a copy of an APFloat value with its sign /// copied from some other APFloat. static APFloat copySign(APFloat Value, const APFloat &Sign) { Value.copySign(Sign); return Value; } /// @} /// \name Conversions /// @{ opStatus convert(const fltSemantics &, roundingMode, bool *); opStatus convertToInteger(integerPart *, unsigned int, bool, roundingMode, bool *) const; opStatus convertToInteger(APSInt &, roundingMode, bool *) const; opStatus convertFromAPInt(const APInt &, bool, roundingMode); opStatus convertFromSignExtendedInteger(const integerPart *, unsigned int, bool, roundingMode); opStatus convertFromZeroExtendedInteger(const integerPart *, unsigned int, bool, roundingMode); opStatus convertFromString(StringRef, roundingMode); APInt bitcastToAPInt() const; double convertToDouble() const; float convertToFloat() const; /// @} /// The definition of equality is not straightforward for floating point, so /// we won't use operator==. Use one of the following, or write whatever it /// is you really mean. bool operator==(const APFloat &) const = delete; /// IEEE comparison with another floating point number (NaNs compare /// unordered, 0==-0). cmpResult compare(const APFloat &) const; /// Bitwise comparison for equality (QNaNs compare equal, 0!=-0). 
bool bitwiseIsEqual(const APFloat &) const; #if 0 // HLSL Change - dst should be _Out_writes_(constant), but this turns out to be unused in any case /// Write out a hexadecimal representation of the floating point value to DST, /// which must be of sufficient size, in the C99 form [-]0xh.hhhhp[+-]d. /// Return the number of characters written, excluding the terminating NUL. unsigned int convertToHexString(char *dst, unsigned int hexDigits, bool upperCase, roundingMode) const; #endif // HLSL Change /// \name IEEE-754R 5.7.2 General operations. /// @{ /// IEEE-754R isSignMinus: Returns true if and only if the current value is /// negative. /// /// This applies to zeros and NaNs as well. bool isNegative() const { return sign; } /// IEEE-754R isNormal: Returns true if and only if the current value is normal. /// /// This implies that the current value of the float is not zero, subnormal, /// infinite, or NaN following the definition of normality from IEEE-754R. bool isNormal() const { return !isDenormal() && isFiniteNonZero(); } /// Returns true if and only if the current value is zero, subnormal, or /// normal. /// /// This means that the value is not infinite or NaN. bool isFinite() const { return !isNaN() && !isInfinity(); } /// Returns true if and only if the float is plus or minus zero. bool isZero() const { return category == fcZero; } /// IEEE-754R isSubnormal(): Returns true if and only if the float is a /// denormal. bool isDenormal() const; /// IEEE-754R isInfinite(): Returns true if and only if the float is infinity. bool isInfinity() const { return category == fcInfinity; } /// Returns true if and only if the float is a quiet or signaling NaN. bool isNaN() const { return category == fcNaN; } /// Returns true if and only if the float is a signaling NaN. 
bool isSignaling() const; /// @} /// \name Simple Queries /// @{ fltCategory getCategory() const { return category; } const fltSemantics &getSemantics() const { return *semantics; } bool isNonZero() const { return category != fcZero; } bool isFiniteNonZero() const { return isFinite() && !isZero(); } bool isPosZero() const { return isZero() && !isNegative(); } bool isNegZero() const { return isZero() && isNegative(); } /// Returns true if and only if the number has the smallest possible non-zero /// magnitude in the current semantics. bool isSmallest() const; /// Returns true if and only if the number has the largest possible finite /// magnitude in the current semantics. bool isLargest() const; /// @} APFloat &operator=(const APFloat &); APFloat &operator=(APFloat &&); /// \brief Overload to compute a hash code for an APFloat value. /// /// Note that the use of hash codes for floating point values is in general /// frought with peril. Equality is hard to define for these values. For /// example, should negative and positive zero hash to different codes? Are /// they equal or not? This hash value implementation specifically /// emphasizes producing different codes for different inputs in order to /// be used in canonicalization and memoization. As such, equality is /// bitwiseIsEqual, and 0 != -0. friend hash_code hash_value(const APFloat &Arg); /// Converts this value into a decimal string. /// /// \param FormatPrecision The maximum number of digits of /// precision to output. If there are fewer digits available, /// zero padding will not be used unless the value is /// integral and small enough to be expressed in /// FormatPrecision digits. 0 means to use the natural /// precision of the number. /// \param FormatMaxPadding The maximum number of zeros to /// consider inserting before falling back to scientific /// notation. 0 means to always use scientific notation. 
/// /// Number Precision MaxPadding Result /// ------ --------- ---------- ------ /// 1.01E+4 5 2 10100 /// 1.01E+4 4 2 1.01E+4 /// 1.01E+4 5 1 1.01E+4 /// 1.01E-2 5 2 0.0101 /// 1.01E-2 4 2 0.0101 /// 1.01E-2 4 1 1.01E-2 void toString(SmallVectorImpl<char> &Str, unsigned FormatPrecision = 0, unsigned FormatMaxPadding = 3) const; /// If this value has an exact multiplicative inverse, store it in inv and /// return true. bool getExactInverse(APFloat *inv) const; /// \brief Enumeration of \c ilogb error results. enum IlogbErrorKinds { IEK_Zero = INT_MIN+1, IEK_NaN = INT_MIN, IEK_Inf = INT_MAX }; /// \brief Returns the exponent of the internal representation of the APFloat. /// /// Because the radix of APFloat is 2, this is equivalent to floor(log2(x)). /// For special APFloat values, this returns special error codes: /// /// NaN -> \c IEK_NaN /// 0 -> \c IEK_Zero /// Inf -> \c IEK_Inf /// friend int ilogb(const APFloat &Arg) { if (Arg.isNaN()) return IEK_NaN; if (Arg.isZero()) return IEK_Zero; if (Arg.isInfinity()) return IEK_Inf; return Arg.exponent; } /// \brief Returns: X * 2^Exp for integral exponents. friend APFloat scalbn(APFloat X, int Exp); private: /// \name Simple Queries /// @{ integerPart *significandParts(); const integerPart *significandParts() const; unsigned int partCount() const; /// @} /// \name Significand operations. /// @{ integerPart addSignificand(const APFloat &); integerPart subtractSignificand(const APFloat &, integerPart); lostFraction addOrSubtractSignificand(const APFloat &, bool subtract); lostFraction multiplySignificand(const APFloat &, const APFloat *); lostFraction divideSignificand(const APFloat &); void incrementSignificand(); void initialize(const fltSemantics *); void shiftSignificandLeft(unsigned int); lostFraction shiftSignificandRight(unsigned int); unsigned int significandLSB() const; unsigned int significandMSB() const; void zeroSignificand(); /// Return true if the significand excluding the integral bit is all ones. 
bool isSignificandAllOnes() const; /// Return true if the significand excluding the integral bit is all zeros. bool isSignificandAllZeros() const; /// @} /// \name Arithmetic on special values. /// @{ opStatus addOrSubtractSpecials(const APFloat &, bool subtract); opStatus divideSpecials(const APFloat &); opStatus multiplySpecials(const APFloat &); opStatus modSpecials(const APFloat &); /// @} /// \name Special value setters. /// @{ void makeLargest(bool Neg = false); void makeSmallest(bool Neg = false); void makeNaN(bool SNaN = false, bool Neg = false, const APInt *fill = nullptr); static APFloat makeNaN(const fltSemantics &Sem, bool SNaN, bool Negative, const APInt *fill); void makeInf(bool Neg = false); void makeZero(bool Neg = false); /// @} /// \name Miscellany /// @{ bool convertFromStringSpecials(StringRef str); opStatus normalize(roundingMode, lostFraction); opStatus addOrSubtract(const APFloat &, roundingMode, bool subtract); cmpResult compareAbsoluteValue(const APFloat &) const; opStatus handleOverflow(roundingMode); bool roundAwayFromZero(roundingMode, lostFraction, unsigned int) const; opStatus convertToSignExtendedInteger(integerPart *, unsigned int, bool, roundingMode, bool *) const; opStatus convertFromUnsignedParts(const integerPart *, unsigned int, roundingMode); opStatus convertFromHexadecimalString(StringRef, roundingMode); opStatus convertFromDecimalString(StringRef, roundingMode); #if 0 // HLSL Change - dst should be _Out_writes_(constant), but this turns out to be unused in any case char *convertNormalToHexString( char *dst, unsigned int hexDigits, bool upperCase, roundingMode rounding_mode) const; #endif opStatus roundSignificandWithExponent(const integerPart *, unsigned int, int, roundingMode); /// @} APInt convertHalfAPFloatToAPInt() const; APInt convertFloatAPFloatToAPInt() const; APInt convertDoubleAPFloatToAPInt() const; APInt convertQuadrupleAPFloatToAPInt() const; APInt convertF80LongDoubleAPFloatToAPInt() const; APInt 
convertPPCDoubleDoubleAPFloatToAPInt() const;

  // Per-format initializers: decode a raw bit pattern (as an APInt) into this
  // APFloat's sign/exponent/significand fields for the matching semantics.
  void initFromAPInt(const fltSemantics *Sem, const APInt &api);
  void initFromHalfAPInt(const APInt &api);
  void initFromFloatAPInt(const APInt &api);
  void initFromDoubleAPInt(const APInt &api);
  void initFromQuadrupleAPInt(const APInt &api);
  void initFromF80LongDoubleAPInt(const APInt &api);
  void initFromPPCDoubleDoubleAPInt(const APInt &api);

  void assign(const APFloat &);
  void copySignificand(const APFloat &);
  void freeSignificand();

  /// The semantics that this value obeys.
  const fltSemantics *semantics;

  /// A binary fraction with an explicit integer bit.
  ///
  /// The significand must be at least one bit wider than the target precision.
  /// Single-part values live inline in 'part'; wider formats heap-allocate
  /// 'parts' (see needsCleanup()/freeSignificand()).
  union Significand {
    integerPart part;
    integerPart *parts;
  } significand;

  /// The signed unbiased exponent of the value.
  ExponentType exponent;

  /// What kind of floating point number this is.
  ///
  /// Only 2 bits are required, but VisualStudio incorrectly sign extends it.
  /// Using the extra bit keeps it from failing under VisualStudio.
  fltCategory category : 3;

  /// Sign bit of the number.
  unsigned int sign : 1;
};

/// See friend declarations above.
///
/// These additional declarations are required in order to compile LLVM with IBM
/// xlC compiler.
hash_code hash_value(const APFloat &Arg);
APFloat scalbn(APFloat X, int Exp);

/// \brief Returns the absolute value of the argument.
///
/// Takes X by value and simply clears its sign bit; works for all categories
/// (zero, normal, infinity, NaN).
inline APFloat abs(APFloat X) {
  X.clearSign();
  return X;
}

/// Implements IEEE minNum semantics. Returns the smaller of the 2 arguments if
/// both are not NaN. If either argument is a NaN, returns the other argument.
LLVM_READONLY
inline APFloat minnum(const APFloat &A, const APFloat &B) {
  if (A.isNaN())
    return B;
  if (B.isNaN())
    return A;
  // Neither is NaN: pick the smaller; ties return A.
  return (B.compare(A) == APFloat::cmpLessThan) ? B : A;
}

/// Implements IEEE maxNum semantics. Returns the larger of the 2 arguments if
/// both are not NaN. If either argument is a NaN, returns the other argument.
LLVM_READONLY inline APFloat maxnum(const APFloat &A, const APFloat &B) { if (A.isNaN()) return B; if (B.isNaN()) return A; return (A.compare(B) == APFloat::cmpLessThan) ? B : A; } } // namespace llvm #endif // LLVM_ADT_APFLOAT_H
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/ilist.h
// This file defines classes to implement an intrusive doubly linked list class
// (i.e. each node of the list must contain a next and previous field for the
// list).
// //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_ILIST_H #define LLVM_ADT_ILIST_H #include "llvm/Support/Compiler.h" #include <algorithm> #include <cassert> #include <cstddef> #include <iterator> #include <memory> // HLSL Change #include <utility> // HLSL Change namespace llvm { template<typename NodeTy, typename Traits> class iplist; template<typename NodeTy> class ilist_iterator; /// ilist_nextprev_traits - A fragment for template traits for intrusive list /// that provides default next/prev implementations for common operations. /// template<typename NodeTy> struct ilist_nextprev_traits { static NodeTy *getPrev(NodeTy *N) { return N->getPrev(); } static NodeTy *getNext(NodeTy *N) { return N->getNext(); } static const NodeTy *getPrev(const NodeTy *N) { return N->getPrev(); } static const NodeTy *getNext(const NodeTy *N) { return N->getNext(); } static void setPrev(NodeTy *N, NodeTy *Prev) { N->setPrev(Prev); } static void setNext(NodeTy *N, NodeTy *Next) { N->setNext(Next); } }; template<typename NodeTy> struct ilist_traits; /// ilist_sentinel_traits - A fragment for template traits for intrusive list /// that provides default sentinel implementations for common operations. /// /// ilist_sentinel_traits implements a lazy dynamic sentinel allocation /// strategy. The sentinel is stored in the prev field of ilist's Head. 
/// template<typename NodeTy> struct ilist_sentinel_traits { /// createSentinel - create the dynamic sentinel static NodeTy *createSentinel() { return new NodeTy(); } /// destroySentinel - deallocate the dynamic sentinel static void destroySentinel(NodeTy *N) { delete N; } /// provideInitialHead - when constructing an ilist, provide a starting /// value for its Head /// @return null node to indicate that it needs to be allocated later static NodeTy *provideInitialHead() { return nullptr; } /// ensureHead - make sure that Head is either already /// initialized or assigned a fresh sentinel /// @return the sentinel static NodeTy *ensureHead(NodeTy *&Head) { if (!Head) { Head = ilist_traits<NodeTy>::createSentinel(); ilist_traits<NodeTy>::noteHead(Head, Head); ilist_traits<NodeTy>::setNext(Head, nullptr); return Head; } return ilist_traits<NodeTy>::getPrev(Head); } /// noteHead - stash the sentinel into its default location static void noteHead(NodeTy *NewHead, NodeTy *Sentinel) { ilist_traits<NodeTy>::setPrev(NewHead, Sentinel); } // HLSL Change Starts /// setSentinel - Take ownership of a constructed sentinel object. /// Unused by this implementation. void setSentinel(std::unique_ptr<NodeTy> &&) {} // HLSL Change Ends }; /// ilist_node_traits - A fragment for template traits for intrusive list /// that provides default node related operations. /// template<typename NodeTy> struct ilist_node_traits { static NodeTy *createNode(const NodeTy &V) { return new NodeTy(V); } static void deleteNode(NodeTy *V) { delete V; } void addNodeToList(NodeTy *) {} void removeNodeFromList(NodeTy *) {} void transferNodesFromList(ilist_node_traits & /*SrcTraits*/, ilist_iterator<NodeTy> /*first*/, ilist_iterator<NodeTy> /*last*/) {} }; /// ilist_default_traits - Default template traits for intrusive list. /// By inheriting from this, you can easily use default implementations /// for all common operations. 
///
template<typename NodeTy>
struct ilist_default_traits : public ilist_nextprev_traits<NodeTy>,
                              public ilist_sentinel_traits<NodeTy>,
                              public ilist_node_traits<NodeTy> {
};

// Template traits for intrusive list.  By specializing this template class, you
// can change what next/prev fields are used to store the links...
template<typename NodeTy>
struct ilist_traits : public ilist_default_traits<NodeTy> {};

// Const traits are the same as nonconst traits...
template<typename Ty>
struct ilist_traits<const Ty> : public ilist_traits<Ty> {};

//===----------------------------------------------------------------------===//
// ilist_iterator<Node> - Iterator for intrusive list.
//
template<typename NodeTy>
class ilist_iterator {
public:
  using iterator_category = std::bidirectional_iterator_tag;
  using value_type = NodeTy;
  using difference_type = std::ptrdiff_t;
  using pointer = value_type *;
  using reference = value_type &;
  typedef ilist_traits<NodeTy> Traits;

private:
  pointer NodePtr;

  // ilist_iterator is not a random-access iterator, but it has an
  // implicit conversion to pointer-type, which is.  Declare (but
  // don't define) these functions as private to help catch
  // accidental misuse.
  void operator[](difference_type) const;
  void operator+(difference_type) const;
  void operator-(difference_type) const;
  void operator+=(difference_type) const;
  void operator-=(difference_type) const;
  template<class T> void operator<(T) const;
  template<class T> void operator<=(T) const;
  template<class T> void operator>(T) const;
  template<class T> void operator>=(T) const;
  template<class T> void operator-(T) const;
public:

  ilist_iterator(pointer NP) : NodePtr(NP) {}
  ilist_iterator(reference NR) : NodePtr(&NR) {}
  ilist_iterator() : NodePtr(nullptr) {}

  // This is templated so that we can allow constructing a const iterator from
  // a nonconst iterator...
  template<class node_ty>
  ilist_iterator(const ilist_iterator<node_ty> &RHS)
    : NodePtr(RHS.getNodePtrUnchecked()) {}

  // This is templated so that we can allow assigning to a const iterator from
  // a nonconst iterator...
  template<class node_ty>
  const ilist_iterator &operator=(const ilist_iterator<node_ty> &RHS) {
    NodePtr = RHS.getNodePtrUnchecked();
    return *this;
  }

  // Accessors...
  // NOTE: the implicit conversion to pointer is what the private deleted-style
  // declarations above guard against misusing in arithmetic.
  operator pointer() const { return NodePtr; }
  reference operator*() const { return *NodePtr; }
  pointer operator->() const { return &operator*(); }

  // Comparison operators
  bool operator==(const ilist_iterator &RHS) const {
    return NodePtr == RHS.NodePtr;
  }
  bool operator!=(const ilist_iterator &RHS) const {
    return NodePtr != RHS.NodePtr;
  }

  // Increment and decrement operators...
  ilist_iterator &operator--() {      // predecrement - Back up
    NodePtr = Traits::getPrev(NodePtr);
    assert(NodePtr && "--'d off the beginning of an ilist!");
    return *this;
  }
  ilist_iterator &operator++() {      // preincrement - Advance
    // No assert here: advancing to the sentinel (end()) yields a null next
    // only past the sentinel itself.
    NodePtr = Traits::getNext(NodePtr);
    return *this;
  }
  ilist_iterator operator--(int) {    // postdecrement operators...
    ilist_iterator tmp = *this;
    --*this;
    return tmp;
  }
  ilist_iterator operator++(int) {    // postincrement operators...
    ilist_iterator tmp = *this;
    ++*this;
    return tmp;
  }

  // Internal interface, do not use...
  pointer getNodePtrUnchecked() const { return NodePtr; }
};

// These are to catch errors when people try to use them as random access
// iterators.
template<typename T>
void operator-(int, ilist_iterator<T>) = delete;
template<typename T>
void operator-(ilist_iterator<T>,int) = delete;
template<typename T>
void operator+(int, ilist_iterator<T>) = delete;
template<typename T>
void operator+(ilist_iterator<T>,int) = delete;

// operator!=/operator== - Allow mixed comparisons without dereferencing
// the iterator, which could very likely be pointing to end().
// HLSL Change Begin: Support for C++20 template<typename T, typename U> bool operator!=(const T* LHS, const ilist_iterator<const U> &RHS) { return LHS != RHS.getNodePtrUnchecked(); } template<typename T, typename U> bool operator==(const T* LHS, const ilist_iterator<const U> &RHS) { return LHS == RHS.getNodePtrUnchecked(); } template<typename T, typename U> bool operator!=(T* LHS, const ilist_iterator<U> &RHS) { return LHS != RHS.getNodePtrUnchecked(); } template<typename T, typename U> bool operator==(T* LHS, const ilist_iterator<U> &RHS) { return LHS == RHS.getNodePtrUnchecked(); } // HLSL Change End // Allow ilist_iterators to convert into pointers to a node automatically when // used by the dyn_cast, cast, isa mechanisms... template<typename From> struct simplify_type; template<typename NodeTy> struct simplify_type<ilist_iterator<NodeTy> > { typedef NodeTy* SimpleType; static SimpleType getSimplifiedValue(ilist_iterator<NodeTy> &Node) { return &*Node; } }; template<typename NodeTy> struct simplify_type<const ilist_iterator<NodeTy> > { typedef /*const*/ NodeTy* SimpleType; static SimpleType getSimplifiedValue(const ilist_iterator<NodeTy> &Node) { return &*Node; } }; // // /////////////////////////////////////////////////////////////////////////////// // /// iplist - The subset of list functionality that can safely be used on nodes /// of polymorphic types, i.e. a heterogeneous list with a common base class that /// holds the next/prev pointers. The only state of the list itself is a single /// pointer to the head of the list. /// /// This list can be in one of three interesting states: /// 1. The list may be completely unconstructed. In this case, the head /// pointer is null. When in this form, any query for an iterator (e.g. /// begin() or end()) causes the list to transparently change to state #2. /// 2. The list may be empty, but contain a sentinel for the end iterator. 
///    This sentinel is created by the Traits::createSentinel method and is a
///    link in the list.  When the list is empty, the pointer in the iplist
///    points to the sentinel.  Once the sentinel is constructed, it
///    is not destroyed until the list is.
/// 3. The list may contain actual objects in it, which are stored as a doubly
///    linked list of nodes.  One invariant of the list is that the predecessor
///    of the first node in the list always points to the last node in the
///    list, and the successor pointer for the sentinel (which always stays at
///    the end of the list) is always null.
///
template<typename NodeTy, typename Traits=ilist_traits<NodeTy> >
class iplist : public Traits {
  // Mutable so that the sentinel can be lazily created by const accessors.
  mutable NodeTy *Head;

  // Use the prev node pointer of 'head' as the tail pointer.  This is really a
  // circularly linked list where we snip the 'next' link from the sentinel node
  // back to the first node in the list (to preserve assertions about going off
  // the end of the list).
  NodeTy *getTail() { return this->ensureHead(Head); }
  const NodeTy *getTail() const { return this->ensureHead(Head); }

  void setTail(NodeTy *N) const { this->noteHead(Head, N); }

  /// CreateLazySentinel - This method verifies whether the sentinel for the
  /// list has been created and lazily makes it if not.
  void CreateLazySentinel() const {
    this->ensureHead(Head);
  }

  static bool op_less(NodeTy &L, NodeTy &R) { return L < R; }
  static bool op_equal(NodeTy &L, NodeTy &R) { return L == R; }

  // No fundamental reason why iplist can't be copyable, but the default
  // copy/copy-assign won't do.
  iplist(const iplist &) = delete;
  void operator=(const iplist &) = delete;

public:
  typedef NodeTy *pointer;
  typedef const NodeTy *const_pointer;
  typedef NodeTy &reference;
  typedef const NodeTy &const_reference;
  typedef NodeTy value_type;
  typedef ilist_iterator<NodeTy> iterator;
  typedef ilist_iterator<const NodeTy> const_iterator;
  typedef size_t size_type;
  typedef ptrdiff_t difference_type;
  typedef std::reverse_iterator<const_iterator>  const_reverse_iterator;
  typedef std::reverse_iterator<iterator>  reverse_iterator;

  // Default constructor.  Relies on base classes to create and manage
  // the lifetime of the sentinel.
  iplist() : Head(this->provideInitialHead()) {}

  // HLSL Change Starts
  // Construct with a sentinel object, and take ownership of the sentinel.
  iplist(std::unique_ptr<NodeTy> initial_head) : Head(initial_head.get()) {
    // Transfer ownership to the sentinel traits base class implementation.
    this->setSentinel(std::move(initial_head));
  }
  // HLSL Change Ends

  ~iplist() {
    if (!Head) return;  // Sentinel was never created; nothing to tear down.
    clear();
    Traits::destroySentinel(getTail());
  }

  // Iterator creation methods.
  iterator begin() {
    CreateLazySentinel();
    return iterator(Head);
  }
  const_iterator begin() const {
    CreateLazySentinel();
    return const_iterator(Head);
  }
  iterator end() {
    CreateLazySentinel();
    return iterator(getTail());
  }
  const_iterator end() const {
    CreateLazySentinel();
    return const_iterator(getTail());
  }

  // reverse iterator creation methods.
  reverse_iterator rbegin()            { return reverse_iterator(end()); }
  const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
  reverse_iterator rend()              { return reverse_iterator(begin()); }
  const_reverse_iterator rend() const  { return const_reverse_iterator(begin());}

  // Miscellaneous inspection routines.
  size_type max_size() const { return size_type(-1); }
  bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const {
    // Empty either before the sentinel exists, or when Head IS the sentinel.
    return !Head || Head == getTail();
  }

  // Front and back accessor functions...
  reference front() {
    assert(!empty() && "Called front() on empty list!");
    return *Head;
  }
  const_reference front() const {
    assert(!empty() && "Called front() on empty list!");
    return *Head;
  }
  reference back() {
    assert(!empty() && "Called back() on empty list!");
    return *this->getPrev(getTail());
  }
  const_reference back() const {
    assert(!empty() && "Called back() on empty list!");
    return *this->getPrev(getTail());
  }

  void swap(iplist &RHS) {
    // NOTE(review): intentionally unimplemented - swapping Heads alone would
    // bypass the addNodeToList/removeNodeFromList traits callbacks.
    assert(0 && "Swap does not use list traits callback correctly yet!");
    std::swap(Head, RHS.Head);
  }

  /// insert - Link the already-allocated node \p New into the list immediately
  /// before \p where, and notify the traits.  Returns an iterator to \p New.
  iterator insert(iterator where, NodeTy *New) {
    NodeTy *CurNode = where.getNodePtrUnchecked();
    NodeTy *PrevNode = this->getPrev(CurNode);
    this->setNext(New, CurNode);
    this->setPrev(New, PrevNode);

    if (CurNode != Head)  // Is PrevNode off the beginning of the list?
      this->setNext(PrevNode, New);
    else
      Head = New;
    this->setPrev(CurNode, New);

    // HLSL Change Begin: Undo insertion if exception
    try {
      this->addNodeToList(New);  // Notify traits that we added a node...
    } catch (...) {
      // Undo insertion: restore the links exactly as they were before the
      // splice above, then re-throw so the caller still owns New.
      if (New == Head)
        Head = CurNode;
      else
        this->setNext(PrevNode, CurNode);
      this->setPrev(CurNode, PrevNode);
      this->setPrev(New, nullptr);
      this->setNext(New, nullptr);
      throw;
    }
    // HLSL Change End

    return New;
  }

  iterator insertAfter(iterator where, NodeTy *New) {
    if (empty())
      return insert(begin(), New);
    else
      return insert(++where, New);
  }

  /// remove - Unlink the node at \p IT without deleting it; \p IT is advanced
  /// to the following node.  Returns the unlinked node.
  NodeTy *remove(iterator &IT) {
    assert(IT != end() && "Cannot remove end of list!");
    NodeTy *Node = &*IT;
    NodeTy *NextNode = this->getNext(Node);
    NodeTy *PrevNode = this->getPrev(Node);

    if (Node != Head)  // Is PrevNode off the beginning of the list?
      this->setNext(PrevNode, NextNode);
    else
      Head = NextNode;
    this->setPrev(NextNode, PrevNode);
    IT = NextNode;
    this->removeNodeFromList(Node);  // Notify traits that we removed a node...

    // Set the next/prev pointers of the current node to null.
    // This isn't strictly required, but this catches errors where a node is
    // removed from an ilist (and potentially deleted) with iterators still
    // pointing at it.  When those iterators are incremented or decremented,
    // they will assert on the null next/prev pointer instead of "usually
    // working".
    this->setNext(Node, nullptr);
    this->setPrev(Node, nullptr);
    return Node;
  }

  NodeTy *remove(const iterator &IT) {
    iterator MutIt = IT;
    return remove(MutIt);
  }

  // erase - remove a node from the controlled sequence... and delete it.
  iterator erase(iterator where) {
    this->deleteNode(remove(where));
    return where;
  }

  /// Remove all nodes from the list like clear(), but do not call
  /// removeNodeFromList() or deleteNode().
  ///
  /// This should only be used immediately before freeing nodes in bulk to
  /// avoid traversing the list and bringing all the nodes into cache.
  void clearAndLeakNodesUnsafely() {
    if (Head) {
      Head = getTail();
      this->setPrev(Head, Head);
    }
  }

private:
  // transfer - The heart of the splice function.  Move linked list nodes from
  // [first, last) into position.
  //
  void transfer(iterator position, iplist &L2, iterator first, iterator last) {
    assert(first != last && "Should be checked by callers");
    // Position cannot be contained in the range to be transferred.
    // Check for the most common mistake.
    assert(position != first &&
           "Insertion point can't be one of the transferred nodes");

    if (position != last) {
      // Note: we have to be careful about the case when we move the first node
      // in the list.  This node is the list sentinel node and we can't move it.
      // Detach both sentinels while we rewire, so the circular prev links
      // don't get confused with real nodes.
      NodeTy *ThisSentinel = getTail();
      setTail(nullptr);
      NodeTy *L2Sentinel = L2.getTail();
      L2.setTail(nullptr);

      // Remove [first, last) from its old position.
      NodeTy *First = &*first, *Prev = this->getPrev(First);
      NodeTy *Next = last.getNodePtrUnchecked(), *Last = this->getPrev(Next);
      if (Prev)
        this->setNext(Prev, Next);
      else
        L2.Head = Next;
      this->setPrev(Next, Prev);

      // Splice [first, last) into its new position.
      NodeTy *PosNext = position.getNodePtrUnchecked();
      NodeTy *PosPrev = this->getPrev(PosNext);

      // Fix head of list...
      if (PosPrev)
        this->setNext(PosPrev, First);
      else
        Head = First;
      this->setPrev(First, PosPrev);

      // Fix end of list...
      this->setNext(Last, PosNext);
      this->setPrev(PosNext, Last);

      this->transferNodesFromList(L2, First, PosNext);

      // Now that everything is set, restore the pointers to the list sentinels.
      L2.setTail(L2Sentinel);
      setTail(ThisSentinel);
    }
  }

public:

  //===----------------------------------------------------------------------===
  // Functionality derived from other functions defined above...
  //

  // O(n): walks the whole list; callers wanting "is it empty" should use
  // empty() instead.
  size_type LLVM_ATTRIBUTE_UNUSED_RESULT size() const {
    if (!Head) return 0; // Don't require construction of sentinel if empty.
    return std::distance(begin(), end());
  }

  iterator erase(iterator first, iterator last) {
    while (first != last)
      first = erase(first);
    return last;
  }

  void clear() { if (Head) erase(begin(), end()); }

  // Front and back inserters...
  void push_front(NodeTy *val) { insert(begin(), val); }
  void push_back(NodeTy *val) { insert(end(), val); }
  void pop_front() {
    assert(!empty() && "pop_front() on empty list!");
    erase(begin());
  }
  void pop_back() {
    assert(!empty() && "pop_back() on empty list!");
    iterator t = end();
    erase(--t);
  }

  // Special forms of insert...
  template<class InIt> void insert(iterator where, InIt first, InIt last) {
    for (; first != last; ++first) insert(where, *first);
  }

  // Splice members - defined in terms of transfer...
void splice(iterator where, iplist &L2) { if (!L2.empty()) transfer(where, L2, L2.begin(), L2.end()); } void splice(iterator where, iplist &L2, iterator first) { iterator last = first; ++last; if (where == first || where == last) return; // No change transfer(where, L2, first, last); } void splice(iterator where, iplist &L2, iterator first, iterator last) { if (first != last) transfer(where, L2, first, last); } }; template<typename NodeTy> struct ilist : public iplist<NodeTy> { typedef typename iplist<NodeTy>::size_type size_type; typedef typename iplist<NodeTy>::iterator iterator; ilist() {} ilist(const ilist &right) { insert(this->begin(), right.begin(), right.end()); } explicit ilist(size_type count) { insert(this->begin(), count, NodeTy()); } ilist(size_type count, const NodeTy &val) { insert(this->begin(), count, val); } template<class InIt> ilist(InIt first, InIt last) { insert(this->begin(), first, last); } // bring hidden functions into scope using iplist<NodeTy>::insert; using iplist<NodeTy>::push_front; using iplist<NodeTy>::push_back; // Main implementation here - Insert for a node passed by value... iterator insert(iterator where, const NodeTy &val) { return insert(where, this->createNode(val)); } // Front and back inserters... void push_front(const NodeTy &val) { insert(this->begin(), val); } void push_back(const NodeTy &val) { insert(this->end(), val); } void insert(iterator where, size_type count, const NodeTy &val) { for (; count != 0; --count) insert(where, val); } // Assign special forms... 
void assign(size_type count, const NodeTy &val) { iterator I = this->begin(); for (; I != this->end() && count != 0; ++I, --count) *I = val; if (count != 0) insert(this->end(), val, val); else erase(I, this->end()); } template<class InIt> void assign(InIt first1, InIt last1) { iterator first2 = this->begin(), last2 = this->end(); for ( ; first1 != last1 && first2 != last2; ++first1, ++first2) *first1 = *first2; if (first2 == last2) erase(first1, last1); else insert(last1, first2, last2); } // Resize members... void resize(size_type newsize, NodeTy val) { iterator i = this->begin(); size_type len = 0; for ( ; i != this->end() && len < newsize; ++i, ++len) /* empty*/ ; if (len == newsize) erase(i, this->end()); else // i == end() insert(this->end(), newsize - len, val); } void resize(size_type newsize) { resize(newsize, NodeTy()); } }; } // End llvm namespace namespace std { // Ensure that swap uses the fast list swap... template<class Ty> void swap(llvm::iplist<Ty> &Left, llvm::iplist<Ty> &Right) { Left.swap(Right); } } // End 'std' extensions... #endif // LLVM_ADT_ILIST_H
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/SetOperations.h
//===-- llvm/ADT/SetOperations.h - Generic Set Operations -------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines generic set operations that may be used on set's of // different types, and different element types. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_SETOPERATIONS_H #define LLVM_ADT_SETOPERATIONS_H namespace llvm { /// set_union(A, B) - Compute A := A u B, return whether A changed. /// template <class S1Ty, class S2Ty> bool set_union(S1Ty &S1, const S2Ty &S2) { bool Changed = false; for (typename S2Ty::const_iterator SI = S2.begin(), SE = S2.end(); SI != SE; ++SI) if (S1.insert(*SI).second) Changed = true; return Changed; } /// set_intersect(A, B) - Compute A := A ^ B /// Identical to set_intersection, except that it works on set<>'s and /// is nicer to use. Functionally, this iterates through S1, removing /// elements that are not contained in S2. /// template <class S1Ty, class S2Ty> void set_intersect(S1Ty &S1, const S2Ty &S2) { for (typename S1Ty::iterator I = S1.begin(); I != S1.end();) { const typename S1Ty::key_type &E = *I; ++I; if (!S2.count(E)) S1.erase(E); // Erase element if not in S2 } } /// set_difference(A, B) - Return A - B /// template <class S1Ty, class S2Ty> S1Ty set_difference(const S1Ty &S1, const S2Ty &S2) { S1Ty Result; for (typename S1Ty::const_iterator SI = S1.begin(), SE = S1.end(); SI != SE; ++SI) if (!S2.count(*SI)) // if the element is not in set2 Result.insert(*SI); return Result; } /// set_subtract(A, B) - Compute A := A - B /// template <class S1Ty, class S2Ty> void set_subtract(S1Ty &S1, const S2Ty &S2) { for (typename S2Ty::const_iterator SI = S2.begin(), SE = S2.end(); SI != SE; ++SI) S1.erase(*SI); } } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/MapVector.h
//===- llvm/ADT/MapVector.h - Map w/ deterministic value order --*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements a map that provides insertion order iteration. The // interface is purposefully minimal. The key is assumed to be cheap to copy // and 2 copies are kept, one for indexing in a DenseMap, one for iteration in // a std::vector. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_MAPVECTOR_H #define LLVM_ADT_MAPVECTOR_H #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallVector.h" #include <vector> namespace llvm { /// This class implements a map that also provides access to all stored values /// in a deterministic order. The values are kept in a std::vector and the /// mapping is done with DenseMap from Keys to indexes in that vector. 
template<typename KeyT, typename ValueT,
         typename MapType = llvm::DenseMap<KeyT, unsigned>,
         typename VectorType = std::vector<std::pair<KeyT, ValueT> > >
class MapVector {
  typedef typename VectorType::size_type size_type;

  MapType Map;       // Maps each key to its index in Vector.
  VectorType Vector; // (key, value) pairs in insertion order.

public:
  typedef typename VectorType::iterator iterator;
  typedef typename VectorType::const_iterator const_iterator;
  typedef typename VectorType::reverse_iterator reverse_iterator;
  typedef typename VectorType::const_reverse_iterator const_reverse_iterator;

  size_type size() const { return Vector.size(); }

  // Iteration is over the vector, so elements are visited in insertion order.
  iterator begin() { return Vector.begin(); }
  const_iterator begin() const { return Vector.begin(); }
  iterator end() { return Vector.end(); }
  const_iterator end() const { return Vector.end(); }

  reverse_iterator rbegin() { return Vector.rbegin(); }
  const_reverse_iterator rbegin() const { return Vector.rbegin(); }
  reverse_iterator rend() { return Vector.rend(); }
  const_reverse_iterator rend() const { return Vector.rend(); }

  bool empty() const {
    return Vector.empty();
  }

  // front()/back() - First/last element in insertion order.
  std::pair<KeyT, ValueT>       &front()       { return Vector.front(); }
  const std::pair<KeyT, ValueT> &front() const { return Vector.front(); }
  std::pair<KeyT, ValueT>       &back()        { return Vector.back(); }
  const std::pair<KeyT, ValueT> &back()  const { return Vector.back(); }

  void clear() {
    Map.clear();
    Vector.clear();
  }

  void swap(MapVector &RHS) {
    std::swap(Map, RHS.Map);
    std::swap(Vector, RHS.Vector);
  }

  // operator[] - Return the value for Key, default-constructing (and
  // appending) a new element first if Key is not yet present.
  ValueT &operator[](const KeyT &Key) {
    std::pair<KeyT, unsigned> Pair = std::make_pair(Key, 0);
    std::pair<typename MapType::iterator, bool> Result = Map.insert(Pair);
    unsigned &I = Result.first->second;
    if (Result.second) {
      // New key: append to the vector and record its index in the map.
      Vector.push_back(std::make_pair(Key, ValueT()));
      I = Vector.size() - 1;
    }
    return Vector[I].second;
  }

  // lookup - Return a copy of the value for Key, or a default-constructed
  // ValueT if Key is absent.  Never inserts.
  ValueT lookup(const KeyT &Key) const {
    typename MapType::const_iterator Pos = Map.find(Key);
    return Pos == Map.end()? ValueT() : Vector[Pos->second].second;
  }

  // insert - Insert KV only if its key is not already present.  Returns the
  // iterator to the (existing or newly inserted) element, and whether an
  // insertion actually happened.
  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
    std::pair<KeyT, unsigned> Pair = std::make_pair(KV.first, 0);
    std::pair<typename MapType::iterator, bool> Result = Map.insert(Pair);
    unsigned &I = Result.first->second;
    if (Result.second) {
      Vector.push_back(std::make_pair(KV.first, KV.second));
      I = Vector.size() - 1;
      return std::make_pair(std::prev(end()), true);
    }
    return std::make_pair(begin() + I, false);
  }

  // count - Return 1 if Key is present, 0 otherwise (keys are unique).
  size_type count(const KeyT &Key) const {
    typename MapType::const_iterator Pos = Map.find(Key);
    return Pos == Map.end()? 0 : 1;
  }

  iterator find(const KeyT &Key) {
    typename MapType::const_iterator Pos = Map.find(Key);
    return Pos == Map.end()? Vector.end() :
                            (Vector.begin() + Pos->second);
  }

  const_iterator find(const KeyT &Key) const {
    typename MapType::const_iterator Pos = Map.find(Key);
    return Pos == Map.end()? Vector.end() :
                            (Vector.begin() + Pos->second);
  }

  /// \brief Remove the last element from the vector.
  void pop_back() {
    typename MapType::iterator Pos = Map.find(Vector.back().first);
    Map.erase(Pos);
    Vector.pop_back();
  }

  /// \brief Remove the element given by Iterator.
  ///
  /// Returns an iterator to the element following the one which was removed,
  /// which may be end().
  ///
  /// \note This is a deceivingly expensive operation (linear time).  It's
  /// usually better to use \a remove_if() if possible.
  typename VectorType::iterator erase(typename VectorType::iterator Iterator) {
    Map.erase(Iterator->first);
    auto Next = Vector.erase(Iterator);
    if (Next == Vector.end())
      return Next;

    // Update indices in the map: every element after the erased slot has
    // shifted down by one position in the vector.
    size_t Index = Next - Vector.begin();
    for (auto &I : Map) {
      assert(I.second != Index && "Index was already erased!");
      if (I.second > Index)
        --I.second;
    }
    return Next;
  }

  /// \brief Remove all elements with the key value Key.
  ///
  /// Returns the number of elements removed.
  size_type erase(const KeyT &Key) {
    auto Iterator = find(Key);
    if (Iterator == end())
      return 0;
    erase(Iterator);
    return 1;  // Keys are unique, so at most one element was removed.
  }

  /// \brief Remove the elements that match the predicate.
  ///
  /// Erase all elements that match \c Pred in a single pass.  Takes linear
  /// time.
  template <class Predicate> void remove_if(Predicate Pred);
};

template <typename KeyT, typename ValueT, typename MapType, typename VectorType>
template <class Function>
void MapVector<KeyT, ValueT, MapType, VectorType>::remove_if(Function Pred) {
  // Compaction pass: O is the next slot to keep an element in; I scans all
  // elements.  Matching elements are dropped; kept elements are shifted left
  // and their map indices refreshed.
  auto O = Vector.begin();
  for (auto I = O, E = Vector.end(); I != E; ++I) {
    if (Pred(*I)) {
      // Erase from the map.
      Map.erase(I->first);
      continue;
    }

    if (I != O) {
      // Move the value and update the index in the map.
      *O = std::move(*I);
      Map[O->first] = O - Vector.begin();
    }
    ++O;
  }
  // Erase trailing entries in the vector.
  Vector.erase(O, Vector.end());
}

/// \brief A MapVector that performs no allocations if smaller than a certain
/// size.
template <typename KeyT, typename ValueT, unsigned N>
struct SmallMapVector
    : MapVector<KeyT, ValueT, SmallDenseMap<KeyT, unsigned, N>,
                SmallVector<std::pair<KeyT, ValueT>, N>> {
};

} // end namespace llvm

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/IntervalMap.h
//===- llvm/ADT/IntervalMap.h - A sorted interval map -----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements a coalescing interval map for small objects. // // KeyT objects are mapped to ValT objects. Intervals of keys that map to the // same value are represented in a compressed form. // // Iterators provide ordered access to the compressed intervals rather than the // individual keys, and insert and erase operations use key intervals as well. // // Like SmallVector, IntervalMap will store the first N intervals in the map // object itself without any allocations. When space is exhausted it switches to // a B+-tree representation with very small overhead for small key and value // objects. // // A Traits class specifies how keys are compared. It also allows IntervalMap to // work with both closed and half-open intervals. // // Keys and values are not stored next to each other in a std::pair, so we don't // provide such a value_type. Dereferencing iterators only returns the mapped // value. The interval bounds are accessible through the start() and stop() // iterator methods. // // IntervalMap is optimized for small key and value objects, 4 or 8 bytes each // is the optimal size. For large objects use std::map instead. 
// //===----------------------------------------------------------------------===// // // Synopsis: // // template <typename KeyT, typename ValT, unsigned N, typename Traits> // class IntervalMap { // public: // typedef KeyT key_type; // typedef ValT mapped_type; // typedef RecyclingAllocator<...> Allocator; // class iterator; // class const_iterator; // // explicit IntervalMap(Allocator&); // ~IntervalMap(): // // bool empty() const; // KeyT start() const; // KeyT stop() const; // ValT lookup(KeyT x, Value NotFound = Value()) const; // // const_iterator begin() const; // const_iterator end() const; // iterator begin(); // iterator end(); // const_iterator find(KeyT x) const; // iterator find(KeyT x); // // void insert(KeyT a, KeyT b, ValT y); // void clear(); // }; // // template <typename KeyT, typename ValT, unsigned N, typename Traits> // class IntervalMap::const_iterator { // public: // using iterator_category = std::bidirectional_iterator_tag; // using value_type = ValT; // using difference_type = std::ptrdiff_t; // using pointer = value_type *; // using reference = value_type &; // // bool operator==(const const_iterator &) const; // bool operator!=(const const_iterator &) const; // bool valid() const; // // const KeyT &start() const; // const KeyT &stop() const; // const ValT &value() const; // const ValT &operator*() const; // const ValT *operator->() const; // // const_iterator &operator++(); // const_iterator &operator++(int); // const_iterator &operator--(); // const_iterator &operator--(int); // void goToBegin(); // void goToEnd(); // void find(KeyT x); // void advanceTo(KeyT x); // }; // // template <typename KeyT, typename ValT, unsigned N, typename Traits> // class IntervalMap::iterator : public const_iterator { // public: // void insert(KeyT a, KeyT b, Value y); // void erase(); // }; // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_INTERVALMAP_H #define LLVM_ADT_INTERVALMAP_H #include 
"llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/RecyclingAllocator.h"
#include <iterator>

namespace llvm {


//===----------------------------------------------------------------------===//
//---                              Key traits                              ---//
//===----------------------------------------------------------------------===//
//
// The IntervalMap works with closed or half-open intervals.
// Adjacent intervals that map to the same value are coalesced.
//
// The IntervalMapInfo traits class is used to determine if a key is contained
// in an interval, and if two intervals are adjacent so they can be coalesced.
// The provided implementation works for closed integer intervals, other keys
// probably need a specialized version.
//
// The point x is contained in [a;b] when !startLess(x, a) && !stopLess(b, x).
//
// It is assumed that (a;b] half-open intervals are not used, only [a;b) is
// allowed. This is so that stopLess(a, b) can be used to determine if two
// intervals overlap.
//
//===----------------------------------------------------------------------===//

template <typename T>
struct IntervalMapInfo {

  /// startLess - Return true if x is not in [a;b].
  /// This is x < a both for closed intervals and for [a;b) half-open intervals.
  static inline bool startLess(const T &x, const T &a) {
    return x < a;
  }

  /// stopLess - Return true if x is not in [a;b].
  /// This is b < x for a closed interval, b <= x for [a;b) half-open intervals.
  static inline bool stopLess(const T &b, const T &x) {
    return b < x;
  }

  /// adjacent - Return true when the intervals [x;a] and [b;y] can coalesce.
  /// This is a+1 == b for closed intervals, a == b for half-open intervals.
  static inline bool adjacent(const T &a, const T &b) {
    return a+1 == b;
  }

};

template <typename T>
struct IntervalMapHalfOpenInfo {

  /// startLess - Return true if x is not in [a;b).
  static inline bool startLess(const T &x, const T &a) {
    return x < a;
  }

  /// stopLess - Return true if x is not in [a;b).
  static inline bool stopLess(const T &b, const T &x) {
    return b <= x;
  }

  /// adjacent - Return true when the intervals [x;a) and [b;y) can coalesce.
  static inline bool adjacent(const T &a, const T &b) {
    return a == b;
  }

};

/// IntervalMapImpl - Namespace used for IntervalMap implementation details.
/// It should be considered private to the implementation.
namespace IntervalMapImpl {

// Forward declarations.
template <typename, typename, unsigned, typename> class LeafNode;
template <typename, typename, unsigned, typename> class BranchNode;

typedef std::pair<unsigned,unsigned> IdxPair;


//===----------------------------------------------------------------------===//
//---                    IntervalMapImpl::NodeBase                         ---//
//===----------------------------------------------------------------------===//
//
// Both leaf and branch nodes store vectors of pairs.
// Leaves store ((KeyT, KeyT), ValT) pairs, branches use (NodeRef, KeyT).
//
// Keys and values are stored in separate arrays to avoid padding caused by
// different object alignments. This also helps improve locality of reference
// when searching the keys.
//
// The nodes don't know how many elements they contain - that information is
// stored elsewhere. Omitting the size field prevents padding and allows a node
// to fill the allocated cache lines completely.
//
// These are typical key and value sizes, the node branching factor (N), and
// wasted space when nodes are sized to fit in three cache lines (192 bytes):
//
//   T1  T2   N Waste  Used by
//    4   4  24   0    Branch<4> (32-bit pointers)
//    8   4  16   0    Leaf<4,4>, Branch<4>
//    8   8  12   0    Leaf<4,8>, Branch<8>
//   16   4   9  12    Leaf<8,4>
//   16   8   8   0    Leaf<8,8>
//
//===----------------------------------------------------------------------===//

template <typename T1, typename T2, unsigned N>
class NodeBase {
public:
  enum { Capacity = N };

  T1 first[N];
  T2 second[N];

  /// copy - Copy elements from another node.
  /// @param Other Node elements are copied from.
  /// @param i     Beginning of the source range in other.
  /// @param j     Beginning of the destination range in this.
  /// @param Count Number of elements to copy.
  template <unsigned M>
  void copy(const NodeBase<T1, T2, M> &Other, unsigned i,
            unsigned j, unsigned Count) {
    assert(i + Count <= M && "Invalid source range");
    assert(j + Count <= N && "Invalid dest range");
    for (unsigned e = i + Count; i != e; ++i, ++j) {
      first[j]  = Other.first[i];
      second[j] = Other.second[i];
    }
  }

  /// moveLeft - Move elements to the left.
  /// @param i     Beginning of the source range.
  /// @param j     Beginning of the destination range.
  /// @param Count Number of elements to copy.
  void moveLeft(unsigned i, unsigned j, unsigned Count) {
    assert(j <= i && "Use moveRight shift elements right");
    // Ascending copy is safe here because the destination is left of the
    // overlapping source.
    copy(*this, i, j, Count);
  }

  /// moveRight - Move elements to the right.
  /// @param i     Beginning of the source range.
  /// @param j     Beginning of the destination range.
  /// @param Count Number of elements to copy.
  void moveRight(unsigned i, unsigned j, unsigned Count) {
    assert(i <= j && "Use moveLeft shift elements left");
    assert(j + Count <= N && "Invalid range");
    // Copy backwards so an overlapping source is not clobbered.
    while (Count--) {
      first[j + Count]  = first[i + Count];
      second[j + Count] = second[i + Count];
    }
  }

  /// erase - Erase elements [i;j).
  /// @param i    Beginning of the range to erase.
  /// @param j    End of the range. (Exclusive).
  /// @param Size Number of elements in node.
  void erase(unsigned i, unsigned j, unsigned Size) {
    moveLeft(j, i, Size - j);
  }

  /// erase - Erase element at i.
  /// @param i    Index of element to erase.
  /// @param Size Number of elements in node.
  void erase(unsigned i, unsigned Size) {
    erase(i, i+1, Size);
  }

  /// shift - Shift elements [i;size) 1 position to the right.
  /// @param i    Beginning of the range to move.
  /// @param Size Number of elements in node.
  void shift(unsigned i, unsigned Size) {
    moveRight(i, i + 1, Size - i);
  }

  /// transferToLeftSib - Transfer elements to a left sibling node.
  /// @param Size  Number of elements in this.
  /// @param Sib   Left sibling node.
  /// @param SSize Number of elements in sib.
  /// @param Count Number of elements to transfer.
  void transferToLeftSib(unsigned Size, NodeBase &Sib, unsigned SSize,
                         unsigned Count) {
    Sib.copy(*this, 0, SSize, Count);
    erase(0, Count, Size);
  }

  /// transferToRightSib - Transfer elements to a right sibling node.
  /// @param Size  Number of elements in this.
  /// @param Sib   Right sibling node.
  /// @param SSize Number of elements in sib.
  /// @param Count Number of elements to transfer.
  void transferToRightSib(unsigned Size, NodeBase &Sib, unsigned SSize,
                          unsigned Count) {
    // Make room at the front of Sib, then fill it from our tail.
    Sib.moveRight(0, Count, SSize);
    Sib.copy(*this, Size-Count, 0, Count);
  }

  /// adjustFromLeftSib - Adjust the number if elements in this node by moving
  /// elements to or from a left sibling node.
  /// @param Size  Number of elements in this.
  /// @param Sib   Right sibling node.
  /// @param SSize Number of elements in sib.
  /// @param Add   The number of elements to add to this node, possibly < 0.
  /// @return      Number of elements added to this node, possibly negative.
  int adjustFromLeftSib(unsigned Size, NodeBase &Sib, unsigned SSize, int Add) {
    if (Add > 0) {
      // We want to grow, copy from sib.
      // Clamp by what the sibling has and by our remaining capacity.
      unsigned Count = std::min(std::min(unsigned(Add), SSize), N - Size);
      Sib.transferToRightSib(SSize, *this, Size, Count);
      return Count;
    } else {
      // We want to shrink, copy to sib.
      unsigned Count = std::min(std::min(unsigned(-Add), Size), N - SSize);
      transferToLeftSib(Size, Sib, SSize, Count);
      return -Count;
    }
  }
};

/// IntervalMapImpl::adjustSiblingSizes - Move elements between sibling nodes.
/// @param Node  Array of pointers to sibling nodes.
/// @param Nodes Number of nodes.
/// @param CurSize Array of current node sizes, will be overwritten.
/// @param NewSize Array of desired node sizes.
template <typename NodeT>
void adjustSiblingSizes(NodeT *Node[], unsigned Nodes,
                        unsigned CurSize[], const unsigned NewSize[]) {
  // Move elements right.
  for (int n = Nodes - 1; n; --n) {
    if (CurSize[n] == NewSize[n])
      continue;
    // Pull elements from nodes to the left until this node reaches its
    // desired size.
    for (int m = n - 1; m != -1; --m) {
      int d = Node[n]->adjustFromLeftSib(CurSize[n], *Node[m], CurSize[m],
                                         NewSize[n] - CurSize[n]);
      CurSize[m] -= d;
      CurSize[n] += d;
      // Keep going if the current node was exhausted.
      if (CurSize[n] >= NewSize[n])
          break;
    }
  }

  if (Nodes == 0)
    return;

  // Move elements left.
  for (unsigned n = 0; n != Nodes - 1; ++n) {
    if (CurSize[n] == NewSize[n])
      continue;
    // Push our surplus into nodes to the right.
    for (unsigned m = n + 1; m != Nodes; ++m) {
      int d = Node[m]->adjustFromLeftSib(CurSize[m], *Node[n], CurSize[n],
                                        CurSize[n] -  NewSize[n]);
      CurSize[m] += d;
      CurSize[n] -= d;
      // Keep going if the current node was exhausted.
      if (CurSize[n] >= NewSize[n])
          break;
    }
  }

#ifndef NDEBUG
  for (unsigned n = 0; n != Nodes; n++)
    assert(CurSize[n] == NewSize[n] && "Insufficient element shuffle");
#endif
}

/// IntervalMapImpl::distribute - Compute a new distribution of node elements
/// after an overflow or underflow. Reserve space for a new element at Position,
/// and compute the node that will hold Position after redistributing node
/// elements.
///
/// It is required that
///
///   Elements == sum(CurSize), and
///   Elements + Grow <= Nodes * Capacity.
///
/// NewSize[] will be filled in such that:
///
///   sum(NewSize) == Elements, and
///   NewSize[i] <= Capacity.
///
/// The returned index is the node where Position will go, so:
///
///   sum(NewSize[0..idx-1]) <= Position
///   sum(NewSize[0..idx])   >= Position
///
/// The last equality, sum(NewSize[0..idx]) == Position, can only happen when
/// Grow is set and NewSize[idx] == Capacity-1. The index points to the node
/// before the one holding the Position'th element where there is room for an
/// insertion.
///
/// @param Nodes    The number of nodes.
/// @param Elements Total elements in all nodes.
/// @param Capacity The capacity of each node.
/// @param CurSize  Array[Nodes] of current node sizes, or NULL.
/// @param NewSize  Array[Nodes] to receive the new node sizes.
/// @param Position Insert position.
/// @param Grow     Reserve space for a new element at Position.
/// @return         (node, offset) for Position.
IdxPair distribute(unsigned Nodes, unsigned Elements, unsigned Capacity,
                   const unsigned *CurSize, unsigned NewSize[],
                   unsigned Position, bool Grow);


//===----------------------------------------------------------------------===//
//---                   IntervalMapImpl::NodeSizer                         ---//
//===----------------------------------------------------------------------===//
//
// Compute node sizes from key and value types.
//
// The branching factors are chosen to make nodes fit in three cache lines.
// This may not be possible if keys or values are very large. Such large objects
// are handled correctly, but a std::map would probably give better performance.
//
//===----------------------------------------------------------------------===//

enum {
  // Cache line size. Most architectures have 32 or 64 byte cache lines.
  // We use 64 bytes here because it provides good branching factors.
  Log2CacheLine = 6,
  CacheLineBytes = 1 << Log2CacheLine,
  DesiredNodeBytes = 3 * CacheLineBytes
};

template <typename KeyT, typename ValT>
struct NodeSizer {
  enum {
    // Compute the leaf node branching factor that makes a node fit in three
    // cache lines. The branching factor must be at least 3, or some B+-tree
    // balancing algorithms won't work.
    // LeafSize can't be larger than CacheLineBytes. This is required by the
    // PointerIntPair used by NodeRef.
    DesiredLeafSize = DesiredNodeBytes /
      static_cast<unsigned>(2*sizeof(KeyT)+sizeof(ValT)),
    MinLeafSize = 3,
    LeafSize = DesiredLeafSize > MinLeafSize ? DesiredLeafSize : MinLeafSize
  };

  typedef NodeBase<std::pair<KeyT, KeyT>, ValT, LeafSize> LeafBase;

  enum {
    // Now that we have the leaf branching factor, compute the actual allocation
    // unit size by rounding up to a whole number of cache lines.
    AllocBytes = (sizeof(LeafBase) + CacheLineBytes-1) & ~(CacheLineBytes-1),

    // Determine the branching factor for branch nodes.
    BranchSize = AllocBytes /
      static_cast<unsigned>(sizeof(KeyT) + sizeof(void*))
  };

  /// Allocator - The recycling allocator used for both branch and leaf nodes.
  /// This typedef is very likely to be identical for all IntervalMaps with
  /// reasonably sized entries, so the same allocator can be shared among
  /// different kinds of maps.
  typedef RecyclingAllocator<BumpPtrAllocator, char,
                             AllocBytes, CacheLineBytes> Allocator;

};


//===----------------------------------------------------------------------===//
//---                     IntervalMapImpl::NodeRef                         ---//
//===----------------------------------------------------------------------===//
//
// B+-tree nodes can be leaves or branches, so we need a polymorphic node
// pointer that can point to both kinds.
//
// All nodes are cache line aligned and the low 6 bits of a node pointer are
// always 0. These bits are used to store the number of elements in the
// referenced node. Besides saving space, placing node sizes in the parents
// allow tree balancing algorithms to run without faulting cache lines for nodes
// that may not need to be modified.
//
// A NodeRef doesn't know whether it references a leaf node or a branch node.
// It is the responsibility of the caller to use the correct types.
//
// Nodes are never supposed to be empty, and it is invalid to store a node size
// of 0 in a NodeRef. The valid range of sizes is 1-64.
//
//===----------------------------------------------------------------------===//

class NodeRef {
  struct CacheAlignedPointerTraits {
    static inline void *getAsVoidPointer(void *P) { return P; }
    static inline void *getFromVoidPointer(void *P) { return P; }
    enum { NumLowBitsAvailable = Log2CacheLine };
  };
  // Pointer to the node plus the element count packed into the low bits.
  PointerIntPair<void*, Log2CacheLine, unsigned, CacheAlignedPointerTraits> pip;

public:
  /// NodeRef - Create a null ref.
  NodeRef() {}

  /// operator bool - Detect a null ref.
  explicit operator bool() const { return pip.getOpaqueValue(); }

  /// NodeRef - Create a reference to the node p with n elements.
  template <typename NodeT>
  NodeRef(NodeT *p, unsigned n) : pip(p, n - 1) {
    // Sizes are stored biased by one so the valid range 1-64 fits in 6 bits.
    assert(n <= NodeT::Capacity && "Size too big for node");
  }

  /// size - Return the number of elements in the referenced node.
  unsigned size() const { return pip.getInt() + 1; }

  /// setSize - Update the node size.
  void setSize(unsigned n) { pip.setInt(n - 1); }

  /// subtree - Access the i'th subtree reference in a branch node.
  /// This depends on branch nodes storing the NodeRef array as their first
  /// member.
  NodeRef &subtree(unsigned i) const {
    return reinterpret_cast<NodeRef*>(pip.getPointer())[i];
  }

  /// get - Dereference as a NodeT reference.
  template <typename NodeT>
  NodeT &get() const {
    return *reinterpret_cast<NodeT*>(pip.getPointer());
  }

  bool operator==(const NodeRef &RHS) const {
    if (pip == RHS.pip)
      return true;
    // Two refs to the same node must agree on its size.
    assert(pip.getPointer() != RHS.pip.getPointer() && "Inconsistent NodeRefs");
    return false;
  }

  bool operator!=(const NodeRef &RHS) const {
    return !operator==(RHS);
  }
};

//===----------------------------------------------------------------------===//
//---                      IntervalMapImpl::LeafNode                       ---//
//===----------------------------------------------------------------------===//
//
// Leaf nodes store up to N disjoint intervals with corresponding values.
//
// The intervals are kept sorted and fully coalesced so there are no adjacent
// intervals mapping to the same value.
//
// These constraints are always satisfied:
//
// - Traits::stopLess(start(i), stop(i))    - Non-empty, sane intervals.
//
// - Traits::stopLess(stop(i), start(i + 1) - Sorted.
//
// - value(i) != value(i + 1) || !Traits::adjacent(stop(i), start(i + 1))
//                                          - Fully coalesced.
//
//===----------------------------------------------------------------------===//

template <typename KeyT, typename ValT, unsigned N, typename Traits>
class LeafNode : public NodeBase<std::pair<KeyT, KeyT>, ValT, N> {
public:
  // Accessors mapping onto NodeBase's parallel arrays:
  // first[i] holds the (start, stop) key pair, second[i] the mapped value.
  const KeyT &start(unsigned i) const { return this->first[i].first; }
  const KeyT &stop(unsigned i) const { return this->first[i].second; }
  const ValT &value(unsigned i) const { return this->second[i]; }

  KeyT &start(unsigned i) { return this->first[i].first; }
  KeyT &stop(unsigned i) { return this->first[i].second; }
  ValT &value(unsigned i) { return this->second[i]; }

  /// findFrom - Find the first interval after i that may contain x.
  /// @param i    Starting index for the search.
  /// @param Size Number of elements in node.
  /// @param x    Key to search for.
  /// @return     First index with !stopLess(key[i].stop, x), or size.
  ///             This is the first interval that can possibly contain x.
  unsigned findFrom(unsigned i, unsigned Size, KeyT x) const {
    assert(i <= Size && Size <= N && "Bad indices");
    assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
           "Index is past the needed point");
    while (i != Size && Traits::stopLess(stop(i), x)) ++i;
    return i;
  }

  /// safeFind - Find an interval that is known to exist. This is the same as
  /// findFrom except is it assumed that x is at least within range of the last
  /// interval.
  /// @param i Starting index for the search.
  /// @param x Key to search for.
  /// @return  First index with !stopLess(key[i].stop, x), never size.
  ///          This is the first interval that can possibly contain x.
  unsigned safeFind(unsigned i, KeyT x) const {
    assert(i < N && "Bad index");
    assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
           "Index is past the needed point");
    // No Size bound needed: the precondition guarantees a hit before the end.
    while (Traits::stopLess(stop(i), x)) ++i;
    assert(i < N && "Unsafe intervals");
    return i;
  }

  /// safeLookup - Lookup mapped value for a safe key.
  /// It is assumed that x is within range of the last entry.
  /// @param x        Key to search for.
  /// @param NotFound Value to return if x is not in any interval.
  /// @return         The mapped value at x or NotFound.
  ValT safeLookup(KeyT x, ValT NotFound) const {
    unsigned i = safeFind(0, x);
    return Traits::startLess(x, start(i)) ? NotFound : value(i);
  }

  unsigned insertFrom(unsigned &Pos, unsigned Size, KeyT a, KeyT b, ValT y);
};

/// insertFrom - Add mapping of [a;b] to y if possible, coalescing as much as
/// possible. This may cause the node to grow by 1, or it may cause the node
/// to shrink because of coalescing.
/// @param Pos  Starting index = insertFrom(0, size, a)
/// @param Size Number of elements in node.
/// @param a    Interval start.
/// @param b    Interval stop.
/// @param y    Value be mapped.
/// @return     (insert position, new size), or (i, Capacity+1) on overflow.
template <typename KeyT, typename ValT, unsigned N, typename Traits> unsigned LeafNode<KeyT, ValT, N, Traits>:: insertFrom(unsigned &Pos, unsigned Size, KeyT a, KeyT b, ValT y) { unsigned i = Pos; assert(i <= Size && Size <= N && "Invalid index"); assert(!Traits::stopLess(b, a) && "Invalid interval"); // Verify the findFrom invariant. assert((i == 0 || Traits::stopLess(stop(i - 1), a))); assert((i == Size || !Traits::stopLess(stop(i), a))); assert((i == Size || Traits::stopLess(b, start(i))) && "Overlapping insert"); // Coalesce with previous interval. if (i && value(i - 1) == y && Traits::adjacent(stop(i - 1), a)) { Pos = i - 1; // Also coalesce with next interval? if (i != Size && value(i) == y && Traits::adjacent(b, start(i))) { stop(i - 1) = stop(i); this->erase(i, Size); return Size - 1; } stop(i - 1) = b; return Size; } // Detect overflow. if (i == N) return N + 1; // Add new interval at end. if (i == Size) { start(i) = a; stop(i) = b; value(i) = y; return Size + 1; } // Try to coalesce with following interval. if (value(i) == y && Traits::adjacent(b, start(i))) { start(i) = a; return Size; } // We must insert before i. Detect overflow. if (Size == N) return N + 1; // Insert before i. this->shift(i, Size); start(i) = a; stop(i) = b; value(i) = y; return Size + 1; } //===----------------------------------------------------------------------===// //--- IntervalMapImpl::BranchNode ---// //===----------------------------------------------------------------------===// // // A branch node stores references to 1--N subtrees all of the same height. // // The key array in a branch node holds the rightmost stop key of each subtree. // It is redundant to store the last stop key since it can be found in the // parent node, but doing so makes tree balancing a lot simpler. // // It is unusual for a branch node to only have one subtree, but it can happen // in the root node if it is smaller than the normal nodes. 
// // When all of the leaf nodes from all the subtrees are concatenated, they must // satisfy the same constraints as a single leaf node. They must be sorted, // sane, and fully coalesced. // //===----------------------------------------------------------------------===// template <typename KeyT, typename ValT, unsigned N, typename Traits> class BranchNode : public NodeBase<NodeRef, KeyT, N> { public: const KeyT &stop(unsigned i) const { return this->second[i]; } const NodeRef &subtree(unsigned i) const { return this->first[i]; } KeyT &stop(unsigned i) { return this->second[i]; } NodeRef &subtree(unsigned i) { return this->first[i]; } /// findFrom - Find the first subtree after i that may contain x. /// @param i Starting index for the search. /// @param Size Number of elements in node. /// @param x Key to search for. /// @return First index with !stopLess(key[i], x), or size. /// This is the first subtree that can possibly contain x. unsigned findFrom(unsigned i, unsigned Size, KeyT x) const { assert(i <= Size && Size <= N && "Bad indices"); assert((i == 0 || Traits::stopLess(stop(i - 1), x)) && "Index to findFrom is past the needed point"); while (i != Size && Traits::stopLess(stop(i), x)) ++i; return i; } /// safeFind - Find a subtree that is known to exist. This is the same as /// findFrom except is it assumed that x is in range. /// @param i Starting index for the search. /// @param x Key to search for. /// @return First index with !stopLess(key[i], x), never size. /// This is the first subtree that can possibly contain x. unsigned safeFind(unsigned i, KeyT x) const { assert(i < N && "Bad index"); assert((i == 0 || Traits::stopLess(stop(i - 1), x)) && "Index is past the needed point"); while (Traits::stopLess(stop(i), x)) ++i; assert(i < N && "Unsafe intervals"); return i; } /// safeLookup - Get the subtree containing x, Assuming that x is in range. /// @param x Key to search for. 
/// @return Subtree containing x NodeRef safeLookup(KeyT x) const { return subtree(safeFind(0, x)); } /// insert - Insert a new (subtree, stop) pair. /// @param i Insert position, following entries will be shifted. /// @param Size Number of elements in node. /// @param Node Subtree to insert. /// @param Stop Last key in subtree. void insert(unsigned i, unsigned Size, NodeRef Node, KeyT Stop) { assert(Size < N && "branch node overflow"); assert(i <= Size && "Bad insert position"); this->shift(i, Size); subtree(i) = Node; stop(i) = Stop; } }; //===----------------------------------------------------------------------===// //--- IntervalMapImpl::Path ---// //===----------------------------------------------------------------------===// // // A Path is used by iterators to represent a position in a B+-tree, and the // path to get there from the root. // // The Path class also contains the tree navigation code that doesn't have to // be templatized. // //===----------------------------------------------------------------------===// class Path { /// Entry - Each step in the path is a node pointer and an offset into that /// node. struct Entry { void *node; unsigned size; unsigned offset; Entry(void *Node, unsigned Size, unsigned Offset) : node(Node), size(Size), offset(Offset) {} Entry(NodeRef Node, unsigned Offset) : node(&Node.subtree(0)), size(Node.size()), offset(Offset) {} NodeRef &subtree(unsigned i) const { return reinterpret_cast<NodeRef*>(node)[i]; } }; /// path - The path entries, path[0] is the root node, path.back() is a leaf. SmallVector<Entry, 4> path; public: // Node accessors. template <typename NodeT> NodeT &node(unsigned Level) const { return *reinterpret_cast<NodeT*>(path[Level].node); } unsigned size(unsigned Level) const { return path[Level].size; } unsigned offset(unsigned Level) const { return path[Level].offset; } unsigned &offset(unsigned Level) { return path[Level].offset; } // Leaf accessors. 
  template <typename NodeT> NodeT &leaf() const {
    return *reinterpret_cast<NodeT*>(path.back().node);
  }
  unsigned leafSize() const { return path.back().size; }
  unsigned leafOffset() const { return path.back().offset; }
  unsigned &leafOffset() { return path.back().offset; }

  /// valid - Return true if path is at a valid node, not at end().
  bool valid() const {
    return !path.empty() && path.front().offset < path.front().size;
  }

  /// height - Return the height of the tree corresponding to this path.
  /// This matches map->height in a full path.
  unsigned height() const { return path.size() - 1; }

  /// subtree - Get the subtree referenced from Level. When the path is
  /// consistent, node(Level + 1) == subtree(Level).
  /// @param Level 0..height-1. The leaves have no subtrees.
  NodeRef &subtree(unsigned Level) const {
    return path[Level].subtree(path[Level].offset);
  }

  /// reset - Reset cached information about node(Level) from subtree(Level -1).
  /// @param Level 1..height. The node to update after parent node changed.
  void reset(unsigned Level) {
    path[Level] = Entry(subtree(Level - 1), offset(Level));
  }

  /// push - Add entry to path.
  /// @param Node   Node to add, should be subtree(path.size()-1).
  /// @param Offset Offset into Node.
  void push(NodeRef Node, unsigned Offset) {
    path.push_back(Entry(Node, Offset));
  }

  /// pop - Remove the last path entry.
  void pop() {
    path.pop_back();
  }

  /// setSize - Set the size of a node both in the path and in the tree.
  /// @param Level 0..height. Note that setting the root size won't change
  ///              map->rootSize.
  /// @param Size  New node size.
  void setSize(unsigned Level, unsigned Size) {
    path[Level].size = Size;
    if (Level)
      subtree(Level - 1).setSize(Size);
  }

  /// setRoot - Clear the path and set a new root node.
  /// @param Node   New root node.
  /// @param Size   New root size.
  /// @param Offset Offset into root node.
  void setRoot(void *Node, unsigned Size, unsigned Offset) {
    path.clear();
    path.push_back(Entry(Node, Size, Offset));
  }

  /// replaceRoot - Replace the current root node with two new entries after the
  /// tree height has increased.
  /// @param Root    The new root node.
  /// @param Size    Number of entries in the new root.
  /// @param Offsets Offsets into the root and first branch nodes.
  void replaceRoot(void *Root, unsigned Size, IdxPair Offsets);

  /// getLeftSibling - Get the left sibling node at Level, or a null NodeRef.
  /// @param Level Get the sibling to node(Level).
  /// @return Left sibling, or NodeRef().
  NodeRef getLeftSibling(unsigned Level) const;

  /// moveLeft - Move path to the left sibling at Level. Leave nodes below Level
  /// unaltered.
  /// @param Level Move node(Level).
  void moveLeft(unsigned Level);

  /// fillLeft - Grow path to Height by taking leftmost branches.
  /// @param Height The target height.
  void fillLeft(unsigned Height) {
    while (height() < Height)
      push(subtree(height()), 0);
  }

  /// getRightSibling - Get the right sibling node at Level, or a null NodeRef.
  /// @param Level Get the sibling to node(Level).
  /// @return Right sibling, or NodeRef().
  NodeRef getRightSibling(unsigned Level) const;

  /// moveRight - Move path to the right sibling at Level. Leave nodes below
  /// Level unaltered.
  /// @param Level Move node(Level).
  void moveRight(unsigned Level);

  /// atBegin - Return true if path is at begin().
  bool atBegin() const {
    for (unsigned i = 0, e = path.size(); i != e; ++i)
      if (path[i].offset != 0)
        return false;
    return true;
  }

  /// atLastEntry - Return true if the path is at the last entry of the node at
  /// Level.
  /// @param Level Node to examine.
  bool atLastEntry(unsigned Level) const {
    return path[Level].offset == path[Level].size - 1;
  }

  /// legalizeForInsert - Prepare the path for an insertion at Level. When the
  /// path is at end(), node(Level) may not be a legal node. legalizeForInsert
  /// ensures that node(Level) is real by moving back to the last node at Level,
  /// and setting offset(Level) to size(Level) if required.
  /// @param Level The level where an insertion is about to take place.
  void legalizeForInsert(unsigned Level) {
    if (valid())
      return;
    moveLeft(Level);
    ++path[Level].offset;
  }
};

} // namespace IntervalMapImpl

//===----------------------------------------------------------------------===//
//---                          IntervalMap                                ----//
//===----------------------------------------------------------------------===//

template <typename KeyT, typename ValT,
          unsigned N = IntervalMapImpl::NodeSizer<KeyT, ValT>::LeafSize,
          typename Traits = IntervalMapInfo<KeyT> >
class IntervalMap {
  typedef IntervalMapImpl::NodeSizer<KeyT, ValT> Sizer;
  typedef IntervalMapImpl::LeafNode<KeyT, ValT, Sizer::LeafSize, Traits> Leaf;
  typedef IntervalMapImpl::BranchNode<KeyT, ValT, Sizer::BranchSize, Traits>
    Branch;
  typedef IntervalMapImpl::LeafNode<KeyT, ValT, N, Traits> RootLeaf;
  typedef IntervalMapImpl::IdxPair IdxPair;

  // The RootLeaf capacity is given as a template parameter. We must compute the
  // corresponding RootBranch capacity.
  enum {
    DesiredRootBranchCap = (sizeof(RootLeaf) - sizeof(KeyT)) /
      (sizeof(KeyT) + sizeof(IntervalMapImpl::NodeRef)),
    RootBranchCap = DesiredRootBranchCap ? DesiredRootBranchCap : 1
  };

  typedef IntervalMapImpl::BranchNode<KeyT, ValT, RootBranchCap, Traits>
    RootBranch;

  // When branched, we store a global start key as well as the branch node.
  struct RootBranchData {
    KeyT start;
    RootBranch node;
  };

public:
  typedef typename Sizer::Allocator Allocator;
  typedef KeyT KeyType;
  typedef ValT ValueType;
  typedef Traits KeyTraits;

private:
  // The root data is either a RootLeaf or a RootBranchData instance.
  AlignedCharArrayUnion<RootLeaf, RootBranchData> data;

  // Tree height.
  // 0: Leaves in root.
  // 1: Root points to leaf.
  // 2: root->branch->leaf ...
  unsigned height;

  // Number of entries in the root node.
  unsigned rootSize;

  // Allocator used for creating external nodes.
  Allocator &allocator;

  /// dataAs - Represent data as a node type without breaking aliasing rules.
  template <typename T>
  T &dataAs() const {
    union {
      const char *d;
      T *t;
    } u;
    u.d = data.buffer;
    return *u.t;
  }

  const RootLeaf &rootLeaf() const {
    assert(!branched() && "Cannot acces leaf data in branched root");
    return dataAs<RootLeaf>();
  }
  RootLeaf &rootLeaf() {
    assert(!branched() && "Cannot acces leaf data in branched root");
    return dataAs<RootLeaf>();
  }
  // NOTE(review): this const overload returns a non-const reference;
  // upstream LLVM declares it `const RootBranchData &` — confirm intended.
  RootBranchData &rootBranchData() const {
    assert(branched() && "Cannot access branch data in non-branched root");
    return dataAs<RootBranchData>();
  }
  RootBranchData &rootBranchData() {
    assert(branched() && "Cannot access branch data in non-branched root");
    return dataAs<RootBranchData>();
  }
  const RootBranch &rootBranch() const { return rootBranchData().node; }
  RootBranch &rootBranch() { return rootBranchData().node; }
  KeyT rootBranchStart() const { return rootBranchData().start; }
  KeyT &rootBranchStart() { return rootBranchData().start; }

  // newNode - Allocate and default-construct an external node.
  template <typename NodeT> NodeT *newNode() {
    return new(allocator.template Allocate<NodeT>()) NodeT();
  }

  // deleteNode - Destroy and deallocate an external node.
  template <typename NodeT> void deleteNode(NodeT *P) {
    P->~NodeT();
    allocator.Deallocate(P);
  }

  IdxPair branchRoot(unsigned Position);
  IdxPair splitRoot(unsigned Position);

  // switchRootToBranch - Replace the in-place RootLeaf with a RootBranchData.
  void switchRootToBranch() {
    rootLeaf().~RootLeaf();
    height = 1;
    new (&rootBranchData()) RootBranchData();
  }

  // switchRootToLeaf - Replace the in-place RootBranchData with a RootLeaf.
  void switchRootToLeaf() {
    rootBranchData().~RootBranchData();
    height = 0;
    new(&rootLeaf()) RootLeaf();
  }

  bool branched() const { return height > 0; }

  ValT treeSafeLookup(KeyT x, ValT NotFound) const;
  void visitNodes(void (IntervalMap::*f)(IntervalMapImpl::NodeRef,
                  unsigned Level));
  void deleteNode(IntervalMapImpl::NodeRef Node, unsigned Level);

public:
  explicit IntervalMap(Allocator &a) : height(0), rootSize(0), allocator(a) {
    assert((uintptr_t(data.buffer) & (alignOf<RootLeaf>() - 1)) == 0 &&
           "Insufficient alignment");
    new(&rootLeaf()) RootLeaf();
  }

  ~IntervalMap() {
    clear();
    rootLeaf().~RootLeaf();
  }

  /// empty - Return true when no intervals are mapped.
  bool empty() const {
    return rootSize == 0;
  }

  /// start - Return the smallest mapped key in a non-empty map.
  KeyT start() const {
    assert(!empty() && "Empty IntervalMap has no start");
    return !branched() ? rootLeaf().start(0) : rootBranchStart();
  }

  /// stop - Return the largest mapped key in a non-empty map.
  KeyT stop() const {
    assert(!empty() && "Empty IntervalMap has no stop");
    return !branched() ? rootLeaf().stop(rootSize - 1) :
                         rootBranch().stop(rootSize - 1);
  }

  /// lookup - Return the mapped value at x or NotFound.
  ValT lookup(KeyT x, ValT NotFound = ValT()) const {
    if (empty() || Traits::startLess(x, start()) || Traits::stopLess(stop(), x))
      return NotFound;
    return branched() ? treeSafeLookup(x, NotFound) :
                        rootLeaf().safeLookup(x, NotFound);
  }

  /// insert - Add a mapping of [a;b] to y, coalesce with adjacent intervals.
  /// It is assumed that no key in the interval is mapped to another value, but
  /// overlapping intervals already mapped to y will be coalesced.
  void insert(KeyT a, KeyT b, ValT y) {
    if (branched() || rootSize == RootLeaf::Capacity)
      return find(a).insert(a, b, y);

    // Easy insert into root leaf.
    unsigned p = rootLeaf().findFrom(0, rootSize, a);
    rootSize = rootLeaf().insertFrom(p, rootSize, a, b, y);
  }

  /// clear - Remove all entries.
  void clear();

  class const_iterator;
  class iterator;
  friend class const_iterator;
  friend class iterator;

  const_iterator begin() const {
    const_iterator I(*this);
    I.goToBegin();
    return I;
  }

  iterator begin() {
    iterator I(*this);
    I.goToBegin();
    return I;
  }

  const_iterator end() const {
    const_iterator I(*this);
    I.goToEnd();
    return I;
  }

  iterator end() {
    iterator I(*this);
    I.goToEnd();
    return I;
  }

  /// find - Return an iterator pointing to the first interval ending at or
  /// after x, or end().
  const_iterator find(KeyT x) const {
    const_iterator I(*this);
    I.find(x);
    return I;
  }

  iterator find(KeyT x) {
    iterator I(*this);
    I.find(x);
    return I;
  }
};

/// treeSafeLookup - Return the mapped value at x or NotFound, assuming a
/// branched root.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
ValT IntervalMap<KeyT, ValT, N, Traits>::
treeSafeLookup(KeyT x, ValT NotFound) const {
  assert(branched() && "treeLookup assumes a branched root");

  // Walk from the root branch down to the leaf that can contain x.
  IntervalMapImpl::NodeRef NR = rootBranch().safeLookup(x);
  for (unsigned h = height-1; h; --h)
    NR = NR.get<Branch>().safeLookup(x);
  return NR.get<Leaf>().safeLookup(x, NotFound);
}

// branchRoot - Switch from a leaf root to a branched root.
// Return the new (root offset, node offset) corresponding to Position.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
IntervalMapImpl::IdxPair IntervalMap<KeyT, ValT, N, Traits>::
branchRoot(unsigned Position) {
  using namespace IntervalMapImpl;
  // How many external leaf nodes to hold RootLeaf+1?
  const unsigned Nodes = RootLeaf::Capacity / Leaf::Capacity + 1;

  // Compute element distribution among new nodes.
  unsigned size[Nodes];
  IdxPair NewOffset(0, Position);

  // It is very common for the root node to be smaller than external nodes.
  if (Nodes == 1)
    size[0] = rootSize;
  else
    NewOffset = distribute(Nodes, rootSize, Leaf::Capacity, nullptr, size,
                           Position, true);

  // Allocate new nodes.
  unsigned pos = 0;
  NodeRef node[Nodes];
  for (unsigned n = 0; n != Nodes; ++n) {
    Leaf *L = newNode<Leaf>();
    L->copy(rootLeaf(), pos, 0, size[n]);
    node[n] = NodeRef(L, size[n]);
    pos += size[n];
  }

  // Destroy the old leaf node, construct branch node instead.
  switchRootToBranch();
  for (unsigned n = 0; n != Nodes; ++n) {
    rootBranch().stop(n) = node[n].template get<Leaf>().stop(size[n]-1);
    rootBranch().subtree(n) = node[n];
  }
  rootBranchStart() = node[0].template get<Leaf>().start(0);
  rootSize = Nodes;
  return NewOffset;
}

// splitRoot - Split the current BranchRoot into multiple Branch nodes.
// Return the new (root offset, node offset) corresponding to Position.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
IntervalMapImpl::IdxPair IntervalMap<KeyT, ValT, N, Traits>::
splitRoot(unsigned Position) {
  using namespace IntervalMapImpl;
  // How many external leaf nodes to hold RootBranch+1?
  const unsigned Nodes = RootBranch::Capacity / Branch::Capacity + 1;

  // Compute element distribution among new nodes.
  unsigned Size[Nodes];
  IdxPair NewOffset(0, Position);

  // It is very common for the root node to be smaller than external nodes.
  if (Nodes == 1)
    Size[0] = rootSize;
  else
    // NOTE(review): capacity passed here is Leaf::Capacity even though we are
    // distributing into Branch nodes; this matches the code as written —
    // confirm against upstream before "fixing".
    NewOffset = distribute(Nodes, rootSize, Leaf::Capacity, nullptr, Size,
                           Position, true);

  // Allocate new nodes.
  unsigned Pos = 0;
  NodeRef Node[Nodes];
  for (unsigned n = 0; n != Nodes; ++n) {
    Branch *B = newNode<Branch>();
    B->copy(rootBranch(), Pos, 0, Size[n]);
    Node[n] = NodeRef(B, Size[n]);
    Pos += Size[n];
  }

  // Re-point the root at the new branch nodes.
  for (unsigned n = 0; n != Nodes; ++n) {
    rootBranch().stop(n) = Node[n].template get<Branch>().stop(Size[n]-1);
    rootBranch().subtree(n) = Node[n];
  }

  rootSize = Nodes;
  ++height;
  return NewOffset;
}

/// visitNodes - Visit each external node.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
visitNodes(void (IntervalMap::*f)(IntervalMapImpl::NodeRef, unsigned Height)) {
  if (!branched())
    return;
  SmallVector<IntervalMapImpl::NodeRef, 4> Refs, NextRefs;

  // Collect level 0 nodes from the root.
  for (unsigned i = 0; i != rootSize; ++i)
    Refs.push_back(rootBranch().subtree(i));

  // Visit all branch nodes.
  for (unsigned h = height - 1; h; --h) {
    for (unsigned i = 0, e = Refs.size(); i != e; ++i) {
      // Collect the children before visiting (f may delete the node).
      for (unsigned j = 0, s = Refs[i].size(); j != s; ++j)
        NextRefs.push_back(Refs[i].subtree(j));
      (this->*f)(Refs[i], h);
    }
    Refs.clear();
    Refs.swap(NextRefs);
  }

  // Visit all leaf nodes.
  for (unsigned i = 0, e = Refs.size(); i != e; ++i)
    (this->*f)(Refs[i], 0);
}

// deleteNode - Destroy one external node; dispatched on its level.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
deleteNode(IntervalMapImpl::NodeRef Node, unsigned Level) {
  if (Level)
    deleteNode(&Node.get<Branch>());
  else
    deleteNode(&Node.get<Leaf>());
}

// clear - Delete all external nodes and reset to an empty leaf root.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
clear() {
  if (branched()) {
    visitNodes(&IntervalMap::deleteNode);
    switchRootToLeaf();
  }
  rootSize = 0;
}

//===----------------------------------------------------------------------===//
//---                   IntervalMap::const_iterator                       ----//
//===----------------------------------------------------------------------===//

template <typename KeyT, typename ValT, unsigned N, typename Traits>
class IntervalMap<KeyT, ValT, N, Traits>::const_iterator {
  friend class IntervalMap;

public:
  using iterator_category = std::bidirectional_iterator_tag;
  using value_type = ValT;
  using difference_type = std::ptrdiff_t;
  using pointer = value_type *;
  using reference = value_type &;

protected:
  // The map referred to.
  IntervalMap *map;

  // We store a full path from the root to the current position.
  // The path may be partially filled, but never between iterator calls.
  IntervalMapImpl::Path path;

  explicit const_iterator(const IntervalMap &map) :
    map(const_cast<IntervalMap*>(&map)) {}

  bool branched() const {
    assert(map && "Invalid iterator");
    return map->branched();
  }

  // setRoot - Reset the path to point into the map's root node at Offset.
  void setRoot(unsigned Offset) {
    if (branched())
      path.setRoot(&map->rootBranch(), map->rootSize, Offset);
    else
      path.setRoot(&map->rootLeaf(), map->rootSize, Offset);
  }

  void pathFillFind(KeyT x);
  void treeFind(KeyT x);
  void treeAdvanceTo(KeyT x);

  /// unsafeStart - Writable access to start() for iterator.
  KeyT &unsafeStart() const {
    assert(valid() && "Cannot access invalid iterator");
    return branched() ? path.leaf<Leaf>().start(path.leafOffset()) :
                        path.leaf<RootLeaf>().start(path.leafOffset());
  }

  /// unsafeStop - Writable access to stop() for iterator.
  KeyT &unsafeStop() const {
    assert(valid() && "Cannot access invalid iterator");
    return branched() ? path.leaf<Leaf>().stop(path.leafOffset()) :
                        path.leaf<RootLeaf>().stop(path.leafOffset());
  }

  /// unsafeValue - Writable access to value() for iterator.
  ValT &unsafeValue() const {
    assert(valid() && "Cannot access invalid iterator");
    return branched() ? path.leaf<Leaf>().value(path.leafOffset()) :
                        path.leaf<RootLeaf>().value(path.leafOffset());
  }

public:
  /// const_iterator - Create an iterator that isn't pointing anywhere.
  const_iterator() : map(nullptr) {}

  /// setMap - Change the map iterated over. This call must be followed by a
  /// call to goToBegin(), goToEnd(), or find().
  void setMap(const IntervalMap &m) { map = const_cast<IntervalMap*>(&m); }

  /// valid - Return true if the current position is valid, false for end().
  bool valid() const { return path.valid(); }

  /// atBegin - Return true if the current position is the first map entry.
  bool atBegin() const { return path.atBegin(); }

  /// start - Return the beginning of the current interval.
  const KeyT &start() const { return unsafeStart(); }

  /// stop - Return the end of the current interval.
  const KeyT &stop() const { return unsafeStop(); }

  /// value - Return the mapped value at the current interval.
  const ValT &value() const { return unsafeValue(); }

  const ValT &operator*() const { return value(); }

  bool operator==(const const_iterator &RHS) const {
    assert(map == RHS.map && "Cannot compare iterators from different maps");
    if (!valid())
      return !RHS.valid();
    if (path.leafOffset() != RHS.path.leafOffset())
      return false;
    return &path.template leaf<Leaf>() == &RHS.path.template leaf<Leaf>();
  }

  bool operator!=(const const_iterator &RHS) const {
    return !operator==(RHS);
  }

  /// goToBegin - Move to the first interval in map.
  void goToBegin() {
    setRoot(0);
    if (branched())
      path.fillLeft(map->height);
  }

  /// goToEnd - Move beyond the last interval in map.
  void goToEnd() { setRoot(map->rootSize); }

  /// preincrement - Move to the next interval.
  const_iterator &operator++() {
    assert(valid() && "Cannot increment end()");
    if (++path.leafOffset() == path.leafSize() && branched())
      path.moveRight(map->height);
    return *this;
  }

  /// postincrement - Don't do that!
  const_iterator operator++(int) {
    const_iterator tmp = *this;
    operator++();
    return tmp;
  }

  /// predecrement - Move to the previous interval.
  const_iterator &operator--() {
    if (path.leafOffset() && (valid() || !branched()))
      --path.leafOffset();
    else
      path.moveLeft(map->height);
    return *this;
  }

  /// postdecrement - Don't do that!
  const_iterator operator--(int) {
    const_iterator tmp = *this;
    operator--();
    return tmp;
  }

  /// find - Move to the first interval with stop >= x, or end().
  /// This is a full search from the root, the current position is ignored.
  void find(KeyT x) {
    if (branched())
      treeFind(x);
    else
      setRoot(map->rootLeaf().findFrom(0, map->rootSize, x));
  }

  /// advanceTo - Move to the first interval with stop >= x, or end().
  /// The search is started from the current position, and no earlier positions
  /// can be found. This is much faster than find() for small moves.
  void advanceTo(KeyT x) {
    if (!valid())
      return;
    if (branched())
      treeAdvanceTo(x);
    else
      path.leafOffset() =
        map->rootLeaf().findFrom(path.leafOffset(), map->rootSize, x);
  }
};

/// pathFillFind - Complete path by searching for x.
/// @param x Key to search for.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
const_iterator::pathFillFind(KeyT x) {
  IntervalMapImpl::NodeRef NR = path.subtree(path.height());
  for (unsigned i = map->height - path.height() - 1; i; --i) {
    unsigned p = NR.get<Branch>().safeFind(0, x);
    path.push(NR, p);
    NR = NR.subtree(p);
  }
  path.push(NR, NR.get<Leaf>().safeFind(0, x));
}

/// treeFind - Find in a branched tree.
/// @param x Key to search for.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
const_iterator::treeFind(KeyT x) {
  setRoot(map->rootBranch().findFrom(0, map->rootSize, x));
  if (valid())
    pathFillFind(x);
}

/// treeAdvanceTo - Find position after the current one.
/// @param x Key to search for.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
const_iterator::treeAdvanceTo(KeyT x) {
  // Can we stay on the same leaf node?
  if (!Traits::stopLess(path.leaf<Leaf>().stop(path.leafSize() - 1), x)) {
    path.leafOffset() = path.leaf<Leaf>().safeFind(path.leafOffset(), x);
    return;
  }

  // Drop the current leaf.
  path.pop();

  // Search towards the root for a usable subtree.
  if (path.height()) {
    for (unsigned l = path.height() - 1; l; --l) {
      if (!Traits::stopLess(path.node<Branch>(l).stop(path.offset(l)), x)) {
        // The branch node at l+1 is usable
        path.offset(l + 1) =
          path.node<Branch>(l + 1).safeFind(path.offset(l + 1), x);
        return pathFillFind(x);
      }
      path.pop();
    }
    // Is the level-1 Branch usable?
    if (!Traits::stopLess(map->rootBranch().stop(path.offset(0)), x)) {
      path.offset(1) = path.node<Branch>(1).safeFind(path.offset(1), x);
      return pathFillFind(x);
    }
  }

  // We reached the root.
  setRoot(map->rootBranch().findFrom(path.offset(0), map->rootSize, x));
  if (valid())
    pathFillFind(x);
}

//===----------------------------------------------------------------------===//
//---                       IntervalMap::iterator                         ----//
//===----------------------------------------------------------------------===//

template <typename KeyT, typename ValT, unsigned N, typename Traits>
class IntervalMap<KeyT, ValT, N, Traits>::iterator : public const_iterator {
  friend class IntervalMap;
  typedef IntervalMapImpl::IdxPair IdxPair;

  explicit iterator(IntervalMap &map) : const_iterator(map) {}

  void setNodeStop(unsigned Level, KeyT Stop);
  bool insertNode(unsigned Level, IntervalMapImpl::NodeRef Node, KeyT Stop);
  template <typename NodeT> bool overflow(unsigned Level);
  void treeInsert(KeyT a, KeyT b, ValT y);
  void eraseNode(unsigned Level);
  void treeErase(bool UpdateRoot = true);
  bool canCoalesceLeft(KeyT Start, ValT x);
  bool canCoalesceRight(KeyT Stop, ValT x);

public:
  /// iterator - Create null iterator.
  iterator() {}

  /// setStart - Move the start of the current interval.
  /// This may cause coalescing with the previous interval.
  /// @param a New start key, must not overlap the previous interval.
  void setStart(KeyT a);

  /// setStop - Move the end of the current interval.
  /// This may cause coalescing with the following interval.
  /// @param b New stop key, must not overlap the following interval.
  void setStop(KeyT b);

  /// setValue - Change the mapped value of the current interval.
  /// This may cause coalescing with the previous and following intervals.
  /// @param x New value.
  void setValue(ValT x);

  /// setStartUnchecked - Move the start of the current interval without
  /// checking for coalescing or overlaps.
  /// This should only be used when it is known that coalescing is not required.
  /// @param a New start key.
  void setStartUnchecked(KeyT a) { this->unsafeStart() = a; }

  /// setStopUnchecked - Move the end of the current interval without checking
  /// for coalescing or overlaps.
  /// This should only be used when it is known that coalescing is not required.
  /// @param b New stop key.
  void setStopUnchecked(KeyT b) {
    this->unsafeStop() = b;
    // Update keys in branch nodes as well.
    if (this->path.atLastEntry(this->path.height()))
      setNodeStop(this->path.height(), b);
  }

  /// setValueUnchecked - Change the mapped value of the current interval
  /// without checking for coalescing.
  /// @param x New value.
  void setValueUnchecked(ValT x) { this->unsafeValue() = x; }

  /// insert - Insert mapping [a;b] -> y before the current position.
  void insert(KeyT a, KeyT b, ValT y);

  /// erase - Erase the current interval.
  void erase();

  iterator &operator++() {
    const_iterator::operator++();
    return *this;
  }

  iterator operator++(int) {
    iterator tmp = *this;
    operator++();
    return tmp;
  }

  iterator &operator--() {
    const_iterator::operator--();
    return *this;
  }

  iterator operator--(int) {
    iterator tmp = *this;
    operator--();
    return tmp;
  }
};

/// canCoalesceLeft - Can the current interval coalesce to the left after
/// changing start or value?
/// @param Start New start of current interval.
/// @param Value New value for current interval.
/// @return True when updating the current interval would enable coalescing.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
bool IntervalMap<KeyT, ValT, N, Traits>::
iterator::canCoalesceLeft(KeyT Start, ValT Value) {
  using namespace IntervalMapImpl;
  Path &P = this->path;
  if (!this->branched()) {
    unsigned i = P.leafOffset();
    RootLeaf &Node = P.leaf<RootLeaf>();
    return i && Node.value(i-1) == Value &&
                Traits::adjacent(Node.stop(i-1), Start);
  }

  // Branched.
  if (unsigned i = P.leafOffset()) {
    Leaf &Node = P.leaf<Leaf>();
    return Node.value(i-1) == Value && Traits::adjacent(Node.stop(i-1), Start);
  } else if (NodeRef NR = P.getLeftSibling(P.height())) {
    // At the leftmost entry of this leaf; check the last entry of the left
    // sibling leaf instead.
    unsigned i = NR.size() - 1;
    Leaf &Node = NR.get<Leaf>();
    return Node.value(i) == Value && Traits::adjacent(Node.stop(i), Start);
  }
  return false;
}

/// canCoalesceRight - Can the current interval coalesce to the right after
/// changing stop or value?
/// @param Stop  New stop of current interval.
/// @param Value New value for current interval.
/// @return True when updating the current interval would enable coalescing.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
bool IntervalMap<KeyT, ValT, N, Traits>::
iterator::canCoalesceRight(KeyT Stop, ValT Value) {
  using namespace IntervalMapImpl;
  Path &P = this->path;
  unsigned i = P.leafOffset() + 1;
  if (!this->branched()) {
    if (i >= P.leafSize())
      return false;
    RootLeaf &Node = P.leaf<RootLeaf>();
    return Node.value(i) == Value && Traits::adjacent(Stop, Node.start(i));
  }

  // Branched.
  if (i < P.leafSize()) {
    Leaf &Node = P.leaf<Leaf>();
    return Node.value(i) == Value && Traits::adjacent(Stop, Node.start(i));
  } else if (NodeRef NR = P.getRightSibling(P.height())) {
    // At the last entry of this leaf; check the first entry of the right
    // sibling leaf instead.
    Leaf &Node = NR.get<Leaf>();
    return Node.value(0) == Value && Traits::adjacent(Stop, Node.start(0));
  }
  return false;
}

/// setNodeStop - Update the stop key of the current node at level and above.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
iterator::setNodeStop(unsigned Level, KeyT Stop) {
  // There are no references to the root node, so nothing to update.
  if (!Level)
    return;
  IntervalMapImpl::Path &P = this->path;
  // Update nodes pointing to the current node. Stop as soon as the changed
  // entry is no longer the last one in its node.
  while (--Level) {
    P.node<Branch>(Level).stop(P.offset(Level)) = Stop;
    if (!P.atLastEntry(Level))
      return;
  }
  // Update root separately since it has a different layout.
  P.node<RootBranch>(Level).stop(P.offset(Level)) = Stop;
}

template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
iterator::setStart(KeyT a) {
  assert(Traits::stopLess(a, this->stop()) && "Cannot move start beyond stop");
  KeyT &CurStart = this->unsafeStart();
  if (!Traits::startLess(a, CurStart) || !canCoalesceLeft(a, this->value())) {
    CurStart = a;
    return;
  }
  // Coalesce with the interval to the left.
  --*this;
  a = this->start();
  erase();
  setStartUnchecked(a);
}

template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
iterator::setStop(KeyT b) {
  assert(Traits::stopLess(this->start(), b) && "Cannot move stop beyond start");
  if (Traits::startLess(b, this->stop()) ||
      !canCoalesceRight(b, this->value())) {
    setStopUnchecked(b);
    return;
  }
  // Coalesce with interval to the right.
  KeyT a = this->start();
  erase();
  setStartUnchecked(a);
}

template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
iterator::setValue(ValT x) {
  setValueUnchecked(x);
  if (canCoalesceRight(this->stop(), x)) {
    KeyT a = this->start();
    erase();
    setStartUnchecked(a);
  }
  if (canCoalesceLeft(this->start(), x)) {
    --*this;
    KeyT a = this->start();
    erase();
    setStartUnchecked(a);
  }
}

/// insertNode - insert a node before the current path at level.
/// Leave the current path pointing at the new node.
/// @param Level path index of the node to be inserted.
/// @param Node  The node to be inserted.
/// @param Stop  The last index in the new node.
/// @return True if the tree height was increased.
template <typename KeyT, typename ValT, unsigned N, typename Traits> bool IntervalMap<KeyT, ValT, N, Traits>:: iterator::insertNode(unsigned Level, IntervalMapImpl::NodeRef Node, KeyT Stop) { assert(Level && "Cannot insert next to the root"); bool SplitRoot = false; IntervalMap &IM = *this->map; IntervalMapImpl::Path &P = this->path; if (Level == 1) { // Insert into the root branch node. if (IM.rootSize < RootBranch::Capacity) { IM.rootBranch().insert(P.offset(0), IM.rootSize, Node, Stop); P.setSize(0, ++IM.rootSize); P.reset(Level); return SplitRoot; } // We need to split the root while keeping our position. SplitRoot = true; IdxPair Offset = IM.splitRoot(P.offset(0)); P.replaceRoot(&IM.rootBranch(), IM.rootSize, Offset); // Fall through to insert at the new higher level. ++Level; } // When inserting before end(), make sure we have a valid path. P.legalizeForInsert(--Level); // Insert into the branch node at Level-1. if (P.size(Level) == Branch::Capacity) { // Branch node is full, handle handle the overflow. assert(!SplitRoot && "Cannot overflow after splitting the root"); SplitRoot = overflow<Branch>(Level); Level += SplitRoot; } P.node<Branch>(Level).insert(P.offset(Level), P.size(Level), Node, Stop); P.setSize(Level, P.size(Level) + 1); if (P.atLastEntry(Level)) setNodeStop(Level, Stop); P.reset(Level + 1); return SplitRoot; } // insert template <typename KeyT, typename ValT, unsigned N, typename Traits> void IntervalMap<KeyT, ValT, N, Traits>:: iterator::insert(KeyT a, KeyT b, ValT y) { if (this->branched()) return treeInsert(a, b, y); IntervalMap &IM = *this->map; IntervalMapImpl::Path &P = this->path; // Try simple root leaf insert. unsigned Size = IM.rootLeaf().insertFrom(P.leafOffset(), IM.rootSize, a, b, y); // Was the root node insert successful? if (Size <= RootLeaf::Capacity) { P.setSize(0, IM.rootSize = Size); return; } // Root leaf node is full, we must branch. 
IdxPair Offset = IM.branchRoot(P.leafOffset()); P.replaceRoot(&IM.rootBranch(), IM.rootSize, Offset); // Now it fits in the new leaf. treeInsert(a, b, y); } template <typename KeyT, typename ValT, unsigned N, typename Traits> void IntervalMap<KeyT, ValT, N, Traits>:: iterator::treeInsert(KeyT a, KeyT b, ValT y) { using namespace IntervalMapImpl; Path &P = this->path; if (!P.valid()) P.legalizeForInsert(this->map->height); // Check if this insertion will extend the node to the left. if (P.leafOffset() == 0 && Traits::startLess(a, P.leaf<Leaf>().start(0))) { // Node is growing to the left, will it affect a left sibling node? if (NodeRef Sib = P.getLeftSibling(P.height())) { Leaf &SibLeaf = Sib.get<Leaf>(); unsigned SibOfs = Sib.size() - 1; if (SibLeaf.value(SibOfs) == y && Traits::adjacent(SibLeaf.stop(SibOfs), a)) { // This insertion will coalesce with the last entry in SibLeaf. We can // handle it in two ways: // 1. Extend SibLeaf.stop to b and be done, or // 2. Extend a to SibLeaf, erase the SibLeaf entry and continue. // We prefer 1., but need 2 when coalescing to the right as well. Leaf &CurLeaf = P.leaf<Leaf>(); P.moveLeft(P.height()); if (Traits::stopLess(b, CurLeaf.start(0)) && (y != CurLeaf.value(0) || !Traits::adjacent(b, CurLeaf.start(0)))) { // Easy, just extend SibLeaf and we're done. setNodeStop(P.height(), SibLeaf.stop(SibOfs) = b); return; } else { // We have both left and right coalescing. Erase the old SibLeaf entry // and continue inserting the larger interval. a = SibLeaf.start(SibOfs); treeErase(/* UpdateRoot= */false); } } } else { // No left sibling means we are at begin(). Update cached bound. this->map->rootBranchStart() = a; } } // When we are inserting at the end of a leaf node, we must update stops. unsigned Size = P.leafSize(); bool Grow = P.leafOffset() == Size; Size = P.leaf<Leaf>().insertFrom(P.leafOffset(), Size, a, b, y); // Leaf insertion unsuccessful? Overflow and try again. 
if (Size > Leaf::Capacity) { overflow<Leaf>(P.height()); Grow = P.leafOffset() == P.leafSize(); Size = P.leaf<Leaf>().insertFrom(P.leafOffset(), P.leafSize(), a, b, y); assert(Size <= Leaf::Capacity && "overflow() didn't make room"); } // Inserted, update offset and leaf size. P.setSize(P.height(), Size); // Insert was the last node entry, update stops. if (Grow) setNodeStop(P.height(), b); } /// erase - erase the current interval and move to the next position. template <typename KeyT, typename ValT, unsigned N, typename Traits> void IntervalMap<KeyT, ValT, N, Traits>:: iterator::erase() { IntervalMap &IM = *this->map; IntervalMapImpl::Path &P = this->path; assert(P.valid() && "Cannot erase end()"); if (this->branched()) return treeErase(); IM.rootLeaf().erase(P.leafOffset(), IM.rootSize); P.setSize(0, --IM.rootSize); } /// treeErase - erase() for a branched tree. template <typename KeyT, typename ValT, unsigned N, typename Traits> void IntervalMap<KeyT, ValT, N, Traits>:: iterator::treeErase(bool UpdateRoot) { IntervalMap &IM = *this->map; IntervalMapImpl::Path &P = this->path; Leaf &Node = P.leaf<Leaf>(); // Nodes are not allowed to become empty. if (P.leafSize() == 1) { IM.deleteNode(&Node); eraseNode(IM.height); // Update rootBranchStart if we erased begin(). if (UpdateRoot && IM.branched() && P.valid() && P.atBegin()) IM.rootBranchStart() = P.leaf<Leaf>().start(0); return; } // Erase current entry. Node.erase(P.leafOffset(), P.leafSize()); unsigned NewSize = P.leafSize() - 1; P.setSize(IM.height, NewSize); // When we erase the last entry, update stop and move to a legal position. if (P.leafOffset() == NewSize) { setNodeStop(IM.height, Node.stop(NewSize - 1)); P.moveRight(IM.height); } else if (UpdateRoot && P.atBegin()) IM.rootBranchStart() = P.leaf<Leaf>().start(0); } /// eraseNode - Erase the current node at Level from its parent and move path to /// the first entry of the next sibling node. /// The node must be deallocated by the caller. 
/// @param Level 1..height, the root node cannot be erased.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
iterator::eraseNode(unsigned Level) {
  assert(Level && "Cannot erase root node");
  IntervalMap &IM = *this->map;
  IntervalMapImpl::Path &P = this->path;

  if (--Level == 0) {
    // Level 1 entries live directly in the root branch node.
    IM.rootBranch().erase(P.offset(0), IM.rootSize);
    P.setSize(0, --IM.rootSize);
    // If this cleared the root, switch to height=0.
    if (IM.empty()) {
      IM.switchRootToLeaf();
      this->setRoot(0);
      return;
    }
  } else {
    // Remove node ref from branch node at Level.
    Branch &Parent = P.node<Branch>(Level);
    if (P.size(Level) == 1) {
      // Branch node became empty, remove it recursively.
      IM.deleteNode(&Parent);
      eraseNode(Level);
    } else {
      // Branch node won't become empty.
      Parent.erase(P.offset(Level), P.size(Level));
      unsigned NewSize = P.size(Level) - 1;
      P.setSize(Level, NewSize);
      // If we removed the last branch, update stop and move to a legal pos.
      if (P.offset(Level) == NewSize) {
        setNodeStop(Level, Parent.stop(NewSize - 1));
        P.moveRight(Level);
      }
    }
  }
  // Update path cache for the new right sibling position.
  // The erased node's slot is now occupied by its former right sibling, so
  // point the path below this level at that sibling's first entry.
  if (P.valid()) {
    P.reset(Level + 1);
    P.offset(Level + 1) = 0;
  }
}

/// overflow - Distribute entries of the current node evenly among
/// its siblings and ensure that the current node is not full.
/// This may require allocating a new node.
/// @tparam NodeT The type of node at Level (Leaf or Branch).
/// @param Level path index of the overflowing node.
/// @return True when the tree height was changed.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
template <typename NodeT>
bool IntervalMap<KeyT, ValT, N, Traits>::
iterator::overflow(unsigned Level) {
  using namespace IntervalMapImpl;
  Path &P = this->path;
  // Up to 4 nodes participate in the redistribution: left sibling, current
  // node, right sibling, and possibly one newly allocated node.
  unsigned CurSize[4];
  NodeT *Node[4];
  unsigned Nodes = 0;
  unsigned Elements = 0;
  // Offset is rebased so it indexes into the concatenation of all
  // participating nodes' elements.
  unsigned Offset = P.offset(Level);

  // Do we have a left sibling?
  NodeRef LeftSib = P.getLeftSibling(Level);
  if (LeftSib) {
    Offset += Elements = CurSize[Nodes] = LeftSib.size();
    Node[Nodes++] = &LeftSib.get<NodeT>();
  }

  // Current node.
  Elements += CurSize[Nodes] = P.size(Level);
  Node[Nodes++] = &P.node<NodeT>(Level);

  // Do we have a right sibling?
  NodeRef RightSib = P.getRightSibling(Level);
  if (RightSib) {
    Elements += CurSize[Nodes] = RightSib.size();
    Node[Nodes++] = &RightSib.get<NodeT>();
  }

  // Do we need to allocate a new node?
  // We need room for one more element than currently present (the insert
  // that triggered this overflow), hence the +1.
  unsigned NewNode = 0;
  if (Elements + 1 > Nodes * NodeT::Capacity) {
    // Insert NewNode at the penultimate position, or after a single node.
    NewNode = Nodes == 1 ? 1 : Nodes - 1;
    CurSize[Nodes] = CurSize[NewNode];
    Node[Nodes] = Node[NewNode];
    CurSize[NewNode] = 0;
    Node[NewNode] = this->map->template newNode<NodeT>();
    ++Nodes;
  }

  // Compute the new element distribution.
  unsigned NewSize[4];
  IdxPair NewOffset = distribute(Nodes, Elements, NodeT::Capacity,
                                 CurSize, NewSize, Offset, true);
  adjustSiblingSizes(Node, Nodes, CurSize, NewSize);

  // Move current location to the leftmost node.
  if (LeftSib)
    P.moveLeft(Level);

  // Elements have been rearranged, now update node sizes and stops.
  bool SplitRoot = false;
  unsigned Pos = 0;
  for (;;) {
    KeyT Stop = Node[Pos]->stop(NewSize[Pos]-1);
    if (NewNode && Pos == NewNode) {
      // The brand-new node is not yet linked into the tree; insert it, which
      // may in turn split the root and increase the height.
      SplitRoot = insertNode(Level, NodeRef(Node[Pos], NewSize[Pos]), Stop);
      Level += SplitRoot;
    } else {
      P.setSize(Level, NewSize[Pos]);
      setNodeStop(Level, Stop);
    }
    if (Pos + 1 == Nodes)
      break;
    P.moveRight(Level);
    ++Pos;
  }

  // Where was I? Find NewOffset.
  while(Pos != NewOffset.first) {
    P.moveLeft(Level);
    --Pos;
  }
  P.offset(Level) = NewOffset.second;
  return SplitRoot;
}

//===----------------------------------------------------------------------===//
//---                       IntervalMapOverlaps                           ----//
//===----------------------------------------------------------------------===//

/// IntervalMapOverlaps - Iterate over the overlaps of mapped intervals in two
/// IntervalMaps.
/// The maps may be different, but the KeyT and Traits types
/// should be the same.
///
/// Typical uses:
///
/// 1. Test for overlap:
///    bool overlap = IntervalMapOverlaps(a, b).valid();
///
/// 2. Enumerate overlaps:
///    for (IntervalMapOverlaps I(a, b); I.valid() ; ++I) { ... }
///
template <typename MapA, typename MapB>
class IntervalMapOverlaps {
  typedef typename MapA::KeyType KeyType;
  typedef typename MapA::KeyTraits Traits;

  // Current position in each map; the pair (posA, posB) is the current
  // overlap when both iterators are valid and their intervals intersect.
  typename MapA::const_iterator posA;
  typename MapB::const_iterator posB;

  /// advance - Move posA and posB forward until reaching an overlap, or until
  /// either meets end.
  /// Don't move the iterators if they are already overlapping.
  void advance() {
    if (!valid())
      return;

    if (Traits::stopLess(posA.stop(), posB.start())) {
      // A ends before B begins. Catch up.
      posA.advanceTo(posB.start());
      if (!posA.valid() || !Traits::stopLess(posB.stop(), posA.start()))
        return;
    } else if (Traits::stopLess(posB.stop(), posA.start())) {
      // B ends before A begins. Catch up.
      posB.advanceTo(posA.start());
      if (!posB.valid() || !Traits::stopLess(posA.stop(), posB.start()))
        return;
    } else
      // Already overlapping.
      return;

    // Leapfrog the two iterators toward each other until they intersect or
    // one of them runs off the end.
    for (;;) {
      // Make a.end > b.start.
      posA.advanceTo(posB.start());
      if (!posA.valid() || !Traits::stopLess(posB.stop(), posA.start()))
        return;
      // Make b.end > a.start.
      posB.advanceTo(posA.start());
      if (!posB.valid() || !Traits::stopLess(posA.stop(), posB.start()))
        return;
    }
  }

public:
  /// IntervalMapOverlaps - Create an iterator for the overlaps of a and b.
  IntervalMapOverlaps(const MapA &a, const MapB &b)
    : posA(b.empty() ? a.end() : a.find(b.start())),
      posB(posA.valid() ? b.find(posA.start()) : b.end()) { advance(); }

  /// valid - Return true if iterator is at an overlap.
  bool valid() const {
    return posA.valid() && posB.valid();
  }

  /// a - access the left hand side in the overlap.
  const typename MapA::const_iterator &a() const { return posA; }

  /// b - access the right hand side in the overlap.
  const typename MapB::const_iterator &b() const { return posB; }

  /// start - Beginning of the overlapping interval.
  /// This is the later of the two interval start points.
  KeyType start() const {
    KeyType ak = a().start();
    KeyType bk = b().start();
    return Traits::startLess(ak, bk) ? bk : ak;
  }

  /// stop - End of the overlapping interval.
  /// This is the earlier of the two interval stop points.
  KeyType stop() const {
    KeyType ak = a().stop();
    KeyType bk = b().stop();
    return Traits::startLess(ak, bk) ? ak : bk;
  }

  /// skipA - Move to the next overlap that doesn't involve a().
  void skipA() {
    ++posA;
    advance();
  }

  /// skipB - Move to the next overlap that doesn't involve b().
  void skipB() {
    ++posB;
    advance();
  }

  /// Preincrement - Move to the next overlap.
  IntervalMapOverlaps &operator++() {
    // Bump the iterator that ends first. The other one may have more overlaps.
    if (Traits::startLess(posB.stop(), posA.stop()))
      skipB();
    else
      skipA();
    return *this;
  }

  /// advanceTo - Move to the first overlapping interval with
  /// stopLess(x, stop()).
  void advanceTo(KeyType x) {
    if (!valid())
      return;
    // Make sure advanceTo sees monotonic keys.
    if (Traits::stopLess(posA.stop(), x))
      posA.advanceTo(x);
    if (Traits::stopLess(posB.stop(), x))
      posB.advanceTo(x);
    advance();
  }
};

} // namespace llvm

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/PackedVector.h
//===- llvm/ADT/PackedVector.h - Packed values vector -----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the PackedVector class. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_PACKEDVECTOR_H #define LLVM_ADT_PACKEDVECTOR_H #include "llvm/ADT/BitVector.h" #include <limits> namespace llvm { template <typename T, unsigned BitNum, typename BitVectorTy, bool isSigned> class PackedVectorBase; // This won't be necessary if we can specialize members without specializing // the parent template. template <typename T, unsigned BitNum, typename BitVectorTy> class PackedVectorBase<T, BitNum, BitVectorTy, false> { protected: static T getValue(const BitVectorTy &Bits, unsigned Idx) { T val = T(); for (unsigned i = 0; i != BitNum; ++i) val = T(val | ((Bits[(Idx << (BitNum-1)) + i] ? 1UL : 0UL) << i)); return val; } static void setValue(BitVectorTy &Bits, unsigned Idx, T val) { assert((val >> BitNum) == 0 && "value is too big"); for (unsigned i = 0; i != BitNum; ++i) Bits[(Idx << (BitNum-1)) + i] = val & (T(1) << i); } }; template <typename T, unsigned BitNum, typename BitVectorTy> class PackedVectorBase<T, BitNum, BitVectorTy, true> { protected: static T getValue(const BitVectorTy &Bits, unsigned Idx) { T val = T(); for (unsigned i = 0; i != BitNum-1; ++i) val = T(val | ((Bits[(Idx << (BitNum-1)) + i] ? 
1UL : 0UL) << i)); if (Bits[(Idx << (BitNum-1)) + BitNum-1]) val = ~val; return val; } static void setValue(BitVectorTy &Bits, unsigned Idx, T val) { if (val < 0) { val = ~val; Bits.set((Idx << (BitNum-1)) + BitNum-1); } assert((val >> (BitNum-1)) == 0 && "value is too big"); for (unsigned i = 0; i != BitNum-1; ++i) Bits[(Idx << (BitNum-1)) + i] = val & (T(1) << i); } }; /// \brief Store a vector of values using a specific number of bits for each /// value. Both signed and unsigned types can be used, e.g /// @code /// PackedVector<signed, 2> vec; /// @endcode /// will create a vector accepting values -2, -1, 0, 1. Any other value will hit /// an assertion. template <typename T, unsigned BitNum, typename BitVectorTy = BitVector> class PackedVector : public PackedVectorBase<T, BitNum, BitVectorTy, std::numeric_limits<T>::is_signed> { BitVectorTy Bits; typedef PackedVectorBase<T, BitNum, BitVectorTy, std::numeric_limits<T>::is_signed> base; public: class reference { PackedVector &Vec; const unsigned Idx; reference(); // Undefined public: reference(PackedVector &vec, unsigned idx) : Vec(vec), Idx(idx) { } reference &operator=(T val) { Vec.setValue(Vec.Bits, Idx, val); return *this; } operator T() const { return Vec.getValue(Vec.Bits, Idx); } }; PackedVector() { } explicit PackedVector(unsigned size) : Bits(size << (BitNum-1)) { } bool empty() const { return Bits.empty(); } unsigned size() const { return Bits.size() >> (BitNum-1); } void clear() { Bits.clear(); } void resize(unsigned N) { Bits.resize(N << (BitNum-1)); } void reserve(unsigned N) { Bits.reserve(N << (BitNum-1)); } PackedVector &reset() { Bits.reset(); return *this; } void push_back(T val) { resize(size()+1); (*this)[size()-1] = val; } reference operator[](unsigned Idx) { return reference(*this, Idx); } T operator[](unsigned Idx) const { return base::getValue(Bits, Idx); } bool operator==(const PackedVector &RHS) const { return Bits == RHS.Bits; } bool operator!=(const PackedVector &RHS) const { return 
Bits != RHS.Bits; } PackedVector &operator|=(const PackedVector &RHS) { Bits |= RHS.Bits; return *this; } void swap(PackedVector &RHS) { Bits.swap(RHS.Bits); } }; // Leave BitNum=0 undefined. template <typename T> class PackedVector<T, 0>; } // end llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/ImmutableList.h
//==--- ImmutableList.h - Immutable (functional) list interface --*- C++ -*-==// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the ImmutableList class. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_IMMUTABLELIST_H #define LLVM_ADT_IMMUTABLELIST_H #include "llvm/ADT/FoldingSet.h" #include "llvm/Support/Allocator.h" #include "llvm/Support/DataTypes.h" #include <cassert> namespace llvm { template <typename T> class ImmutableListFactory; template <typename T> class ImmutableListImpl : public FoldingSetNode { T Head; const ImmutableListImpl* Tail; ImmutableListImpl(const T& head, const ImmutableListImpl* tail = 0) : Head(head), Tail(tail) {} friend class ImmutableListFactory<T>; void operator=(const ImmutableListImpl&) = delete; ImmutableListImpl(const ImmutableListImpl&) = delete; public: const T& getHead() const { return Head; } const ImmutableListImpl* getTail() const { return Tail; } static inline void Profile(FoldingSetNodeID& ID, const T& H, const ImmutableListImpl* L){ ID.AddPointer(L); ID.Add(H); } void Profile(FoldingSetNodeID& ID) { Profile(ID, Head, Tail); } }; /// ImmutableList - This class represents an immutable (functional) list. /// It is implemented as a smart pointer (wraps ImmutableListImpl), so it /// it is intended to always be copied by value as if it were a pointer. /// This interface matches ImmutableSet and ImmutableMap. ImmutableList /// objects should almost never be created directly, and instead should /// be created by ImmutableListFactory objects that manage the lifetime /// of a group of lists. When the factory object is reclaimed, all lists /// created by that factory are released as well. 
template <typename T> class ImmutableList { public: typedef T value_type; typedef ImmutableListFactory<T> Factory; private: const ImmutableListImpl<T>* X; public: // This constructor should normally only be called by ImmutableListFactory<T>. // There may be cases, however, when one needs to extract the internal pointer // and reconstruct a list object from that pointer. ImmutableList(const ImmutableListImpl<T>* x = 0) : X(x) {} const ImmutableListImpl<T>* getInternalPointer() const { return X; } class iterator { const ImmutableListImpl<T>* L; public: iterator() : L(0) {} iterator(ImmutableList l) : L(l.getInternalPointer()) {} iterator& operator++() { L = L->getTail(); return *this; } bool operator==(const iterator& I) const { return L == I.L; } bool operator!=(const iterator& I) const { return L != I.L; } const value_type& operator*() const { return L->getHead(); } ImmutableList getList() const { return L; } }; /// begin - Returns an iterator referring to the head of the list, or /// an iterator denoting the end of the list if the list is empty. iterator begin() const { return iterator(X); } /// end - Returns an iterator denoting the end of the list. This iterator /// does not refer to a valid list element. iterator end() const { return iterator(); } /// isEmpty - Returns true if the list is empty. bool isEmpty() const { return !X; } bool contains(const T& V) const { for (iterator I = begin(), E = end(); I != E; ++I) { if (*I == V) return true; } return false; } /// isEqual - Returns true if two lists are equal. Because all lists created /// from the same ImmutableListFactory are uniqued, this has O(1) complexity /// because it the contents of the list do not need to be compared. Note /// that you should only compare two lists created from the same /// ImmutableListFactory. bool isEqual(const ImmutableList& L) const { return X == L.X; } bool operator==(const ImmutableList& L) const { return isEqual(L); } /// getHead - Returns the head of the list. 
const T& getHead() { assert (!isEmpty() && "Cannot get the head of an empty list."); return X->getHead(); } /// getTail - Returns the tail of the list, which is another (possibly empty) /// ImmutableList. ImmutableList getTail() { return X ? X->getTail() : 0; } void Profile(FoldingSetNodeID& ID) const { ID.AddPointer(X); } }; template <typename T> class ImmutableListFactory { typedef ImmutableListImpl<T> ListTy; typedef FoldingSet<ListTy> CacheTy; CacheTy Cache; uintptr_t Allocator; bool ownsAllocator() const { return Allocator & 0x1 ? false : true; } BumpPtrAllocator& getAllocator() const { return *reinterpret_cast<BumpPtrAllocator*>(Allocator & ~0x1); } public: ImmutableListFactory() : Allocator(reinterpret_cast<uintptr_t>(new BumpPtrAllocator())) {} ImmutableListFactory(BumpPtrAllocator& Alloc) : Allocator(reinterpret_cast<uintptr_t>(&Alloc) | 0x1) {} ~ImmutableListFactory() { if (ownsAllocator()) delete &getAllocator(); } ImmutableList<T> concat(const T& Head, ImmutableList<T> Tail) { // Profile the new list to see if it already exists in our cache. FoldingSetNodeID ID; void* InsertPos; const ListTy* TailImpl = Tail.getInternalPointer(); ListTy::Profile(ID, Head, TailImpl); ListTy* L = Cache.FindNodeOrInsertPos(ID, InsertPos); if (!L) { // The list does not exist in our cache. Create it. BumpPtrAllocator& A = getAllocator(); L = (ListTy*) A.Allocate<ListTy>(); new (L) ListTy(Head, TailImpl); // Insert the new list into the cache. Cache.InsertNode(L, InsertPos); } return L; } ImmutableList<T> add(const T& D, ImmutableList<T> L) { return concat(D, L); } ImmutableList<T> getEmptyList() const { return ImmutableList<T>(0); } ImmutableList<T> create(const T& X) { return Concat(X, getEmptyList()); } }; //===----------------------------------------------------------------------===// // Partially-specialized Traits. 
// // /////////////////////////////////////////////////////////////////////////////// template<typename T> struct DenseMapInfo; template<typename T> struct DenseMapInfo<ImmutableList<T> > { static inline ImmutableList<T> getEmptyKey() { return reinterpret_cast<ImmutableListImpl<T>*>(-1); } static inline ImmutableList<T> getTombstoneKey() { return reinterpret_cast<ImmutableListImpl<T>*>(-2); } static unsigned getHashValue(ImmutableList<T> X) { uintptr_t PtrVal = reinterpret_cast<uintptr_t>(X.getInternalPointer()); return (unsigned((uintptr_t)PtrVal) >> 4) ^ (unsigned((uintptr_t)PtrVal) >> 9); } static bool isEqual(ImmutableList<T> X1, ImmutableList<T> X2) { return X1 == X2; } }; template <typename T> struct isPodLike; template <typename T> struct isPodLike<ImmutableList<T> > { static const bool value = true; }; } // end llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/APInt.h
//===-- llvm/ADT/APInt.h - For Arbitrary Precision Integer -----*- C++ -*--===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// /// \file /// \brief This file implements a class to represent arbitrary precision /// integral constant values and operations on them. /// //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_APINT_H #define LLVM_ADT_APINT_H #include "llvm/ADT/ArrayRef.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/MathExtras.h" #include <cassert> #include <climits> #include <cstring> #include <string> namespace llvm { class FoldingSetNodeID; class StringRef; class hash_code; class raw_ostream; template <typename T> class SmallVectorImpl; // An unsigned host type used as a single part of a multi-part // bignum. typedef uint64_t integerPart; const unsigned int host_char_bit = 8; const unsigned int integerPartWidth = host_char_bit * static_cast<unsigned int>(sizeof(integerPart)); //===----------------------------------------------------------------------===// // APInt Class // // /////////////////////////////////////////////////////////////////////////////// /// \brief Class for arbitrary precision integers. /// /// APInt is a functional replacement for common case unsigned integer type like /// "unsigned", "unsigned long" or "uint64_t", but also allows non-byte-width /// integer sizes and large integer value types such as 3-bits, 15-bits, or more /// than 64-bits of precision. APInt provides a variety of arithmetic operators /// and methods to manipulate integer values of any bit-width. It supports both /// the typical integer arithmetic and comparison operations as well as bitwise /// manipulation. 
/// /// The class has several invariants worth noting: /// * All bit, byte, and word positions are zero-based. /// * Once the bit width is set, it doesn't change except by the Truncate, /// SignExtend, or ZeroExtend operations. /// * All binary operators must be on APInt instances of the same bit width. /// Attempting to use these operators on instances with different bit /// widths will yield an assertion. /// * The value is stored canonically as an unsigned value. For operations /// where it makes a difference, there are both signed and unsigned variants /// of the operation. For example, sdiv and udiv. However, because the bit /// widths must be the same, operations such as Mul and Add produce the same /// results regardless of whether the values are interpreted as signed or /// not. /// * In general, the class tries to follow the style of computation that LLVM /// uses in its IR. This simplifies its use for LLVM. /// class APInt { unsigned BitWidth; ///< The number of bits in this APInt. /// This union is used to store the integer value. When the /// integer bit-width <= 64, it uses VAL, otherwise it uses pVal. union { uint64_t VAL; ///< Used to store the <= 64 bits integer value. uint64_t *pVal; ///< Used to store the >64 bits integer value. }; /// This enum is used to hold the constants we needed for APInt. enum { /// Bits in a word APINT_BITS_PER_WORD = static_cast<unsigned int>(sizeof(uint64_t)) * CHAR_BIT, /// Byte size of a word APINT_WORD_SIZE = static_cast<unsigned int>(sizeof(uint64_t)) }; friend struct DenseMapAPIntKeyInfo; /// \brief Fast internal constructor /// /// This constructor is used only internally for speed of construction of /// temporaries. It is unsafe for general use so it is not public. APInt(uint64_t *val, unsigned bits) : BitWidth(bits), pVal(val) {} /// \brief Determine if this APInt just has one word to store value. /// /// \returns true if the number of bits <= 64, false otherwise. 
bool isSingleWord() const { return BitWidth <= APINT_BITS_PER_WORD; } /// \brief Determine which word a bit is in. /// /// \returns the word position for the specified bit position. static unsigned whichWord(unsigned bitPosition) { return bitPosition / APINT_BITS_PER_WORD; } /// \brief Determine which bit in a word a bit is in. /// /// \returns the bit position in a word for the specified bit position /// in the APInt. static unsigned whichBit(unsigned bitPosition) { return bitPosition % APINT_BITS_PER_WORD; } /// \brief Get a single bit mask. /// /// \returns a uint64_t with only bit at "whichBit(bitPosition)" set /// This method generates and returns a uint64_t (word) mask for a single /// bit at a specific bit position. This is used to mask the bit in the /// corresponding word. static uint64_t maskBit(unsigned bitPosition) { return 1ULL << whichBit(bitPosition); } /// \brief Clear unused high order bits /// /// This method is used internally to clear the top "N" bits in the high order /// word that are not used by the APInt. This is needed after the most /// significant word is assigned a value to ensure that those bits are /// zero'd out. APInt &clearUnusedBits() { // Compute how many bits are used in the final word unsigned wordBits = BitWidth % APINT_BITS_PER_WORD; if (wordBits == 0) // If all bits are used, we want to leave the value alone. This also // avoids the undefined behavior of >> when the shift is the same size as // the word size (64). return *this; // Mask out the high bits. uint64_t mask = ~uint64_t(0ULL) >> (APINT_BITS_PER_WORD - wordBits); if (isSingleWord()) VAL &= mask; else pVal[getNumWords() - 1] &= mask; return *this; } /// \brief Get the word corresponding to a bit position /// \returns the corresponding word for the specified bit position. uint64_t getWord(unsigned bitPosition) const { return isSingleWord() ? 
VAL : pVal[whichWord(bitPosition)]; } /// \brief Convert a char array into an APInt /// /// \param radix 2, 8, 10, 16, or 36 /// Converts a string into a number. The string must be non-empty /// and well-formed as a number of the given base. The bit-width /// must be sufficient to hold the result. /// /// This is used by the constructors that take string arguments. /// /// StringRef::getAsInteger is superficially similar but (1) does /// not assume that the string is well-formed and (2) grows the /// result to hold the input. void fromString(unsigned numBits, StringRef str, uint8_t radix); /// \brief An internal division function for dividing APInts. /// /// This is used by the toString method to divide by the radix. It simply /// provides a more convenient form of divide for internal use since KnuthDiv /// has specific constraints on its inputs. If those constraints are not met /// then it provides a simpler form of divide. static void divide(const APInt LHS, unsigned lhsWords, const APInt &RHS, unsigned rhsWords, APInt *Quotient, APInt *Remainder); /// out-of-line slow case for inline constructor void initSlowCase(unsigned numBits, uint64_t val, bool isSigned); /// shared code between two array constructors void initFromArray(ArrayRef<uint64_t> array); /// out-of-line slow case for inline copy constructor void initSlowCase(const APInt &that); /// out-of-line slow case for shl APInt shlSlowCase(unsigned shiftAmt) const; /// out-of-line slow case for operator& APInt AndSlowCase(const APInt &RHS) const; /// out-of-line slow case for operator| APInt OrSlowCase(const APInt &RHS) const; /// out-of-line slow case for operator^ APInt XorSlowCase(const APInt &RHS) const; /// out-of-line slow case for operator= APInt &AssignSlowCase(const APInt &RHS); /// out-of-line slow case for operator== bool EqualSlowCase(const APInt &RHS) const; /// out-of-line slow case for operator== bool EqualSlowCase(uint64_t Val) const; /// out-of-line slow case for countLeadingZeros unsigned 
countLeadingZerosSlowCase() const; /// out-of-line slow case for countTrailingOnes unsigned countTrailingOnesSlowCase() const; /// out-of-line slow case for countPopulation unsigned countPopulationSlowCase() const; public: /// \name Constructors /// @{ /// \brief Create a new APInt of numBits width, initialized as val. /// /// If isSigned is true then val is treated as if it were a signed value /// (i.e. as an int64_t) and the appropriate sign extension to the bit width /// will be done. Otherwise, no sign extension occurs (high order bits beyond /// the range of val are zero filled). /// /// \param numBits the bit width of the constructed APInt /// \param val the initial value of the APInt /// \param isSigned how to treat signedness of val APInt(unsigned numBits, uint64_t val, bool isSigned = false) : BitWidth(numBits), VAL(0) { assert(BitWidth && "bitwidth too small"); if (isSingleWord()) VAL = val; else initSlowCase(numBits, val, isSigned); clearUnusedBits(); } /// \brief Construct an APInt of numBits width, initialized as bigVal[]. /// /// Note that bigVal.size() can be smaller or larger than the corresponding /// bit width but any extraneous bits will be dropped. /// /// \param numBits the bit width of the constructed APInt /// \param bigVal a sequence of words to form the initial value of the APInt APInt(unsigned numBits, ArrayRef<uint64_t> bigVal); /// Equivalent to APInt(numBits, ArrayRef<uint64_t>(bigVal, numWords)), but /// deprecated because this constructor is prone to ambiguity with the /// APInt(unsigned, uint64_t, bool) constructor. /// /// If this overload is ever deleted, care should be taken to prevent calls /// from being incorrectly captured by the APInt(unsigned, uint64_t, bool) /// constructor. APInt(unsigned numBits, unsigned numWords, const uint64_t bigVal[]); /// \brief Construct an APInt from a string representation. /// /// This constructor interprets the string \p str in the given radix. 
The /// interpretation stops when the first character that is not suitable for the /// radix is encountered, or the end of the string. Acceptable radix values /// are 2, 8, 10, 16, and 36. It is an error for the value implied by the /// string to require more bits than numBits. /// /// \param numBits the bit width of the constructed APInt /// \param str the string to be interpreted /// \param radix the radix to use for the conversion APInt(unsigned numBits, StringRef str, uint8_t radix); /// Simply makes *this a copy of that. /// @brief Copy Constructor. APInt(const APInt &that) : BitWidth(that.BitWidth), VAL(0) { if (isSingleWord()) VAL = that.VAL; else initSlowCase(that); } /// \brief Move Constructor. APInt(APInt &&that) : BitWidth(that.BitWidth), VAL(that.VAL) { that.BitWidth = 0; } /// \brief Destructor. ~APInt() { if (needsCleanup()) delete[] pVal; } /// \brief Default constructor that creates an uninitialized APInt. /// /// This is useful for object deserialization (pair this with the static /// method Read). explicit APInt() : BitWidth(1) {} /// \brief Returns whether this instance allocated memory. bool needsCleanup() const { return !isSingleWord(); } /// Used to insert APInt objects, or objects that contain APInt objects, into /// FoldingSets. void Profile(FoldingSetNodeID &id) const; /// @} /// \name Value Tests /// @{ /// \brief Determine sign of this APInt. /// /// This tests the high bit of this APInt to determine if it is set. /// /// \returns true if this APInt is negative, false otherwise bool isNegative() const { return (*this)[BitWidth - 1]; } /// \brief Determine if this APInt Value is non-negative (>= 0) /// /// This tests the high bit of the APInt to determine if it is unset. bool isNonNegative() const { return !isNegative(); } /// \brief Determine if this APInt Value is positive. /// /// This tests if the value of this APInt is positive (> 0). Note /// that 0 is not a positive value. /// /// \returns true if this APInt is positive. 
bool isStrictlyPositive() const { return isNonNegative() && !!*this; }

/// \brief Determine if all bits are set.
///
/// This checks whether every bit of this APInt is set.
bool isAllOnesValue() const {
  if (isSingleWord())
    // Compare against an all-ones word truncated to BitWidth.
    return VAL == ~integerPart(0) >> (APINT_BITS_PER_WORD - BitWidth);
  return countPopulationSlowCase() == BitWidth;
}

/// \brief Determine if this is the largest unsigned value.
///
/// This checks to see if the value of this APInt is the maximum unsigned
/// value for the APInt's bit width.
bool isMaxValue() const { return isAllOnesValue(); }

/// \brief Determine if this is the largest signed value.
///
/// This checks to see if the value of this APInt is the maximum signed
/// value for the APInt's bit width.
bool isMaxSignedValue() const {
  // Max signed value is 0 followed by all ones: sign bit clear, exactly
  // BitWidth-1 bits set.
  return !isNegative() && countPopulation() == BitWidth - 1;
}

/// \brief Determine if this is the smallest unsigned value.
///
/// This checks to see if the value of this APInt is the minimum unsigned
/// value for the APInt's bit width.
bool isMinValue() const { return !*this; }

/// \brief Determine if this is the smallest signed value.
///
/// This checks to see if the value of this APInt is the minimum signed
/// value for the APInt's bit width.
bool isMinSignedValue() const { return isNegative() && isPowerOf2(); }

/// \brief Check if this APInt has an N-bits unsigned integer value.
bool isIntN(unsigned N) const {
  assert(N && "N == 0 ???");
  return getActiveBits() <= N;
}

/// \brief Check if this APInt has an N-bits signed integer value.
bool isSignedIntN(unsigned N) const {
  assert(N && "N == 0 ???");
  return getMinSignedBits() <= N;
}

/// \brief Check if this APInt's value is a power of two greater than zero.
///
/// \returns true if the argument APInt value is a power of two > 0.
bool isPowerOf2() const {
  if (isSingleWord())
    return isPowerOf2_64(VAL);
  return countPopulationSlowCase() == 1;
}

/// \brief Check if the APInt's value is returned by getSignBit.
/// /// \returns true if this is the value returned by getSignBit. bool isSignBit() const { return isMinSignedValue(); } /// \brief Convert APInt to a boolean value. /// /// This converts the APInt to a boolean value as a test against zero. bool getBoolValue() const { return !!*this; } /// If this value is smaller than the specified limit, return it, otherwise /// return the limit value. This causes the value to saturate to the limit. uint64_t getLimitedValue(uint64_t Limit = ~0ULL) const { return (getActiveBits() > 64 || getZExtValue() > Limit) ? Limit : getZExtValue(); } /// \brief Check if the APInt consists of a repeated bit pattern. /// /// e.g. 0x01010101 satisfies isSplat(8). /// \param SplatSizeInBits The size of the pattern in bits. Must divide bit /// width without remainder. bool isSplat(unsigned SplatSizeInBits) const; /// @} /// \name Value Generators /// @{ /// \brief Gets maximum unsigned value of APInt for specific bit width. static APInt getMaxValue(unsigned numBits) { return getAllOnesValue(numBits); } /// \brief Gets maximum signed value of APInt for a specific bit width. static APInt getSignedMaxValue(unsigned numBits) { APInt API = getAllOnesValue(numBits); API.clearBit(numBits - 1); return API; } /// \brief Gets minimum unsigned value of APInt for a specific bit width. static APInt getMinValue(unsigned numBits) { return APInt(numBits, 0); } /// \brief Gets minimum signed value of APInt for a specific bit width. static APInt getSignedMinValue(unsigned numBits) { APInt API(numBits, 0); API.setBit(numBits - 1); return API; } /// \brief Get the SignBit for a specific bit width. /// /// This is just a wrapper function of getSignedMinValue(), and it helps code /// readability when we want to get a SignBit. static APInt getSignBit(unsigned BitWidth) { return getSignedMinValue(BitWidth); } /// \brief Get the all-ones value. /// /// \returns the all-ones value for an APInt of the specified bit-width. 
static APInt getAllOnesValue(unsigned numBits) {
  // -1 sign-extended to numBits yields all ones at any width.
  return APInt(numBits, UINT64_MAX, true);
}

/// \brief Get the '0' value.
///
/// \returns the '0' value for an APInt of the specified bit-width.
static APInt getNullValue(unsigned numBits) { return APInt(numBits, 0); }

/// \brief Compute an APInt containing numBits highbits from this APInt.
///
/// Get an APInt with the same BitWidth as this APInt, just zero mask
/// the low bits and right shift to the least significant bit.
///
/// \returns the high "numBits" bits of this APInt.
APInt getHiBits(unsigned numBits) const;

/// \brief Compute an APInt containing numBits lowbits from this APInt.
///
/// Get an APInt with the same BitWidth as this APInt, just zero mask
/// the high bits.
///
/// \returns the low "numBits" bits of this APInt.
APInt getLoBits(unsigned numBits) const;

/// \brief Return an APInt with exactly one bit set in the result.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo) {
  APInt Res(numBits, 0);
  Res.setBit(BitNo);
  return Res;
}

/// \brief Get a value with a block of bits set.
///
/// Constructs an APInt value that has a contiguous range of bits set. The
/// bits from loBit (inclusive) to hiBit (exclusive) will be set. All other
/// bits will be zero. For example, with parameters (32, 0, 16) you would get
/// 0x0000FFFF. If hiBit is less than loBit then the set bits "wrap". For
/// example, with parameters (32, 28, 4), you would get 0xF000000F.
///
/// \param numBits the intended bit width of the result
/// \param loBit the index of the lowest bit set.
/// \param hiBit the index of the highest bit set.
///
/// \returns An APInt value with the requested bits set.
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit) {
  assert(hiBit <= numBits && "hiBit out of range");
  assert(loBit < numBits && "loBit out of range");
  if (hiBit < loBit)
    // Wrapped range: low bits [0, hiBit) plus high bits [loBit, numBits).
    return getLowBitsSet(numBits, hiBit) |
           getHighBitsSet(numBits, numBits - loBit);
  return getLowBitsSet(numBits, hiBit - loBit).shl(loBit);
}

/// \brief Get a value with high bits set
///
/// Constructs an APInt value that has the top hiBitsSet bits set.
///
/// \param numBits the bitwidth of the result
/// \param hiBitsSet the number of high-order bits set in the result.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet) {
  assert(hiBitsSet <= numBits && "Too many bits to set!");
  // Handle a degenerate case, to avoid shifting by word size
  if (hiBitsSet == 0)
    return APInt(numBits, 0);
  unsigned shiftAmt = numBits - hiBitsSet;
  // For small values, return quickly
  if (numBits <= APINT_BITS_PER_WORD)
    return APInt(numBits, ~0ULL << shiftAmt);
  return getAllOnesValue(numBits).shl(shiftAmt);
}

/// \brief Get a value with low bits set
///
/// Constructs an APInt value that has the bottom loBitsSet bits set.
///
/// \param numBits the bitwidth of the result
/// \param loBitsSet the number of low-order bits set in the result.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet) {
  assert(loBitsSet <= numBits && "Too many bits to set!");
  // Handle a degenerate case, to avoid shifting by word size
  if (loBitsSet == 0)
    return APInt(numBits, 0);
  if (loBitsSet == APINT_BITS_PER_WORD)
    return APInt(numBits, UINT64_MAX);
  // For small values, return quickly.
  if (loBitsSet <= APINT_BITS_PER_WORD)
    return APInt(numBits, UINT64_MAX >> (APINT_BITS_PER_WORD - loBitsSet));
  return getAllOnesValue(numBits).lshr(numBits - loBitsSet);
}

/// \brief Return a value containing V broadcasted over NewLen bits.
static APInt getSplat(unsigned NewLen, const APInt &V) {
  assert(NewLen >= V.getBitWidth() && "Can't splat to smaller bit width!");

  APInt Val = V.zextOrSelf(NewLen);
  // Doubling strategy: each iteration ORs the value with a copy of itself
  // shifted by the current pattern length, doubling the replicated span.
  for (unsigned I = V.getBitWidth(); I < NewLen; I <<= 1)
    Val |= Val << I;

  return Val;
}

/// \brief Determine if two APInts have the same value, after zero-extending
/// one of them (if needed!) to ensure that the bit-widths match.
static bool isSameValue(const APInt &I1, const APInt &I2) {
  if (I1.getBitWidth() == I2.getBitWidth())
    return I1 == I2;

  if (I1.getBitWidth() > I2.getBitWidth())
    return I1 == I2.zext(I1.getBitWidth());

  return I1.zext(I2.getBitWidth()) == I2;
}

/// \brief Overload to compute a hash_code for an APInt value.
friend hash_code hash_value(const APInt &Arg);

/// This function returns a pointer to the internal storage of the APInt.
/// This is useful for writing out the APInt in binary form without any
/// conversions.
const uint64_t *getRawData() const {
  if (isSingleWord())
    return &VAL;
  return &pVal[0];
}

/// @}
/// \name Unary Operators
/// @{

/// \brief Postfix increment operator.
///
/// \returns a new APInt value representing *this incremented by one
const APInt operator++(int) {
  APInt API(*this);
  ++(*this);
  return API;
}

/// \brief Prefix increment operator.
///
/// \returns *this incremented by one
APInt &operator++();

/// \brief Postfix decrement operator.
///
/// \returns a new APInt representing *this decremented by one.
const APInt operator--(int) {
  APInt API(*this);
  --(*this);
  return API;
}

/// \brief Prefix decrement operator.
///
/// \returns *this decremented by one.
APInt &operator--();

/// \brief Unary bitwise complement operator.
///
/// Performs a bitwise complement operation on this APInt.
///
/// \returns an APInt that is the bitwise complement of *this
APInt operator~() const {
  APInt Result(*this);
  Result.flipAllBits();
  return Result;
}

/// \brief Unary negation operator
///
/// Negates *this using two's complement logic.
/// /// \returns An APInt value representing the negation of *this. APInt operator-() const { return APInt(BitWidth, 0) - (*this); } /// \brief Logical negation operator. /// /// Performs logical negation operation on this APInt. /// /// \returns true if *this is zero, false otherwise. bool operator!() const { if (isSingleWord()) return !VAL; for (unsigned i = 0; i != getNumWords(); ++i) if (pVal[i]) return false; return true; } /// @} /// \name Assignment Operators /// @{ /// \brief Copy assignment operator. /// /// \returns *this after assignment of RHS. APInt &operator=(const APInt &RHS) { // If the bitwidths are the same, we can avoid mucking with memory if (isSingleWord() && RHS.isSingleWord()) { VAL = RHS.VAL; BitWidth = RHS.BitWidth; return clearUnusedBits(); } return AssignSlowCase(RHS); } /// @brief Move assignment operator. APInt &operator=(APInt &&that) { if (!isSingleWord()) { // The MSVC STL shipped in 2013 requires that self move assignment be a // no-op. Otherwise algorithms like stable_sort will produce answers // where half of the output is left in a moved-from state. if (this == &that) return *this; delete[] pVal; } // Use memcpy so that type based alias analysis sees both VAL and pVal // as modified. memcpy(&VAL, &that.VAL, sizeof(uint64_t)); // If 'this == &that', avoid zeroing our own bitwidth by storing to 'that' // first. unsigned ThatBitWidth = that.BitWidth; that.BitWidth = 0; BitWidth = ThatBitWidth; return *this; } /// \brief Assignment operator. /// /// The RHS value is assigned to *this. If the significant bits in RHS exceed /// the bit width, the excess bits are truncated. If the bit width is larger /// than 64, the value is zero filled in the unspecified high order bits. /// /// \returns *this after assignment of RHS value. APInt &operator=(uint64_t RHS); /// \brief Bitwise AND assignment operator. /// /// Performs a bitwise AND operation on this APInt and RHS. The result is /// assigned to *this. 
/// /// \returns *this after ANDing with RHS. APInt &operator&=(const APInt &RHS); /// \brief Bitwise OR assignment operator. /// /// Performs a bitwise OR operation on this APInt and RHS. The result is /// assigned *this; /// /// \returns *this after ORing with RHS. APInt &operator|=(const APInt &RHS); /// \brief Bitwise OR assignment operator. /// /// Performs a bitwise OR operation on this APInt and RHS. RHS is /// logically zero-extended or truncated to match the bit-width of /// the LHS. APInt &operator|=(uint64_t RHS) { if (isSingleWord()) { VAL |= RHS; clearUnusedBits(); } else { pVal[0] |= RHS; } return *this; } /// \brief Bitwise XOR assignment operator. /// /// Performs a bitwise XOR operation on this APInt and RHS. The result is /// assigned to *this. /// /// \returns *this after XORing with RHS. APInt &operator^=(const APInt &RHS); /// \brief Multiplication assignment operator. /// /// Multiplies this APInt by RHS and assigns the result to *this. /// /// \returns *this APInt &operator*=(const APInt &RHS); /// \brief Addition assignment operator. /// /// Adds RHS to *this and assigns the result to *this. /// /// \returns *this APInt &operator+=(const APInt &RHS); /// \brief Subtraction assignment operator. /// /// Subtracts RHS from *this and assigns the result to *this. /// /// \returns *this APInt &operator-=(const APInt &RHS); /// \brief Left-shift assignment function. /// /// Shifts *this left by shiftAmt and assigns the result to *this. /// /// \returns *this after shifting left by shiftAmt APInt &operator<<=(unsigned shiftAmt) { *this = shl(shiftAmt); return *this; } /// @} /// \name Binary Operators /// @{ /// \brief Bitwise AND operator. /// /// Performs a bitwise AND operation on *this and RHS. /// /// \returns An APInt value representing the bitwise AND of *this and RHS. 
APInt operator&(const APInt &RHS) const {
  assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
  if (isSingleWord())
    return APInt(getBitWidth(), VAL & RHS.VAL);
  return AndSlowCase(RHS);
}

/// \brief Bitwise AND function.
///
/// Performs a bitwise AND on *this and RHS; implemented by simply calling
/// operator&.
APInt LLVM_ATTRIBUTE_UNUSED_RESULT And(const APInt &RHS) const {
  return this->operator&(RHS);
}

/// \brief Bitwise OR operator.
///
/// Performs a bitwise OR operation on *this and RHS.
///
/// \returns An APInt value representing the bitwise OR of *this and RHS.
APInt operator|(const APInt &RHS) const {
  assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
  if (isSingleWord())
    return APInt(getBitWidth(), VAL | RHS.VAL);
  return OrSlowCase(RHS);
}

/// \brief Bitwise OR function.
///
/// Performs a bitwise or on *this and RHS. This is implemented by simply
/// calling operator|.
///
/// \returns An APInt value representing the bitwise OR of *this and RHS.
APInt LLVM_ATTRIBUTE_UNUSED_RESULT Or(const APInt &RHS) const {
  return this->operator|(RHS);
}

/// \brief Bitwise XOR operator.
///
/// Performs a bitwise XOR operation on *this and RHS.
///
/// \returns An APInt value representing the bitwise XOR of *this and RHS.
APInt operator^(const APInt &RHS) const {
  assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
  if (isSingleWord())
    return APInt(BitWidth, VAL ^ RHS.VAL);
  return XorSlowCase(RHS);
}

/// \brief Bitwise XOR function.
///
/// Performs a bitwise XOR operation on *this and RHS. This is implemented
/// through the usage of operator^.
///
/// \returns An APInt value representing the bitwise XOR of *this and RHS.
APInt LLVM_ATTRIBUTE_UNUSED_RESULT Xor(const APInt &RHS) const {
  return this->operator^(RHS);
}

/// \brief Multiplication operator.
///
/// Multiplies this APInt by RHS and returns the result.
APInt operator*(const APInt &RHS) const;

/// \brief Addition operator.
///
/// Adds RHS to this APInt and returns the result.
APInt operator+(const APInt &RHS) const;
APInt operator+(uint64_t RHS) const { return (*this) + APInt(BitWidth, RHS); }

/// \brief Subtraction operator.
///
/// Subtracts RHS from this APInt and returns the result.
APInt operator-(const APInt &RHS) const;
APInt operator-(uint64_t RHS) const { return (*this) - APInt(BitWidth, RHS); }

/// \brief Left logical shift operator.
///
/// Shifts this APInt left by \p Bits and returns the result.
APInt operator<<(unsigned Bits) const { return shl(Bits); }

/// \brief Left logical shift operator.
///
/// Shifts this APInt left by \p Bits and returns the result.
APInt operator<<(const APInt &Bits) const { return shl(Bits); }

/// \brief Arithmetic right-shift function.
///
/// Arithmetic right-shift this APInt by shiftAmt.
APInt LLVM_ATTRIBUTE_UNUSED_RESULT ashr(unsigned shiftAmt) const;

/// \brief Logical right-shift function.
///
/// Logical right-shift this APInt by shiftAmt.
APInt LLVM_ATTRIBUTE_UNUSED_RESULT lshr(unsigned shiftAmt) const;

/// \brief Left-shift function.
///
/// Left-shift this APInt by shiftAmt.
APInt LLVM_ATTRIBUTE_UNUSED_RESULT shl(unsigned shiftAmt) const {
  assert(shiftAmt <= BitWidth && "Invalid shift amount");
  if (isSingleWord()) {
    if (shiftAmt >= BitWidth)
      return APInt(BitWidth, 0); // avoid undefined shift results
    return APInt(BitWidth, VAL << shiftAmt);
  }
  return shlSlowCase(shiftAmt);
}

/// \brief Rotate left by rotateAmt.
APInt LLVM_ATTRIBUTE_UNUSED_RESULT rotl(unsigned rotateAmt) const;

/// \brief Rotate right by rotateAmt.
APInt LLVM_ATTRIBUTE_UNUSED_RESULT rotr(unsigned rotateAmt) const;

/// \brief Arithmetic right-shift function.
///
/// Arithmetic right-shift this APInt by shiftAmt.
APInt LLVM_ATTRIBUTE_UNUSED_RESULT ashr(const APInt &shiftAmt) const;

/// \brief Logical right-shift function.
///
/// Logical right-shift this APInt by shiftAmt.
APInt LLVM_ATTRIBUTE_UNUSED_RESULT lshr(const APInt &shiftAmt) const;

/// \brief Left-shift function.
///
/// Left-shift this APInt by shiftAmt.
APInt LLVM_ATTRIBUTE_UNUSED_RESULT shl(const APInt &shiftAmt) const;

/// \brief Rotate left by rotateAmt.
APInt LLVM_ATTRIBUTE_UNUSED_RESULT rotl(const APInt &rotateAmt) const;

/// \brief Rotate right by rotateAmt.
APInt LLVM_ATTRIBUTE_UNUSED_RESULT rotr(const APInt &rotateAmt) const;

/// \brief Unsigned division operation.
///
/// Perform an unsigned divide operation on this APInt by RHS. Both this and
/// RHS are treated as unsigned quantities for purposes of this division.
///
/// \returns a new APInt value containing the division result
APInt LLVM_ATTRIBUTE_UNUSED_RESULT udiv(const APInt &RHS) const;

/// \brief Signed division function for APInt.
///
/// Signed divide this APInt by APInt RHS.
APInt LLVM_ATTRIBUTE_UNUSED_RESULT sdiv(const APInt &RHS) const;

/// \brief Unsigned remainder operation.
///
/// Perform an unsigned remainder operation on this APInt with RHS being the
/// divisor. Both this and RHS are treated as unsigned quantities for purposes
/// of this operation. Note that this is a true remainder operation and not a
/// modulo operation because the sign follows the sign of the dividend which
/// is *this.
///
/// \returns a new APInt value containing the remainder result
APInt LLVM_ATTRIBUTE_UNUSED_RESULT urem(const APInt &RHS) const;

/// \brief Function for signed remainder operation.
///
/// Signed remainder operation on APInt.
APInt LLVM_ATTRIBUTE_UNUSED_RESULT srem(const APInt &RHS) const;

/// \brief Dual division/remainder interface.
///
/// Sometimes it is convenient to divide two APInt values and obtain both the
/// quotient and remainder. This function does both operations in the same
/// computation making it a little more efficient. The pair of input arguments
/// may overlap with the pair of output arguments. It is safe to call
/// udivrem(X, Y, X, Y), for example.
static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient,
                    APInt &Remainder);

static void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient,
                    APInt &Remainder);

// Operations that return overflow indicators.
APInt sadd_ov(const APInt &RHS, bool &Overflow) const;
APInt uadd_ov(const APInt &RHS, bool &Overflow) const;
APInt ssub_ov(const APInt &RHS, bool &Overflow) const;
APInt usub_ov(const APInt &RHS, bool &Overflow) const;
APInt sdiv_ov(const APInt &RHS, bool &Overflow) const;
APInt smul_ov(const APInt &RHS, bool &Overflow) const;
APInt umul_ov(const APInt &RHS, bool &Overflow) const;
APInt sshl_ov(const APInt &Amt, bool &Overflow) const;
APInt ushl_ov(const APInt &Amt, bool &Overflow) const;

/// \brief Array-indexing support.
///
/// \returns the bit value at bitPosition
bool operator[](unsigned bitPosition) const {
  assert(bitPosition < getBitWidth() && "Bit position out of bounds!");
  return (maskBit(bitPosition) &
          (isSingleWord() ? VAL : pVal[whichWord(bitPosition)])) !=
         0;
}

/// @}
/// \name Comparison Operators
/// @{

/// \brief Equality operator.
///
/// Compares this APInt with RHS for the validity of the equality
/// relationship.
bool operator==(const APInt &RHS) const {
  assert(BitWidth == RHS.BitWidth && "Comparison requires equal bit widths");
  if (isSingleWord())
    return VAL == RHS.VAL;
  return EqualSlowCase(RHS);
}

/// \brief Equality operator.
///
/// Compares this APInt with a uint64_t for the validity of the equality
/// relationship.
///
/// \returns true if *this == Val
bool operator==(uint64_t Val) const {
  if (isSingleWord())
    return VAL == Val;
  return EqualSlowCase(Val);
}

/// \brief Equality comparison.
///
/// Compares this APInt with RHS for the validity of the equality
/// relationship.
///
/// \returns true if *this == Val
bool eq(const APInt &RHS) const { return (*this) == RHS; }

/// \brief Inequality operator.
///
/// Compares this APInt with RHS for the validity of the inequality
/// relationship.
/// /// \returns true if *this != Val bool operator!=(const APInt &RHS) const { return !((*this) == RHS); } /// \brief Inequality operator. /// /// Compares this APInt with a uint64_t for the validity of the inequality /// relationship. /// /// \returns true if *this != Val bool operator!=(uint64_t Val) const { return !((*this) == Val); } /// \brief Inequality comparison /// /// Compares this APInt with RHS for the validity of the inequality /// relationship. /// /// \returns true if *this != Val bool ne(const APInt &RHS) const { return !((*this) == RHS); } /// \brief Unsigned less than comparison /// /// Regards both *this and RHS as unsigned quantities and compares them for /// the validity of the less-than relationship. /// /// \returns true if *this < RHS when both are considered unsigned. bool ult(const APInt &RHS) const; /// \brief Unsigned less than comparison /// /// Regards both *this as an unsigned quantity and compares it with RHS for /// the validity of the less-than relationship. /// /// \returns true if *this < RHS when considered unsigned. bool ult(uint64_t RHS) const { return getActiveBits() > 64 ? false : getZExtValue() < RHS; } /// \brief Signed less than comparison /// /// Regards both *this and RHS as signed quantities and compares them for /// validity of the less-than relationship. /// /// \returns true if *this < RHS when both are considered signed. bool slt(const APInt &RHS) const; /// \brief Signed less than comparison /// /// Regards both *this as a signed quantity and compares it with RHS for /// the validity of the less-than relationship. /// /// \returns true if *this < RHS when considered signed. bool slt(int64_t RHS) const { return getMinSignedBits() > 64 ? isNegative() : getSExtValue() < RHS; } /// \brief Unsigned less or equal comparison /// /// Regards both *this and RHS as unsigned quantities and compares them for /// validity of the less-or-equal relationship. 
/// /// \returns true if *this <= RHS when both are considered unsigned. bool ule(const APInt &RHS) const { return ult(RHS) || eq(RHS); } /// \brief Unsigned less or equal comparison /// /// Regards both *this as an unsigned quantity and compares it with RHS for /// the validity of the less-or-equal relationship. /// /// \returns true if *this <= RHS when considered unsigned. bool ule(uint64_t RHS) const { return !ugt(RHS); } /// \brief Signed less or equal comparison /// /// Regards both *this and RHS as signed quantities and compares them for /// validity of the less-or-equal relationship. /// /// \returns true if *this <= RHS when both are considered signed. bool sle(const APInt &RHS) const { return slt(RHS) || eq(RHS); } /// \brief Signed less or equal comparison /// /// Regards both *this as a signed quantity and compares it with RHS for the /// validity of the less-or-equal relationship. /// /// \returns true if *this <= RHS when considered signed. bool sle(uint64_t RHS) const { return !sgt(RHS); } /// \brief Unsigned greather than comparison /// /// Regards both *this and RHS as unsigned quantities and compares them for /// the validity of the greater-than relationship. /// /// \returns true if *this > RHS when both are considered unsigned. bool ugt(const APInt &RHS) const { return !ult(RHS) && !eq(RHS); } /// \brief Unsigned greater than comparison /// /// Regards both *this as an unsigned quantity and compares it with RHS for /// the validity of the greater-than relationship. /// /// \returns true if *this > RHS when considered unsigned. bool ugt(uint64_t RHS) const { return getActiveBits() > 64 ? true : getZExtValue() > RHS; } /// \brief Signed greather than comparison /// /// Regards both *this and RHS as signed quantities and compares them for the /// validity of the greater-than relationship. /// /// \returns true if *this > RHS when both are considered signed. 
bool sgt(const APInt &RHS) const { return !slt(RHS) && !eq(RHS); } /// \brief Signed greater than comparison /// /// Regards both *this as a signed quantity and compares it with RHS for /// the validity of the greater-than relationship. /// /// \returns true if *this > RHS when considered signed. bool sgt(int64_t RHS) const { return getMinSignedBits() > 64 ? !isNegative() : getSExtValue() > RHS; } /// \brief Unsigned greater or equal comparison /// /// Regards both *this and RHS as unsigned quantities and compares them for /// validity of the greater-or-equal relationship. /// /// \returns true if *this >= RHS when both are considered unsigned. bool uge(const APInt &RHS) const { return !ult(RHS); } /// \brief Unsigned greater or equal comparison /// /// Regards both *this as an unsigned quantity and compares it with RHS for /// the validity of the greater-or-equal relationship. /// /// \returns true if *this >= RHS when considered unsigned. bool uge(uint64_t RHS) const { return !ult(RHS); } /// \brief Signed greather or equal comparison /// /// Regards both *this and RHS as signed quantities and compares them for /// validity of the greater-or-equal relationship. /// /// \returns true if *this >= RHS when both are considered signed. bool sge(const APInt &RHS) const { return !slt(RHS); } /// \brief Signed greater or equal comparison /// /// Regards both *this as a signed quantity and compares it with RHS for /// the validity of the greater-or-equal relationship. /// /// \returns true if *this >= RHS when considered signed. bool sge(int64_t RHS) const { return !slt(RHS); } /// This operation tests if there are any pairs of corresponding bits /// between this APInt and RHS that are both set. bool intersects(const APInt &RHS) const { return (*this & RHS) != 0; } /// @} /// \name Resizing Operators /// @{ /// \brief Truncate to new width. /// /// Truncate the APInt to a specified width. 
/// It is an error to specify a width
/// that is greater than or equal to the current width.
APInt LLVM_ATTRIBUTE_UNUSED_RESULT trunc(unsigned width) const;

/// \brief Sign extend to a new width.
///
/// This operation sign extends the APInt to a new width. If the high order
/// bit is set, the fill on the left will be done with 1 bits, otherwise zero.
/// It is an error to specify a width that is less than or equal to the
/// current width.
APInt LLVM_ATTRIBUTE_UNUSED_RESULT sext(unsigned width) const;

/// \brief Zero extend to a new width.
///
/// This operation zero extends the APInt to a new width. The high order bits
/// are filled with 0 bits. It is an error to specify a width that is less
/// than or equal to the current width.
APInt LLVM_ATTRIBUTE_UNUSED_RESULT zext(unsigned width) const;

/// \brief Sign extend or truncate to width
///
/// Make this APInt have the bit width given by \p width. The value is sign
/// extended, truncated, or left alone to make it that width.
APInt LLVM_ATTRIBUTE_UNUSED_RESULT sextOrTrunc(unsigned width) const;

/// \brief Zero extend or truncate to width
///
/// Make this APInt have the bit width given by \p width. The value is zero
/// extended, truncated, or left alone to make it that width.
APInt LLVM_ATTRIBUTE_UNUSED_RESULT zextOrTrunc(unsigned width) const;

/// \brief Sign extend to width (never truncates)
///
/// Make this APInt have the bit width given by \p width. The value is sign
/// extended, or left alone to make it that width.
APInt LLVM_ATTRIBUTE_UNUSED_RESULT sextOrSelf(unsigned width) const;

/// \brief Zero extend to width (never truncates)
///
/// Make this APInt have the bit width given by \p width. The value is zero
/// extended, or left alone to make it that width.
APInt LLVM_ATTRIBUTE_UNUSED_RESULT zextOrSelf(unsigned width) const;

/// @}
/// \name Bit Manipulation Operators
/// @{

/// \brief Set every bit to 1.
void setAllBits() {
  if (isSingleWord())
    VAL = UINT64_MAX;
  else {
    // Set all the bits in all the words.
    for (unsigned i = 0; i < getNumWords(); ++i)
      pVal[i] = UINT64_MAX;
  }
  // Clear the unused ones
  clearUnusedBits();
}

/// \brief Set a given bit to 1.
///
/// Set the given bit to 1 whose position is given as "bitPosition".
void setBit(unsigned bitPosition);

/// \brief Set every bit to 0.
void clearAllBits() {
  if (isSingleWord())
    VAL = 0;
  else
    memset(pVal, 0, getNumWords() * APINT_WORD_SIZE);
}

/// \brief Set a given bit to 0.
///
/// Set the given bit to 0 whose position is given as "bitPosition".
void clearBit(unsigned bitPosition);

/// \brief Toggle every bit to its opposite value.
void flipAllBits() {
  if (isSingleWord())
    VAL ^= UINT64_MAX;
  else {
    for (unsigned i = 0; i < getNumWords(); ++i)
      pVal[i] ^= UINT64_MAX;
  }
  // XOR with all-ones sets the padding bits above BitWidth; mask them off.
  clearUnusedBits();
}

/// \brief Toggles a given bit to its opposite value.
///
/// Toggle a given bit to its opposite value whose position is given
/// as "bitPosition".
void flipBit(unsigned bitPosition);

/// @}
/// \name Value Characterization Functions
/// @{

/// \brief Return the number of bits in the APInt.
unsigned getBitWidth() const { return BitWidth; }

/// \brief Get the number of words.
///
/// Here one word's bitwidth equals to that of uint64_t.
///
/// \returns the number of words to hold the integer value of this APInt.
unsigned getNumWords() const { return getNumWords(BitWidth); }

/// \brief Get the number of words.
///
/// *NOTE* Here one word's bitwidth equals to that of uint64_t.
///
/// \returns the number of words to hold the integer value with a given bit
/// width.
static unsigned getNumWords(unsigned BitWidth) {
  // Round up; the uint64_t cast avoids overflow for BitWidth near UINT_MAX.
  return ((uint64_t)BitWidth + APINT_BITS_PER_WORD - 1) / APINT_BITS_PER_WORD;
}

/// \brief Compute the number of active bits in the value
///
/// This function returns the number of active bits which is defined as the
/// bit width minus the number of leading zeros. This is used in several
/// computations to see how "wide" the value is.
unsigned getActiveBits() const { return BitWidth - countLeadingZeros(); }

/// \brief Compute the number of active words in the value of this APInt.
///
/// This is used in conjunction with getActiveData to extract the raw value of
/// the APInt.
unsigned getActiveWords() const {
  unsigned numActiveBits = getActiveBits();
  // A zero value still occupies one word.
  return numActiveBits ? whichWord(numActiveBits - 1) + 1 : 1;
}

/// \brief Get the minimum bit size for this signed APInt
///
/// Computes the minimum bit width for this APInt while considering it to be a
/// signed (and probably negative) value. If the value is not negative, this
/// function returns the same value as getActiveBits()+1. Otherwise, it
/// returns the smallest bit width that will retain the negative value. For
/// example, -1 can be written as 0b1 or 0xFFFFFFFFFF. 0b1 is shorter and so
/// for -1, this function will always return 1.
unsigned getMinSignedBits() const {
  if (isNegative())
    return BitWidth - countLeadingOnes() + 1;
  return getActiveBits() + 1;
}

/// \brief Get zero extended value
///
/// This method attempts to return the value of this APInt as a zero extended
/// uint64_t. The bitwidth must be <= 64 or the value must fit within a
/// uint64_t. Otherwise an assertion will result.
uint64_t getZExtValue() const {
  if (isSingleWord())
    return VAL;
  assert(getActiveBits() <= 64 && "Too many bits for uint64_t");
  return pVal[0];
}

/// \brief Get sign extended value
///
/// This method attempts to return the value of this APInt as a sign extended
/// int64_t. The bit width must be <= 64 or the value must fit within an
/// int64_t. Otherwise an assertion will result.
int64_t getSExtValue() const {
  if (isSingleWord())
    // Shift the value up so its sign bit lands in bit 63, then shift back
    // arithmetically to replicate the sign into the high bits.
    return int64_t(VAL << (APINT_BITS_PER_WORD - BitWidth)) >>
           (APINT_BITS_PER_WORD - BitWidth);
  assert(getMinSignedBits() <= 64 && "Too many bits for int64_t");
  return int64_t(pVal[0]);
}

/// \brief Get bits required for string value.
/// /// This method determines how many bits are required to hold the APInt /// equivalent of the string given by \p str. static unsigned getBitsNeeded(StringRef str, uint8_t radix); /// \brief The APInt version of the countLeadingZeros functions in /// MathExtras.h. /// /// It counts the number of zeros from the most significant bit to the first /// one bit. /// /// \returns BitWidth if the value is zero, otherwise returns the number of /// zeros from the most significant bit to the first one bits. unsigned countLeadingZeros() const { if (isSingleWord()) { unsigned unusedBits = APINT_BITS_PER_WORD - BitWidth; return llvm::countLeadingZeros(VAL) - unusedBits; } return countLeadingZerosSlowCase(); } /// \brief Count the number of leading one bits. /// /// This function is an APInt version of the countLeadingOnes /// functions in MathExtras.h. It counts the number of ones from the most /// significant bit to the first zero bit. /// /// \returns 0 if the high order bit is not set, otherwise returns the number /// of 1 bits from the most significant to the least unsigned countLeadingOnes() const; /// Computes the number of leading bits of this APInt that are equal to its /// sign bit. unsigned getNumSignBits() const { return isNegative() ? countLeadingOnes() : countLeadingZeros(); } /// \brief Count the number of trailing zero bits. /// /// This function is an APInt version of the countTrailingZeros /// functions in MathExtras.h. It counts the number of zeros from the least /// significant bit to the first set bit. /// /// \returns BitWidth if the value is zero, otherwise returns the number of /// zeros from the least significant bit to the first one bit. unsigned countTrailingZeros() const; /// \brief Count the number of trailing one bits. /// /// This function is an APInt version of the countTrailingOnes /// functions in MathExtras.h. It counts the number of ones from the least /// significant bit to the first zero bit. 
/// /// \returns BitWidth if the value is all ones, otherwise returns the number /// of ones from the least significant bit to the first zero bit. unsigned countTrailingOnes() const { if (isSingleWord()) return llvm::countTrailingOnes(VAL); return countTrailingOnesSlowCase(); } /// \brief Count the number of bits set. /// /// This function is an APInt version of the countPopulation functions /// in MathExtras.h. It counts the number of 1 bits in the APInt value. /// /// \returns 0 if the value is zero, otherwise returns the number of set bits. unsigned countPopulation() const { if (isSingleWord()) return llvm::countPopulation(VAL); return countPopulationSlowCase(); } /// @} /// \name Conversion Functions /// @{ void print(raw_ostream &OS, bool isSigned) const; /// Converts an APInt to a string and append it to Str. Str is commonly a /// SmallString. void toString(SmallVectorImpl<char> &Str, unsigned Radix, bool Signed, bool formatAsCLiteral = false) const; /// Considers the APInt to be unsigned and converts it into a string in the /// radix given. The radix can be 2, 8, 10 16, or 36. void toStringUnsigned(SmallVectorImpl<char> &Str, unsigned Radix = 10) const { toString(Str, Radix, false, false); } /// Considers the APInt to be signed and converts it into a string in the /// radix given. The radix can be 2, 8, 10, 16, or 36. void toStringSigned(SmallVectorImpl<char> &Str, unsigned Radix = 10) const { toString(Str, Radix, true, false); } /// \brief Return the APInt as a std::string. /// /// Note that this is an inefficient method. It is better to pass in a /// SmallVector/SmallString to the methods above to avoid thrashing the heap /// for the string. std::string toString(unsigned Radix, bool Signed) const; /// \returns a byte-swapped representation of this APInt Value. APInt LLVM_ATTRIBUTE_UNUSED_RESULT byteSwap() const; /// \brief Converts this APInt to a double value. 
  double roundToDouble(bool isSigned) const;

  /// \brief Converts this unsigned APInt to a double value.
  double roundToDouble() const { return roundToDouble(false); }

  /// \brief Converts this signed APInt to a double value.
  double signedRoundToDouble() const { return roundToDouble(true); }

  /// \brief Converts APInt bits to a double
  ///
  /// The conversion does not do a translation from integer to double, it just
  /// re-interprets the bits as a double. Note that it is valid to do this on
  /// any bit width. Exactly 64 bits will be translated.
  double bitsToDouble() const {
    // NOTE(review): uses union type punning; relies on compiler-supported
    // behavior rather than strict ISO C++.
    union {
      uint64_t I;
      double D;
    } T;
    T.I = (isSingleWord() ? VAL : pVal[0]);
    return T.D;
  }

  /// \brief Converts APInt bits to a float
  ///
  /// The conversion does not do a translation from integer to float, it just
  /// re-interprets the bits as a float. Note that it is valid to do this on
  /// any bit width. Exactly 32 bits will be translated.
  float bitsToFloat() const {
    union {
      unsigned I;
      float F;
    } T;
    T.I = unsigned((isSingleWord() ? VAL : pVal[0]));
    return T.F;
  }

  /// \brief Converts a double to APInt bits.
  ///
  /// The conversion does not do a translation from double to integer, it just
  /// re-interprets the bits of the double.
  static APInt LLVM_ATTRIBUTE_UNUSED_RESULT doubleToBits(double V) {
    union {
      uint64_t I;
      double D;
    } T;
    T.D = V;
    return APInt(sizeof T * CHAR_BIT, T.I);
  }

  /// \brief Converts a float to APInt bits.
  ///
  /// The conversion does not do a translation from float to integer, it just
  /// re-interprets the bits of the float.
  static APInt LLVM_ATTRIBUTE_UNUSED_RESULT floatToBits(float V) {
    union {
      unsigned I;
      float F;
    } T;
    T.F = V;
    return APInt(sizeof T * CHAR_BIT, T.I);
  }

  /// @}
  /// \name Mathematics Operations
  /// @{

  /// \returns the floor log base 2 of this APInt.
  /// NOTE(review): for a zero value countLeadingZeros() == BitWidth, so the
  /// subtraction wraps; callers presumably guarantee a non-zero value.
  unsigned logBase2() const { return BitWidth - 1 - countLeadingZeros(); }

  /// \returns the ceil log base 2 of this APInt.
  unsigned ceilLogBase2() const {
    return BitWidth - (*this - 1).countLeadingZeros();
  }

  /// \returns the nearest log base 2 of this APInt. Ties round up.
  ///
  /// NOTE: When we have a BitWidth of 1, we define:
  ///
  ///   log2(0) = UINT32_MAX
  ///   log2(1) = 0
  ///
  /// to get around any mathematical concerns resulting from
  /// referencing 2 in a space where 2 does not exist.
  unsigned nearestLogBase2() const {
    // Special case when we have a bitwidth of 1. If VAL is 1, then we
    // get 0. If VAL is 0, we get UINT64_MAX which gets truncated to
    // UINT32_MAX.
    if (BitWidth == 1)
      return VAL - 1;

    // Handle the zero case.
    if (!getBoolValue())
      return UINT32_MAX;

    // The non-zero case is handled by computing:
    //
    //   nearestLogBase2(x) = logBase2(x) + x[logBase2(x)-1].
    //
    // where x[i] is referring to the value of the ith bit of x.
    unsigned lg = logBase2();
    return lg + unsigned((*this)[lg - 1]);
  }

  /// \returns the log base 2 of this APInt if its an exact power of two, -1
  /// otherwise
  int32_t exactLogBase2() const {
    if (!isPowerOf2())
      return -1;
    return logBase2();
  }

  /// \brief Compute the square root
  APInt LLVM_ATTRIBUTE_UNUSED_RESULT sqrt() const;

  /// \brief Get the absolute value;
  ///
  /// If *this is < 0 then return -(*this), otherwise *this;
  APInt LLVM_ATTRIBUTE_UNUSED_RESULT abs() const {
    if (isNegative())
      return -(*this);
    return *this;
  }

  /// \returns the multiplicative inverse for a given modulo.
  APInt multiplicativeInverse(const APInt &modulo) const;

  /// @}
  /// \name Support for division by constant
  /// @{

  /// Calculate the magic number for signed division by a constant.
  struct ms;
  ms magic() const;

  /// Calculate the magic number for unsigned division by a constant.
  struct mu;
  mu magicu(unsigned LeadingZeros = 0) const;

  /// @}
  /// \name Building-block Operations for APInt and APFloat
  /// @{

  // These building block operations operate on a representation of arbitrary
  // precision, two's-complement, bignum integer values.
// They should be
  // sufficient to implement APInt and APFloat bignum requirements. Inputs are
  // generally a pointer to the base of an array of integer parts, representing
  // an unsigned bignum, and a count of how many parts there are.

  /// Sets the least significant part of a bignum to the input value, and zeroes
  /// out higher parts.
  static void tcSet(integerPart *, integerPart, unsigned int);

  /// Assign one bignum to another.
  static void tcAssign(integerPart *, const integerPart *, unsigned int);

  /// Returns true if a bignum is zero, false otherwise.
  static bool tcIsZero(const integerPart *, unsigned int);

  /// Extract the given bit of a bignum; returns 0 or 1. Zero-based.
  static int tcExtractBit(const integerPart *, unsigned int bit);

  /// Copy the bit vector of width srcBITS from SRC, starting at bit srcLSB, to
  /// DST, of dstCOUNT parts, such that the bit srcLSB becomes the least
  /// significant bit of DST. All high bits above srcBITS in DST are
  /// zero-filled.
  static void tcExtract(integerPart *, unsigned int dstCount,
                        const integerPart *, unsigned int srcBits,
                        unsigned int srcLSB);

  /// Set the given bit of a bignum. Zero-based.
  static void tcSetBit(integerPart *, unsigned int bit);

  /// Clear the given bit of a bignum. Zero-based.
  static void tcClearBit(integerPart *, unsigned int bit);

  /// Returns the bit number of the least or most significant set bit of a
  /// number. If the input number has no bits set -1U is returned.
  static unsigned int tcLSB(const integerPart *, unsigned int);
  static unsigned int tcMSB(const integerPart *parts, unsigned int n);

  /// Negate a bignum in-place.
  static void tcNegate(integerPart *, unsigned int);

  /// DST += RHS + CARRY where CARRY is zero or one. Returns the carry flag.
  static integerPart tcAdd(integerPart *, const integerPart *,
                           integerPart carry, unsigned);

  /// DST -= RHS + CARRY where CARRY is zero or one. Returns the carry flag.
  static integerPart tcSubtract(integerPart *, const integerPart *,
                                integerPart carry, unsigned);

  /// DST += SRC * MULTIPLIER + PART if add is true
  /// DST  = SRC * MULTIPLIER + PART if add is false
  ///
  /// Requires 0 <= DSTPARTS <= SRCPARTS + 1. If DST overlaps SRC they must
  /// start at the same point, i.e. DST == SRC.
  ///
  /// If DSTPARTS == SRC_PARTS + 1 no overflow occurs and zero is returned.
  /// Otherwise DST is filled with the least significant DSTPARTS parts of the
  /// result, and if all of the omitted higher parts were zero return zero,
  /// otherwise overflow occurred and return one.
  static int tcMultiplyPart(integerPart *dst, const integerPart *src,
                            integerPart multiplier, integerPart carry,
                            unsigned int srcParts, unsigned int dstParts,
                            bool add);

  /// DST = LHS * RHS, where DST has the same width as the operands and is
  /// filled with the least significant parts of the result. Returns one if
  /// overflow occurred, otherwise zero. DST must be disjoint from both
  /// operands.
  static int tcMultiply(integerPart *, const integerPart *, const integerPart *,
                        unsigned);

  /// DST = LHS * RHS, where DST has width the sum of the widths of the
  /// operands. No overflow occurs. DST must be disjoint from both
  /// operands. Returns the number of parts required to hold the result.
  static unsigned int tcFullMultiply(integerPart *, const integerPart *,
                                     const integerPart *, unsigned, unsigned);

  /// If RHS is zero LHS and REMAINDER are left unchanged, return one.
  /// Otherwise set LHS to LHS / RHS with the fractional part discarded, set
  /// REMAINDER to the remainder, return zero. i.e.
  ///
  ///   OLD_LHS = RHS * LHS + REMAINDER
  ///
  /// SCRATCH is a bignum of the same size as the operands and result for use by
  /// the routine; its contents need not be initialized and are destroyed. LHS,
  /// REMAINDER and SCRATCH must be distinct.
  static int tcDivide(integerPart *lhs, const integerPart *rhs,
                      integerPart *remainder, integerPart *scratch,
                      unsigned int parts);

  /// Shift a bignum left COUNT bits. Shifted in bits are zero. There are no
  /// restrictions on COUNT.
  static void tcShiftLeft(integerPart *, unsigned int parts,
                          unsigned int count);

  /// Shift a bignum right COUNT bits. Shifted in bits are zero. There are no
  /// restrictions on COUNT.
  static void tcShiftRight(integerPart *, unsigned int parts,
                           unsigned int count);

  /// The obvious AND, OR and XOR and complement operations.
  static void tcAnd(integerPart *, const integerPart *, unsigned int);
  static void tcOr(integerPart *, const integerPart *, unsigned int);
  static void tcXor(integerPart *, const integerPart *, unsigned int);
  static void tcComplement(integerPart *, unsigned int);

  /// Comparison (unsigned) of two bignums.
  static int tcCompare(const integerPart *, const integerPart *, unsigned int);

  /// Increment a bignum in-place. Return the carry flag.
  static integerPart tcIncrement(integerPart *, unsigned int);

  /// Decrement a bignum in-place. Return the borrow flag.
  static integerPart tcDecrement(integerPart *, unsigned int);

  /// Set the least significant BITS and clear the rest.
  static void tcSetLeastSignificantBits(integerPart *, unsigned int,
                                        unsigned int bits);

  /// \brief debug method
  void dump() const;

  /// @}
};

/// Magic data for optimising signed division by a constant.
struct APInt::ms {
  APInt m;    ///< magic number
  unsigned s; ///< shift amount
};

/// Magic data for optimising unsigned division by a constant.
struct APInt::mu {
  APInt m;    ///< magic number
  bool a;     ///< add indicator
  unsigned s; ///< shift amount
};

inline bool operator==(uint64_t V1, const APInt &V2) { return V2 == V1; }

inline bool operator!=(uint64_t V1, const APInt &V2) { return V2 != V1; }

// Streams the value in signed decimal form.
inline raw_ostream &operator<<(raw_ostream &OS, const APInt &I) {
  I.print(OS, true);
  return OS;
}

namespace APIntOps {

/// \brief Determine the smaller of two APInts considered to be signed.
inline APInt smin(const APInt &A, const APInt &B) { return A.slt(B) ? A : B; }

/// \brief Determine the larger of two APInts considered to be signed.
inline APInt smax(const APInt &A, const APInt &B) { return A.sgt(B) ? A : B; }

/// \brief Determine the smaller of two APInts considered to be unsigned.
inline APInt umin(const APInt &A, const APInt &B) { return A.ult(B) ? A : B; }

/// \brief Determine the larger of two APInts considered to be unsigned.
inline APInt umax(const APInt &A, const APInt &B) { return A.ugt(B) ? A : B; }

/// \brief Check if the specified APInt has a N-bits unsigned integer value.
inline bool isIntN(unsigned N, const APInt &APIVal) { return APIVal.isIntN(N); }

/// \brief Check if the specified APInt has a N-bits signed integer value.
inline bool isSignedIntN(unsigned N, const APInt &APIVal) {
  return APIVal.isSignedIntN(N);
}

/// \returns true if the argument APInt value is a sequence of ones starting at
/// the least significant bit with the remainder zero.
inline bool isMask(unsigned numBits, const APInt &APIVal) {
  return numBits <= APIVal.getBitWidth() &&
         APIVal == APInt::getLowBitsSet(APIVal.getBitWidth(), numBits);
}

/// \brief Return true if the argument APInt value contains a sequence of ones
/// with the remainder zero.
inline bool isShiftedMask(unsigned numBits, const APInt &APIVal) {
  // (V - 1) | V fills in the trailing zeros below the mask; the result is a
  // plain low-bit mask exactly when V was a shifted mask.
  return isMask(numBits, (APIVal - APInt(numBits, 1)) | APIVal);
}

/// \brief Returns a byte-swapped representation of the specified APInt Value.
inline APInt byteSwap(const APInt &APIVal) { return APIVal.byteSwap(); }

/// \brief Returns the floor log base 2 of the specified APInt value.
inline unsigned logBase2(const APInt &APIVal) { return APIVal.logBase2(); }

/// \brief Compute GCD of two APInt values.
///
/// This function returns the greatest common divisor of the two APInt values
/// using Euclid's algorithm.
///
/// \returns the greatest common divisor of Val1 and Val2
APInt GreatestCommonDivisor(const APInt &Val1, const APInt &Val2);

/// \brief Converts the given APInt to a double value.
///
/// Treats the APInt as an unsigned value for conversion purposes.
inline double RoundAPIntToDouble(const APInt &APIVal) {
  return APIVal.roundToDouble();
}

/// \brief Converts the given APInt to a double value.
///
/// Treats the APInt as a signed value for conversion purposes.
inline double RoundSignedAPIntToDouble(const APInt &APIVal) {
  return APIVal.signedRoundToDouble();
}

/// \brief Converts the given APInt to a float value.
inline float RoundAPIntToFloat(const APInt &APIVal) {
  return float(RoundAPIntToDouble(APIVal));
}

/// \brief Converts the given APInt to a float value.
///
/// Treats the APInt as a signed value for conversion purposes.
inline float RoundSignedAPIntToFloat(const APInt &APIVal) {
  return float(APIVal.signedRoundToDouble());
}

/// \brief Converts the given double value into a APInt.
///
/// This function converts a double value to an APInt value.
APInt RoundDoubleToAPInt(double Double, unsigned width);

/// \brief Converts a float value into a APInt.
///
/// Converts a float value into an APInt value.
inline APInt RoundFloatToAPInt(float Float, unsigned width) {
  return RoundDoubleToAPInt(double(Float), width);
}

/// \brief Arithmetic right-shift function.
///
/// Arithmetic right-shift the APInt by shiftAmt.
inline APInt ashr(const APInt &LHS, unsigned shiftAmt) {
  return LHS.ashr(shiftAmt);
}

/// \brief Logical right-shift function.
///
/// Logical right-shift the APInt by shiftAmt.
inline APInt lshr(const APInt &LHS, unsigned shiftAmt) {
  return LHS.lshr(shiftAmt);
}

/// \brief Left-shift function.
///
/// Left-shift the APInt by shiftAmt.
inline APInt shl(const APInt &LHS, unsigned shiftAmt) {
  return LHS.shl(shiftAmt);
}

/// \brief Signed division function for APInt.
///
/// Signed divide APInt LHS by APInt RHS.
inline APInt sdiv(const APInt &LHS, const APInt &RHS) { return LHS.sdiv(RHS); }

/// \brief Unsigned division function for APInt.
///
/// Unsigned divide APInt LHS by APInt RHS.
inline APInt udiv(const APInt &LHS, const APInt &RHS) { return LHS.udiv(RHS); }

/// \brief Function for signed remainder operation.
///
/// Signed remainder operation on APInt.
inline APInt srem(const APInt &LHS, const APInt &RHS) { return LHS.srem(RHS); }

/// \brief Function for unsigned remainder operation.
///
/// Unsigned remainder operation on APInt.
inline APInt urem(const APInt &LHS, const APInt &RHS) { return LHS.urem(RHS); }

/// \brief Function for multiplication operation.
///
/// Performs multiplication on APInt values.
inline APInt mul(const APInt &LHS, const APInt &RHS) { return LHS * RHS; }

/// \brief Function for addition operation.
///
/// Performs addition on APInt values.
inline APInt add(const APInt &LHS, const APInt &RHS) { return LHS + RHS; }

/// \brief Function for subtraction operation.
///
/// Performs subtraction on APInt values.
inline APInt sub(const APInt &LHS, const APInt &RHS) { return LHS - RHS; }

// The bitwise helpers below are capitalized because 'and', 'or', 'xor' and
// 'not' are reserved alternative tokens in C++ and cannot be used as names.

/// \brief Bitwise AND function for APInt.
///
/// Performs bitwise AND operation on APInt LHS and
/// APInt RHS.
inline APInt And(const APInt &LHS, const APInt &RHS) { return LHS & RHS; }

/// \brief Bitwise OR function for APInt.
///
/// Performs bitwise OR operation on APInt LHS and APInt RHS.
inline APInt Or(const APInt &LHS, const APInt &RHS) { return LHS | RHS; }

/// \brief Bitwise XOR function for APInt.
///
/// Performs bitwise XOR operation on APInt.
inline APInt Xor(const APInt &LHS, const APInt &RHS) { return LHS ^ RHS; }

/// \brief Bitwise complement function.
///
/// Performs a bitwise complement operation on APInt.
inline APInt Not(const APInt &APIVal) { return ~APIVal; }

} // End of APIntOps namespace

// See friend declaration above. This additional declaration is required in
// order to compile LLVM with IBM xlC compiler.
hash_code hash_value(const APInt &Arg);
} // End of llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/iterator.h
//===- iterator.h - Utilities for using and defining iterators --*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_ITERATOR_H #define LLVM_ADT_ITERATOR_H #include <cstddef> #include <iterator> namespace llvm { /// \brief CRTP base class which implements the entire standard iterator facade /// in terms of a minimal subset of the interface. /// /// Use this when it is reasonable to implement most of the iterator /// functionality in terms of a core subset. If you need special behavior or /// there are performance implications for this, you may want to override the /// relevant members instead. /// /// Note, one abstraction that this does *not* provide is implementing /// subtraction in terms of addition by negating the difference. Negation isn't /// always information preserving, and I can see very reasonable iterator /// designs where this doesn't work well. It doesn't really force much added /// boilerplate anyways. /// /// Another abstraction that this doesn't provide is implementing increment in /// terms of addition of one. These aren't equivalent for all iterator /// categories, and respecting that adds a lot of complexity for little gain. 
template <typename DerivedT, typename IteratorCategoryT, typename T,
          typename DifferenceTypeT = std::ptrdiff_t, typename PointerT = T *,
          typename ReferenceT = T &>
class iterator_facade_base {
public:
  using iterator_category = IteratorCategoryT;
  using value_type = T;
  using difference_type = DifferenceTypeT;
  using pointer = PointerT;
  using reference = ReferenceT;

protected:
  // Capabilities implied by the requested iterator category; the operators
  // below are statically restricted to the categories that support them.
  enum {
    IsRandomAccess = std::is_base_of<std::random_access_iterator_tag,
                                     IteratorCategoryT>::value,
    IsBidirectional = std::is_base_of<std::bidirectional_iterator_tag,
                                      IteratorCategoryT>::value,
  };

public:
  DerivedT operator+(DifferenceTypeT n) const {
    static_assert(
        IsRandomAccess,
        "The '+' operator is only defined for random access iterators.");
    DerivedT tmp = *static_cast<const DerivedT *>(this);
    tmp += n;
    return tmp;
  }
  friend DerivedT operator+(DifferenceTypeT n, const DerivedT &i) {
    static_assert(
        IsRandomAccess,
        "The '+' operator is only defined for random access iterators.");
    return i + n;
  }
  DerivedT operator-(DifferenceTypeT n) const {
    static_assert(
        IsRandomAccess,
        "The '-' operator is only defined for random access iterators.");
    DerivedT tmp = *static_cast<const DerivedT *>(this);
    tmp -= n;
    return tmp;
  }

  // Note: pre-increment is implemented in terms of the derived class's
  // operator+=; derived iterators that cannot provide += must override it.
  DerivedT &operator++() {
    return static_cast<DerivedT *>(this)->operator+=(1);
  }
  DerivedT operator++(int) {
    DerivedT tmp = *static_cast<DerivedT *>(this);
    ++*static_cast<DerivedT *>(this);
    return tmp;
  }
  DerivedT &operator--() {
    static_assert(
        IsBidirectional,
        "The decrement operator is only defined for bidirectional iterators.");
    return static_cast<DerivedT *>(this)->operator-=(1);
  }
  DerivedT operator--(int) {
    static_assert(
        IsBidirectional,
        "The decrement operator is only defined for bidirectional iterators.");
    DerivedT tmp = *static_cast<DerivedT *>(this);
    --*static_cast<DerivedT *>(this);
    return tmp;
  }

  bool operator!=(const DerivedT &RHS) const {
    return !static_cast<const DerivedT *>(this)->operator==(RHS);
  }

  bool operator>(const DerivedT &RHS) const {
    static_assert(
        IsRandomAccess,
        "Relational operators are only defined for random access iterators.");
    return !static_cast<const DerivedT *>(this)->operator<(RHS) &&
           !static_cast<const DerivedT *>(this)->operator==(RHS);
  }
  bool operator<=(const DerivedT &RHS) const {
    static_assert(
        IsRandomAccess,
        "Relational operators are only defined for random access iterators.");
    return !static_cast<const DerivedT *>(this)->operator>(RHS);
  }
  bool operator>=(const DerivedT &RHS) const {
    static_assert(
        IsRandomAccess,
        "Relational operators are only defined for random access iterators.");
    return !static_cast<const DerivedT *>(this)->operator<(RHS);
  }

  PointerT operator->() const {
    return &static_cast<const DerivedT *>(this)->operator*();
  }
  ReferenceT operator[](DifferenceTypeT n) const {
    static_assert(IsRandomAccess,
                  "Subscripting is only defined for random access iterators.");
    return *static_cast<const DerivedT *>(this)->operator+(n);
  }
};

/// \brief CRTP base class for adapting an iterator to a different type.
///
/// This class can be used through CRTP to adapt one iterator into another.
/// Typically this is done through providing in the derived class a custom \c
/// operator* implementation. Other methods can be overridden as well.
template <
    typename DerivedT, typename WrappedIteratorT,
    typename IteratorCategoryT =
        typename std::iterator_traits<WrappedIteratorT>::iterator_category,
    typename T = typename std::iterator_traits<WrappedIteratorT>::value_type,
    typename DifferenceTypeT =
        typename std::iterator_traits<WrappedIteratorT>::difference_type,
    typename PointerT = T *, typename ReferenceT = T &,
    // Don't provide these, they are mostly to act as aliases below.
    typename WrappedTraitsT = std::iterator_traits<WrappedIteratorT>>
class iterator_adaptor_base
    : public iterator_facade_base<DerivedT, IteratorCategoryT, T,
                                  DifferenceTypeT, PointerT, ReferenceT> {
  typedef typename iterator_adaptor_base::iterator_facade_base BaseT;

protected:
  // The underlying iterator being adapted.
  WrappedIteratorT I;

  iterator_adaptor_base() = default;

  explicit iterator_adaptor_base(WrappedIteratorT u) : I(std::move(u)) {}

  const WrappedIteratorT &wrapped() const { return I; }

public:
  typedef DifferenceTypeT difference_type;

  DerivedT &operator+=(difference_type n) {
    static_assert(
        BaseT::IsRandomAccess,
        "The '+=' operator is only defined for random access iterators.");
    I += n;
    return *static_cast<DerivedT *>(this);
  }
  DerivedT &operator-=(difference_type n) {
    static_assert(
        BaseT::IsRandomAccess,
        "The '-=' operator is only defined for random access iterators.");
    I -= n;
    return *static_cast<DerivedT *>(this);
  }
  using BaseT::operator-;
  difference_type operator-(const DerivedT &RHS) const {
    static_assert(
        BaseT::IsRandomAccess,
        "The '-' operator is only defined for random access iterators.");
    return I - RHS.I;
  }

  // We have to explicitly provide ++ and -- rather than letting the facade
  // forward to += because WrappedIteratorT might not support +=.
  using BaseT::operator++;
  DerivedT &operator++() {
    ++I;
    return *static_cast<DerivedT *>(this);
  }
  using BaseT::operator--;
  DerivedT &operator--() {
    static_assert(
        BaseT::IsBidirectional,
        "The decrement operator is only defined for bidirectional iterators.");
    --I;
    return *static_cast<DerivedT *>(this);
  }

  bool operator==(const DerivedT &RHS) const { return I == RHS.I; }
  bool operator<(const DerivedT &RHS) const {
    static_assert(
        BaseT::IsRandomAccess,
        "Relational operators are only defined for random access iterators.");
    return I < RHS.I;
  }

  ReferenceT operator*() const { return *I; }
};

/// \brief An iterator type that allows iterating over the pointees via some
/// /// The typical usage of this is to expose a type that iterates over Ts, but /// which is implemented with some iterator over T*s: /// /// \code /// typedef pointee_iterator<SmallVectorImpl<T *>::iterator> iterator; /// \endcode template <typename WrappedIteratorT, typename T = typename std::remove_reference< decltype(**std::declval<WrappedIteratorT>())>::type> struct pointee_iterator : iterator_adaptor_base< pointee_iterator<WrappedIteratorT>, WrappedIteratorT, typename std::iterator_traits<WrappedIteratorT>::iterator_category, T> { pointee_iterator() = default; template <typename U> pointee_iterator(U &&u) : pointee_iterator::iterator_adaptor_base(std::forward<U &&>(u)) {} T &operator*() const { return **this->I; } }; template <typename WrappedIteratorT, typename T = decltype(&*std::declval<WrappedIteratorT>())> class pointer_iterator : public iterator_adaptor_base<pointer_iterator<WrappedIteratorT>, WrappedIteratorT, T> { mutable T Ptr; public: pointer_iterator() {} explicit pointer_iterator(WrappedIteratorT u) : pointer_iterator::iterator_adaptor_base(std::move(u)) {} T &operator*() { return Ptr = &*this->I; } const T &operator*() const { return Ptr = &*this->I; } }; } #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/StringSet.h
//===--- StringSet.h - The LLVM Compiler Driver -----------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open // Source License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // StringSet - A set-like wrapper for the StringMap. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_STRINGSET_H #define LLVM_ADT_STRINGSET_H #include "llvm/ADT/StringMap.h" namespace llvm { /// StringSet - A wrapper for StringMap that provides set-like functionality. template <class AllocatorTy = llvm::MallocAllocator> class StringSet : public llvm::StringMap<char, AllocatorTy> { typedef llvm::StringMap<char, AllocatorTy> base; public: std::pair<typename base::iterator, bool> insert(StringRef Key) { assert(!Key.empty()); return base::insert(std::make_pair(Key, '\0')); } }; } #endif // LLVM_ADT_STRINGSET_H
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/DepthFirstIterator.h
//===- llvm/ADT/DepthFirstIterator.h - Depth First iterator -----*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file builds on the ADT/GraphTraits.h file to build generic depth // first graph iterator. This file exposes the following functions/types: // // df_begin/df_end/df_iterator // * Normal depth-first iteration - visit a node and then all of its children. // // idf_begin/idf_end/idf_iterator // * Depth-first iteration on the 'inverse' graph. // // df_ext_begin/df_ext_end/df_ext_iterator // * Normal depth-first iteration - visit a node and then all of its children. // This iterator stores the 'visited' set in an external set, which allows // it to be more efficient, and allows external clients to use the set for // other purposes. // // idf_ext_begin/idf_ext_end/idf_ext_iterator // * Depth-first iteration on the 'inverse' graph. // This iterator stores the 'visited' set in an external set, which allows // it to be more efficient, and allows external clients to use the set for // other purposes. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_DEPTHFIRSTITERATOR_H #define LLVM_ADT_DEPTHFIRSTITERATOR_H #include "llvm/ADT/GraphTraits.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/iterator_range.h" #include <set> #include <vector> namespace llvm { // df_iterator_storage - A private class which is used to figure out where to // store the visited set. 
template<class SetType, bool External> // Non-external set class df_iterator_storage { public: SetType Visited; }; template<class SetType> class df_iterator_storage<SetType, true> { public: df_iterator_storage(SetType &VSet) : Visited(VSet) {} df_iterator_storage(const df_iterator_storage &S) : Visited(S.Visited) {} SetType &Visited; }; // Generic Depth First Iterator template<class GraphT, class SetType = llvm::SmallPtrSet<typename GraphTraits<GraphT>::NodeType*, 8>, bool ExtStorage = false, class GT = GraphTraits<GraphT> > class df_iterator : public df_iterator_storage<SetType, ExtStorage> { public: using iterator_category = std::forward_iterator_tag; using value_type = typename GT::NodeType; using difference_type = std::ptrdiff_t; using pointer = value_type *; using reference = value_type &; private: typedef typename GT::NodeType NodeType; typedef typename GT::ChildIteratorType ChildItTy; typedef PointerIntPair<NodeType*, 1> PointerIntTy; // VisitStack - Used to maintain the ordering. 
Top = current block // First element is node pointer, second is the 'next child' to visit // if the int in PointerIntTy is 0, the 'next child' to visit is invalid std::vector<std::pair<PointerIntTy, ChildItTy> > VisitStack; inline df_iterator(NodeType *Node) { this->Visited.insert(Node); VisitStack.push_back(std::make_pair(PointerIntTy(Node, 0), GT::child_begin(Node))); } inline df_iterator() { // End is when stack is empty } inline df_iterator(NodeType *Node, SetType &S) : df_iterator_storage<SetType, ExtStorage>(S) { if (!S.count(Node)) { VisitStack.push_back(std::make_pair(PointerIntTy(Node, 0), GT::child_begin(Node))); this->Visited.insert(Node); } } inline df_iterator(SetType &S) : df_iterator_storage<SetType, ExtStorage>(S) { // End is when stack is empty } inline void toNext() { do { std::pair<PointerIntTy, ChildItTy> &Top = VisitStack.back(); NodeType *Node = Top.first.getPointer(); ChildItTy &It = Top.second; if (!Top.first.getInt()) { // now retrieve the real begin of the children before we dive in It = GT::child_begin(Node); Top.first.setInt(1); } while (It != GT::child_end(Node)) { NodeType *Next = *It++; // Has our next sibling been visited? if (Next && this->Visited.insert(Next).second) { // No, do it now. VisitStack.push_back(std::make_pair(PointerIntTy(Next, 0), GT::child_begin(Next))); return; } } // Oops, ran out of successors... go up a level on the stack. 
VisitStack.pop_back(); } while (!VisitStack.empty()); } public: // Provide static begin and end methods as our public "constructors" static df_iterator begin(const GraphT &G) { return df_iterator(GT::getEntryNode(G)); } static df_iterator end(const GraphT &G) { return df_iterator(); } // Static begin and end methods as our public ctors for external iterators static df_iterator begin(const GraphT &G, SetType &S) { return df_iterator(GT::getEntryNode(G), S); } static df_iterator end(const GraphT &G, SetType &S) { return df_iterator(S); } bool operator==(const df_iterator &x) const { return VisitStack == x.VisitStack; } bool operator!=(const df_iterator &x) const { return !(*this == x); } pointer operator*() const { return VisitStack.back().first.getPointer(); } // This is a nonstandard operator-> that dereferences the pointer an extra // time... so that you can actually call methods ON the Node, because // the contained type is a pointer. This allows BBIt->getTerminator() f.e. // NodeType *operator->() const { return **this; } df_iterator &operator++() { // Preincrement toNext(); return *this; } /// \brief Skips all children of the current node and traverses to next node /// /// Note: This function takes care of incrementing the iterator. If you /// always increment and call this function, you risk walking off the end. df_iterator &skipChildren() { VisitStack.pop_back(); if (!VisitStack.empty()) toNext(); return *this; } df_iterator operator++(int) { // Postincrement df_iterator tmp = *this; ++*this; return tmp; } // nodeVisited - return true if this iterator has already visited the // specified node. This is public, and will probably be used to iterate over // nodes that a depth first iteration did not find: ie unreachable nodes. // bool nodeVisited(NodeType *Node) const { return this->Visited.count(Node) != 0; } /// getPathLength - Return the length of the path from the entry node to the /// current node, counting both nodes. 
unsigned getPathLength() const { return VisitStack.size(); }

  /// getPath - Return the n'th node in the path from the entry node to the
  /// current node.  \p n must be < getPathLength(); index 0 is the entry node.
  NodeType *getPath(unsigned n) const {
    return VisitStack[n].first.getPointer();
  }
};

// Provide global constructors that automatically figure out correct types...
//
template <class T>
df_iterator<T> df_begin(const T& G) {
  return df_iterator<T>::begin(G);
}

template <class T>
df_iterator<T> df_end(const T& G) {
  return df_iterator<T>::end(G);
}

// Provide an accessor method to use them in range-based patterns.
template <class T>
iterator_range<df_iterator<T>> depth_first(const T& G) {
  return make_range(df_begin(G), df_end(G));
}

// Provide global definitions of external depth first iterators...
// (the caller owns the visited set, so it can be inspected or pre-seeded).
template <class T, class SetTy = std::set<typename GraphTraits<T>::NodeType*> >
struct df_ext_iterator : public df_iterator<T, SetTy, true> {
  df_ext_iterator(const df_iterator<T, SetTy, true> &V)
    : df_iterator<T, SetTy, true>(V) {}
};

template <class T, class SetTy>
df_ext_iterator<T, SetTy> df_ext_begin(const T& G, SetTy &S) {
  return df_ext_iterator<T, SetTy>::begin(G, S);
}

template <class T, class SetTy>
df_ext_iterator<T, SetTy> df_ext_end(const T& G, SetTy &S) {
  return df_ext_iterator<T, SetTy>::end(G, S);
}

template <class T, class SetTy>
iterator_range<df_ext_iterator<T, SetTy>>
depth_first_ext(const T& G, SetTy &S) {
  return make_range(df_ext_begin(G, S), df_ext_end(G, S));
}

// Provide global definitions of inverse depth first iterators...
// These walk the graph through the Inverse<T> GraphTraits specialization,
// i.e. following predecessor edges instead of successors.
template <class T,
  class SetTy = llvm::SmallPtrSet<typename GraphTraits<T>::NodeType*, 8>,
  bool External = false>
struct idf_iterator : public df_iterator<Inverse<T>, SetTy, External> {
  idf_iterator(const df_iterator<Inverse<T>, SetTy, External> &V)
    : df_iterator<Inverse<T>, SetTy, External>(V) {}
};

template <class T>
idf_iterator<T> idf_begin(const T& G) {
  return idf_iterator<T>::begin(Inverse<T>(G));
}

template <class T>
idf_iterator<T> idf_end(const T& G){
  return idf_iterator<T>::end(Inverse<T>(G));
}

// Provide an accessor method to use them in range-based patterns.
template <class T>
iterator_range<idf_iterator<T>> inverse_depth_first(const T& G) {
  return make_range(idf_begin(G), idf_end(G));
}

// Provide global definitions of external inverse depth first iterators...
template <class T, class SetTy = std::set<typename GraphTraits<T>::NodeType*> >
struct idf_ext_iterator : public idf_iterator<T, SetTy, true> {
  idf_ext_iterator(const idf_iterator<T, SetTy, true> &V)
    : idf_iterator<T, SetTy, true>(V) {}
  idf_ext_iterator(const df_iterator<Inverse<T>, SetTy, true> &V)
    : idf_iterator<T, SetTy, true>(V) {}
};

template <class T, class SetTy>
idf_ext_iterator<T, SetTy> idf_ext_begin(const T& G, SetTy &S) {
  return idf_ext_iterator<T, SetTy>::begin(Inverse<T>(G), S);
}

template <class T, class SetTy>
idf_ext_iterator<T, SetTy> idf_ext_end(const T& G, SetTy &S) {
  return idf_ext_iterator<T, SetTy>::end(Inverse<T>(G), S);
}

template <class T, class SetTy>
iterator_range<idf_ext_iterator<T, SetTy>>
inverse_depth_first_ext(const T& G, SetTy &S) {
  return make_range(idf_ext_begin(G, S), idf_ext_end(G, S));
}

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/EpochTracker.h
//===- llvm/ADT/EpochTracker.h - ADT epoch tracking --------------*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the DebugEpochBase and DebugEpochBase::HandleBase classes.
// These can be used to write iterators that are fail-fast when LLVM is built
// with asserts enabled.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_EPOCH_TRACKER_H
#define LLVM_ADT_EPOCH_TRACKER_H

#include "llvm/Config/abi-breaking.h"
#include "llvm/Config/llvm-config.h"

#include <cstdint>

namespace llvm {

// NOTE(review): abi-breaking.h conventionally #defines
// LLVM_ENABLE_ABI_BREAKING_CHECKS to 0 or 1, in which case this #ifndef would
// never be taken and the checked variant below would always be compiled in.
// TODO confirm which convention this fork's abi-breaking.h follows.
#ifndef LLVM_ENABLE_ABI_BREAKING_CHECKS

// No-op variant: same interface as the checked variant below so callers never
// have to conditionalize; everything compiles away.
class DebugEpochBase {
public:
  void incrementEpoch() {}

  class HandleBase {
  public:
    HandleBase() = default;
    explicit HandleBase(const DebugEpochBase *) {}
    bool isHandleInSync() const { return true; }
    const void *getEpochAddress() const { return nullptr; }
  };
};

#else

/// \brief A base class for data structure classes wishing to make iterators
/// ("handles") pointing into themselves fail-fast.  When building without
/// asserts, this class is empty and does nothing.
///
/// DebugEpochBase does not by itself track handles pointing into itself.  The
/// expectation is that routines touching the handles will poll on
/// isHandleInSync at appropriate points to assert that the handle they're using
/// is still valid.
///
class DebugEpochBase {
  // Monotonically increasing generation counter; handles snapshot it at
  // creation and compare against it later.
  uint64_t Epoch;

public:
  DebugEpochBase() : Epoch(0) {}

  /// \brief Calling incrementEpoch invalidates all handles pointing into the
  /// calling instance.
  void incrementEpoch() { ++Epoch; }

  /// \brief The destructor calls incrementEpoch to make use-after-free bugs
  /// more likely to crash deterministically.
  ~DebugEpochBase() { incrementEpoch(); }

  /// \brief A base class for iterator classes ("handles") that wish to poll for
  /// iterator invalidating modifications in the underlying data structure.
  /// When LLVM is built without asserts, this class is empty and does nothing.
  ///
  /// HandleBase does not track the parent data structure by itself.  It expects
  /// the routines modifying the data structure to call incrementEpoch when they
  /// make an iterator-invalidating modification.
  ///
  class HandleBase {
    const uint64_t *EpochAddress;
    uint64_t EpochAtCreation;

  public:
    // Default-constructed handles are never in sync with anything real.
    HandleBase() : EpochAddress(nullptr), EpochAtCreation(UINT64_MAX) {}

    explicit HandleBase(const DebugEpochBase *Parent)
        : EpochAddress(&Parent->Epoch), EpochAtCreation(Parent->Epoch) {}

    /// \brief Returns true if the DebugEpochBase this Handle is linked to has
    /// not called incrementEpoch on itself since the creation of this
    /// HandleBase instance.
    bool isHandleInSync() const { return *EpochAddress == EpochAtCreation; }

    /// \brief Returns a pointer to the epoch word stored in the data structure
    /// this handle points into.  Can be used to check if two iterators point
    /// into the same data structure.
    const void *getEpochAddress() const { return EpochAddress; }
  };
};

#endif // LLVM_ENABLE_ABI_BREAKING_CHECKS

} // namespace llvm

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/SparseMultiSet.h
//===--- llvm/ADT/SparseMultiSet.h - Sparse multiset ------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the SparseMultiSet class, which adds multiset behavior to
// the SparseSet.
//
// A sparse multiset holds a small number of objects identified by integer keys
// from a moderately sized universe. The sparse multiset uses more memory than
// other containers in order to provide faster operations. Any key can map to
// multiple values. A SparseMultiSetNode class is provided, which serves as a
// convenient base class for the contents of a SparseMultiSet.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_SPARSEMULTISET_H
#define LLVM_ADT_SPARSEMULTISET_H

#include "llvm/ADT/SparseSet.h"

namespace llvm {

/// Fast multiset implementation for objects that can be identified by small
/// unsigned keys.
///
/// SparseMultiSet allocates memory proportional to the size of the key
/// universe, so it is not recommended for building composite data structures.
/// It is useful for algorithms that require a single set with fast operations.
///
/// Compared to DenseSet and DenseMap, SparseMultiSet provides constant-time
/// fast clear() as fast as a vector.  The find(), insert(), and erase()
/// operations are all constant time, and typically faster than a hash table.
/// The iteration order doesn't depend on numerical key values, it only depends
/// on the order of insert() and erase() operations.  Iteration order is the
/// insertion order. Iteration is only provided over elements of equivalent
/// keys, but iterators are bidirectional.
///
/// Compared to BitVector, SparseMultiSet<unsigned> uses 8x-40x more memory, but
/// offers constant-time clear() and size() operations as well as fast iteration
/// independent on the size of the universe.
///
/// SparseMultiSet contains a dense vector holding all the objects and a sparse
/// array holding indexes into the dense vector.  Most of the memory is used by
/// the sparse array which is the size of the key universe.  The SparseT
/// template parameter provides a space/speed tradeoff for sets holding many
/// elements.
///
/// When SparseT is uint32_t, find() only touches up to 3 cache lines, but the
/// sparse array uses 4 x Universe bytes.
///
/// When SparseT is uint8_t (the default), find() touches up to 3+[N/256] cache
/// lines, but the sparse array is 4x smaller.  N is the number of elements in
/// the set.
///
/// For sets that may grow to thousands of elements, SparseT should be set to
/// uint16_t or uint32_t.
///
/// Multiset behavior is provided by providing doubly linked lists for values
/// that are inlined in the dense vector. SparseMultiSet is a good choice when
/// one desires a growable number of entries per key, as it will retain the
/// SparseSet algorithmic properties despite being growable. Thus, it is often a
/// better choice than a SparseSet of growable containers or a vector of
/// vectors. SparseMultiSet also keeps iterators valid after erasure (provided
/// the iterators don't point to the element erased), allowing for more
/// intuitive and fast removal.
///
/// @tparam ValueT      The type of objects in the set.
/// @tparam KeyFunctorT A functor that computes an unsigned index from KeyT.
/// @tparam SparseT     An unsigned integer type. See above.
///
template<typename ValueT,
         typename KeyFunctorT = llvm::identity<unsigned>,
         typename SparseT = uint8_t>
class SparseMultiSet {
  static_assert(std::numeric_limits<SparseT>::is_integer &&
                !std::numeric_limits<SparseT>::is_signed,
                "SparseT must be an unsigned integer type");

  /// The actual data that's stored, as a doubly-linked list implemented via
  /// indices into the DenseVector.  The doubly linked list is implemented
  /// circular in Prev indices, and INVALID-terminated in Next indices. This
  /// provides efficient access to list tails. These nodes can also be
  /// tombstones, in which case they are actually nodes in a single-linked
  /// freelist of recyclable slots.
  struct SMSNode {
    static const unsigned INVALID = ~0U;

    ValueT Data;
    unsigned Prev;
    unsigned Next;

    SMSNode(ValueT D, unsigned P, unsigned N) : Data(D), Prev(P), Next(N) { }

    /// List tails have invalid Nexts.
    bool isTail() const {
      return Next == INVALID;
    }

    /// Whether this node is a tombstone node, and thus is in our freelist.
    bool isTombstone() const {
      return Prev == INVALID;
    }

    /// Since the list is circular in Prev, all non-tombstone nodes have a valid
    /// Prev.
    bool isValid() const { return Prev != INVALID; }
  };

  typedef typename KeyFunctorT::argument_type KeyT;
  typedef SmallVector<SMSNode, 8> DenseT;
  DenseT Dense;            // Dense vector of list nodes (values + links).
  SparseT *Sparse;         // Universe-sized map: key -> candidate Dense index.
  unsigned Universe;       // Size of the Sparse array; keys must be < this.
  KeyFunctorT KeyIndexOf;  // KeyT -> unsigned index.
  SparseSetValFunctor<KeyT, ValueT, KeyFunctorT> ValIndexOf; // ValueT -> index.

  /// We have a built-in recycler for reusing tombstone slots. This recycler
  /// puts a singly-linked free list into tombstone slots, allowing us quick
  /// erasure, iterator preservation, and dense size.
  unsigned FreelistIdx;
  unsigned NumFree;

  unsigned sparseIndex(const ValueT &Val) const {
    assert(ValIndexOf(Val) < Universe &&
           "Invalid key in set. Did object mutate?");
    return ValIndexOf(Val);
  }
  unsigned sparseIndex(const SMSNode &N) const { return sparseIndex(N.Data); }

  // Disable copy construction and assignment.
  // This data structure is not meant to be used that way.
  SparseMultiSet(const SparseMultiSet&) = delete;
  SparseMultiSet &operator=(const SparseMultiSet&) = delete;

  /// Whether the given entry is the head of the list. List heads's previous
  /// pointers are to the tail of the list, allowing for efficient access to the
  /// list tail. D must be a valid entry node.
  bool isHead(const SMSNode &D) const {
    assert(D.isValid() && "Invalid node for head");
    return Dense[D.Prev].isTail();
  }

  /// Whether the given entry is a singleton entry, i.e. the only entry with
  /// that key.
  bool isSingleton(const SMSNode &N) const {
    assert(N.isValid() && "Invalid node for singleton");
    // Is N its own predecessor?
    return &Dense[N.Prev] == &N;
  }

  /// Add in the given SMSNode. Uses a free entry in our freelist if
  /// available. Returns the index of the added node.
  unsigned addValue(const ValueT& V, unsigned Prev, unsigned Next) {
    if (NumFree == 0) {
      Dense.push_back(SMSNode(V, Prev, Next));
      return Dense.size() - 1;
    }

    // Peel off a free slot
    unsigned Idx = FreelistIdx;
    unsigned NextFree = Dense[Idx].Next;
    assert(Dense[Idx].isTombstone() && "Non-tombstone free?");

    Dense[Idx] = SMSNode(V, Prev, Next);
    FreelistIdx = NextFree;
    --NumFree;
    return Idx;
  }

  /// Make the current index a new tombstone. Pushes it onto the freelist.
  void makeTombstone(unsigned Idx) {
    Dense[Idx].Prev = SMSNode::INVALID;
    Dense[Idx].Next = FreelistIdx;
    FreelistIdx = Idx;
    ++NumFree;
  }

public:
  typedef ValueT value_type;
  typedef ValueT &reference;
  typedef const ValueT &const_reference;
  typedef ValueT *pointer;
  typedef const ValueT *const_pointer;
  typedef unsigned size_type;

  SparseMultiSet()
    : Sparse(nullptr), Universe(0), FreelistIdx(SMSNode::INVALID), NumFree(0) {}

  ~SparseMultiSet() { delete[] Sparse; } // HLSL Change: Use overridable operator new

  /// Set the universe size which determines the largest key the set can hold.
  /// The universe must be sized before any elements can be added.
  ///
  /// @param U Universe size. All object keys must be less than U.
  ///
  void setUniverse(unsigned U) {
    // It's not hard to resize the universe on a non-empty set, but it doesn't
    // seem like a likely use case, so we can add that code when we need it.
    assert(empty() && "Can only resize universe on an empty map");
    // Hysteresis prevents needless reallocations.
    if (U >= Universe/4 && U <= Universe)
      return;
    // HLSL Change Begin: Use overridable operator new/delete
    delete[] Sparse;
    // The Sparse array doesn't actually need to be initialized, so malloc
    // would be enough here, but that will cause tools like valgrind to
    // complain about branching on uninitialized data.
    Sparse = new SparseT[U];
    std::memset(Sparse, 0, U * sizeof(SparseT));
    // HLSL Change End
    Universe = U;
  }

  /// Our iterators are iterators over the collection of objects that share a
  /// key.
  template<typename SMSPtrTy>
  class iterator_base {
    friend class SparseMultiSet;

  public:
    using iterator_category = std::bidirectional_iterator_tag;
    using value_type = ValueT;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type *;
    using reference = value_type &;

  private:
    SMSPtrTy SMS;        // Owning container.
    unsigned Idx;        // Current Dense index, or SMSNode::INVALID for end().
    unsigned SparseIdx;  // The key (sparse index) this iterator ranges over.

    iterator_base(SMSPtrTy P, unsigned I, unsigned SI)
      : SMS(P), Idx(I), SparseIdx(SI) { }

    /// Whether our iterator has fallen outside our dense vector.
    bool isEnd() const {
      if (Idx == SMSNode::INVALID)
        return true;

      assert(Idx < SMS->Dense.size() && "Out of range, non-INVALID Idx?");
      return false;
    }

    /// Whether our iterator is properly keyed, i.e. the SparseIdx is valid
    bool isKeyed() const { return SparseIdx < SMS->Universe; }

    unsigned Prev() const { return SMS->Dense[Idx].Prev; }
    unsigned Next() const { return SMS->Dense[Idx].Next; }

    void setPrev(unsigned P) { SMS->Dense[Idx].Prev = P; }
    void setNext(unsigned N) { SMS->Dense[Idx].Next = N; }

  public:
    reference operator*() const {
      assert(isKeyed() && SMS->sparseIndex(SMS->Dense[Idx].Data) == SparseIdx &&
             "Dereferencing iterator of invalid key or index");

      return SMS->Dense[Idx].Data;
    }
    pointer operator->() const { return &operator*(); }

    /// Comparison operators
    bool operator==(const iterator_base &RHS) const {
      // end compares equal
      if (SMS == RHS.SMS && Idx == RHS.Idx) {
        assert((isEnd() || SparseIdx == RHS.SparseIdx) &&
               "Same dense entry, but different keys?");
        return true;
      }

      return false;
    }

    bool operator!=(const iterator_base &RHS) const {
      return !operator==(RHS);
    }

    /// Increment and decrement operators
    iterator_base &operator--() { // predecrement - Back up
      assert(isKeyed() && "Decrementing an invalid iterator");
      assert((isEnd() || !SMS->isHead(SMS->Dense[Idx])) &&
             "Decrementing head of list");

      // If we're at the end, then issue a new find()
      if (isEnd())
        Idx = SMS->findIndex(SparseIdx).Prev();
      else
        Idx = Prev();

      return *this;
    }
    iterator_base &operator++() { // preincrement - Advance
      assert(!isEnd() && isKeyed() && "Incrementing an invalid/end iterator");
      Idx = Next();
      return *this;
    }
    iterator_base operator--(int) { // postdecrement
      iterator_base I(*this);
      --*this;
      return I;
    }
    iterator_base operator++(int) { // postincrement
      iterator_base I(*this);
      ++*this;
      return I;
    }
  };

  typedef iterator_base<SparseMultiSet *> iterator;
  typedef iterator_base<const SparseMultiSet *> const_iterator;

  // Convenience types
  typedef std::pair<iterator, iterator> RangePair;

  /// Returns an iterator past this container. Note that such an iterator
  /// cannot be decremented, but will compare equal to other end iterators.
  iterator end() { return iterator(this, SMSNode::INVALID, SMSNode::INVALID); }
  const_iterator end() const {
    return const_iterator(this, SMSNode::INVALID, SMSNode::INVALID);
  }

  /// Returns true if the set is empty.
  ///
  /// This is not the same as BitVector::empty().
  ///
  bool empty() const { return size() == 0; }

  /// Returns the number of elements in the set.
  ///
  /// This is not the same as BitVector::size() which returns the size of the
  /// universe.
  ///
  size_type size() const {
    assert(NumFree <= Dense.size() && "Out-of-bounds free entries");
    return Dense.size() - NumFree;
  }

  /// Clears the set. This is a very fast constant time operation.
  ///
  void clear() {
    // Sparse does not need to be cleared, see find().
    Dense.clear();
    NumFree = 0;
    FreelistIdx = SMSNode::INVALID;
  }

  /// Find an element by its index.
  ///
  /// @param   Idx A valid index to find.
  /// @returns An iterator to the element identified by key, or end().
  ///
  iterator findIndex(unsigned Idx) {
    assert(Idx < Universe && "Key out of range");
    const unsigned Stride = std::numeric_limits<SparseT>::max() + 1u;
    for (unsigned i = Sparse[Idx], e = Dense.size(); i < e; i += Stride) {
      const unsigned FoundIdx = sparseIndex(Dense[i]);
      // Check that we're pointing at the correct entry and that it is the head
      // of a valid list.
      if (Idx == FoundIdx && Dense[i].isValid() && isHead(Dense[i]))
        return iterator(this, i, Idx);
      // Stride is 0 when SparseT >= unsigned.  We don't need to loop.
      if (!Stride)
        break;
    }
    return end();
  }

  /// Find an element by its key.
  ///
  /// @param   Key A valid key to find.
  /// @returns An iterator to the element identified by key, or end().
  ///
  iterator find(const KeyT &Key) {
    return findIndex(KeyIndexOf(Key));
  }

  const_iterator find(const KeyT &Key) const {
    // Reuse the non-const lookup; only the returned iterator type differs.
    iterator I = const_cast<SparseMultiSet*>(this)->findIndex(KeyIndexOf(Key));
    return const_iterator(I.SMS, I.Idx, KeyIndexOf(Key));
  }

  /// Returns the number of elements identified by Key. This will be linear in
  /// the number of elements of that key.
  size_type count(const KeyT &Key) const {
    unsigned Ret = 0;
    for (const_iterator It = find(Key); It != end(); ++It)
      ++Ret;

    return Ret;
  }

  /// Returns true if this set contains an element identified by Key.
  bool contains(const KeyT &Key) const {
    return find(Key) != end();
  }

  /// Return the head and tail of the subset's list, otherwise returns end().
  iterator getHead(const KeyT &Key) { return find(Key); }
  iterator getTail(const KeyT &Key) {
    iterator I = find(Key);
    if (I != end())
      I = iterator(this, I.Prev(), KeyIndexOf(Key));
    return I;
  }

  /// The bounds of the range of items sharing Key K. First member is the head
  /// of the list, and the second member is a decrementable end iterator for
  /// that key.
  RangePair equal_range(const KeyT &K) {
    iterator B = find(K);
    iterator E = iterator(this, SMSNode::INVALID, B.SparseIdx);
    return std::make_pair(B, E);
  }

  /// Insert a new element at the tail of the subset list. Returns an iterator
  /// to the newly added entry.
  iterator insert(const ValueT &Val) {
    unsigned Idx = sparseIndex(Val);
    iterator I = findIndex(Idx);

    unsigned NodeIdx = addValue(Val, SMSNode::INVALID, SMSNode::INVALID);

    if (I == end()) {
      // Make a singleton list
      Sparse[Idx] = NodeIdx;
      Dense[NodeIdx].Prev = NodeIdx;
      return iterator(this, NodeIdx, Idx);
    }

    // Stick it at the end.
    unsigned HeadIdx = I.Idx;
    unsigned TailIdx = I.Prev();
    Dense[TailIdx].Next = NodeIdx;
    Dense[HeadIdx].Prev = NodeIdx;
    Dense[NodeIdx].Prev = TailIdx;

    return iterator(this, NodeIdx, Idx);
  }

  /// Erases an existing element identified by a valid iterator.
  ///
  /// This invalidates iterators pointing at the same entry, but erase()
  /// returns an iterator pointing to the next element in the subset's list.
  /// This makes it possible to erase selected elements while iterating over
  /// the subset:
  ///
  ///   tie(I, E) = Set.equal_range(Key);
  ///   while (I != E)
  ///     if (test(*I))
  ///       I = Set.erase(I);
  ///     else
  ///       ++I;
  ///
  /// Note that if the last element in the subset list is erased, this will
  /// return an end iterator which can be decremented to get the new tail (if
  /// it exists):
  ///
  ///   tie(B, I) = Set.equal_range(Key);
  ///   for (bool isBegin = B == I; !isBegin; /* empty */) {
  ///     isBegin = (--I) == B;
  ///     if (test(I))
  ///       break;
  ///     I = erase(I);
  ///   }
  iterator erase(iterator I) {
    assert(I.isKeyed() && !I.isEnd() && !Dense[I.Idx].isTombstone() &&
           "erasing invalid/end/tombstone iterator");

    // First, unlink the node from its list. Then swap the node out with the
    // dense vector's last entry
    iterator NextI = unlink(Dense[I.Idx]);

    // Put in a tombstone.
    makeTombstone(I.Idx);

    return NextI;
  }

  /// Erase all elements with the given key. This invalidates all
  /// iterators of that key.
  void eraseAll(const KeyT &K) {
    for (iterator I = find(K); I != end(); /* empty */)
      I = erase(I);
  }

private:
  /// Unlink the node from its list. Returns the next node in the list.
  iterator unlink(const SMSNode &N) {
    if (isSingleton(N)) {
      // Singleton is already unlinked
      assert(N.Next == SMSNode::INVALID && "Singleton has next?");
      return iterator(this, SMSNode::INVALID, ValIndexOf(N.Data));
    }

    if (isHead(N)) {
      // If we're the head, then update the sparse array and our next.
      Sparse[sparseIndex(N)] = N.Next;
      Dense[N.Next].Prev = N.Prev;
      return iterator(this, N.Next, ValIndexOf(N.Data));
    }

    if (N.isTail()) {
      // If we're the tail, then update our head and our previous.
      findIndex(sparseIndex(N)).setPrev(N.Prev);
      Dense[N.Prev].Next = N.Next;

      // Give back an end iterator that can be decremented
      iterator I(this, N.Prev, ValIndexOf(N.Data));
      return ++I;
    }

    // Otherwise, just drop us
    Dense[N.Next].Prev = N.Prev;
    Dense[N.Prev].Next = N.Next;
    return iterator(this, N.Next, ValIndexOf(N.Data));
  }
};

} // end namespace llvm

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/StringRef.h
//===--- StringRef.h - Constant String Reference Wrapper --------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_STRINGREF_H #define LLVM_ADT_STRINGREF_H #include <algorithm> #include <cassert> #include <cstring> #include <limits> #include <string> #include <utility> namespace llvm { template <typename T> class SmallVectorImpl; class APInt; class hash_code; class StringRef; /// Helper functions for StringRef::getAsInteger. bool getAsUnsignedInteger(StringRef Str, unsigned Radix, unsigned long long &Result); bool getAsSignedInteger(StringRef Str, unsigned Radix, long long &Result); /// StringRef - Represent a constant reference to a string, i.e. a character /// array and a length, which need not be null terminated. /// /// This class does not own the string data, it is expected to be used in /// situations where the character data resides in some other buffer, whose /// lifetime extends past that of the StringRef. For this reason, it is not in /// general safe to store a StringRef. class StringRef { public: typedef const char *iterator; typedef const char *const_iterator; static const size_t npos = ~size_t(0); typedef size_t size_type; private: /// The start of the string, in an external buffer. const char *Data; /// The length of the string. size_t Length; // Workaround memcmp issue with null pointers (undefined behavior) // by providing a specialized version static int compareMemory(const char *Lhs, const char *Rhs, size_t Length) { if (Length == 0) { return 0; } return ::memcmp(Lhs,Rhs,Length); } public: /// @name Constructors /// @{ /// Construct an empty string ref. 
/*implicit*/ StringRef() : Data(nullptr), Length(0) {} StringRef(std::nullptr_t) = delete; // HLSL Change - So we don't accidentally pass `false` again /// Construct a string ref from a cstring. /*implicit*/ StringRef(const char *Str) : Data(Str) { assert(Str && "StringRef cannot be built from a NULL argument"); Length = ::strlen(Str); // invoking strlen(NULL) is undefined behavior } /// Construct a string ref from a pointer and length. /*implicit*/ StringRef(const char *data, size_t length) : Data(data), Length(length) { assert((data || length == 0) && "StringRef cannot be built from a NULL argument with non-null length"); } /// Construct a string ref from an std::string. /*implicit*/ StringRef(const std::string &Str) : Data(Str.data()), Length(Str.length()) {} /// @} /// @name Iterators /// @{ iterator begin() const { return Data; } iterator end() const { return Data + Length; } const unsigned char *bytes_begin() const { return reinterpret_cast<const unsigned char *>(begin()); } const unsigned char *bytes_end() const { return reinterpret_cast<const unsigned char *>(end()); } /// @} /// @name String Operations /// @{ /// data - Get a pointer to the start of the string (which may not be null /// terminated). const char *data() const { return Data; } /// empty - Check if the string is empty. bool empty() const { return Length == 0; } /// size - Get the string size. size_t size() const { return Length; } /// front - Get the first character in the string. char front() const { assert(!empty()); return Data[0]; } /// back - Get the last character in the string. char back() const { assert(!empty()); return Data[Length-1]; } // copy - Allocate copy in Allocator and return StringRef to it. 
template <typename Allocator> StringRef copy(Allocator &A) const { char *S = A.template Allocate<char>(Length); std::copy(begin(), end(), S); return StringRef(S, Length); } /// equals - Check for string equality, this is more efficient than /// compare() when the relative ordering of inequal strings isn't needed. bool equals(StringRef RHS) const { return (Length == RHS.Length && compareMemory(Data, RHS.Data, RHS.Length) == 0); } /// equals_lower - Check for string equality, ignoring case. bool equals_lower(StringRef RHS) const { return Length == RHS.Length && compare_lower(RHS) == 0; } /// compare - Compare two strings; the result is -1, 0, or 1 if this string /// is lexicographically less than, equal to, or greater than the \p RHS. int compare(StringRef RHS) const { // Check the prefix for a mismatch. if (int Res = compareMemory(Data, RHS.Data, std::min(Length, RHS.Length))) return Res < 0 ? -1 : 1; // Otherwise the prefixes match, so we only need to check the lengths. if (Length == RHS.Length) return 0; return Length < RHS.Length ? -1 : 1; } /// compare_lower - Compare two strings, ignoring case. int compare_lower(StringRef RHS) const; /// compare_numeric - Compare two strings, treating sequences of digits as /// numbers. int compare_numeric(StringRef RHS) const; /// \brief Determine the edit distance between this string and another /// string. /// /// \param Other the string to compare this string against. /// /// \param AllowReplacements whether to allow character /// replacements (change one character into another) as a single /// operation, rather than as two operations (an insertion and a /// removal). /// /// \param MaxEditDistance If non-zero, the maximum edit distance that /// this routine is allowed to compute. If the edit distance will exceed /// that maximum, returns \c MaxEditDistance+1. 
/// /// \returns the minimum number of character insertions, removals, /// or (if \p AllowReplacements is \c true) replacements needed to /// transform one of the given strings into the other. If zero, /// the strings are identical. unsigned edit_distance(StringRef Other, bool AllowReplacements = true, unsigned MaxEditDistance = 0) const; /// str - Get the contents as an std::string. std::string str() const { if (!Data) return std::string(); return std::string(Data, Length); } /// @} /// @name Operator Overloads /// @{ char operator[](size_t Index) const { assert(Index < Length && "Invalid index!"); return Data[Index]; } /// @} /// @name Type Conversions /// @{ operator std::string() const { return str(); } /// @} /// @name String Predicates /// @{ /// Check if this string starts with the given \p Prefix. bool startswith(StringRef Prefix) const { return Length >= Prefix.Length && compareMemory(Data, Prefix.Data, Prefix.Length) == 0; } /// Check if this string starts with the given \p Prefix, ignoring case. bool startswith_lower(StringRef Prefix) const; /// Check if this string ends with the given \p Suffix. bool endswith(StringRef Suffix) const { return Length >= Suffix.Length && compareMemory(end() - Suffix.Length, Suffix.Data, Suffix.Length) == 0; } /// Check if this string ends with the given \p Suffix, ignoring case. bool endswith_lower(StringRef Suffix) const; /// @} /// @name String Searching /// @{ /// Search for the first character \p C in the string. /// /// \returns The index of the first occurrence of \p C, or npos if not /// found. size_t find(char C, size_t From = 0) const { size_t FindBegin = std::min(From, Length); if (FindBegin < Length) { // Avoid calling memchr with nullptr. // Just forward to memchr, which is faster than a hand-rolled loop. if (const void *P = ::memchr(Data + FindBegin, C, Length - FindBegin)) return static_cast<const char *>(P) - Data; } return npos; } /// Search for the first string \p Str in the string. 
/// /// \returns The index of the first occurrence of \p Str, or npos if not /// found. size_t find(StringRef Str, size_t From = 0) const; /// Search for the last character \p C in the string. /// /// \returns The index of the last occurrence of \p C, or npos if not /// found. size_t rfind(char C, size_t From = npos) const { From = std::min(From, Length); size_t i = From; while (i != 0) { --i; if (Data[i] == C) return i; } return npos; } /// Search for the last string \p Str in the string. /// /// \returns The index of the last occurrence of \p Str, or npos if not /// found. size_t rfind(StringRef Str) const; /// Find the first character in the string that is \p C, or npos if not /// found. Same as find. size_t find_first_of(char C, size_t From = 0) const { return find(C, From); } /// Find the first character in the string that is in \p Chars, or npos if /// not found. /// /// Complexity: O(size() + Chars.size()) size_t find_first_of(StringRef Chars, size_t From = 0) const; /// Find the first character in the string that is not \p C or npos if not /// found. size_t find_first_not_of(char C, size_t From = 0) const; /// Find the first character in the string that is not in the string /// \p Chars, or npos if not found. /// /// Complexity: O(size() + Chars.size()) size_t find_first_not_of(StringRef Chars, size_t From = 0) const; /// Find the last character in the string that is \p C, or npos if not /// found. size_t find_last_of(char C, size_t From = npos) const { return rfind(C, From); } /// Find the last character in the string that is in \p C, or npos if not /// found. /// /// Complexity: O(size() + Chars.size()) size_t find_last_of(StringRef Chars, size_t From = npos) const; /// Find the last character in the string that is not \p C, or npos if not /// found. size_t find_last_not_of(char C, size_t From = npos) const; /// Find the last character in the string that is not in \p Chars, or /// npos if not found. 
/// /// Complexity: O(size() + Chars.size()) size_t find_last_not_of(StringRef Chars, size_t From = npos) const; /// @} /// @name Helpful Algorithms /// @{ /// Return the number of occurrences of \p C in the string. size_t count(char C) const { size_t Count = 0; for (size_t i = 0, e = Length; i != e; ++i) if (Data[i] == C) ++Count; return Count; } /// Return the number of non-overlapped occurrences of \p Str in /// the string. size_t count(StringRef Str) const; /// Parse the current string as an integer of the specified radix. If /// \p Radix is specified as zero, this does radix autosensing using /// extended C rules: 0 is octal, 0x is hex, 0b is binary. /// /// If the string is invalid or if only a subset of the string is valid, /// this returns true to signify the error. The string is considered /// erroneous if empty or if it overflows T. template <typename T> typename std::enable_if<std::numeric_limits<T>::is_signed, bool>::type getAsInteger(unsigned Radix, T &Result) const { long long LLVal; if (getAsSignedInteger(*this, Radix, LLVal) || static_cast<T>(LLVal) != LLVal) return true; Result = LLVal; return false; } template <typename T> typename std::enable_if<!std::numeric_limits<T>::is_signed, bool>::type getAsInteger(unsigned Radix, T &Result) const { unsigned long long ULLVal; // The additional cast to unsigned long long is required to avoid the // Visual C++ warning C4805: '!=' : unsafe mix of type 'bool' and type // 'unsigned __int64' when instantiating getAsInteger with T = bool. if (getAsUnsignedInteger(*this, Radix, ULLVal) || static_cast<unsigned long long>(static_cast<T>(ULLVal)) != ULLVal) return true; Result = ULLVal; return false; } /// Parse the current string as an integer of the specified \p Radix, or of /// an autosensed radix if the \p Radix given is 0. The current value in /// \p Result is discarded, and the storage is changed to be wide enough to /// store the parsed integer. 
/// /// \returns true if the string does not solely consist of a valid /// non-empty number in the appropriate base. /// /// APInt::fromString is superficially similar but assumes the /// string is well-formed in the given radix. bool getAsInteger(unsigned Radix, APInt &Result) const; /// @} /// @name String Operations /// @{ // Convert the given ASCII string to lowercase. std::string lower() const; /// Convert the given ASCII string to uppercase. std::string upper() const; /// @} /// @name Substring Operations /// @{ /// Return a reference to the substring from [Start, Start + N). /// /// \param Start The index of the starting character in the substring; if /// the index is npos or greater than the length of the string then the /// empty substring will be returned. /// /// \param N The number of characters to included in the substring. If N /// exceeds the number of characters remaining in the string, the string /// suffix (starting with \p Start) will be returned. StringRef substr(size_t Start, size_t N = npos) const { Start = std::min(Start, Length); return StringRef(Data + Start, std::min(N, Length - Start)); } /// Return a StringRef equal to 'this' but with the first \p N elements /// dropped. StringRef drop_front(size_t N = 1) const { assert(size() >= N && "Dropping more elements than exist"); return substr(N); } /// Return a StringRef equal to 'this' but with the last \p N elements /// dropped. StringRef drop_back(size_t N = 1) const { assert(size() >= N && "Dropping more elements than exist"); return substr(0, size()-N); } /// Return a reference to the substring from [Start, End). /// /// \param Start The index of the starting character in the substring; if /// the index is npos or greater than the length of the string then the /// empty substring will be returned. /// /// \param End The index following the last character to include in the /// substring. 
If this is npos, or less than \p Start, or exceeds the /// number of characters remaining in the string, the string suffix /// (starting with \p Start) will be returned. StringRef slice(size_t Start, size_t End) const { Start = std::min(Start, Length); End = std::min(std::max(Start, End), Length); return StringRef(Data + Start, End - Start); } /// Split into two substrings around the first occurrence of a separator /// character. /// /// If \p Separator is in the string, then the result is a pair (LHS, RHS) /// such that (*this == LHS + Separator + RHS) is true and RHS is /// maximal. If \p Separator is not in the string, then the result is a /// pair (LHS, RHS) where (*this == LHS) and (RHS == ""). /// /// \param Separator The character to split on. /// \returns The split substrings. std::pair<StringRef, StringRef> split(char Separator) const { size_t Idx = find(Separator); if (Idx == npos) return std::make_pair(*this, StringRef()); return std::make_pair(slice(0, Idx), slice(Idx+1, npos)); } /// Split into two substrings around the first occurrence of a separator /// string. /// /// If \p Separator is in the string, then the result is a pair (LHS, RHS) /// such that (*this == LHS + Separator + RHS) is true and RHS is /// maximal. If \p Separator is not in the string, then the result is a /// pair (LHS, RHS) where (*this == LHS) and (RHS == ""). /// /// \param Separator - The string to split on. /// \return - The split substrings. std::pair<StringRef, StringRef> split(StringRef Separator) const { size_t Idx = find(Separator); if (Idx == npos) return std::make_pair(*this, StringRef()); return std::make_pair(slice(0, Idx), slice(Idx + Separator.size(), npos)); } /// Split into substrings around the occurrences of a separator string. /// /// Each substring is stored in \p A. If \p MaxSplit is >= 0, at most /// \p MaxSplit splits are done and consequently <= \p MaxSplit /// elements are added to A. /// If \p KeepEmpty is false, empty strings are not added to \p A. 
They /// still count when considering \p MaxSplit /// An useful invariant is that /// Separator.join(A) == *this if MaxSplit == -1 and KeepEmpty == true /// /// \param A - Where to put the substrings. /// \param Separator - The string to split on. /// \param MaxSplit - The maximum number of times the string is split. /// \param KeepEmpty - True if empty substring should be added. void split(SmallVectorImpl<StringRef> &A, StringRef Separator, int MaxSplit = -1, bool KeepEmpty = true) const; /// Split into two substrings around the last occurrence of a separator /// character. /// /// If \p Separator is in the string, then the result is a pair (LHS, RHS) /// such that (*this == LHS + Separator + RHS) is true and RHS is /// minimal. If \p Separator is not in the string, then the result is a /// pair (LHS, RHS) where (*this == LHS) and (RHS == ""). /// /// \param Separator - The character to split on. /// \return - The split substrings. std::pair<StringRef, StringRef> rsplit(char Separator) const { size_t Idx = rfind(Separator); if (Idx == npos) return std::make_pair(*this, StringRef()); return std::make_pair(slice(0, Idx), slice(Idx+1, npos)); } /// Return string with consecutive characters in \p Chars starting from /// the left removed. StringRef ltrim(StringRef Chars = " \t\n\v\f\r") const { return drop_front(std::min(Length, find_first_not_of(Chars))); } /// Return string with consecutive characters in \p Chars starting from /// the right removed. StringRef rtrim(StringRef Chars = " \t\n\v\f\r") const { return drop_back(Length - std::min(Length, find_last_not_of(Chars) + 1)); } /// Return string with consecutive characters in \p Chars starting from /// the left and right removed. 
StringRef trim(StringRef Chars = " \t\n\v\f\r") const { return ltrim(Chars).rtrim(Chars); } /// @} }; /// @name StringRef Comparison Operators /// @{ inline bool operator==(StringRef LHS, StringRef RHS) { return LHS.equals(RHS); } inline bool operator!=(StringRef LHS, StringRef RHS) { return !(LHS == RHS); } inline bool operator<(StringRef LHS, StringRef RHS) { return LHS.compare(RHS) == -1; } inline bool operator<=(StringRef LHS, StringRef RHS) { return LHS.compare(RHS) != 1; } inline bool operator>(StringRef LHS, StringRef RHS) { return LHS.compare(RHS) == 1; } inline bool operator>=(StringRef LHS, StringRef RHS) { return LHS.compare(RHS) != -1; } inline std::string &operator+=(std::string &buffer, StringRef string) { return buffer.append(string.data(), string.size()); } /// @} /// \brief Compute a hash_code for a StringRef. hash_code hash_value(StringRef S); // StringRefs can be treated like a POD type. template <typename T> struct isPodLike; template <> struct isPodLike<StringRef> { static const bool value = true; }; } // HLSL Change Starts // StringRef provides an operator string; that trips up the std::pair noexcept specification, // which (a) enables the moves constructor (because conversion is allowed), but (b) // misclassifies the the construction as nothrow. namespace std { template<> struct is_nothrow_constructible <std::string, llvm::StringRef> : std::false_type { }; template<> struct is_nothrow_constructible <std::string, llvm::StringRef &> : std::false_type { }; template<> struct is_nothrow_constructible <std::string, const llvm::StringRef &> : std::false_type { }; } // HLSL Change Ends #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/PointerIntPair.h
//===- llvm/ADT/PointerIntPair.h - Pair for pointer and int -----*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the PointerIntPair class. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_POINTERINTPAIR_H #define LLVM_ADT_POINTERINTPAIR_H #include "llvm/Support/Compiler.h" #include "llvm/Support/PointerLikeTypeTraits.h" #include <cassert> #include <limits> namespace llvm { template<typename T> struct DenseMapInfo; /// PointerIntPair - This class implements a pair of a pointer and small /// integer. It is designed to represent this in the space required by one /// pointer by bitmangling the integer into the low part of the pointer. This /// can only be done for small integers: typically up to 3 bits, but it depends /// on the number of bits available according to PointerLikeTypeTraits for the /// type. /// /// Note that PointerIntPair always puts the IntVal part in the highest bits /// possible. For example, PointerIntPair<void*, 1, bool> will put the bit for /// the bool into bit #2, not bit #0, which allows the low two bits to be used /// for something else. For example, this allows: /// PointerIntPair<PointerIntPair<void*, 1, bool>, 1, bool> /// ... and the two bools will land in different bits. 
/// template <typename PointerTy, unsigned IntBits, typename IntType=unsigned, typename PtrTraits = PointerLikeTypeTraits<PointerTy> > class PointerIntPair { intptr_t Value; static_assert(PtrTraits::NumLowBitsAvailable < std::numeric_limits<uintptr_t>::digits, "cannot use a pointer type that has all bits free"); static_assert(IntBits <= PtrTraits::NumLowBitsAvailable, "PointerIntPair with integer size too large for pointer"); enum : uintptr_t { /// PointerBitMask - The bits that come from the pointer. PointerBitMask = ~(uintptr_t)(((intptr_t)1 << PtrTraits::NumLowBitsAvailable)-1), /// IntShift - The number of low bits that we reserve for other uses, and /// keep zero. IntShift = (uintptr_t)PtrTraits::NumLowBitsAvailable-IntBits, /// IntMask - This is the unshifted mask for valid bits of the int type. IntMask = (uintptr_t)(((intptr_t)1 << IntBits)-1), // ShiftedIntMask - This is the bits for the integer shifted in place. ShiftedIntMask = (uintptr_t)(IntMask << IntShift) }; public: PointerIntPair() : Value(0) {} PointerIntPair(PointerTy PtrVal, IntType IntVal) { setPointerAndInt(PtrVal, IntVal); } explicit PointerIntPair(PointerTy PtrVal) { initWithPointer(PtrVal); } PointerTy getPointer() const { return PtrTraits::getFromVoidPointer( reinterpret_cast<void*>(Value & PointerBitMask)); } IntType getInt() const { return (IntType)((Value >> IntShift) & IntMask); } void setPointer(PointerTy PtrVal) { intptr_t PtrWord = reinterpret_cast<intptr_t>(PtrTraits::getAsVoidPointer(PtrVal)); assert((PtrWord & ~PointerBitMask) == 0 && "Pointer is not sufficiently aligned"); // Preserve all low bits, just update the pointer. Value = PtrWord | (Value & ~PointerBitMask); } void setInt(IntType IntVal) { intptr_t IntWord = static_cast<intptr_t>(IntVal); assert((IntWord & ~IntMask) == 0 && "Integer too large for field"); // Preserve all bits other than the ones we are updating. Value &= ~ShiftedIntMask; // Remove integer field. Value |= IntWord << IntShift; // Set new integer. 
} void initWithPointer(PointerTy PtrVal) { intptr_t PtrWord = reinterpret_cast<intptr_t>(PtrTraits::getAsVoidPointer(PtrVal)); assert((PtrWord & ~PointerBitMask) == 0 && "Pointer is not sufficiently aligned"); Value = PtrWord; } void setPointerAndInt(PointerTy PtrVal, IntType IntVal) { intptr_t PtrWord = reinterpret_cast<intptr_t>(PtrTraits::getAsVoidPointer(PtrVal)); assert((PtrWord & ~PointerBitMask) == 0 && "Pointer is not sufficiently aligned"); intptr_t IntWord = static_cast<intptr_t>(IntVal); assert((IntWord & ~IntMask) == 0 && "Integer too large for field"); Value = PtrWord | (IntWord << IntShift); } PointerTy const *getAddrOfPointer() const { return const_cast<PointerIntPair *>(this)->getAddrOfPointer(); } PointerTy *getAddrOfPointer() { assert(Value == reinterpret_cast<intptr_t>(getPointer()) && "Can only return the address if IntBits is cleared and " "PtrTraits doesn't change the pointer"); return reinterpret_cast<PointerTy *>(&Value); } void *getOpaqueValue() const { return reinterpret_cast<void*>(Value); } void setFromOpaqueValue(void *Val) { Value = reinterpret_cast<intptr_t>(Val);} static PointerIntPair getFromOpaqueValue(void *V) { PointerIntPair P; P.setFromOpaqueValue(V); return P; } // Allow PointerIntPairs to be created from const void * if and only if the // pointer type could be created from a const void *. 
static PointerIntPair getFromOpaqueValue(const void *V) { (void)PtrTraits::getFromVoidPointer(V); return getFromOpaqueValue(const_cast<void *>(V)); } bool operator==(const PointerIntPair &RHS) const {return Value == RHS.Value;} bool operator!=(const PointerIntPair &RHS) const {return Value != RHS.Value;} bool operator<(const PointerIntPair &RHS) const {return Value < RHS.Value;} bool operator>(const PointerIntPair &RHS) const {return Value > RHS.Value;} bool operator<=(const PointerIntPair &RHS) const {return Value <= RHS.Value;} bool operator>=(const PointerIntPair &RHS) const {return Value >= RHS.Value;} }; template <typename T> struct isPodLike; template<typename PointerTy, unsigned IntBits, typename IntType> struct isPodLike<PointerIntPair<PointerTy, IntBits, IntType> > { static const bool value = true; }; // Provide specialization of DenseMapInfo for PointerIntPair. template<typename PointerTy, unsigned IntBits, typename IntType> struct DenseMapInfo<PointerIntPair<PointerTy, IntBits, IntType> > { typedef PointerIntPair<PointerTy, IntBits, IntType> Ty; static Ty getEmptyKey() { uintptr_t Val = static_cast<uintptr_t>(-1); Val <<= PointerLikeTypeTraits<Ty>::NumLowBitsAvailable; return Ty::getFromOpaqueValue(reinterpret_cast<void *>(Val)); } static Ty getTombstoneKey() { uintptr_t Val = static_cast<uintptr_t>(-2); Val <<= PointerLikeTypeTraits<PointerTy>::NumLowBitsAvailable; return Ty::getFromOpaqueValue(reinterpret_cast<void *>(Val)); } static unsigned getHashValue(Ty V) { uintptr_t IV = reinterpret_cast<uintptr_t>(V.getOpaqueValue()); return unsigned(IV) ^ unsigned(IV >> 9); } static bool isEqual(const Ty &LHS, const Ty &RHS) { return LHS == RHS; } }; // Teach SmallPtrSet that PointerIntPair is "basically a pointer". 
template<typename PointerTy, unsigned IntBits, typename IntType, typename PtrTraits> class PointerLikeTypeTraits<PointerIntPair<PointerTy, IntBits, IntType, PtrTraits> > { public: static inline void * getAsVoidPointer(const PointerIntPair<PointerTy, IntBits, IntType> &P) { return P.getOpaqueValue(); } static inline PointerIntPair<PointerTy, IntBits, IntType> getFromVoidPointer(void *P) { return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P); } static inline PointerIntPair<PointerTy, IntBits, IntType> getFromVoidPointer(const void *P) { return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P); } enum { NumLowBitsAvailable = PtrTraits::NumLowBitsAvailable - IntBits }; }; } // end namespace llvm #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/Triple.h
//===-- llvm/ADT/Triple.h - Target triple helper class ----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_TRIPLE_H #define LLVM_ADT_TRIPLE_H #include "llvm/ADT/Twine.h" // Some system headers or GCC predefined macros conflict with identifiers in // this file. Undefine them here. #undef NetBSD #undef mips #undef sparc namespace llvm { /// Triple - Helper class for working with autoconf configuration names. For /// historical reasons, we also call these 'triples' (they used to contain /// exactly three fields). /// /// Configuration names are strings in the canonical form: /// ARCHITECTURE-VENDOR-OPERATING_SYSTEM /// or /// ARCHITECTURE-VENDOR-OPERATING_SYSTEM-ENVIRONMENT /// /// This class is used for clients which want to support arbitrary /// configuration names, but also want to implement certain special /// behavior for particular configurations. This class isolates the mapping /// from the components of the configuration name to well known IDs. /// /// At its core the Triple class is designed to be a wrapper for a triple /// string; the constructor does not change or normalize the triple string. /// Clients that need to handle the non-canonical triples that users often /// specify should use the normalize method. /// /// See autoconf/config.guess for a glimpse into what configuration names /// look like in practice. 
class Triple { public: enum ArchType { UnknownArch, arm, // ARM (little endian): arm, armv.*, xscale armeb, // ARM (big endian): armeb aarch64, // AArch64 (little endian): aarch64 aarch64_be, // AArch64 (big endian): aarch64_be bpfel, // eBPF or extended BPF or 64-bit BPF (little endian) bpfeb, // eBPF or extended BPF or 64-bit BPF (big endian) hexagon, // Hexagon: hexagon mips, // MIPS: mips, mipsallegrex mipsel, // MIPSEL: mipsel, mipsallegrexel mips64, // MIPS64: mips64 mips64el, // MIPS64EL: mips64el msp430, // MSP430: msp430 ppc, // PPC: powerpc ppc64, // PPC64: powerpc64, ppu ppc64le, // PPC64LE: powerpc64le r600, // R600: AMD GPUs HD2XXX - HD6XXX amdgcn, // AMDGCN: AMD GCN GPUs sparc, // Sparc: sparc sparcv9, // Sparcv9: Sparcv9 sparcel, // Sparc: (endianness = little). NB: 'Sparcle' is a CPU variant systemz, // SystemZ: s390x tce, // TCE (http://tce.cs.tut.fi/): tce thumb, // Thumb (little endian): thumb, thumbv.* thumbeb, // Thumb (big endian): thumbeb x86, // X86: i[3-9]86 x86_64, // X86-64: amd64, x86_64 xcore, // XCore: xcore nvptx, // NVPTX: 32-bit nvptx64, // NVPTX: 64-bit le32, // le32: generic little-endian 32-bit CPU (PNaCl / Emscripten) le64, // le64: generic little-endian 64-bit CPU (PNaCl / Emscripten) amdil, // AMDIL amdil64, // AMDIL with 64-bit pointers hsail, // AMD HSAIL hsail64, // AMD HSAIL with 64-bit pointers spir, // SPIR: standard portable IR for OpenCL 32-bit version spir64, // SPIR: standard portable IR for OpenCL 64-bit version // HLSL Change Begins dxil, // DXIL: DirectX Intermediate Language 32-bit dxil64, // DXIL: DirectX Intermediate Language 64-bit // HLSL Change Ends kalimba, // Kalimba: generic kalimba shave, // SHAVE: Movidius vector VLIW processors wasm32, // WebAssembly with 32-bit pointers wasm64, // WebAssembly with 64-bit pointers LastArchType = wasm64 }; enum SubArchType { NoSubArch, ARMSubArch_v8_1a, ARMSubArch_v8, ARMSubArch_v7, ARMSubArch_v7em, ARMSubArch_v7m, ARMSubArch_v7s, ARMSubArch_v6, ARMSubArch_v6m, 
ARMSubArch_v6k, ARMSubArch_v6t2, ARMSubArch_v5, ARMSubArch_v5te, ARMSubArch_v4t, KalimbaSubArch_v3, KalimbaSubArch_v4, KalimbaSubArch_v5 }; enum VendorType { UnknownVendor, Apple, PC, SCEI, BGP, BGQ, Freescale, IBM, ImaginationTechnologies, MipsTechnologies, Microsoft, // HLSL Change NVIDIA, CSR, LastVendorType = CSR }; enum OSType { UnknownOS, CloudABI, Darwin, DragonFly, FreeBSD, IOS, KFreeBSD, Linux, Lv2, // PS3 MacOSX, NetBSD, OpenBSD, Solaris, Win32, Haiku, Minix, RTEMS, NaCl, // Native Client CNK, // BG/P Compute-Node Kernel Bitrig, AIX, CUDA, // NVIDIA CUDA NVCL, // NVIDIA OpenCL AMDHSA, // AMD HSA Runtime DirectX, // HLSL Change PS4, LastOSType = PS4 }; enum EnvironmentType { UnknownEnvironment, GNU, GNUEABI, GNUEABIHF, GNUX32, CODE16, EABI, EABIHF, Android, MSVC, Itanium, Cygnus, LastEnvironmentType = Cygnus }; enum ObjectFormatType { UnknownObjectFormat, COFF, ELF, MachO, }; private: std::string Data; /// The parsed arch type. ArchType Arch; /// The parsed subarchitecture type. SubArchType SubArch; /// The parsed vendor type. VendorType Vendor; /// The parsed OS type. OSType OS; /// The parsed Environment type. EnvironmentType Environment; /// The object format type. ObjectFormatType ObjectFormat; public: /// @name Constructors /// @{ /// \brief Default constructor is the same as an empty string and leaves all /// triple fields unknown. 
Triple() : Data(), Arch(), Vendor(), OS(), Environment(), ObjectFormat() {} explicit Triple(const Twine &Str); Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr); Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr, const Twine &EnvironmentStr); bool operator==(const Triple &Other) const { return Arch == Other.Arch && SubArch == Other.SubArch && Vendor == Other.Vendor && OS == Other.OS && Environment == Other.Environment && ObjectFormat == Other.ObjectFormat; } /// @} /// @name Normalization /// @{ /// normalize - Turn an arbitrary machine specification into the canonical /// triple form (or something sensible that the Triple class understands if /// nothing better can reasonably be done). In particular, it handles the /// common case in which otherwise valid components are in the wrong order. static std::string normalize(StringRef Str); /// \brief Return the normalized form of this triple's string. std::string normalize() const { return normalize(Data); } /// @} /// @name Typed Component Access /// @{ /// getArch - Get the parsed architecture type of this triple. ArchType getArch() const { return Arch; } /// getSubArch - get the parsed subarchitecture type for this triple. SubArchType getSubArch() const { return SubArch; } /// getVendor - Get the parsed vendor type of this triple. VendorType getVendor() const { return Vendor; } /// getOS - Get the parsed operating system type of this triple. OSType getOS() const { return OS; } /// hasEnvironment - Does this triple have the optional environment /// (fourth) component? bool hasEnvironment() const { return getEnvironmentName() != ""; } /// getEnvironment - Get the parsed environment type of this triple. EnvironmentType getEnvironment() const { return Environment; } /// \brief Parse the version number from the OS name component of the /// triple, if present. /// /// For example, "fooos1.2.3" would return (1, 2, 3). /// /// If an entry is not defined, it will be returned as 0. 
void getEnvironmentVersion(unsigned &Major, unsigned &Minor, unsigned &Micro) const; /// getFormat - Get the object format for this triple. ObjectFormatType getObjectFormat() const { return ObjectFormat; } /// getOSVersion - Parse the version number from the OS name component of the /// triple, if present. /// /// For example, "fooos1.2.3" would return (1, 2, 3). /// /// If an entry is not defined, it will be returned as 0. void getOSVersion(unsigned &Major, unsigned &Minor, unsigned &Micro) const; /// getOSMajorVersion - Return just the major version number, this is /// specialized because it is a common query. unsigned getOSMajorVersion() const { unsigned Maj, Min, Micro; getOSVersion(Maj, Min, Micro); return Maj; } /// getMacOSXVersion - Parse the version number as with getOSVersion and then /// translate generic "darwin" versions to the corresponding OS X versions. /// This may also be called with IOS triples but the OS X version number is /// just set to a constant 10.4.0 in that case. Returns true if successful. bool getMacOSXVersion(unsigned &Major, unsigned &Minor, unsigned &Micro) const; /// getiOSVersion - Parse the version number as with getOSVersion. This should /// only be called with IOS triples. void getiOSVersion(unsigned &Major, unsigned &Minor, unsigned &Micro) const; /// @} /// @name Direct Component Access /// @{ const std::string &str() const { return Data; } const std::string &getTriple() const { return Data; } /// getArchName - Get the architecture (first) component of the /// triple. StringRef getArchName() const; /// getVendorName - Get the vendor (second) component of the triple. StringRef getVendorName() const; /// getOSName - Get the operating system (third) component of the /// triple. StringRef getOSName() const; /// getEnvironmentName - Get the optional environment (fourth) /// component of the triple, or "" if empty. 
StringRef getEnvironmentName() const; /// getOSAndEnvironmentName - Get the operating system and optional /// environment components as a single string (separated by a '-' /// if the environment component is present). StringRef getOSAndEnvironmentName() const; /// @} /// @name Convenience Predicates /// @{ /// \brief Test whether the architecture is 64-bit /// /// Note that this tests for 64-bit pointer width, and nothing else. Note /// that we intentionally expose only three predicates, 64-bit, 32-bit, and /// 16-bit. The inner details of pointer width for particular architectures /// is not summed up in the triple, and so only a coarse grained predicate /// system is provided. bool isArch64Bit() const; /// \brief Test whether the architecture is 32-bit /// /// Note that this tests for 32-bit pointer width, and nothing else. bool isArch32Bit() const; /// \brief Test whether the architecture is 16-bit /// /// Note that this tests for 16-bit pointer width, and nothing else. bool isArch16Bit() const; /// isOSVersionLT - Helper function for doing comparisons against version /// numbers included in the target triple. bool isOSVersionLT(unsigned Major, unsigned Minor = 0, unsigned Micro = 0) const { unsigned LHS[3]; getOSVersion(LHS[0], LHS[1], LHS[2]); if (LHS[0] != Major) return LHS[0] < Major; if (LHS[1] != Minor) return LHS[1] < Minor; if (LHS[2] != Micro) return LHS[1] < Micro; return false; } bool isOSVersionLT(const Triple &Other) const { unsigned RHS[3]; Other.getOSVersion(RHS[0], RHS[1], RHS[2]); return isOSVersionLT(RHS[0], RHS[1], RHS[2]); } /// isMacOSXVersionLT - Comparison function for checking OS X version /// compatibility, which handles supporting skewed version numbering schemes /// used by the "darwin" triples. unsigned isMacOSXVersionLT(unsigned Major, unsigned Minor = 0, unsigned Micro = 0) const { assert(isMacOSX() && "Not an OS X triple!"); // If this is OS X, expect a sane version number. 
// NOTE(review): this fragment begins inside a member function of class
// Triple (appears to be the OS-X version comparison, isMacOSXVersionLT —
// confirm against the start of the file); the class header is above this
// chunk.
if (getOS() == Triple::MacOSX)
  return isOSVersionLT(Major, Minor, Micro);

// Otherwise, compare to the "Darwin" number.
assert(Major == 10 && "Unexpected major version");
return isOSVersionLT(Minor + 4, Micro, 0);
}

/// isMacOSX - Is this a Mac OS X triple. For legacy reasons, we support both
/// "darwin" and "osx" as OS X triples.
bool isMacOSX() const {
  return getOS() == Triple::Darwin || getOS() == Triple::MacOSX;
}

/// Is this an iOS triple.
bool isiOS() const {
  return getOS() == Triple::IOS;
}

/// isOSDarwin - Is this a "Darwin" OS (OS X or iOS).
bool isOSDarwin() const {
  return isMacOSX() || isiOS();
}

bool isOSNetBSD() const {
  return getOS() == Triple::NetBSD;
}

bool isOSOpenBSD() const {
  return getOS() == Triple::OpenBSD;
}

bool isOSFreeBSD() const {
  return getOS() == Triple::FreeBSD;
}

bool isOSDragonFly() const { return getOS() == Triple::DragonFly; }

bool isOSSolaris() const {
  return getOS() == Triple::Solaris;
}

bool isOSBitrig() const {
  return getOS() == Triple::Bitrig;
}

// Note: an unknown environment on Win32 is treated as MSVC here.
bool isWindowsMSVCEnvironment() const {
  return getOS() == Triple::Win32 &&
         (getEnvironment() == Triple::UnknownEnvironment ||
          getEnvironment() == Triple::MSVC);
}

bool isKnownWindowsMSVCEnvironment() const {
  return getOS() == Triple::Win32 && getEnvironment() == Triple::MSVC;
}

bool isWindowsItaniumEnvironment() const {
  return getOS() == Triple::Win32 && getEnvironment() == Triple::Itanium;
}

bool isWindowsCygwinEnvironment() const {
  return getOS() == Triple::Win32 && getEnvironment() == Triple::Cygnus;
}

bool isWindowsGNUEnvironment() const {
  return getOS() == Triple::Win32 && getEnvironment() == Triple::GNU;
}

/// \brief Tests for either Cygwin or MinGW OS
bool isOSCygMing() const {
  return isWindowsCygwinEnvironment() || isWindowsGNUEnvironment();
}

/// \brief Is this a "Windows" OS targeting a "MSVCRT.dll" environment.
bool isOSMSVCRT() const {
  return isWindowsMSVCEnvironment() || isWindowsGNUEnvironment() ||
         isWindowsItaniumEnvironment();
}

/// \brief Tests whether the OS is Windows.
bool isOSWindows() const {
  return getOS() == Triple::Win32;
}

/// \brief Tests whether the OS is NaCl (Native Client)
bool isOSNaCl() const {
  return getOS() == Triple::NaCl;
}

/// \brief Tests whether the OS is Linux.
bool isOSLinux() const {
  return getOS() == Triple::Linux;
}

/// \brief Tests whether the OS uses the ELF binary format.
bool isOSBinFormatELF() const {
  return getObjectFormat() == Triple::ELF;
}

/// \brief Tests whether the OS uses the COFF binary format.
bool isOSBinFormatCOFF() const {
  return getObjectFormat() == Triple::COFF;
}

/// \brief Tests whether the environment is MachO.
bool isOSBinFormatMachO() const {
  return getObjectFormat() == Triple::MachO;
}

/// \brief Tests whether the target is the PS4 CPU
bool isPS4CPU() const {
  return getArch() == Triple::x86_64 &&
         getVendor() == Triple::SCEI &&
         getOS() == Triple::PS4;
}

/// \brief Tests whether the target is the PS4 platform
bool isPS4() const {
  return getVendor() == Triple::SCEI &&
         getOS() == Triple::PS4;
}

// HLSL Change Begin - Add DXIL Triple.
bool isDXIL() const {
  return getArch() == Triple::dxil || getArch() == Triple::dxil64;
}
// HLSL Change End - Add DXIL Triple.

/// @}
/// @name Mutators
/// @{

/// setArch - Set the architecture (first) component of the triple
/// to a known type.
void setArch(ArchType Kind);

/// setVendor - Set the vendor (second) component of the triple to a
/// known type.
void setVendor(VendorType Kind);

/// setOS - Set the operating system (third) component of the triple
/// to a known type.
void setOS(OSType Kind);

/// setEnvironment - Set the environment (fourth) component of the triple
/// to a known type.
void setEnvironment(EnvironmentType Kind);

/// setObjectFormat - Set the object file format
void setObjectFormat(ObjectFormatType Kind);

/// setTriple - Set all components to the new triple \p Str.
void setTriple(const Twine &Str);

/// setArchName - Set the architecture (first) component of the
/// triple by name.
void setArchName(StringRef Str);

/// setVendorName - Set the vendor (second) component of the triple
/// by name.
void setVendorName(StringRef Str);

/// setOSName - Set the operating system (third) component of the
/// triple by name.
void setOSName(StringRef Str);

/// setEnvironmentName - Set the optional environment (fourth)
/// component of the triple by name.
void setEnvironmentName(StringRef Str);

/// setOSAndEnvironmentName - Set the operating system and optional
/// environment components with a single string.
void setOSAndEnvironmentName(StringRef Str);

/// @}
/// @name Helpers to build variants of a particular triple.
/// @{

/// \brief Form a triple with a 32-bit variant of the current architecture.
///
/// This can be used to move across "families" of architectures where useful.
///
/// \returns A new triple with a 32-bit architecture or an unknown
/// architecture if no such variant can be found.
llvm::Triple get32BitArchVariant() const;

/// \brief Form a triple with a 64-bit variant of the current architecture.
///
/// This can be used to move across "families" of architectures where useful.
///
/// \returns A new triple with a 64-bit architecture or an unknown
/// architecture if no such variant can be found.
llvm::Triple get64BitArchVariant() const;

/// Form a triple with a big endian variant of the current architecture.
///
/// This can be used to move across "families" of architectures where useful.
///
/// \returns A new triple with a big endian architecture or an unknown
/// architecture if no such variant can be found.
llvm::Triple getBigEndianArchVariant() const;

/// Form a triple with a little endian variant of the current architecture.
///
/// This can be used to move across "families" of architectures where useful.
///
/// \returns A new triple with a little endian architecture or an unknown
/// architecture if no such variant can be found.
llvm::Triple getLittleEndianArchVariant() const;

/// Get the (LLVM) name of the minimum ARM CPU for the arch we are targeting.
///
/// \param Arch the architecture name (e.g., "armv7s"). If it is an empty
/// string then the triple's arch name is used.
const char* getARMCPUForArch(StringRef Arch = StringRef()) const;

/// @}
/// @name Static helpers for IDs.
/// @{

/// getArchTypeName - Get the canonical name for the \p Kind architecture.
static const char *getArchTypeName(ArchType Kind);

/// getArchTypePrefix - Get the "prefix" canonical name for the \p Kind
/// architecture. This is the prefix used by the architecture specific
/// builtins, and is suitable for passing to \see
/// Intrinsic::getIntrinsicForGCCBuiltin().
///
/// \return - The architecture prefix, or 0 if none is defined.
static const char *getArchTypePrefix(ArchType Kind);

/// getVendorTypeName - Get the canonical name for the \p Kind vendor.
static const char *getVendorTypeName(VendorType Kind);

/// getOSTypeName - Get the canonical name for the \p Kind operating system.
static const char *getOSTypeName(OSType Kind);

/// getEnvironmentTypeName - Get the canonical name for the \p Kind
/// environment.
static const char *getEnvironmentTypeName(EnvironmentType Kind);

/// @}
/// @name Static helpers for converting alternate architecture names.
/// @{

/// getArchTypeForLLVMName - The canonical type for the given LLVM
/// architecture name (e.g., "x86").
static ArchType getArchTypeForLLVMName(StringRef Str);

/// @}
};

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/GraphTraits.h
//===-- llvm/ADT/GraphTraits.h - Graph traits template ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// GraphTraits<X> is the customization point that makes a graph type usable
// with the generic graph iterators; specialize it for any type you want to
// iterate over.  The Inverse marker class requests traversal of a graph in
// its (graph-defined) inverse ordering.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_GRAPHTRAITS_H
#define LLVM_ADT_GRAPHTRAITS_H

namespace llvm {

/// GraphTraits - Primary template; intentionally unusable.  A real
/// specialization must provide:
///
///   typedef NodeType          - the type of a node in the graph
///   typedef ChildIteratorType - iterator over a node's children
///   static NodeType *getEntryNode(const GraphType &)
///       Return the entry node of the graph.
///   static ChildIteratorType child_begin(NodeType *)
///   static ChildIteratorType child_end  (NodeType *)
///       Return the begin/end iterators for a node's children.
///
/// and, to allow iteration over the whole graph:
///
///   typedef ...iterator nodes_iterator;
///   static nodes_iterator nodes_begin(GraphType *G)
///   static nodes_iterator nodes_end  (GraphType *G)
///   static unsigned       size       (GraphType *G)
///
/// Instantiating this primary template is a (deliberate) compile error: it
/// means the appropriate GraphTraits<> specialization for your graph has not
/// been included or written, or the argument passed to XXX_begin(...) is of
/// an unknown type.
template <class GraphTy> struct GraphTraits {
  using NodeType = typename GraphTy::UnknownGraphTypeError;
};

/// Inverse - Marker wrapper telling the graph iterators to walk the graph in
/// its graph-defined "Inverse" ordering (not every graph defines one).  With
/// the df_iterator, for example:
///
///   idf_iterator<Method*> I = idf_begin(M), E = idf_end(M);
///   for (; I != E; ++I) { ... }
///
/// is equivalent to:
///
///   df_iterator<Inverse<Method*> > I = idf_begin(M), E = idf_end(M);
///   for (; I != E; ++I) { ... }
template <class GraphTy> struct Inverse {
  const GraphTy &Graph;

  Inverse(const GraphTy &G) : Graph(G) {}
};

// The inverse of an inverse is the original graph: forward every operation
// back to the plain GraphTraits<T>.
template <class T> struct GraphTraits<Inverse<Inverse<T> > > {
  using NodeType = typename GraphTraits<T>::NodeType;
  using ChildIteratorType = typename GraphTraits<T>::ChildIteratorType;

  static NodeType *getEntryNode(Inverse<Inverse<T> > *G) {
    return GraphTraits<T>::getEntryNode(G->Graph.Graph);
  }

  static ChildIteratorType child_begin(NodeType *N) {
    return GraphTraits<T>::child_begin(N);
  }

  static ChildIteratorType child_end(NodeType *N) {
    return GraphTraits<T>::child_end(N);
  }
};

} // End llvm namespace

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/SparseSet.h
//===--- llvm/ADT/SparseSet.h - Sparse set ----------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the SparseSet class derived from the version described in // Briggs, Torczon, "An efficient representation for sparse sets", ACM Letters // on Programming Languages and Systems, Volume 2 Issue 1-4, March-Dec. 1993. // // A sparse set holds a small number of objects identified by integer keys from // a moderately sized universe. The sparse set uses more memory than other // containers in order to provide faster operations. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_SPARSESET_H #define LLVM_ADT_SPARSESET_H #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/DataTypes.h" #include <limits> namespace llvm { /// SparseSetValTraits - Objects in a SparseSet are identified by keys that can /// be uniquely converted to a small integer less than the set's universe. This /// class allows the set to hold values that differ from the set's key type as /// long as an index can still be derived from the value. SparseSet never /// directly compares ValueT, only their indices, so it can map keys to /// arbitrary values. SparseSetValTraits computes the index from the value /// object. To compute the index from a key, SparseSet uses a separate /// KeyFunctorT template argument. 
/// /// A simple type declaration, SparseSet<Type>, handles these cases: /// - unsigned key, identity index, identity value /// - unsigned key, identity index, fat value providing getSparseSetIndex() /// /// The type declaration SparseSet<Type, UnaryFunction> handles: /// - unsigned key, remapped index, identity value (virtual registers) /// - pointer key, pointer-derived index, identity value (node+ID) /// - pointer key, pointer-derived index, fat value with getSparseSetIndex() /// /// Only other, unexpected cases require specializing SparseSetValTraits. /// /// For best results, ValueT should not require a destructor. /// template<typename ValueT> struct SparseSetValTraits { static unsigned getValIndex(const ValueT &Val) { return Val.getSparseSetIndex(); } }; /// SparseSetValFunctor - Helper class for selecting SparseSetValTraits. The /// generic implementation handles ValueT classes which either provide /// getSparseSetIndex() or specialize SparseSetValTraits<>. /// template<typename KeyT, typename ValueT, typename KeyFunctorT> struct SparseSetValFunctor { unsigned operator()(const ValueT &Val) const { return SparseSetValTraits<ValueT>::getValIndex(Val); } }; /// SparseSetValFunctor<KeyT, KeyT> - Helper class for the common case of /// identity key/value sets. template<typename KeyT, typename KeyFunctorT> struct SparseSetValFunctor<KeyT, KeyT, KeyFunctorT> { unsigned operator()(const KeyT &Key) const { return KeyFunctorT()(Key); } }; /// SparseSet - Fast set implmentation for objects that can be identified by /// small unsigned keys. /// /// SparseSet allocates memory proportional to the size of the key universe, so /// it is not recommended for building composite data structures. It is useful /// for algorithms that require a single set with fast operations. /// /// Compared to DenseSet and DenseMap, SparseSet provides constant-time fast /// clear() and iteration as fast as a vector. 
The find(), insert(), and /// erase() operations are all constant time, and typically faster than a hash /// table. The iteration order doesn't depend on numerical key values, it only /// depends on the order of insert() and erase() operations. When no elements /// have been erased, the iteration order is the insertion order. /// /// Compared to BitVector, SparseSet<unsigned> uses 8x-40x more memory, but /// offers constant-time clear() and size() operations as well as fast /// iteration independent on the size of the universe. /// /// SparseSet contains a dense vector holding all the objects and a sparse /// array holding indexes into the dense vector. Most of the memory is used by /// the sparse array which is the size of the key universe. The SparseT /// template parameter provides a space/speed tradeoff for sets holding many /// elements. /// /// When SparseT is uint32_t, find() only touches 2 cache lines, but the sparse /// array uses 4 x Universe bytes. /// /// When SparseT is uint8_t (the default), find() touches up to 2+[N/256] cache /// lines, but the sparse array is 4x smaller. N is the number of elements in /// the set. /// /// For sets that may grow to thousands of elements, SparseT should be set to /// uint16_t or uint32_t. /// /// @tparam ValueT The type of objects in the set. /// @tparam KeyFunctorT A functor that computes an unsigned index from KeyT. /// @tparam SparseT An unsigned integer type. See above. 
/// template<typename ValueT, typename KeyFunctorT = llvm::identity<unsigned>, typename SparseT = uint8_t> class SparseSet { static_assert(std::numeric_limits<SparseT>::is_integer && !std::numeric_limits<SparseT>::is_signed, "SparseT must be an unsigned integer type"); typedef typename KeyFunctorT::argument_type KeyT; typedef SmallVector<ValueT, 8> DenseT; typedef unsigned size_type; DenseT Dense; SparseT *Sparse; unsigned Universe; KeyFunctorT KeyIndexOf; SparseSetValFunctor<KeyT, ValueT, KeyFunctorT> ValIndexOf; // Disable copy construction and assignment. // This data structure is not meant to be used that way. SparseSet(const SparseSet&) = delete; SparseSet &operator=(const SparseSet&) = delete; public: typedef ValueT value_type; typedef ValueT &reference; typedef const ValueT &const_reference; typedef ValueT *pointer; typedef const ValueT *const_pointer; SparseSet() : Sparse(nullptr), Universe(0) {} ~SparseSet() { delete[] Sparse; } // HLSL Change Begin: Use overridable operator delete /// setUniverse - Set the universe size which determines the largest key the /// set can hold. The universe must be sized before any elements can be /// added. /// /// @param U Universe size. All object keys must be less than U. /// void setUniverse(unsigned U) { // It's not hard to resize the universe on a non-empty set, but it doesn't // seem like a likely use case, so we can add that code when we need it. assert(empty() && "Can only resize universe on an empty map"); // Hysteresis prevents needless reallocations. if (U >= Universe/4 && U <= Universe) return; // HLSL Change Begin: Use overridable operator new/delete delete[] Sparse; // The Sparse array doesn't actually need to be initialized, so malloc // would be enough here, but that will cause tools like valgrind to // complain about branching on uninitialized data. Sparse = new SparseT[U]; std::memset(Sparse, 0, U * sizeof(SparseT)); // HLSL Change End Universe = U; } // Import trivial vector stuff from DenseT. 
typedef typename DenseT::iterator iterator; typedef typename DenseT::const_iterator const_iterator; const_iterator begin() const { return Dense.begin(); } const_iterator end() const { return Dense.end(); } iterator begin() { return Dense.begin(); } iterator end() { return Dense.end(); } /// empty - Returns true if the set is empty. /// /// This is not the same as BitVector::empty(). /// bool empty() const { return Dense.empty(); } /// size - Returns the number of elements in the set. /// /// This is not the same as BitVector::size() which returns the size of the /// universe. /// size_type size() const { return Dense.size(); } /// clear - Clears the set. This is a very fast constant time operation. /// void clear() { // Sparse does not need to be cleared, see find(). Dense.clear(); } /// findIndex - Find an element by its index. /// /// @param Idx A valid index to find. /// @returns An iterator to the element identified by key, or end(). /// iterator findIndex(unsigned Idx) { assert(Idx < Universe && "Key out of range"); const unsigned Stride = std::numeric_limits<SparseT>::max() + 1u; for (unsigned i = Sparse[Idx], e = size(); i < e; i += Stride) { const unsigned FoundIdx = ValIndexOf(Dense[i]); assert(FoundIdx < Universe && "Invalid key in set. Did object mutate?"); if (Idx == FoundIdx) return begin() + i; // Stride is 0 when SparseT >= unsigned. We don't need to loop. if (!Stride) break; } return end(); } /// find - Find an element by its key. /// /// @param Key A valid key to find. /// @returns An iterator to the element identified by key, or end(). /// iterator find(const KeyT &Key) { return findIndex(KeyIndexOf(Key)); } const_iterator find(const KeyT &Key) const { return const_cast<SparseSet*>(this)->findIndex(KeyIndexOf(Key)); } /// count - Returns 1 if this set contains an element identified by Key, /// 0 otherwise. /// size_type count(const KeyT &Key) const { return find(Key) == end() ? 0 : 1; } /// insert - Attempts to insert a new element. 
/// /// If Val is successfully inserted, return (I, true), where I is an iterator /// pointing to the newly inserted element. /// /// If the set already contains an element with the same key as Val, return /// (I, false), where I is an iterator pointing to the existing element. /// /// Insertion invalidates all iterators. /// std::pair<iterator, bool> insert(const ValueT &Val) { unsigned Idx = ValIndexOf(Val); iterator I = findIndex(Idx); if (I != end()) return std::make_pair(I, false); Sparse[Idx] = size(); Dense.push_back(Val); return std::make_pair(end() - 1, true); } /// array subscript - If an element already exists with this key, return it. /// Otherwise, automatically construct a new value from Key, insert it, /// and return the newly inserted element. ValueT &operator[](const KeyT &Key) { return *insert(ValueT(Key)).first; } /// erase - Erases an existing element identified by a valid iterator. /// /// This invalidates all iterators, but erase() returns an iterator pointing /// to the next element. This makes it possible to erase selected elements /// while iterating over the set: /// /// for (SparseSet::iterator I = Set.begin(); I != Set.end();) /// if (test(*I)) /// I = Set.erase(I); /// else /// ++I; /// /// Note that end() changes when elements are erased, unlike std::list. /// iterator erase(iterator I) { assert(unsigned(I - begin()) < size() && "Invalid iterator"); if (I != end() - 1) { *I = Dense.back(); unsigned BackIdx = ValIndexOf(Dense.back()); assert(BackIdx < Universe && "Invalid key in set. Did object mutate?"); Sparse[BackIdx] = I - begin(); } // This depends on SmallVector::pop_back() not invalidating iterators. // std::vector::pop_back() doesn't give that guarantee. Dense.pop_back(); return I; } /// erase - Erases an element identified by Key, if it exists. /// /// @param Key The key identifying the element to erase. /// @returns True when an element was erased, false if no element was found. 
/// bool erase(const KeyT &Key) { iterator I = find(Key); if (I == end()) return false; erase(I); return true; } }; } // end namespace llvm #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/IntrusiveRefCntPtr.h
//== llvm/ADT/IntrusiveRefCntPtr.h - Smart Refcounting Pointer ---*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines IntrusiveRefCntPtr, a template class that
// implements a "smart" pointer for objects that maintain their own
// internal reference count, and RefCountedBase/RefCountedBaseVPTR, two
// generic base classes for objects that wish to have their lifetimes
// managed using reference counting.
//
// IntrusiveRefCntPtr is similar to Boost's intrusive_ptr with added
// LLVM-style casting.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_INTRUSIVEREFCNTPTR_H
#define LLVM_ADT_INTRUSIVEREFCNTPTR_H

#include <atomic>
#include <cassert>
#include <cstddef>

namespace llvm {

  template <class T>
  class IntrusiveRefCntPtr;

//===----------------------------------------------------------------------===//
/// RefCountedBase - A generic base class for objects that wish to
/// have their lifetimes managed using reference counts. Classes
/// subclass RefCountedBase to obtain such functionality, and are
/// typically handled with IntrusiveRefCntPtr "smart pointers" (see below)
/// which automatically handle the management of reference counts.
/// Objects that subclass RefCountedBase should not be allocated on
/// the stack, as invoking "delete" (which is called when the
/// reference count hits 0) on such objects is an error.
//===----------------------------------------------------------------------===//
  template <class Derived>
  class RefCountedBase {
    mutable unsigned ref_cnt;

  public:
    RefCountedBase() : ref_cnt(0) {}
    // Copies start with their own fresh count of zero.
    RefCountedBase(const RefCountedBase &) : ref_cnt(0) {}

    void Retain() const { ++ref_cnt; }
    void Release() const {
      assert (ref_cnt > 0 && "Reference count is already zero.");
      if (--ref_cnt == 0) delete static_cast<const Derived*>(this);
    }
  };

//===----------------------------------------------------------------------===//
/// RefCountedBaseVPTR - A class that has the same function as
/// RefCountedBase, but with a virtual destructor. Should be used
/// instead of RefCountedBase for classes that already have virtual
/// methods to enforce dynamic allocation via 'new'. Classes that
/// inherit from RefCountedBaseVPTR can't be allocated on stack -
/// attempting to do this will produce a compile error.
//===----------------------------------------------------------------------===//
  class RefCountedBaseVPTR {
    mutable unsigned ref_cnt;
    virtual void anchor();

  protected:
    RefCountedBaseVPTR() : ref_cnt(0) {}
    RefCountedBaseVPTR(const RefCountedBaseVPTR &) : ref_cnt(0) {}

    virtual ~RefCountedBaseVPTR() {}

    void Retain() const { ++ref_cnt; }
    void Release() const {
      assert (ref_cnt > 0 && "Reference count is already zero.");
      if (--ref_cnt == 0) delete this;
    }

    template <typename T>
    friend struct IntrusiveRefCntPtrInfo;
  };

  template <typename T> struct IntrusiveRefCntPtrInfo {
    static void retain(T *obj) { obj->Retain(); }
    static void release(T *obj) { obj->Release(); }
  };

/// \brief A thread-safe version of \c llvm::RefCountedBase.
///
/// A generic base class for objects that wish to have their lifetimes managed
/// using reference counts. Classes subclass \c ThreadSafeRefCountedBase to
/// obtain such functionality, and are typically handled with
/// \c IntrusiveRefCntPtr "smart pointers" which automatically handle the
/// management of reference counts.
template <class Derived>
class ThreadSafeRefCountedBase {
  mutable std::atomic<int> RefCount;

protected:
  ThreadSafeRefCountedBase() : RefCount(0) {}

public:
  void Retain() const { ++RefCount; }

  void Release() const {
    int NewRefCount = --RefCount;
    assert(NewRefCount >= 0 && "Reference count was already zero.");
    if (NewRefCount == 0)
      delete static_cast<const Derived*>(this);
  }
};

//===----------------------------------------------------------------------===//
/// IntrusiveRefCntPtr - A template class that implements a "smart pointer"
/// that assumes the wrapped object has a reference count associated
/// with it that can be managed via calls to
/// IntrusivePtrAddRef/IntrusivePtrRelease.  The smart pointers
/// manage reference counts via the RAII idiom: upon creation of
/// smart pointer the reference count of the wrapped object is
/// incremented and upon destruction of the smart pointer the
/// reference count is decremented.  This class also safely handles
/// wrapping NULL pointers.
///
/// Reference counting is implemented via calls to
/// Obj->Retain()/Obj->Release(). Release() is required to destroy
/// the object when the reference count reaches zero. Inheriting from
/// RefCountedBase/RefCountedBaseVPTR takes care of this
/// automatically.
//===----------------------------------------------------------------------===//
  template <typename T>
  class IntrusiveRefCntPtr {
    T* Obj;

  public:
    typedef T element_type;

    explicit IntrusiveRefCntPtr() : Obj(nullptr) {}

    IntrusiveRefCntPtr(T* obj) : Obj(obj) {
      retain();
    }

    IntrusiveRefCntPtr(const IntrusiveRefCntPtr& S) : Obj(S.Obj) {
      retain();
    }

    IntrusiveRefCntPtr(IntrusiveRefCntPtr&& S) : Obj(S.Obj) {
      S.Obj = nullptr;
    }

    template <class X>
    IntrusiveRefCntPtr(IntrusiveRefCntPtr<X>&& S) : Obj(S.get()) {
      S.Obj = nullptr; // was `0`; use nullptr consistently with the rest of the class
    }

    template <class X>
    IntrusiveRefCntPtr(const IntrusiveRefCntPtr<X>& S)
      : Obj(S.get()) {
      retain();
    }

    // Copy-and-swap assignment covers copy, move, and raw-pointer sources.
    IntrusiveRefCntPtr& operator=(IntrusiveRefCntPtr S) {
      swap(S);
      return *this;
    }

    ~IntrusiveRefCntPtr() { release(); }

    T& operator*() const { return *Obj; }

    T* operator->() const { return Obj; }

    T* get() const { return Obj; }

    explicit operator bool() const { return Obj != nullptr; } // HLSL Change

    void swap(IntrusiveRefCntPtr& other) {
      T* tmp = other.Obj;
      other.Obj = Obj;
      Obj = tmp;
    }

    void reset() {
      release();
      Obj = nullptr;
    }

    // Drop the pointer without decrementing the count; the caller takes over
    // the reference this smart pointer held.
    void resetWithoutRelease() {
      Obj = nullptr; // was `0`; use nullptr consistently
    }

  private:
    void retain() { if (Obj) IntrusiveRefCntPtrInfo<T>::retain(Obj); }
    void release() { if (Obj) IntrusiveRefCntPtrInfo<T>::release(Obj); }

    template <typename X>
    friend class IntrusiveRefCntPtr;
  };

  template<class T, class U>
  inline bool operator==(const IntrusiveRefCntPtr<T>& A,
                         const IntrusiveRefCntPtr<U>& B)
  {
    return A.get() == B.get();
  }

  template<class T, class U>
  inline bool operator!=(const IntrusiveRefCntPtr<T>& A,
                         const IntrusiveRefCntPtr<U>& B)
  {
    return A.get() != B.get();
  }

  template<class T, class U>
  inline bool operator==(const IntrusiveRefCntPtr<T>& A,
                         U* B)
  {
    return A.get() == B;
  }

  template<class T, class U>
  inline bool operator!=(const IntrusiveRefCntPtr<T>& A,
                         U* B)
  {
    return A.get() != B;
  }

  template<class T, class U>
  inline bool operator==(T* A,
                         const IntrusiveRefCntPtr<U>& B)
  {
    return A == B.get();
  }

  template<class T, class U>
  inline bool operator!=(T* A,
                         const IntrusiveRefCntPtr<U>& B)
  {
    return A != B.get();
  }

  template <class T>
  bool operator==(std::nullptr_t A, const IntrusiveRefCntPtr<T> &B) {
    return !B;
  }

  template <class T>
  bool operator==(const IntrusiveRefCntPtr<T> &A, std::nullptr_t B) {
    return B == A;
  }

  template <class T>
  bool operator!=(std::nullptr_t A, const IntrusiveRefCntPtr<T> &B) {
    return !(A == B);
  }

  template <class T>
  bool operator!=(const IntrusiveRefCntPtr<T> &A, std::nullptr_t B) {
    return !(A == B);
  }

//===----------------------------------------------------------------------===//
// LLVM-style downcasting support for IntrusiveRefCntPtr objects
//===----------------------------------------------------------------------===//

  template <typename From> struct simplify_type;

  template<class T> struct simplify_type<IntrusiveRefCntPtr<T> > {
    typedef T* SimpleType;
    static SimpleType getSimplifiedValue(IntrusiveRefCntPtr<T>& Val) {
      return Val.get();
    }
  };

  template<class T> struct simplify_type<const IntrusiveRefCntPtr<T> > {
    typedef /*const*/ T* SimpleType;
    static SimpleType getSimplifiedValue(const IntrusiveRefCntPtr<T>& Val) {
      return Val.get();
    }
  };

} // end namespace llvm

#endif // LLVM_ADT_INTRUSIVEREFCNTPTR_H
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/BitVector.h
//===- llvm/ADT/BitVector.h - Bit vectors -----------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the BitVector class. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_BITVECTOR_H #define LLVM_ADT_BITVECTOR_H #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" #include <algorithm> #include <cassert> #include <climits> #include <cstdlib> namespace llvm { class BitVector { typedef unsigned long BitWord; enum { BITWORD_SIZE = (unsigned)sizeof(BitWord) * CHAR_BIT }; static_assert(BITWORD_SIZE == 64 || BITWORD_SIZE == 32, "Unsupported word size"); BitWord *Bits; // Actual bits. unsigned Size; // Size of bitvector in bits. unsigned Capacity; // Size of allocated memory in BitWord. public: typedef unsigned size_type; // Encapsulation of a single bit. class reference { friend class BitVector; BitWord *WordRef; unsigned BitPos; reference(); // Undefined public: reference(BitVector &b, unsigned Idx) { WordRef = &b.Bits[Idx / BITWORD_SIZE]; BitPos = Idx % BITWORD_SIZE; } reference(const reference&) = default; reference &operator=(reference t) { *this = bool(t); return *this; } reference& operator=(bool t) { if (t) *WordRef |= BitWord(1) << BitPos; else *WordRef &= ~(BitWord(1) << BitPos); return *this; } operator bool() const { return ((*WordRef) & (BitWord(1) << BitPos)) ? true : false; } }; /// BitVector default ctor - Creates an empty bitvector. BitVector() : Size(0), Capacity(0) { Bits = nullptr; } /// BitVector ctor - Creates a bitvector of specified number of bits. All /// bits are initialized to the specified value. 
explicit BitVector(unsigned s, bool t = false) : Size(s) { Capacity = NumBitWords(s); Bits = new BitWord[Capacity]; // HLSL Change: Use overridable operator new init_words(Bits, Capacity, t); if (t) clear_unused_bits(); } /// BitVector copy ctor. BitVector(const BitVector &RHS) : Size(RHS.size()) { if (Size == 0) { Bits = nullptr; Capacity = 0; return; } Capacity = NumBitWords(RHS.size()); Bits = new BitWord[Capacity]; // HLSL Change: Use overridable operator new std::memcpy(Bits, RHS.Bits, Capacity * sizeof(BitWord)); } BitVector(BitVector &&RHS) : Bits(RHS.Bits), Size(RHS.Size), Capacity(RHS.Capacity) { RHS.Bits = nullptr; } ~BitVector() { delete[] Bits; // HLSL Change: Use overridable operator new } /// empty - Tests whether there are no bits in this bitvector. bool empty() const { return Size == 0; } /// size - Returns the number of bits in this bitvector. size_type size() const { return Size; } /// count - Returns the number of bits which are set. size_type count() const { unsigned NumBits = 0; for (unsigned i = 0; i < NumBitWords(size()); ++i) NumBits += countPopulation(Bits[i]); return NumBits; } /// any - Returns true if any bit is set. bool any() const { for (unsigned i = 0; i < NumBitWords(size()); ++i) if (Bits[i] != 0) return true; return false; } /// all - Returns true if all bits are set. bool all() const { for (unsigned i = 0; i < Size / BITWORD_SIZE; ++i) if (Bits[i] != ~0UL) return false; // If bits remain check that they are ones. The unused bits are always zero. if (unsigned Remainder = Size % BITWORD_SIZE) return Bits[Size / BITWORD_SIZE] == (1UL << Remainder) - 1; return true; } /// none - Returns true if none of the bits are set. bool none() const { return !any(); } /// find_first - Returns the index of the first set bit, -1 if none /// of the bits are set. 
int find_first() const { for (unsigned i = 0; i < NumBitWords(size()); ++i) if (Bits[i] != 0) return i * BITWORD_SIZE + countTrailingZeros(Bits[i]); return -1; } /// find_next - Returns the index of the next set bit following the /// "Prev" bit. Returns -1 if the next set bit is not found. int find_next(unsigned Prev) const { ++Prev; if (Prev >= Size) return -1; unsigned WordPos = Prev / BITWORD_SIZE; unsigned BitPos = Prev % BITWORD_SIZE; BitWord Copy = Bits[WordPos]; // Mask off previous bits. Copy &= ~0UL << BitPos; if (Copy != 0) return WordPos * BITWORD_SIZE + countTrailingZeros(Copy); // Check subsequent words. for (unsigned i = WordPos+1; i < NumBitWords(size()); ++i) if (Bits[i] != 0) return i * BITWORD_SIZE + countTrailingZeros(Bits[i]); return -1; } /// clear - Clear all bits. void clear() { Size = 0; } /// resize - Grow or shrink the bitvector. void resize(unsigned N, bool t = false) { if (N > Capacity * BITWORD_SIZE) { unsigned OldCapacity = Capacity; grow(N); init_words(&Bits[OldCapacity], (Capacity-OldCapacity), t); } // Set any old unused bits that are now included in the BitVector. This // may set bits that are not included in the new vector, but we will clear // them back out below. 
if (N > Size) set_unused_bits(t); // Update the size, and clear out any bits that are now unused unsigned OldSize = Size; Size = N; if (t || N < OldSize) clear_unused_bits(); } void reserve(unsigned N) { if (N > Capacity * BITWORD_SIZE) grow(N); } // Set, reset, flip BitVector &set() { init_words(Bits, Capacity, true); clear_unused_bits(); return *this; } BitVector &set(unsigned Idx) { assert(Bits && "Bits never allocated"); Bits[Idx / BITWORD_SIZE] |= BitWord(1) << (Idx % BITWORD_SIZE); return *this; } /// set - Efficiently set a range of bits in [I, E) BitVector &set(unsigned I, unsigned E) { assert(I <= E && "Attempted to set backwards range!"); assert(E <= size() && "Attempted to set out-of-bounds range!"); if (I == E) return *this; if (I / BITWORD_SIZE == E / BITWORD_SIZE) { BitWord EMask = 1UL << (E % BITWORD_SIZE); BitWord IMask = 1UL << (I % BITWORD_SIZE); BitWord Mask = EMask - IMask; Bits[I / BITWORD_SIZE] |= Mask; return *this; } BitWord PrefixMask = ~0UL << (I % BITWORD_SIZE); Bits[I / BITWORD_SIZE] |= PrefixMask; I = RoundUpToAlignment(I, BITWORD_SIZE); for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE) Bits[I / BITWORD_SIZE] = ~0UL; BitWord PostfixMask = (1UL << (E % BITWORD_SIZE)) - 1; if (I < E) Bits[I / BITWORD_SIZE] |= PostfixMask; return *this; } BitVector &reset() { init_words(Bits, Capacity, false); return *this; } BitVector &reset(unsigned Idx) { Bits[Idx / BITWORD_SIZE] &= ~(BitWord(1) << (Idx % BITWORD_SIZE)); return *this; } /// reset - Efficiently reset a range of bits in [I, E) BitVector &reset(unsigned I, unsigned E) { assert(I <= E && "Attempted to reset backwards range!"); assert(E <= size() && "Attempted to reset out-of-bounds range!"); if (I == E) return *this; if (I / BITWORD_SIZE == E / BITWORD_SIZE) { BitWord EMask = 1UL << (E % BITWORD_SIZE); BitWord IMask = 1UL << (I % BITWORD_SIZE); BitWord Mask = EMask - IMask; Bits[I / BITWORD_SIZE] &= ~Mask; return *this; } BitWord PrefixMask = ~0UL << (I % BITWORD_SIZE); Bits[I / 
BITWORD_SIZE] &= ~PrefixMask; I = RoundUpToAlignment(I, BITWORD_SIZE); for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE) Bits[I / BITWORD_SIZE] = 0UL; BitWord PostfixMask = (1UL << (E % BITWORD_SIZE)) - 1; if (I < E) Bits[I / BITWORD_SIZE] &= ~PostfixMask; return *this; } BitVector &flip() { for (unsigned i = 0; i < NumBitWords(size()); ++i) Bits[i] = ~Bits[i]; clear_unused_bits(); return *this; } BitVector &flip(unsigned Idx) { Bits[Idx / BITWORD_SIZE] ^= BitWord(1) << (Idx % BITWORD_SIZE); return *this; } // Indexing. reference operator[](unsigned Idx) { assert (Idx < Size && "Out-of-bounds Bit access."); return reference(*this, Idx); } bool operator[](unsigned Idx) const { assert (Idx < Size && "Out-of-bounds Bit access."); BitWord Mask = BitWord(1) << (Idx % BITWORD_SIZE); return (Bits[Idx / BITWORD_SIZE] & Mask) != 0; } bool test(unsigned Idx) const { return (*this)[Idx]; } /// Test if any common bits are set. bool anyCommon(const BitVector &RHS) const { unsigned ThisWords = NumBitWords(size()); unsigned RHSWords = NumBitWords(RHS.size()); for (unsigned i = 0, e = std::min(ThisWords, RHSWords); i != e; ++i) if (Bits[i] & RHS.Bits[i]) return true; return false; } // Comparison operators. bool operator==(const BitVector &RHS) const { unsigned ThisWords = NumBitWords(size()); unsigned RHSWords = NumBitWords(RHS.size()); unsigned i; for (i = 0; i != std::min(ThisWords, RHSWords); ++i) if (Bits[i] != RHS.Bits[i]) return false; // Verify that any extra words are all zeros. if (i != ThisWords) { for (; i != ThisWords; ++i) if (Bits[i]) return false; } else if (i != RHSWords) { for (; i != RHSWords; ++i) if (RHS.Bits[i]) return false; } return true; } bool operator!=(const BitVector &RHS) const { return !(*this == RHS); } /// Intersection, union, disjoint union. 
BitVector &operator&=(const BitVector &RHS) { unsigned ThisWords = NumBitWords(size()); unsigned RHSWords = NumBitWords(RHS.size()); unsigned i; for (i = 0; i != std::min(ThisWords, RHSWords); ++i) Bits[i] &= RHS.Bits[i]; // Any bits that are just in this bitvector become zero, because they aren't // in the RHS bit vector. Any words only in RHS are ignored because they // are already zero in the LHS. for (; i != ThisWords; ++i) Bits[i] = 0; return *this; } /// reset - Reset bits that are set in RHS. Same as *this &= ~RHS. BitVector &reset(const BitVector &RHS) { unsigned ThisWords = NumBitWords(size()); unsigned RHSWords = NumBitWords(RHS.size()); unsigned i; for (i = 0; i != std::min(ThisWords, RHSWords); ++i) Bits[i] &= ~RHS.Bits[i]; return *this; } /// test - Check if (This - RHS) is zero. /// This is the same as reset(RHS) and any(). bool test(const BitVector &RHS) const { unsigned ThisWords = NumBitWords(size()); unsigned RHSWords = NumBitWords(RHS.size()); unsigned i; for (i = 0; i != std::min(ThisWords, RHSWords); ++i) if ((Bits[i] & ~RHS.Bits[i]) != 0) return true; for (; i != ThisWords ; ++i) if (Bits[i] != 0) return true; return false; } BitVector &operator|=(const BitVector &RHS) { if (size() < RHS.size()) resize(RHS.size()); for (size_t i = 0, e = NumBitWords(RHS.size()); i != e; ++i) Bits[i] |= RHS.Bits[i]; return *this; } BitVector &operator^=(const BitVector &RHS) { if (size() < RHS.size()) resize(RHS.size()); for (size_t i = 0, e = NumBitWords(RHS.size()); i != e; ++i) Bits[i] ^= RHS.Bits[i]; return *this; } // Assignment operator. const BitVector &operator=(const BitVector &RHS) { if (this == &RHS) return *this; Size = RHS.size(); unsigned RHSWords = NumBitWords(Size); if (Size <= Capacity * BITWORD_SIZE) { if (Size) std::memcpy(Bits, RHS.Bits, RHSWords * sizeof(BitWord)); clear_unused_bits(); return *this; } // Grow the bitvector to have enough elements. 
Capacity = RHSWords; assert(Capacity > 0 && "negative capacity?"); BitWord *NewBits = new BitWord[Capacity]; // HLSL Change: Use overridable operator new std::memcpy(NewBits, RHS.Bits, Capacity * sizeof(BitWord)); // Destroy the old bits. delete[] Bits; // HLSL Change: Use overridable operator delete Bits = NewBits; return *this; } const BitVector &operator=(BitVector &&RHS) { if (this == &RHS) return *this; delete[] Bits; // HLSL Change: Use overridable operator delete Bits = RHS.Bits; Size = RHS.Size; Capacity = RHS.Capacity; RHS.Bits = nullptr; return *this; } void swap(BitVector &RHS) { std::swap(Bits, RHS.Bits); std::swap(Size, RHS.Size); std::swap(Capacity, RHS.Capacity); } //===--------------------------------------------------------------------===// // Portable bit mask operations. //===--------------------------------------------------------------------===// // // These methods all operate on arrays of uint32_t, each holding 32 bits. The // fixed word size makes it easier to work with literal bit vector constants // in portable code. // // The LSB in each word is the lowest numbered bit. The size of a portable // bit mask is always a whole multiple of 32 bits. If no bit mask size is // given, the bit mask is assumed to cover the entire BitVector. /// setBitsInMask - Add '1' bits from Mask to this vector. Don't resize. /// This computes "*this |= Mask". void setBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) { applyMask<true, false>(Mask, MaskWords); } /// clearBitsInMask - Clear any bits in this vector that are set in Mask. /// Don't resize. This computes "*this &= ~Mask". void clearBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) { applyMask<false, false>(Mask, MaskWords); } /// setBitsNotInMask - Add a bit to this vector for every '0' bit in Mask. /// Don't resize. This computes "*this |= ~Mask". 
void setBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) { applyMask<true, true>(Mask, MaskWords); } /// clearBitsNotInMask - Clear a bit in this vector for every '0' bit in Mask. /// Don't resize. This computes "*this &= Mask". void clearBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) { applyMask<false, true>(Mask, MaskWords); } private: unsigned NumBitWords(unsigned S) const { return (S + BITWORD_SIZE-1) / BITWORD_SIZE; } // Set the unused bits in the high words. void set_unused_bits(bool t = true) { // Set high words first. unsigned UsedWords = NumBitWords(Size); if (Capacity > UsedWords) init_words(&Bits[UsedWords], (Capacity-UsedWords), t); // Then set any stray high bits of the last used word. unsigned ExtraBits = Size % BITWORD_SIZE; if (ExtraBits) { BitWord ExtraBitMask = ~0UL << ExtraBits; if (t) Bits[UsedWords-1] |= ExtraBitMask; else Bits[UsedWords-1] &= ~ExtraBitMask; } } // Clear the unused bits in the high words. void clear_unused_bits() { set_unused_bits(false); } void grow(unsigned NewSize) { Capacity = std::max(NumBitWords(NewSize), Capacity * 2); assert(Capacity > 0 && "realloc-ing zero space"); // HLSL Change Starts: Use overridable operator new // Bits = (BitWord *)std::realloc(Bits, Capacity * sizeof(BitWord)); BitWord *newBits = new BitWord[Capacity]; if (Bits != nullptr) { std::memcpy(newBits, Bits, NumBitWords(Size) * sizeof(BitWord)); delete[] Bits; } Bits = newBits; // HLSL Change Ends clear_unused_bits(); } void init_words(BitWord *B, unsigned NumWords, bool t) { memset(B, 0 - (int)t, NumWords*sizeof(BitWord)); } template<bool AddBits, bool InvertMask> void applyMask(const uint32_t *Mask, unsigned MaskWords) { static_assert(BITWORD_SIZE % 32 == 0, "Unsupported BitWord size."); MaskWords = std::min(MaskWords, (size() + 31) / 32); const unsigned Scale = BITWORD_SIZE / 32; unsigned i; for (i = 0; MaskWords >= Scale; ++i, MaskWords -= Scale) { BitWord BW = Bits[i]; // This inner loop should unroll completely when 
BITWORD_SIZE > 32. for (unsigned b = 0; b != BITWORD_SIZE; b += 32) { uint32_t M = *Mask++; if (InvertMask) M = ~M; if (AddBits) BW |= BitWord(M) << b; else BW &= ~(BitWord(M) << b); } Bits[i] = BW; } for (unsigned b = 0; MaskWords; b += 32, --MaskWords) { uint32_t M = *Mask++; if (InvertMask) M = ~M; if (AddBits) Bits[i] |= BitWord(M) << b; else Bits[i] &= ~(BitWord(M) << b); } if (AddBits) clear_unused_bits(); } }; } // End llvm namespace namespace std { /// Implement std::swap in terms of BitVector swap. inline void swap(llvm::BitVector &LHS, llvm::BitVector &RHS) { LHS.swap(RHS); } } #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/None.h
//===-- None.h - Simple null value for implicit construction ------*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  This file provides None, an enumerator for use in implicit constructors
//  of various (usually templated) types to make such construction more
//  terse.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_NONE_H
#define LLVM_ADT_NONE_H

namespace llvm {
/// \brief A simple null object to allow implicit construction of Optional<T>
/// and similar types without having to spell out the specialization's name.
enum class NoneType { None };

// Fix: the initializer must name the scoped enumerator NoneType::None.
// Because NoneType is a scoped enum, the enumerator is not visible as a
// bare "None" here, so the previous "const NoneType None = None;"
// self-initialized the variable with its own indeterminate value.
const NoneType None = NoneType::None;
}

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/SetVector.h
//===- llvm/ADT/SetVector.h - Set with insert order iteration ---*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements a set that has insertion order iteration // characteristics. This is useful for keeping a set of things that need to be // visited later but in a deterministic order (insertion order). The interface // is purposefully minimal. // // This file defines SetVector and SmallSetVector, which performs no allocations // if the SetVector has less than a certain number of elements. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_SETVECTOR_H #define LLVM_ADT_SETVECTOR_H #include "llvm/ADT/SmallSet.h" #include <algorithm> #include <cassert> #include <vector> namespace llvm { /// \brief A vector that has set insertion semantics. /// /// This adapter class provides a way to keep a set of things that also has the /// property of a deterministic iteration order. The order of iteration is the /// order of insertion. template <typename T, typename Vector = std::vector<T>, typename Set = SmallSet<T, 16> > class SetVector { public: typedef T value_type; typedef T key_type; typedef T& reference; typedef const T& const_reference; typedef Set set_type; typedef Vector vector_type; typedef typename vector_type::const_iterator iterator; typedef typename vector_type::const_iterator const_iterator; typedef typename vector_type::size_type size_type; /// \brief Construct an empty SetVector SetVector() {} /// \brief Initialize a SetVector with a range of elements template<typename It> SetVector(It Start, It End) { insert(Start, End); } /// \brief Determine if the SetVector is empty or not. bool empty() const { return vector_.empty(); } /// \brief Determine the number of elements in the SetVector. 
size_type size() const { return vector_.size(); } /// \brief Get an iterator to the beginning of the SetVector. iterator begin() { return vector_.begin(); } /// \brief Get a const_iterator to the beginning of the SetVector. const_iterator begin() const { return vector_.begin(); } /// \brief Get an iterator to the end of the SetVector. iterator end() { return vector_.end(); } /// \brief Get a const_iterator to the end of the SetVector. const_iterator end() const { return vector_.end(); } /// \brief Return the last element of the SetVector. const T &back() const { assert(!empty() && "Cannot call back() on empty SetVector!"); return vector_.back(); } /// \brief Index into the SetVector. const_reference operator[](size_type n) const { assert(n < vector_.size() && "SetVector access out of range!"); return vector_[n]; } /// \brief Insert a new element into the SetVector. /// \returns true iff the element was inserted into the SetVector. bool insert(const value_type &X) { bool result = set_.insert(X).second; if (result) vector_.push_back(X); return result; } /// \brief Insert a range of elements into the SetVector. template<typename It> void insert(It Start, It End) { for (; Start != End; ++Start) if (set_.insert(*Start).second) vector_.push_back(*Start); } /// \brief Remove an item from the set vector. bool remove(const value_type& X) { if (set_.erase(X)) { typename vector_type::iterator I = std::find(vector_.begin(), vector_.end(), X); assert(I != vector_.end() && "Corrupted SetVector instances!"); vector_.erase(I); return true; } return false; } /// \brief Remove items from the set vector based on a predicate function. /// /// This is intended to be equivalent to the following code, if we could /// write it: /// /// \code /// V.erase(std::remove_if(V.begin(), V.end(), P), V.end()); /// \endcode /// /// However, SetVector doesn't expose non-const iterators, making any /// algorithm like remove_if impossible to use. /// /// \returns true if any element is removed. 
template <typename UnaryPredicate> bool remove_if(UnaryPredicate P) { typename vector_type::iterator I = std::remove_if(vector_.begin(), vector_.end(), TestAndEraseFromSet<UnaryPredicate>(P, set_)); if (I == vector_.end()) return false; vector_.erase(I, vector_.end()); return true; } /// \brief Count the number of elements of a given key in the SetVector. /// \returns 0 if the element is not in the SetVector, 1 if it is. size_type count(const key_type &key) const { return set_.count(key); } /// \brief Completely clear the SetVector void clear() { set_.clear(); vector_.clear(); } /// \brief Remove the last element of the SetVector. void pop_back() { assert(!empty() && "Cannot remove an element from an empty SetVector!"); set_.erase(back()); vector_.pop_back(); } T LLVM_ATTRIBUTE_UNUSED_RESULT pop_back_val() { T Ret = back(); pop_back(); return Ret; } bool operator==(const SetVector &that) const { return vector_ == that.vector_; } bool operator!=(const SetVector &that) const { return vector_ != that.vector_; } private: /// \brief A wrapper predicate designed for use with std::remove_if. /// /// This predicate wraps a predicate suitable for use with std::remove_if to /// call set_.erase(x) on each element which is slated for removal. template <typename UnaryPredicate> class TestAndEraseFromSet { UnaryPredicate P; set_type &set_; public: TestAndEraseFromSet(UnaryPredicate P, set_type &set_) : P(P), set_(set_) {} template <typename ArgumentT> bool operator()(const ArgumentT &Arg) { if (P(Arg)) { set_.erase(Arg); return true; } return false; } }; set_type set_; ///< The set. vector_type vector_; ///< The vector. }; /// \brief A SetVector that performs no allocations if smaller than /// a certain size. 
template <typename T, unsigned N> class SmallSetVector : public SetVector<T, SmallVector<T, N>, SmallSet<T, N> > { public: SmallSetVector() {} /// \brief Initialize a SmallSetVector with a range of elements template<typename It> SmallSetVector(It Start, It End) { this->insert(Start, End); } }; } // End llvm namespace // vim: sw=2 ai #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/DenseMapInfo.h
//===- llvm/ADT/DenseMapInfo.h - Type traits for DenseMap -------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines DenseMapInfo traits for DenseMap. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_DENSEMAPINFO_H #define LLVM_ADT_DENSEMAPINFO_H #include "llvm/ADT/Hashing.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/PointerLikeTypeTraits.h" #include "llvm/Support/type_traits.h" namespace llvm { template<typename T> struct DenseMapInfo { //static inline T getEmptyKey(); //static inline T getTombstoneKey(); //static unsigned getHashValue(const T &Val); //static bool isEqual(const T &LHS, const T &RHS); }; // Provide DenseMapInfo for all pointers. template<typename T> struct DenseMapInfo<T*> { static inline T* getEmptyKey() { uintptr_t Val = static_cast<uintptr_t>(-1); Val <<= PointerLikeTypeTraits<T*>::NumLowBitsAvailable; return reinterpret_cast<T*>(Val); } static inline T* getTombstoneKey() { uintptr_t Val = static_cast<uintptr_t>(-2); Val <<= PointerLikeTypeTraits<T*>::NumLowBitsAvailable; return reinterpret_cast<T*>(Val); } static unsigned getHashValue(const T *PtrVal) { return (unsigned((uintptr_t)PtrVal) >> 4) ^ (unsigned((uintptr_t)PtrVal) >> 9); } static bool isEqual(const T *LHS, const T *RHS) { return LHS == RHS; } }; // Provide DenseMapInfo for chars. template<> struct DenseMapInfo<char> { static inline char getEmptyKey() { return ~0; } static inline char getTombstoneKey() { return ~0 - 1; } static unsigned getHashValue(const char& Val) { return Val * 37U; } static bool isEqual(const char &LHS, const char &RHS) { return LHS == RHS; } }; // Provide DenseMapInfo for unsigned ints. 
template<> struct DenseMapInfo<unsigned> { static inline unsigned getEmptyKey() { return ~0U; } static inline unsigned getTombstoneKey() { return ~0U - 1; } static unsigned getHashValue(const unsigned& Val) { return Val * 37U; } static bool isEqual(const unsigned& LHS, const unsigned& RHS) { return LHS == RHS; } }; // Provide DenseMapInfo for unsigned longs. template<> struct DenseMapInfo<unsigned long> { static inline unsigned long getEmptyKey() { return ~0UL; } static inline unsigned long getTombstoneKey() { return ~0UL - 1L; } static unsigned getHashValue(const unsigned long& Val) { return (unsigned)(Val * 37UL); } static bool isEqual(const unsigned long& LHS, const unsigned long& RHS) { return LHS == RHS; } }; // Provide DenseMapInfo for unsigned long longs. template<> struct DenseMapInfo<unsigned long long> { static inline unsigned long long getEmptyKey() { return ~0ULL; } static inline unsigned long long getTombstoneKey() { return ~0ULL - 1ULL; } static unsigned getHashValue(const unsigned long long& Val) { return (unsigned)(Val * 37ULL); } static bool isEqual(const unsigned long long& LHS, const unsigned long long& RHS) { return LHS == RHS; } }; // Provide DenseMapInfo for ints. template<> struct DenseMapInfo<int> { static inline int getEmptyKey() { return 0x7fffffff; } static inline int getTombstoneKey() { return -0x7fffffff - 1; } static unsigned getHashValue(const int& Val) { return (unsigned)(Val * 37U); } static bool isEqual(const int& LHS, const int& RHS) { return LHS == RHS; } }; // Provide DenseMapInfo for longs. template<> struct DenseMapInfo<long> { static inline long getEmptyKey() { return (1UL << (sizeof(long) * 8 - 1)) - 1UL; } static inline long getTombstoneKey() { return getEmptyKey() - 1L; } static unsigned getHashValue(const long& Val) { return (unsigned)(Val * 37UL); } static bool isEqual(const long& LHS, const long& RHS) { return LHS == RHS; } }; // Provide DenseMapInfo for long longs. 
template<> struct DenseMapInfo<long long> { static inline long long getEmptyKey() { return 0x7fffffffffffffffLL; } static inline long long getTombstoneKey() { return -0x7fffffffffffffffLL-1; } static unsigned getHashValue(const long long& Val) { return (unsigned)(Val * 37ULL); } static bool isEqual(const long long& LHS, const long long& RHS) { return LHS == RHS; } }; // Provide DenseMapInfo for all pairs whose members have info. template<typename T, typename U> struct DenseMapInfo<std::pair<T, U> > { typedef std::pair<T, U> Pair; typedef DenseMapInfo<T> FirstInfo; typedef DenseMapInfo<U> SecondInfo; static inline Pair getEmptyKey() { return std::make_pair(FirstInfo::getEmptyKey(), SecondInfo::getEmptyKey()); } static inline Pair getTombstoneKey() { return std::make_pair(FirstInfo::getTombstoneKey(), SecondInfo::getTombstoneKey()); } static unsigned getHashValue(const Pair& PairVal) { uint64_t key = (uint64_t)FirstInfo::getHashValue(PairVal.first) << 32 | (uint64_t)SecondInfo::getHashValue(PairVal.second); key += ~(key << 32); key ^= (key >> 22); key += ~(key << 13); key ^= (key >> 8); key += (key << 3); key ^= (key >> 15); key += ~(key << 27); key ^= (key >> 31); return (unsigned)key; } static bool isEqual(const Pair &LHS, const Pair &RHS) { return FirstInfo::isEqual(LHS.first, RHS.first) && SecondInfo::isEqual(LHS.second, RHS.second); } }; // Provide DenseMapInfo for StringRefs. 
template <> struct DenseMapInfo<StringRef> { static inline StringRef getEmptyKey() { return StringRef(reinterpret_cast<const char *>(~static_cast<uintptr_t>(0)), 0); } static inline StringRef getTombstoneKey() { return StringRef(reinterpret_cast<const char *>(~static_cast<uintptr_t>(1)), 0); } static unsigned getHashValue(StringRef Val) { assert(Val.data() != getEmptyKey().data() && "Cannot hash the empty key!"); assert(Val.data() != getTombstoneKey().data() && "Cannot hash the tombstone key!"); return (unsigned)(hash_value(Val)); } static bool isEqual(StringRef LHS, StringRef RHS) { if (RHS.data() == getEmptyKey().data()) return LHS.data() == getEmptyKey().data(); if (RHS.data() == getTombstoneKey().data()) return LHS.data() == getTombstoneKey().data(); return LHS == RHS; } }; } // end namespace llvm #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/SCCIterator.h
//===---- ADT/SCCIterator.h - Strongly Connected Comp. Iter. ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This builds on the llvm/ADT/GraphTraits.h file to find the strongly
/// connected components (SCCs) of a graph in O(N+E) time using Tarjan's DFS
/// algorithm.
///
/// The SCC iterator has the important property that if a node in SCC S1 has an
/// edge to a node in SCC S2, then it visits S1 *after* S2.
///
/// To visit S1 *before* S2, use the scc_iterator on the Inverse graph. (NOTE:
/// This requires some simple wrappers and is not supported yet.)
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_SCCITERATOR_H
#define LLVM_ADT_SCCITERATOR_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/iterator.h"
#include <vector>

namespace llvm {

/// \brief Enumerate the SCCs of a directed graph in reverse topological order
/// of the SCC DAG.
///
/// This is implemented using Tarjan's DFS algorithm using an internal stack to
/// build up a vector of nodes in a particular SCC. Note that it is a forward
/// iterator and thus you cannot backtrack or re-visit nodes. Each increment
/// resumes the suspended DFS until the next complete SCC has been collected.
template <class GraphT, class GT = GraphTraits<GraphT>>
class scc_iterator : public iterator_facade_base<
                         scc_iterator<GraphT, GT>, std::forward_iterator_tag,
                         const std::vector<typename GT::NodeType *>,
                         ptrdiff_t> {
  typedef typename GT::NodeType NodeType;
  typedef typename GT::ChildIteratorType ChildItTy;
  typedef std::vector<NodeType *> SccTy;
  typedef typename scc_iterator::reference reference;

  /// Element of VisitStack during DFS.
  struct StackElement {
    NodeType *Node;      ///< The current node pointer.
    ChildItTy NextChild; ///< The next child, modified inplace during DFS.
    unsigned MinVisited; ///< Minimum uplink value of all children of Node.

    StackElement(NodeType *Node, const ChildItTy &Child, unsigned Min)
        : Node(Node), NextChild(Child), MinVisited(Min) {}

    bool operator==(const StackElement &Other) const {
      return Node == Other.Node &&
             NextChild == Other.NextChild &&
             MinVisited == Other.MinVisited;
    }
  };

  /// The visit counters used to detect when a complete SCC is on the stack.
  /// visitNum is the global counter.
  ///
  /// nodeVisitNumbers are per-node visit numbers, also used as DFS flags.
  unsigned visitNum;
  DenseMap<NodeType *, unsigned> nodeVisitNumbers;

  /// Stack holding nodes of the SCC.
  std::vector<NodeType *> SCCNodeStack;

  /// The current SCC, retrieved using operator*().
  SccTy CurrentSCC;

  /// DFS stack, Used to maintain the ordering.  The top contains the current
  /// node, the next child to visit, and the minimum uplink value of all child
  std::vector<StackElement> VisitStack;

  /// A single "visit" within the non-recursive DFS traversal.
  void DFSVisitOne(NodeType *N);

  /// The stack-based DFS traversal; defined below.
  void DFSVisitChildren();

  /// Compute the next SCC using the DFS traversal.
  void GetNextSCC();

  /// Begin-iterator ctor: seed the DFS with the entry node and compute the
  /// first SCC eagerly so operator*() is immediately valid.
  scc_iterator(NodeType *entryN) : visitNum(0) {
    DFSVisitOne(entryN);
    GetNextSCC();
  }

  /// End is when the DFS stack is empty.
  scc_iterator() {}

public:
  static scc_iterator begin(const GraphT &G) {
    return scc_iterator(GT::getEntryNode(G));
  }
  static scc_iterator end(const GraphT &) { return scc_iterator(); }

  /// \brief Direct loop termination test which is more efficient than
  /// comparison with \c end().
  bool isAtEnd() const {
    assert(!CurrentSCC.empty() || VisitStack.empty());
    return CurrentSCC.empty();
  }

  bool operator==(const scc_iterator &x) const {
    return VisitStack == x.VisitStack && CurrentSCC == x.CurrentSCC;
  }

  scc_iterator &operator++() {
    GetNextSCC();
    return *this;
  }

  reference operator*() const {
    assert(!CurrentSCC.empty() && "Dereferencing END SCC iterator!");
    return CurrentSCC;
  }

  /// \brief Test if the current SCC has a loop.
  ///
  /// If the SCC has more than one node, this is trivially true.  If not, it may
  /// still contain a loop if the node has an edge back to itself.
  bool hasLoop() const;

  /// This informs the \c scc_iterator that the specified \c Old node
  /// has been deleted, and \c New is to be used in its place.
  /// Note: transfers only the recorded visit number; \c New is assumed to
  /// occupy exactly \c Old's position in the graph.
  void ReplaceNode(NodeType *Old, NodeType *New) {
    assert(nodeVisitNumbers.count(Old) && "Old not in scc_iterator?");
    nodeVisitNumbers[New] = nodeVisitNumbers[Old];
    nodeVisitNumbers.erase(Old);
  }
};

template <class GraphT, class GT>
void scc_iterator<GraphT, GT>::DFSVisitOne(NodeType *N) {
  // Assign N the next discovery number and push it on both stacks; its
  // initial uplink is its own visit number.
  ++visitNum;
  nodeVisitNumbers[N] = visitNum;
  SCCNodeStack.push_back(N);
  VisitStack.push_back(StackElement(N, GT::child_begin(N), visitNum));
#if 0 // Enable if needed when debugging.
  dbgs() << "TarjanSCC: Node " << N <<
        " : visitNum = " << visitNum << "\n";
#endif
}

template <class GraphT, class GT>
void scc_iterator<GraphT, GT>::DFSVisitChildren() {
  assert(!VisitStack.empty());
  // Descend until the node on top of the VisitStack has no unexplored
  // children left. NextChild is advanced in place, which is what lets the
  // traversal suspend and resume across iterator increments.
  while (VisitStack.back().NextChild != GT::child_end(VisitStack.back().Node)) {
    // TOS has at least one more child so continue DFS
    NodeType *childN = *VisitStack.back().NextChild++;
    typename DenseMap<NodeType *, unsigned>::iterator Visited =
        nodeVisitNumbers.find(childN);
    if (Visited == nodeVisitNumbers.end()) {
      // this node has never been seen.
      DFSVisitOne(childN);
      continue;
    }

    // Already-seen child: fold its visit number into the top's uplink.
    unsigned childNum = Visited->second;
    if (VisitStack.back().MinVisited > childNum)
      VisitStack.back().MinVisited = childNum;
  }
}

template <class GraphT, class GT> void scc_iterator<GraphT, GT>::GetNextSCC() {
  CurrentSCC.clear(); // Prepare to compute the next SCC
  while (!VisitStack.empty()) {
    DFSVisitChildren();

    // Pop the leaf on top of the VisitStack.
    NodeType *visitingN = VisitStack.back().Node;
    unsigned minVisitNum = VisitStack.back().MinVisited;
    assert(VisitStack.back().NextChild == GT::child_end(visitingN));
    VisitStack.pop_back();

    // Propagate MinVisitNum to parent so we can detect the SCC starting node.
    if (!VisitStack.empty() && VisitStack.back().MinVisited > minVisitNum)
      VisitStack.back().MinVisited = minVisitNum;

#if 0 // Enable if needed when debugging.
    dbgs() << "TarjanSCC: Popped node " << visitingN <<
          " : minVisitNum = " << minVisitNum << "; Node visit num = " <<
          nodeVisitNumbers[visitingN] << "\n";
#endif

    // A node whose uplink equals its own visit number is the root of an SCC
    // (Tarjan's root test); anything else is an interior node, keep popping.
    if (minVisitNum != nodeVisitNumbers[visitingN])
      continue;

    // A full SCC is on the SCCNodeStack!  It includes all nodes below
    // visitingN on the stack.  Copy those nodes to CurrentSCC,
    // reset their minVisit values, and return (this suspends
    // the DFS traversal till the next ++).
    do {
      CurrentSCC.push_back(SCCNodeStack.back());
      SCCNodeStack.pop_back();
      nodeVisitNumbers[CurrentSCC.back()] = ~0U;
    } while (CurrentSCC.back() != visitingN);
    return;
  }
}

template <class GraphT, class GT>
bool scc_iterator<GraphT, GT>::hasLoop() const {
  assert(!CurrentSCC.empty() && "Dereferencing END SCC iterator!");
  if (CurrentSCC.size() > 1)
    return true;
  // Singleton SCC: it is cyclic only if the node has a self-edge.
  NodeType *N = CurrentSCC.front();
  for (ChildItTy CI = GT::child_begin(N), CE = GT::child_end(N); CI != CE;
       ++CI)
    if (*CI == N)
      return true;
  return false;
}

/// \brief Construct the begin iterator for a deduced graph type T.
template <class T> scc_iterator<T> scc_begin(const T &G) {
  return scc_iterator<T>::begin(G);
}

/// \brief Construct the end iterator for a deduced graph type T.
template <class T> scc_iterator<T> scc_end(const T &G) { return scc_iterator<T>::end(G); } /// \brief Construct the begin iterator for a deduced graph type T's Inverse<T>. template <class T> scc_iterator<Inverse<T> > scc_begin(const Inverse<T> &G) { return scc_iterator<Inverse<T> >::begin(G); } /// \brief Construct the end iterator for a deduced graph type T's Inverse<T>. template <class T> scc_iterator<Inverse<T> > scc_end(const Inverse<T> &G) { return scc_iterator<Inverse<T> >::end(G); } } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/ImmutableMap.h
//===--- ImmutableMap.h - Immutable (functional) map interface --*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the ImmutableMap class. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_IMMUTABLEMAP_H #define LLVM_ADT_IMMUTABLEMAP_H #include "llvm/ADT/ImmutableSet.h" namespace llvm { /// ImutKeyValueInfo -Traits class used by ImmutableMap. While both the first /// and second elements in a pair are used to generate profile information, /// only the first element (the key) is used by isEqual and isLess. template <typename T, typename S> struct ImutKeyValueInfo { typedef const std::pair<T,S> value_type; typedef const value_type& value_type_ref; typedef const T key_type; typedef const T& key_type_ref; typedef const S data_type; typedef const S& data_type_ref; static inline key_type_ref KeyOfValue(value_type_ref V) { return V.first; } static inline data_type_ref DataOfValue(value_type_ref V) { return V.second; } static inline bool isEqual(key_type_ref L, key_type_ref R) { return ImutContainerInfo<T>::isEqual(L,R); } static inline bool isLess(key_type_ref L, key_type_ref R) { return ImutContainerInfo<T>::isLess(L,R); } static inline bool isDataEqual(data_type_ref L, data_type_ref R) { return ImutContainerInfo<S>::isEqual(L,R); } static inline void Profile(FoldingSetNodeID& ID, value_type_ref V) { ImutContainerInfo<T>::Profile(ID, V.first); ImutContainerInfo<S>::Profile(ID, V.second); } }; template <typename KeyT, typename ValT, typename ValInfo = ImutKeyValueInfo<KeyT,ValT> > class ImmutableMap { public: typedef typename ValInfo::value_type value_type; typedef typename ValInfo::value_type_ref value_type_ref; typedef typename ValInfo::key_type key_type; typedef typename ValInfo::key_type_ref 
key_type_ref; typedef typename ValInfo::data_type data_type; typedef typename ValInfo::data_type_ref data_type_ref; typedef ImutAVLTree<ValInfo> TreeTy; protected: TreeTy* Root; public: /// Constructs a map from a pointer to a tree root. In general one /// should use a Factory object to create maps instead of directly /// invoking the constructor, but there are cases where make this /// constructor public is useful. explicit ImmutableMap(const TreeTy* R) : Root(const_cast<TreeTy*>(R)) { if (Root) { Root->retain(); } } ImmutableMap(const ImmutableMap &X) : Root(X.Root) { if (Root) { Root->retain(); } } ImmutableMap &operator=(const ImmutableMap &X) { if (Root != X.Root) { if (X.Root) { X.Root->retain(); } if (Root) { Root->release(); } Root = X.Root; } return *this; } ~ImmutableMap() { if (Root) { Root->release(); } } class Factory { typename TreeTy::Factory F; const bool Canonicalize; public: Factory(bool canonicalize = true) : Canonicalize(canonicalize) {} Factory(BumpPtrAllocator& Alloc, bool canonicalize = true) : F(Alloc), Canonicalize(canonicalize) {} ImmutableMap getEmptyMap() { return ImmutableMap(F.getEmptyTree()); } ImmutableMap add(ImmutableMap Old, key_type_ref K, data_type_ref D) { TreeTy *T = F.add(Old.Root, std::pair<key_type,data_type>(K,D)); return ImmutableMap(Canonicalize ? F.getCanonicalTree(T): T); } ImmutableMap remove(ImmutableMap Old, key_type_ref K) { TreeTy *T = F.remove(Old.Root,K); return ImmutableMap(Canonicalize ? F.getCanonicalTree(T): T); } typename TreeTy::Factory *getTreeFactory() const { return const_cast<typename TreeTy::Factory *>(&F); } private: Factory(const Factory& RHS) = delete; void operator=(const Factory& RHS) = delete; }; bool contains(key_type_ref K) const { return Root ? Root->contains(K) : false; } bool operator==(const ImmutableMap &RHS) const { return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root; } bool operator!=(const ImmutableMap &RHS) const { return Root && RHS.Root ? 
Root->isNotEqual(*RHS.Root) : Root != RHS.Root; } TreeTy *getRoot() const { if (Root) { Root->retain(); } return Root; } TreeTy *getRootWithoutRetain() const { return Root; } void manualRetain() { if (Root) Root->retain(); } void manualRelease() { if (Root) Root->release(); } bool isEmpty() const { return !Root; } //===--------------------------------------------------===// // Foreach - A limited form of map iteration. //===--------------------------------------------------===// private: template <typename Callback> struct CBWrapper { Callback C; void operator()(value_type_ref V) { C(V.first,V.second); } }; template <typename Callback> struct CBWrapperRef { Callback &C; CBWrapperRef(Callback& c) : C(c) {} void operator()(value_type_ref V) { C(V.first,V.second); } }; public: template <typename Callback> void foreach(Callback& C) { if (Root) { CBWrapperRef<Callback> CB(C); Root->foreach(CB); } } template <typename Callback> void foreach() { if (Root) { CBWrapper<Callback> CB; Root->foreach(CB); } } //===--------------------------------------------------===// // For testing. //===--------------------------------------------------===// void verify() const { if (Root) Root->verify(); } //===--------------------------------------------------===// // Iterators. 
//===--------------------------------------------------===// class iterator : public ImutAVLValueIterator<ImmutableMap> { iterator() = default; explicit iterator(TreeTy *Tree) : iterator::ImutAVLValueIterator(Tree) {} friend class ImmutableMap; public: key_type_ref getKey() const { return (*this)->first; } data_type_ref getData() const { return (*this)->second; } }; iterator begin() const { return iterator(Root); } iterator end() const { return iterator(); } data_type* lookup(key_type_ref K) const { if (Root) { TreeTy* T = Root->find(K); if (T) return &T->getValue().second; } return nullptr; } /// getMaxElement - Returns the <key,value> pair in the ImmutableMap for /// which key is the highest in the ordering of keys in the map. This /// method returns NULL if the map is empty. value_type* getMaxElement() const { return Root ? &(Root->getMaxElement()->getValue()) : nullptr; } //===--------------------------------------------------===// // Utility methods. //===--------------------------------------------------===// unsigned getHeight() const { return Root ? Root->getHeight() : 0; } static inline void Profile(FoldingSetNodeID& ID, const ImmutableMap& M) { ID.AddPointer(M.Root); } inline void Profile(FoldingSetNodeID& ID) const { return Profile(ID,*this); } }; // NOTE: This will possibly become the new implementation of ImmutableMap some day. template <typename KeyT, typename ValT, typename ValInfo = ImutKeyValueInfo<KeyT,ValT> > class ImmutableMapRef { public: typedef typename ValInfo::value_type value_type; typedef typename ValInfo::value_type_ref value_type_ref; typedef typename ValInfo::key_type key_type; typedef typename ValInfo::key_type_ref key_type_ref; typedef typename ValInfo::data_type data_type; typedef typename ValInfo::data_type_ref data_type_ref; typedef ImutAVLTree<ValInfo> TreeTy; typedef typename TreeTy::Factory FactoryTy; protected: TreeTy *Root; FactoryTy *Factory; public: /// Constructs a map from a pointer to a tree root. 
In general one /// should use a Factory object to create maps instead of directly /// invoking the constructor, but there are cases where make this /// constructor public is useful. explicit ImmutableMapRef(const TreeTy* R, FactoryTy *F) : Root(const_cast<TreeTy*>(R)), Factory(F) { if (Root) { Root->retain(); } } explicit ImmutableMapRef(const ImmutableMap<KeyT, ValT> &X, typename ImmutableMap<KeyT, ValT>::Factory &F) : Root(X.getRootWithoutRetain()), Factory(F.getTreeFactory()) { if (Root) { Root->retain(); } } ImmutableMapRef(const ImmutableMapRef &X) : Root(X.Root), Factory(X.Factory) { if (Root) { Root->retain(); } } ImmutableMapRef &operator=(const ImmutableMapRef &X) { if (Root != X.Root) { if (X.Root) X.Root->retain(); if (Root) Root->release(); Root = X.Root; Factory = X.Factory; } return *this; } ~ImmutableMapRef() { if (Root) Root->release(); } static inline ImmutableMapRef getEmptyMap(FactoryTy *F) { return ImmutableMapRef(0, F); } void manualRetain() { if (Root) Root->retain(); } void manualRelease() { if (Root) Root->release(); } ImmutableMapRef add(key_type_ref K, data_type_ref D) const { TreeTy *NewT = Factory->add(Root, std::pair<key_type, data_type>(K, D)); return ImmutableMapRef(NewT, Factory); } ImmutableMapRef remove(key_type_ref K) const { TreeTy *NewT = Factory->remove(Root, K); return ImmutableMapRef(NewT, Factory); } bool contains(key_type_ref K) const { return Root ? Root->contains(K) : false; } ImmutableMap<KeyT, ValT> asImmutableMap() const { return ImmutableMap<KeyT, ValT>(Factory->getCanonicalTree(Root)); } bool operator==(const ImmutableMapRef &RHS) const { return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root; } bool operator!=(const ImmutableMapRef &RHS) const { return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root; } bool isEmpty() const { return !Root; } //===--------------------------------------------------===// // For testing. 
//===--------------------------------------------------===// void verify() const { if (Root) Root->verify(); } //===--------------------------------------------------===// // Iterators. //===--------------------------------------------------===// class iterator : public ImutAVLValueIterator<ImmutableMapRef> { iterator() = default; explicit iterator(TreeTy *Tree) : iterator::ImutAVLValueIterator(Tree) {} friend class ImmutableMapRef; public: key_type_ref getKey() const { return (*this)->first; } data_type_ref getData() const { return (*this)->second; } }; iterator begin() const { return iterator(Root); } iterator end() const { return iterator(); } data_type* lookup(key_type_ref K) const { if (Root) { TreeTy* T = Root->find(K); if (T) return &T->getValue().second; } return 0; } /// getMaxElement - Returns the <key,value> pair in the ImmutableMap for /// which key is the highest in the ordering of keys in the map. This /// method returns NULL if the map is empty. value_type* getMaxElement() const { return Root ? &(Root->getMaxElement()->getValue()) : 0; } //===--------------------------------------------------===// // Utility methods. //===--------------------------------------------------===// unsigned getHeight() const { return Root ? Root->getHeight() : 0; } static inline void Profile(FoldingSetNodeID& ID, const ImmutableMapRef &M) { ID.AddPointer(M.Root); } inline void Profile(FoldingSetNodeID& ID) const { return Profile(ID, *this); } }; } // end namespace llvm #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/SmallBitVector.h
//===- llvm/ADT/SmallBitVector.h - 'Normally small' bit vectors -*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the SmallBitVector class. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_SMALLBITVECTOR_H #define LLVM_ADT_SMALLBITVECTOR_H #include "llvm/ADT/BitVector.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/MathExtras.h" #include <cassert> namespace llvm { /// SmallBitVector - This is a 'bitvector' (really, a variable-sized bit array), /// optimized for the case when the array is small. It contains one /// pointer-sized field, which is directly used as a plain collection of bits /// when possible, or as a pointer to a larger heap-allocated array when /// necessary. This allows normal "small" cases to be fast without losing /// generality for large inputs. /// class SmallBitVector { // TODO: In "large" mode, a pointer to a BitVector is used, leading to an // unnecessary level of indirection. It would be more efficient to use a // pointer to memory containing size, allocation size, and the array of bits. uintptr_t X; enum { // The number of bits in this class. NumBaseBits = sizeof(uintptr_t) * CHAR_BIT, // One bit is used to discriminate between small and large mode. The // remaining bits are used for the small-mode representation. SmallNumRawBits = NumBaseBits - 1, // A few more bits are used to store the size of the bit set in small mode. // Theoretically this is a ceil-log2. These bits are encoded in the most // significant bits of the raw bits. SmallNumSizeBits = (NumBaseBits == 32 ? 5 : NumBaseBits == 64 ? 6 : SmallNumRawBits), // The remaining bits are used to store the actual set in small mode. 
SmallNumDataBits = SmallNumRawBits - SmallNumSizeBits }; static_assert(NumBaseBits == 64 || NumBaseBits == 32, "Unsupported word size"); public: typedef unsigned size_type; // Encapsulation of a single bit. class reference { SmallBitVector &TheVector; unsigned BitPos; public: reference(SmallBitVector &b, unsigned Idx) : TheVector(b), BitPos(Idx) {} reference(const reference&) = default; reference& operator=(reference t) { *this = bool(t); return *this; } reference& operator=(bool t) { if (t) TheVector.set(BitPos); else TheVector.reset(BitPos); return *this; } operator bool() const { return const_cast<const SmallBitVector &>(TheVector).operator[](BitPos); } }; private: bool isSmall() const { return X & uintptr_t(1); } BitVector *getPointer() const { assert(!isSmall()); return reinterpret_cast<BitVector *>(X); } void switchToSmall(uintptr_t NewSmallBits, size_t NewSize) { X = 1; setSmallSize(NewSize); setSmallBits(NewSmallBits); } void switchToLarge(BitVector *BV) { X = reinterpret_cast<uintptr_t>(BV); assert(!isSmall() && "Tried to use an unaligned pointer"); } // Return all the bits used for the "small" representation; this includes // bits for the size as well as the element bits. uintptr_t getSmallRawBits() const { assert(isSmall()); return X >> 1; } void setSmallRawBits(uintptr_t NewRawBits) { assert(isSmall()); X = (NewRawBits << 1) | uintptr_t(1); } // Return the size. size_t getSmallSize() const { return getSmallRawBits() >> SmallNumDataBits; } void setSmallSize(size_t Size) { setSmallRawBits(getSmallBits() | (Size << SmallNumDataBits)); } // Return the element bits. uintptr_t getSmallBits() const { return getSmallRawBits() & ~(~uintptr_t(0) << getSmallSize()); } void setSmallBits(uintptr_t NewBits) { setSmallRawBits((NewBits & ~(~uintptr_t(0) << getSmallSize())) | (getSmallSize() << SmallNumDataBits)); } public: /// SmallBitVector default ctor - Creates an empty bitvector. 
SmallBitVector() : X(1) {} /// SmallBitVector ctor - Creates a bitvector of specified number of bits. All /// bits are initialized to the specified value. explicit SmallBitVector(unsigned s, bool t = false) { if (s <= SmallNumDataBits) switchToSmall(t ? ~uintptr_t(0) : 0, s); else switchToLarge(new BitVector(s, t)); } /// SmallBitVector copy ctor. SmallBitVector(const SmallBitVector &RHS) { if (RHS.isSmall()) X = RHS.X; else switchToLarge(new BitVector(*RHS.getPointer())); } SmallBitVector(SmallBitVector &&RHS) : X(RHS.X) { RHS.X = 1; } ~SmallBitVector() { if (!isSmall()) delete getPointer(); } /// empty - Tests whether there are no bits in this bitvector. bool empty() const { return isSmall() ? getSmallSize() == 0 : getPointer()->empty(); } /// size - Returns the number of bits in this bitvector. size_t size() const { return isSmall() ? getSmallSize() : getPointer()->size(); } /// count - Returns the number of bits which are set. size_type count() const { if (isSmall()) { uintptr_t Bits = getSmallBits(); return countPopulation(Bits); } return getPointer()->count(); } /// any - Returns true if any bit is set. bool any() const { if (isSmall()) return getSmallBits() != 0; return getPointer()->any(); } /// all - Returns true if all bits are set. bool all() const { if (isSmall()) return getSmallBits() == (uintptr_t(1) << getSmallSize()) - 1; return getPointer()->all(); } /// none - Returns true if none of the bits are set. bool none() const { if (isSmall()) return getSmallBits() == 0; return getPointer()->none(); } /// find_first - Returns the index of the first set bit, -1 if none /// of the bits are set. int find_first() const { if (isSmall()) { uintptr_t Bits = getSmallBits(); if (Bits == 0) return -1; return countTrailingZeros(Bits); } return getPointer()->find_first(); } /// find_next - Returns the index of the next set bit following the /// "Prev" bit. Returns -1 if the next set bit is not found. 
int find_next(unsigned Prev) const { if (isSmall()) { uintptr_t Bits = getSmallBits(); // Mask off previous bits. Bits &= ~uintptr_t(0) << (Prev + 1); if (Bits == 0 || Prev + 1 >= getSmallSize()) return -1; return countTrailingZeros(Bits); } return getPointer()->find_next(Prev); } /// clear - Clear all bits. void clear() { if (!isSmall()) delete getPointer(); switchToSmall(0, 0); } /// resize - Grow or shrink the bitvector. void resize(unsigned N, bool t = false) { if (!isSmall()) { getPointer()->resize(N, t); } else if (SmallNumDataBits >= N) { uintptr_t NewBits = t ? ~uintptr_t(0) << getSmallSize() : 0; setSmallSize(N); setSmallBits(NewBits | getSmallBits()); } else { BitVector *BV = new BitVector(N, t); uintptr_t OldBits = getSmallBits(); for (size_t i = 0, e = getSmallSize(); i != e; ++i) (*BV)[i] = (OldBits >> i) & 1; switchToLarge(BV); } } void reserve(unsigned N) { if (isSmall()) { if (N > SmallNumDataBits) { uintptr_t OldBits = getSmallRawBits(); size_t SmallSize = getSmallSize(); BitVector *BV = new BitVector(SmallSize); for (size_t i = 0; i < SmallSize; ++i) if ((OldBits >> i) & 1) BV->set(i); BV->reserve(N); switchToLarge(BV); } } else { getPointer()->reserve(N); } } // Set, reset, flip SmallBitVector &set() { if (isSmall()) setSmallBits(~uintptr_t(0)); else getPointer()->set(); return *this; } SmallBitVector &set(unsigned Idx) { if (isSmall()) { assert(Idx <= static_cast<unsigned>( std::numeric_limits<uintptr_t>::digits) && "undefined behavior"); setSmallBits(getSmallBits() | (uintptr_t(1) << Idx)); } else getPointer()->set(Idx); return *this; } /// set - Efficiently set a range of bits in [I, E) SmallBitVector &set(unsigned I, unsigned E) { assert(I <= E && "Attempted to set backwards range!"); assert(E <= size() && "Attempted to set out-of-bounds range!"); if (I == E) return *this; if (isSmall()) { uintptr_t EMask = ((uintptr_t)1) << E; uintptr_t IMask = ((uintptr_t)1) << I; uintptr_t Mask = EMask - IMask; setSmallBits(getSmallBits() | Mask); } else 
getPointer()->set(I, E); return *this; } SmallBitVector &reset() { if (isSmall()) setSmallBits(0); else getPointer()->reset(); return *this; } SmallBitVector &reset(unsigned Idx) { if (isSmall()) setSmallBits(getSmallBits() & ~(uintptr_t(1) << Idx)); else getPointer()->reset(Idx); return *this; } /// reset - Efficiently reset a range of bits in [I, E) SmallBitVector &reset(unsigned I, unsigned E) { assert(I <= E && "Attempted to reset backwards range!"); assert(E <= size() && "Attempted to reset out-of-bounds range!"); if (I == E) return *this; if (isSmall()) { uintptr_t EMask = ((uintptr_t)1) << E; uintptr_t IMask = ((uintptr_t)1) << I; uintptr_t Mask = EMask - IMask; setSmallBits(getSmallBits() & ~Mask); } else getPointer()->reset(I, E); return *this; } SmallBitVector &flip() { if (isSmall()) setSmallBits(~getSmallBits()); else getPointer()->flip(); return *this; } SmallBitVector &flip(unsigned Idx) { if (isSmall()) setSmallBits(getSmallBits() ^ (uintptr_t(1) << Idx)); else getPointer()->flip(Idx); return *this; } // No argument flip. SmallBitVector operator~() const { return SmallBitVector(*this).flip(); } // Indexing. reference operator[](unsigned Idx) { assert(Idx < size() && "Out-of-bounds Bit access."); return reference(*this, Idx); } bool operator[](unsigned Idx) const { assert(Idx < size() && "Out-of-bounds Bit access."); if (isSmall()) return ((getSmallBits() >> Idx) & 1) != 0; return getPointer()->operator[](Idx); } bool test(unsigned Idx) const { return (*this)[Idx]; } /// Test if any common bits are set. bool anyCommon(const SmallBitVector &RHS) const { if (isSmall() && RHS.isSmall()) return (getSmallBits() & RHS.getSmallBits()) != 0; if (!isSmall() && !RHS.isSmall()) return getPointer()->anyCommon(*RHS.getPointer()); for (unsigned i = 0, e = std::min(size(), RHS.size()); i != e; ++i) if (test(i) && RHS.test(i)) return true; return false; } // Comparison operators. 
bool operator==(const SmallBitVector &RHS) const { if (size() != RHS.size()) return false; if (isSmall()) return getSmallBits() == RHS.getSmallBits(); else return *getPointer() == *RHS.getPointer(); } bool operator!=(const SmallBitVector &RHS) const { return !(*this == RHS); } // Intersection, union, disjoint union. SmallBitVector &operator&=(const SmallBitVector &RHS) { resize(std::max(size(), RHS.size())); if (isSmall()) setSmallBits(getSmallBits() & RHS.getSmallBits()); else if (!RHS.isSmall()) getPointer()->operator&=(*RHS.getPointer()); else { SmallBitVector Copy = RHS; Copy.resize(size()); getPointer()->operator&=(*Copy.getPointer()); } return *this; } /// reset - Reset bits that are set in RHS. Same as *this &= ~RHS. SmallBitVector &reset(const SmallBitVector &RHS) { if (isSmall() && RHS.isSmall()) setSmallBits(getSmallBits() & ~RHS.getSmallBits()); else if (!isSmall() && !RHS.isSmall()) getPointer()->reset(*RHS.getPointer()); else for (unsigned i = 0, e = std::min(size(), RHS.size()); i != e; ++i) if (RHS.test(i)) reset(i); return *this; } /// test - Check if (This - RHS) is zero. /// This is the same as reset(RHS) and any(). 
bool test(const SmallBitVector &RHS) const { if (isSmall() && RHS.isSmall()) return (getSmallBits() & ~RHS.getSmallBits()) != 0; if (!isSmall() && !RHS.isSmall()) return getPointer()->test(*RHS.getPointer()); unsigned i, e; for (i = 0, e = std::min(size(), RHS.size()); i != e; ++i) if (test(i) && !RHS.test(i)) return true; for (e = size(); i != e; ++i) if (test(i)) return true; return false; } SmallBitVector &operator|=(const SmallBitVector &RHS) { resize(std::max(size(), RHS.size())); if (isSmall()) setSmallBits(getSmallBits() | RHS.getSmallBits()); else if (!RHS.isSmall()) getPointer()->operator|=(*RHS.getPointer()); else { SmallBitVector Copy = RHS; Copy.resize(size()); getPointer()->operator|=(*Copy.getPointer()); } return *this; } SmallBitVector &operator^=(const SmallBitVector &RHS) { resize(std::max(size(), RHS.size())); if (isSmall()) setSmallBits(getSmallBits() ^ RHS.getSmallBits()); else if (!RHS.isSmall()) getPointer()->operator^=(*RHS.getPointer()); else { SmallBitVector Copy = RHS; Copy.resize(size()); getPointer()->operator^=(*Copy.getPointer()); } return *this; } // Assignment operator. const SmallBitVector &operator=(const SmallBitVector &RHS) { if (isSmall()) { if (RHS.isSmall()) X = RHS.X; else switchToLarge(new BitVector(*RHS.getPointer())); } else { if (!RHS.isSmall()) *getPointer() = *RHS.getPointer(); else { delete getPointer(); X = RHS.X; } } return *this; } const SmallBitVector &operator=(SmallBitVector &&RHS) { if (this != &RHS) { clear(); swap(RHS); } return *this; } void swap(SmallBitVector &RHS) { std::swap(X, RHS.X); } /// setBitsInMask - Add '1' bits from Mask to this vector. Don't resize. /// This computes "*this |= Mask". void setBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) { if (isSmall()) applyMask<true, false>(Mask, MaskWords); else getPointer()->setBitsInMask(Mask, MaskWords); } /// clearBitsInMask - Clear any bits in this vector that are set in Mask. /// Don't resize. This computes "*this &= ~Mask". 
void clearBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) { if (isSmall()) applyMask<false, false>(Mask, MaskWords); else getPointer()->clearBitsInMask(Mask, MaskWords); } /// setBitsNotInMask - Add a bit to this vector for every '0' bit in Mask. /// Don't resize. This computes "*this |= ~Mask". void setBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) { if (isSmall()) applyMask<true, true>(Mask, MaskWords); else getPointer()->setBitsNotInMask(Mask, MaskWords); } /// clearBitsNotInMask - Clear a bit in this vector for every '0' bit in Mask. /// Don't resize. This computes "*this &= Mask". void clearBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) { if (isSmall()) applyMask<false, true>(Mask, MaskWords); else getPointer()->clearBitsNotInMask(Mask, MaskWords); } private: template<bool AddBits, bool InvertMask> void applyMask(const uint32_t *Mask, unsigned MaskWords) { if (NumBaseBits == 64 && MaskWords >= 2) { uint64_t M = Mask[0] | (uint64_t(Mask[1]) << 32); if (InvertMask) M = ~M; if (AddBits) setSmallBits(getSmallBits() | M); else setSmallBits(getSmallBits() & ~M); } else { #pragma warning( push ) // HLSL Change #pragma warning( disable: 4319 ) // HLSL Change - not a branch in 64-bit - '~': zero extending 'uint32_t' to 'uintptr_t' of greater size uint32_t M = Mask[0]; if (InvertMask) M = ~M; if (AddBits) setSmallBits(getSmallBits() | M); else setSmallBits(getSmallBits() & ~M); #pragma warning( pop ) // HLSL Change } } }; inline SmallBitVector operator&(const SmallBitVector &LHS, const SmallBitVector &RHS) { SmallBitVector Result(LHS); Result &= RHS; return Result; } inline SmallBitVector operator|(const SmallBitVector &LHS, const SmallBitVector &RHS) { SmallBitVector Result(LHS); Result |= RHS; return Result; } inline SmallBitVector operator^(const SmallBitVector &LHS, const SmallBitVector &RHS) { SmallBitVector Result(LHS); Result ^= RHS; return Result; } } // End llvm namespace namespace std { /// Implement std::swap in terms 
of BitVector swap. inline void swap(llvm::SmallBitVector &LHS, llvm::SmallBitVector &RHS) { LHS.swap(RHS); } } #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/VariadicFunction.h
//===--- VariadicFunctions.h - Variadic Functions ---------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements compile-time type-safe variadic functions. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_VARIADICFUNCTION_H #define LLVM_ADT_VARIADICFUNCTION_H #include "llvm/ADT/ArrayRef.h" namespace llvm { // Define macros to aid in expanding a comma separated series with the index of // the series pasted onto the last token. #define LLVM_COMMA_JOIN1(x) x ## 0 #define LLVM_COMMA_JOIN2(x) LLVM_COMMA_JOIN1(x), x ## 1 #define LLVM_COMMA_JOIN3(x) LLVM_COMMA_JOIN2(x), x ## 2 #define LLVM_COMMA_JOIN4(x) LLVM_COMMA_JOIN3(x), x ## 3 #define LLVM_COMMA_JOIN5(x) LLVM_COMMA_JOIN4(x), x ## 4 #define LLVM_COMMA_JOIN6(x) LLVM_COMMA_JOIN5(x), x ## 5 #define LLVM_COMMA_JOIN7(x) LLVM_COMMA_JOIN6(x), x ## 6 #define LLVM_COMMA_JOIN8(x) LLVM_COMMA_JOIN7(x), x ## 7 #define LLVM_COMMA_JOIN9(x) LLVM_COMMA_JOIN8(x), x ## 8 #define LLVM_COMMA_JOIN10(x) LLVM_COMMA_JOIN9(x), x ## 9 #define LLVM_COMMA_JOIN11(x) LLVM_COMMA_JOIN10(x), x ## 10 #define LLVM_COMMA_JOIN12(x) LLVM_COMMA_JOIN11(x), x ## 11 #define LLVM_COMMA_JOIN13(x) LLVM_COMMA_JOIN12(x), x ## 12 #define LLVM_COMMA_JOIN14(x) LLVM_COMMA_JOIN13(x), x ## 13 #define LLVM_COMMA_JOIN15(x) LLVM_COMMA_JOIN14(x), x ## 14 #define LLVM_COMMA_JOIN16(x) LLVM_COMMA_JOIN15(x), x ## 15 #define LLVM_COMMA_JOIN17(x) LLVM_COMMA_JOIN16(x), x ## 16 #define LLVM_COMMA_JOIN18(x) LLVM_COMMA_JOIN17(x), x ## 17 #define LLVM_COMMA_JOIN19(x) LLVM_COMMA_JOIN18(x), x ## 18 #define LLVM_COMMA_JOIN20(x) LLVM_COMMA_JOIN19(x), x ## 19 #define LLVM_COMMA_JOIN21(x) LLVM_COMMA_JOIN20(x), x ## 20 #define LLVM_COMMA_JOIN22(x) LLVM_COMMA_JOIN21(x), x ## 21 #define LLVM_COMMA_JOIN23(x) 
LLVM_COMMA_JOIN22(x), x ## 22 #define LLVM_COMMA_JOIN24(x) LLVM_COMMA_JOIN23(x), x ## 23 #define LLVM_COMMA_JOIN25(x) LLVM_COMMA_JOIN24(x), x ## 24 #define LLVM_COMMA_JOIN26(x) LLVM_COMMA_JOIN25(x), x ## 25 #define LLVM_COMMA_JOIN27(x) LLVM_COMMA_JOIN26(x), x ## 26 #define LLVM_COMMA_JOIN28(x) LLVM_COMMA_JOIN27(x), x ## 27 #define LLVM_COMMA_JOIN29(x) LLVM_COMMA_JOIN28(x), x ## 28 #define LLVM_COMMA_JOIN30(x) LLVM_COMMA_JOIN29(x), x ## 29 #define LLVM_COMMA_JOIN31(x) LLVM_COMMA_JOIN30(x), x ## 30 #define LLVM_COMMA_JOIN32(x) LLVM_COMMA_JOIN31(x), x ## 31 /// \brief Class which can simulate a type-safe variadic function. /// /// The VariadicFunction class template makes it easy to define /// type-safe variadic functions where all arguments have the same /// type. /// /// Suppose we need a variadic function like this: /// /// ResultT Foo(const ArgT &A_0, const ArgT &A_1, ..., const ArgT &A_N); /// /// Instead of many overloads of Foo(), we only need to define a helper /// function that takes an array of arguments: /// /// ResultT FooImpl(ArrayRef<const ArgT *> Args) { /// // 'Args[i]' is a pointer to the i-th argument passed to Foo(). /// ... /// } /// /// and then define Foo() like this: /// /// const VariadicFunction<ResultT, ArgT, FooImpl> Foo; /// /// VariadicFunction takes care of defining the overloads of Foo(). /// /// Actually, Foo is a function object (i.e. functor) instead of a plain /// function. This object is stateless and its constructor/destructor /// does nothing, so it's safe to create global objects and call Foo(...) at /// any time. /// /// Sometimes we need a variadic function to have some fixed leading /// arguments whose types may be different from that of the optional /// arguments. For example: /// /// bool FullMatch(const StringRef &S, const RE &Regex, /// const ArgT &A_0, ..., const ArgT &A_N); /// /// VariadicFunctionN is for such cases, where N is the number of fixed /// arguments. 
It is like VariadicFunction, except that it takes N more /// template arguments for the types of the fixed arguments: /// /// bool FullMatchImpl(const StringRef &S, const RE &Regex, /// ArrayRef<const ArgT *> Args) { ... } /// const VariadicFunction2<bool, const StringRef&, /// const RE&, ArgT, FullMatchImpl> /// FullMatch; /// /// Currently VariadicFunction and friends support up-to 3 /// fixed leading arguments and up-to 32 optional arguments. template <typename ResultT, typename ArgT, ResultT (*Func)(ArrayRef<const ArgT *>)> struct VariadicFunction { ResultT operator()() const { return Func(None); } #define LLVM_DEFINE_OVERLOAD(N) \ ResultT operator()(LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \ const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \ return Func(makeArrayRef(Args)); \ } LLVM_DEFINE_OVERLOAD(1) LLVM_DEFINE_OVERLOAD(2) LLVM_DEFINE_OVERLOAD(3) LLVM_DEFINE_OVERLOAD(4) LLVM_DEFINE_OVERLOAD(5) LLVM_DEFINE_OVERLOAD(6) LLVM_DEFINE_OVERLOAD(7) LLVM_DEFINE_OVERLOAD(8) LLVM_DEFINE_OVERLOAD(9) LLVM_DEFINE_OVERLOAD(10) LLVM_DEFINE_OVERLOAD(11) LLVM_DEFINE_OVERLOAD(12) LLVM_DEFINE_OVERLOAD(13) LLVM_DEFINE_OVERLOAD(14) LLVM_DEFINE_OVERLOAD(15) LLVM_DEFINE_OVERLOAD(16) LLVM_DEFINE_OVERLOAD(17) LLVM_DEFINE_OVERLOAD(18) LLVM_DEFINE_OVERLOAD(19) LLVM_DEFINE_OVERLOAD(20) LLVM_DEFINE_OVERLOAD(21) LLVM_DEFINE_OVERLOAD(22) LLVM_DEFINE_OVERLOAD(23) LLVM_DEFINE_OVERLOAD(24) LLVM_DEFINE_OVERLOAD(25) LLVM_DEFINE_OVERLOAD(26) LLVM_DEFINE_OVERLOAD(27) LLVM_DEFINE_OVERLOAD(28) LLVM_DEFINE_OVERLOAD(29) LLVM_DEFINE_OVERLOAD(30) LLVM_DEFINE_OVERLOAD(31) LLVM_DEFINE_OVERLOAD(32) #undef LLVM_DEFINE_OVERLOAD }; template <typename ResultT, typename Param0T, typename ArgT, ResultT (*Func)(Param0T, ArrayRef<const ArgT *>)> struct VariadicFunction1 { ResultT operator()(Param0T P0) const { return Func(P0, None); } #define LLVM_DEFINE_OVERLOAD(N) \ ResultT operator()(Param0T P0, LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \ const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; 
\ return Func(P0, makeArrayRef(Args)); \ } LLVM_DEFINE_OVERLOAD(1) LLVM_DEFINE_OVERLOAD(2) LLVM_DEFINE_OVERLOAD(3) LLVM_DEFINE_OVERLOAD(4) LLVM_DEFINE_OVERLOAD(5) LLVM_DEFINE_OVERLOAD(6) LLVM_DEFINE_OVERLOAD(7) LLVM_DEFINE_OVERLOAD(8) LLVM_DEFINE_OVERLOAD(9) LLVM_DEFINE_OVERLOAD(10) LLVM_DEFINE_OVERLOAD(11) LLVM_DEFINE_OVERLOAD(12) LLVM_DEFINE_OVERLOAD(13) LLVM_DEFINE_OVERLOAD(14) LLVM_DEFINE_OVERLOAD(15) LLVM_DEFINE_OVERLOAD(16) LLVM_DEFINE_OVERLOAD(17) LLVM_DEFINE_OVERLOAD(18) LLVM_DEFINE_OVERLOAD(19) LLVM_DEFINE_OVERLOAD(20) LLVM_DEFINE_OVERLOAD(21) LLVM_DEFINE_OVERLOAD(22) LLVM_DEFINE_OVERLOAD(23) LLVM_DEFINE_OVERLOAD(24) LLVM_DEFINE_OVERLOAD(25) LLVM_DEFINE_OVERLOAD(26) LLVM_DEFINE_OVERLOAD(27) LLVM_DEFINE_OVERLOAD(28) LLVM_DEFINE_OVERLOAD(29) LLVM_DEFINE_OVERLOAD(30) LLVM_DEFINE_OVERLOAD(31) LLVM_DEFINE_OVERLOAD(32) #undef LLVM_DEFINE_OVERLOAD }; template <typename ResultT, typename Param0T, typename Param1T, typename ArgT, ResultT (*Func)(Param0T, Param1T, ArrayRef<const ArgT *>)> struct VariadicFunction2 { ResultT operator()(Param0T P0, Param1T P1) const { return Func(P0, P1, None); } #define LLVM_DEFINE_OVERLOAD(N) \ ResultT operator()(Param0T P0, Param1T P1, \ LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \ const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \ return Func(P0, P1, makeArrayRef(Args)); \ } LLVM_DEFINE_OVERLOAD(1) LLVM_DEFINE_OVERLOAD(2) LLVM_DEFINE_OVERLOAD(3) LLVM_DEFINE_OVERLOAD(4) LLVM_DEFINE_OVERLOAD(5) LLVM_DEFINE_OVERLOAD(6) LLVM_DEFINE_OVERLOAD(7) LLVM_DEFINE_OVERLOAD(8) LLVM_DEFINE_OVERLOAD(9) LLVM_DEFINE_OVERLOAD(10) LLVM_DEFINE_OVERLOAD(11) LLVM_DEFINE_OVERLOAD(12) LLVM_DEFINE_OVERLOAD(13) LLVM_DEFINE_OVERLOAD(14) LLVM_DEFINE_OVERLOAD(15) LLVM_DEFINE_OVERLOAD(16) LLVM_DEFINE_OVERLOAD(17) LLVM_DEFINE_OVERLOAD(18) LLVM_DEFINE_OVERLOAD(19) LLVM_DEFINE_OVERLOAD(20) LLVM_DEFINE_OVERLOAD(21) LLVM_DEFINE_OVERLOAD(22) LLVM_DEFINE_OVERLOAD(23) LLVM_DEFINE_OVERLOAD(24) LLVM_DEFINE_OVERLOAD(25) LLVM_DEFINE_OVERLOAD(26) 
LLVM_DEFINE_OVERLOAD(27) LLVM_DEFINE_OVERLOAD(28) LLVM_DEFINE_OVERLOAD(29) LLVM_DEFINE_OVERLOAD(30) LLVM_DEFINE_OVERLOAD(31) LLVM_DEFINE_OVERLOAD(32) #undef LLVM_DEFINE_OVERLOAD }; template <typename ResultT, typename Param0T, typename Param1T, typename Param2T, typename ArgT, ResultT (*Func)(Param0T, Param1T, Param2T, ArrayRef<const ArgT *>)> struct VariadicFunction3 { ResultT operator()(Param0T P0, Param1T P1, Param2T P2) const { return Func(P0, P1, P2, None); } #define LLVM_DEFINE_OVERLOAD(N) \ ResultT operator()(Param0T P0, Param1T P1, Param2T P2, \ LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \ const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \ return Func(P0, P1, P2, makeArrayRef(Args)); \ } LLVM_DEFINE_OVERLOAD(1) LLVM_DEFINE_OVERLOAD(2) LLVM_DEFINE_OVERLOAD(3) LLVM_DEFINE_OVERLOAD(4) LLVM_DEFINE_OVERLOAD(5) LLVM_DEFINE_OVERLOAD(6) LLVM_DEFINE_OVERLOAD(7) LLVM_DEFINE_OVERLOAD(8) LLVM_DEFINE_OVERLOAD(9) LLVM_DEFINE_OVERLOAD(10) LLVM_DEFINE_OVERLOAD(11) LLVM_DEFINE_OVERLOAD(12) LLVM_DEFINE_OVERLOAD(13) LLVM_DEFINE_OVERLOAD(14) LLVM_DEFINE_OVERLOAD(15) LLVM_DEFINE_OVERLOAD(16) LLVM_DEFINE_OVERLOAD(17) LLVM_DEFINE_OVERLOAD(18) LLVM_DEFINE_OVERLOAD(19) LLVM_DEFINE_OVERLOAD(20) LLVM_DEFINE_OVERLOAD(21) LLVM_DEFINE_OVERLOAD(22) LLVM_DEFINE_OVERLOAD(23) LLVM_DEFINE_OVERLOAD(24) LLVM_DEFINE_OVERLOAD(25) LLVM_DEFINE_OVERLOAD(26) LLVM_DEFINE_OVERLOAD(27) LLVM_DEFINE_OVERLOAD(28) LLVM_DEFINE_OVERLOAD(29) LLVM_DEFINE_OVERLOAD(30) LLVM_DEFINE_OVERLOAD(31) LLVM_DEFINE_OVERLOAD(32) #undef LLVM_DEFINE_OVERLOAD }; // Cleanup the macro namespace. 
#undef LLVM_COMMA_JOIN1 #undef LLVM_COMMA_JOIN2 #undef LLVM_COMMA_JOIN3 #undef LLVM_COMMA_JOIN4 #undef LLVM_COMMA_JOIN5 #undef LLVM_COMMA_JOIN6 #undef LLVM_COMMA_JOIN7 #undef LLVM_COMMA_JOIN8 #undef LLVM_COMMA_JOIN9 #undef LLVM_COMMA_JOIN10 #undef LLVM_COMMA_JOIN11 #undef LLVM_COMMA_JOIN12 #undef LLVM_COMMA_JOIN13 #undef LLVM_COMMA_JOIN14 #undef LLVM_COMMA_JOIN15 #undef LLVM_COMMA_JOIN16 #undef LLVM_COMMA_JOIN17 #undef LLVM_COMMA_JOIN18 #undef LLVM_COMMA_JOIN19 #undef LLVM_COMMA_JOIN20 #undef LLVM_COMMA_JOIN21 #undef LLVM_COMMA_JOIN22 #undef LLVM_COMMA_JOIN23 #undef LLVM_COMMA_JOIN24 #undef LLVM_COMMA_JOIN25 #undef LLVM_COMMA_JOIN26 #undef LLVM_COMMA_JOIN27 #undef LLVM_COMMA_JOIN28 #undef LLVM_COMMA_JOIN29 #undef LLVM_COMMA_JOIN30 #undef LLVM_COMMA_JOIN31 #undef LLVM_COMMA_JOIN32 } // end namespace llvm #endif // LLVM_ADT_VARIADICFUNCTION_H
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/UniqueVector.h
//===-- llvm/ADT/UniqueVector.h ---------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_UNIQUEVECTOR_H
#define LLVM_ADT_UNIQUEVECTOR_H

#include <cassert>
#include <map>
#include <vector>

namespace llvm {

//===----------------------------------------------------------------------===//
/// UniqueVector - This class produces a sequential ID number (base 1) for each
/// unique entry that is added.  T is the type of entries in the vector. This
/// class should have an implementation of operator== and of operator<.
/// Entries can be fetched using operator[] with the entry ID.
template<class T> class UniqueVector {
public:
  typedef typename std::vector<T> VectorType;
  typedef typename VectorType::iterator iterator;
  typedef typename VectorType::const_iterator const_iterator;

private:
  // Map - Used to handle the correspondence of entry to ID.
  std::map<T, unsigned> Map;

  // Vector - ID ordered vector of entries. Entries can be indexed by ID - 1.
  VectorType Vector;

public:
  /// insert - Append entry to the vector if it doesn't already exist.  Returns
  /// the entry's index + 1 to be used as a unique ID.
  unsigned insert(const T &Entry) {
    // Check if the entry is already in the map.
    unsigned &Val = Map[Entry];

    // See if entry exists, if so return prior ID.
    if (Val) return Val;

    // Compute ID for entry.
    Val = static_cast<unsigned>(Vector.size()) + 1;

    // Insert in vector.
    Vector.push_back(Entry);
    return Val;
  }

  /// idFor - return the ID for an existing entry.  Returns 0 if the entry is
  /// not found.
  unsigned idFor(const T &Entry) const {
    // Search for entry in the map.
    typename std::map<T, unsigned>::const_iterator MI = Map.find(Entry);

    // See if entry exists, if so return ID.
    if (MI != Map.end()) return MI->second;

    // No luck.
    return 0;
  }

  /// operator[] - Returns a reference to the entry with the specified ID.
  /// IDs are 1-based; passing 0 (or an out-of-range ID) asserts.
  const T &operator[](unsigned ID) const {
    assert(ID-1 < size() && "ID is 0 or out of range!");
    return Vector[ID - 1];
  }

  /// \brief Return an iterator to the start of the vector.
  iterator begin() { return Vector.begin(); }

  /// \brief Return an iterator to the start of the vector.
  const_iterator begin() const { return Vector.begin(); }

  /// \brief Return an iterator to the end of the vector.
  iterator end() { return Vector.end(); }

  /// \brief Return an iterator to the end of the vector.
  const_iterator end() const { return Vector.end(); }

  /// size - Returns the number of entries in the vector.
  size_t size() const { return Vector.size(); }

  /// empty - Returns true if the vector is empty.
  bool empty() const { return Vector.empty(); }

  /// reset - Clears all the entries.  Subsequent insertions restart IDs at 1.
  void reset() {
    Map.clear();
    // Use clear() rather than the previous resize(0, 0): resize(0, 0)
    // materializes a temporary T from the literal 0, which is ill-formed
    // (or undefined behavior, e.g. std::string(nullptr)) for element types
    // that are not constructible from an integer.
    Vector.clear();
  }
};

} // End of namespace llvm

#endif // LLVM_ADT_UNIQUEVECTOR_H
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/ilist_node.h
//==-- llvm/ADT/ilist_node.h - Intrusive Linked List Helper ------*- C++ -*-==// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the ilist_node class template, which is a convenient // base class for creating classes that can be used with ilists. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_ILIST_NODE_H #define LLVM_ADT_ILIST_NODE_H #include "llvm/Config/abi-breaking.h" namespace llvm { template<typename NodeTy> struct ilist_traits; /// ilist_half_node - Base class that provides prev services for sentinels. /// template<typename NodeTy> class ilist_half_node { friend struct ilist_traits<NodeTy>; NodeTy *Prev; protected: NodeTy *getPrev() { return Prev; } const NodeTy *getPrev() const { return Prev; } void setPrev(NodeTy *P) { Prev = P; } ilist_half_node() : Prev(nullptr) {} }; template<typename NodeTy> struct ilist_nextprev_traits; /// ilist_node - Base class that provides next/prev services for nodes /// that use ilist_nextprev_traits or ilist_default_traits. /// template<typename NodeTy> class ilist_node : private ilist_half_node<NodeTy> { friend struct ilist_nextprev_traits<NodeTy>; friend struct ilist_traits<NodeTy>; NodeTy *Next; NodeTy *getNext() { return Next; } const NodeTy *getNext() const { return Next; } void setNext(NodeTy *N) { Next = N; } protected: ilist_node() : Next(nullptr) {} public: /// @name Adjacent Node Accessors /// @{ /// \brief Get the previous node, or 0 for the list head. NodeTy *getPrevNode() { NodeTy *Prev = this->getPrev(); // Check for sentinel. if (Prev && !Prev->getNext()) // HLSL Change: Prev may be nullptr return nullptr; return Prev; } /// \brief Get the previous node, or 0 for the list head. 
const NodeTy *getPrevNode() const { const NodeTy *Prev = this->getPrev(); // Check for sentinel. if (Prev && !Prev->getNext()) // HLSL Change: Prev may be nullptr return nullptr; return Prev; } /// \brief Get the next node, or 0 for the list tail. NodeTy *getNextNode() { NodeTy *Next = getNext(); // Check for sentinel. if (Next && !Next->getNext()) // HLSL Change: Next may be nullptr return nullptr; return Next; } /// \brief Get the next node, or 0 for the list tail. const NodeTy *getNextNode() const { const NodeTy *Next = getNext(); // Check for sentinel. if (Next && !Next->getNext()) // HLSL Change: Next may be nullptr return nullptr; return Next; } /// @} }; } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/SparseBitVector.h
//===- llvm/ADT/SparseBitVector.h - Efficient Sparse BitVector -*- C++ -*- ===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the SparseBitVector class. See the doxygen comment for // SparseBitVector for more details on the algorithm used. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_SPARSEBITVECTOR_H #define LLVM_ADT_SPARSEBITVECTOR_H #include "llvm/ADT/ilist.h" #include "llvm/ADT/ilist_node.h" #include "llvm/Support/DataTypes.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" #include <cassert> #include <climits> namespace llvm { /// SparseBitVector is an implementation of a bitvector that is sparse by only /// storing the elements that have non-zero bits set. In order to make this /// fast for the most common cases, SparseBitVector is implemented as a linked /// list of SparseBitVectorElements. We maintain a pointer to the last /// SparseBitVectorElement accessed (in the form of a list iterator), in order /// to make multiple in-order test/set constant time after the first one is /// executed. Note that using vectors to store SparseBitVectorElement's does /// not work out very well because it causes insertion in the middle to take /// enormous amounts of time with a large amount of bits. Other structures that /// have better worst cases for insertion in the middle (various balanced trees, /// etc) do not perform as well in practice as a linked list with this iterator /// kept up to date. They are also significantly more memory intensive. 
/// SparseBitVectorElement - one fixed-size chunk (ElementSize bits) of a
/// SparseBitVector, stored as a node in an intrusive linked list.  Only
/// elements that contain at least one set bit are kept in the list.
template <unsigned ElementSize = 128>
struct SparseBitVectorElement
  : public ilist_node<SparseBitVectorElement<ElementSize> > {
public:
  typedef unsigned long BitWord;
  typedef unsigned size_type;
  enum {
    BITWORD_SIZE = sizeof(BitWord) * CHAR_BIT,
    BITWORDS_PER_ELEMENT = (ElementSize + BITWORD_SIZE - 1) / BITWORD_SIZE,
    BITS_PER_ELEMENT = ElementSize
  };

private:
  // Index of Element in terms of where first bit starts.
  unsigned ElementIndex;
  BitWord Bits[BITWORDS_PER_ELEMENT];
  // Needed for sentinels
  friend struct ilist_sentinel_traits<SparseBitVectorElement>;
  // Default constructor is only for the ilist sentinel: index ~0, all bits
  // cleared.
  SparseBitVectorElement() {
    ElementIndex = ~0U;
    memset(&Bits[0], 0, sizeof (BitWord) * BITWORDS_PER_ELEMENT);
  }

public:
  // Construct an empty element covering bits
  // [Idx * ElementSize, (Idx + 1) * ElementSize).
  explicit SparseBitVectorElement(unsigned Idx) {
    ElementIndex = Idx;
    memset(&Bits[0], 0, sizeof (BitWord) * BITWORDS_PER_ELEMENT);
  }

  // Comparison.
  bool operator==(const SparseBitVectorElement &RHS) const {
    if (ElementIndex != RHS.ElementIndex)
      return false;
    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
      if (Bits[i] != RHS.Bits[i])
        return false;
    return true;
  }

  bool operator!=(const SparseBitVectorElement &RHS) const {
    return !(*this == RHS);
  }

  // Return the bits that make up word Idx in our element.
  BitWord word(unsigned Idx) const {
    assert (Idx < BITWORDS_PER_ELEMENT);
    return Bits[Idx];
  }

  // Return this element's position in the sparse vector (bit offset is
  // ElementIndex * ElementSize).
  unsigned index() const {
    return ElementIndex;
  }

  // Return true if no bit is set in this element.
  bool empty() const {
    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
      if (Bits[i])
        return false;
    return true;
  }

  // Set bit Idx (element-relative, 0 <= Idx < ElementSize).
  void set(unsigned Idx) {
    Bits[Idx / BITWORD_SIZE] |= 1L << (Idx % BITWORD_SIZE);
  }

  // Set bit Idx; return true if it was previously clear.
  bool test_and_set (unsigned Idx) {
    bool old = test(Idx);
    if (!old) {
      set(Idx);
      return true;
    }
    return false;
  }

  // Clear bit Idx (element-relative).
  void reset(unsigned Idx) {
    Bits[Idx / BITWORD_SIZE] &= ~(1L << (Idx % BITWORD_SIZE));
  }

  // Return true if bit Idx (element-relative) is set.
  bool test(unsigned Idx) const {
    return Bits[Idx / BITWORD_SIZE] & (1L << (Idx % BITWORD_SIZE));
  }

  // Return the number of set bits in this element.
  size_type count() const {
    unsigned NumBits = 0;
    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
      NumBits += countPopulation(Bits[i]);
    return NumBits;
  }

  /// find_first - Returns the index of the first set bit.
  int find_first() const {
    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
      if (Bits[i] != 0)
        return i * BITWORD_SIZE + countTrailingZeros(Bits[i]);
    llvm_unreachable("Illegal empty element");
  }

  /// find_next - Returns the index of the next set bit starting from the
  /// "Curr" bit. Returns -1 if the next set bit is not found.
  int find_next(unsigned Curr) const {
    if (Curr >= BITS_PER_ELEMENT)
      return -1;

    unsigned WordPos = Curr / BITWORD_SIZE;
    unsigned BitPos = Curr % BITWORD_SIZE;
    BitWord Copy = Bits[WordPos];
    assert (WordPos <= BITWORDS_PER_ELEMENT
            && "Word Position outside of element");

    // Mask off previous bits.
    Copy &= ~0UL << BitPos;

    if (Copy != 0)
      return WordPos * BITWORD_SIZE + countTrailingZeros(Copy);

    // Check subsequent words.
    for (unsigned i = WordPos+1; i < BITWORDS_PER_ELEMENT; ++i)
      if (Bits[i] != 0)
        return i * BITWORD_SIZE + countTrailingZeros(Bits[i]);
    return -1;
  }

  // Union this element with RHS and return true if this one changed.
  bool unionWith(const SparseBitVectorElement &RHS) {
    bool changed = false;
    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
      // Once a change is detected we no longer need the old value, so skip
      // saving it on later iterations.
      BitWord old = changed ? 0 : Bits[i];

      Bits[i] |= RHS.Bits[i];
      if (!changed && old != Bits[i])
        changed = true;
    }
    return changed;
  }

  // Return true if we have any bits in common with RHS
  bool intersects(const SparseBitVectorElement &RHS) const {
    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
      if (RHS.Bits[i] & Bits[i])
        return true;
    }
    return false;
  }

  // Intersect this Element with RHS and return true if this one changed.
  // BecameZero is set to true if this element became all-zero bits.
  bool intersectWith(const SparseBitVectorElement &RHS,
                     bool &BecameZero) {
    bool changed = false;
    bool allzero = true;

    BecameZero = false;
    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
      BitWord old = changed ? 0 : Bits[i];

      Bits[i] &= RHS.Bits[i];
      if (Bits[i] != 0)
        allzero = false;

      if (!changed && old != Bits[i])
        changed = true;
    }
    BecameZero = allzero;
    return changed;
  }

  // Intersect this Element with the complement of RHS and return true if this
  // one changed.  BecameZero is set to true if this element became all-zero
  // bits.
  bool intersectWithComplement(const SparseBitVectorElement &RHS,
                               bool &BecameZero) {
    bool changed = false;
    bool allzero = true;

    BecameZero = false;
    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
      BitWord old = changed ? 0 : Bits[i];

      Bits[i] &= ~RHS.Bits[i];
      if (Bits[i] != 0)
        allzero = false;

      if (!changed && old != Bits[i])
        changed = true;
    }
    BecameZero = allzero;
    return changed;
  }

  // Three argument version of intersectWithComplement that intersects
  // RHS1 & ~RHS2 into this element
  void intersectWithComplement(const SparseBitVectorElement &RHS1,
                               const SparseBitVectorElement &RHS2,
                               bool &BecameZero) {
    bool allzero = true;

    BecameZero = false;
    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
      Bits[i] = RHS1.Bits[i] & ~RHS2.Bits[i];
      if (Bits[i] != 0)
        allzero = false;
    }
    BecameZero = allzero;
  }
};

// ilist customization: elements use an ilist_half_node sentinel to keep the
// per-list overhead small.
template <unsigned ElementSize>
struct ilist_traits<SparseBitVectorElement<ElementSize> >
  : public ilist_default_traits<SparseBitVectorElement<ElementSize> > {
  typedef SparseBitVectorElement<ElementSize> Element;

// HLSL Change Starts
// Temporarily disable "downcast of address" UBSAN runtime error
// https://github.com/microsoft/DirectXShaderCompiler/issues/6446
#ifdef __has_feature
#if __has_feature(undefined_behavior_sanitizer)
  __attribute__((no_sanitize("undefined")))
#endif // __has_feature(address_sanitizer)
#endif // defined(__has_feature)
// HLSL Change Ends
  Element *createSentinel() const { return static_cast<Element *>(&Sentinel); }
  static void destroySentinel(Element *) {}

  Element *provideInitialHead() const { return createSentinel(); }
  Element *ensureHead(Element *) const { return createSentinel(); }
  static void noteHead(Element *, Element *) {}

private:
  mutable ilist_half_node<Element> Sentinel;
};

/// SparseBitVector - a bitmap stored as a sorted list of non-zero
/// ElementSize-bit chunks.  A cached iterator (CurrElementIter) remembers the
/// most recently touched element so in-order test/set sequences are amortized
/// constant time.
template <unsigned ElementSize = 128>
class SparseBitVector {
  typedef ilist<SparseBitVectorElement<ElementSize> > ElementList;
  typedef typename ElementList::iterator ElementListIter;
  typedef typename ElementList::const_iterator ElementListConstIter;
  enum {
    BITWORD_SIZE = SparseBitVectorElement<ElementSize>::BITWORD_SIZE
  };

  // Pointer to our current Element.
  ElementListIter CurrElementIter;
  ElementList Elements;

  // This is like std::lower_bound, except we do linear searching from the
  // current position.  On success CurrElementIter is updated to the returned
  // position (note: mutates the cache, which is why test() below is
  // non-const).
  ElementListIter FindLowerBound(unsigned ElementIndex) {

    if (Elements.empty()) {
      CurrElementIter = Elements.begin();
      return Elements.begin();
    }

    // Make sure our current iterator is valid.
    if (CurrElementIter == Elements.end())
      --CurrElementIter;

    // Search from our current iterator, either backwards or forwards,
    // depending on what element we are looking for.
    ElementListIter ElementIter = CurrElementIter;
    if (CurrElementIter->index() == ElementIndex) {
      return ElementIter;
    } else if (CurrElementIter->index() > ElementIndex) {
      while (ElementIter != Elements.begin()
             && ElementIter->index() > ElementIndex)
        --ElementIter;
    } else {
      while (ElementIter != Elements.end() &&
             ElementIter->index() < ElementIndex)
        ++ElementIter;
    }
    CurrElementIter = ElementIter;
    return ElementIter;
  }

  // Iterator to walk set bits in the bitmap.  This iterator is a lot uglier
  // than it would be, in order to be efficient.
  class SparseBitVectorIterator {
  private:
    bool AtEnd;

    const SparseBitVector<ElementSize> *BitVector;

    // Current element inside of bitmap.
    ElementListConstIter Iter;

    // Current bit number inside of our bitmap.
    unsigned BitNumber;

    // Current word number inside of our element.
    unsigned WordNumber;

    // Current bits from the element.  Already-consumed low bits are shifted
    // out, so bit 0 of Bits always corresponds to BitNumber.
    typename SparseBitVectorElement<ElementSize>::BitWord Bits;

    // Move our iterator to the first non-zero bit in the bitmap.
    void AdvanceToFirstNonZero() {
      if (AtEnd)
        return;
      if (BitVector->Elements.empty()) {
        AtEnd = true;
        return;
      }
      Iter = BitVector->Elements.begin();
      BitNumber = Iter->index() * ElementSize;
      unsigned BitPos = Iter->find_first();
      BitNumber += BitPos;
      WordNumber = (BitNumber % ElementSize) / BITWORD_SIZE;
      Bits = Iter->word(WordNumber);
      Bits >>= BitPos % BITWORD_SIZE;
    }

    // Move our iterator to the next non-zero bit.
    void AdvanceToNextNonZero() {
      if (AtEnd)
        return;

      while (Bits && !(Bits & 1)) {
        Bits >>= 1;
        BitNumber += 1;
      }

      // See if we ran out of Bits in this word.
      if (!Bits) {
        int NextSetBitNumber = Iter->find_next(BitNumber % ElementSize) ;
        // If we ran out of set bits in this element, move to next element.
        if (NextSetBitNumber == -1 || (BitNumber % ElementSize == 0)) {
          ++Iter;
          WordNumber = 0;

          // We may run out of elements in the bitmap.
          if (Iter == BitVector->Elements.end()) {
            AtEnd = true;
            return;
          }
          // Set up for next non-zero word in bitmap.
          BitNumber = Iter->index() * ElementSize;
          NextSetBitNumber = Iter->find_first();
          BitNumber += NextSetBitNumber;
          WordNumber = (BitNumber % ElementSize) / BITWORD_SIZE;
          Bits = Iter->word(WordNumber);
          Bits >>= NextSetBitNumber % BITWORD_SIZE;
        } else {
          WordNumber = (NextSetBitNumber % ElementSize) / BITWORD_SIZE;
          Bits = Iter->word(WordNumber);
          Bits >>= NextSetBitNumber % BITWORD_SIZE;
          BitNumber = Iter->index() * ElementSize;
          BitNumber += NextSetBitNumber;
        }
      }
    }
  public:
    // Preincrement.
    inline SparseBitVectorIterator& operator++() {
      ++BitNumber;
      Bits >>= 1;
      AdvanceToNextNonZero();
      return *this;
    }

    // Postincrement.
    inline SparseBitVectorIterator operator++(int) {
      SparseBitVectorIterator tmp = *this;
      ++*this;
      return tmp;
    }

    // Return the current set bit number.
    unsigned operator*() const {
      return BitNumber;
    }

    bool operator==(const SparseBitVectorIterator &RHS) const {
      // If they are both at the end, ignore the rest of the fields.
      if (AtEnd && RHS.AtEnd)
        return true;
      // Otherwise they are the same if they have the same bit number and
      // bitmap.
      return AtEnd == RHS.AtEnd && RHS.BitNumber == BitNumber;
    }

    bool operator!=(const SparseBitVectorIterator &RHS) const {
      return !(*this == RHS);
    }

    SparseBitVectorIterator(): BitVector(NULL) {
    }

    // end = true constructs the past-the-end iterator.
    SparseBitVectorIterator(const SparseBitVector<ElementSize> *RHS,
                            bool end = false):BitVector(RHS) {
      Iter = BitVector->Elements.begin();
      BitNumber = 0;
      Bits = 0;
      WordNumber = ~0;
      AtEnd = end;
      AdvanceToFirstNonZero();
    }
  };
public:
  typedef SparseBitVectorIterator iterator;

  SparseBitVector () {
    CurrElementIter = Elements.begin ();
  }

  ~SparseBitVector() {
  }

  // SparseBitVector copy ctor.
  SparseBitVector(const SparseBitVector &RHS) {
    ElementListConstIter ElementIter = RHS.Elements.begin();
    while (ElementIter != RHS.Elements.end()) {
      Elements.push_back(SparseBitVectorElement<ElementSize>(*ElementIter));
      ++ElementIter;
    }

    CurrElementIter = Elements.begin ();
  }

  // Clear.
  void clear() {
    Elements.clear();
  }

  // Assignment
  SparseBitVector& operator=(const SparseBitVector& RHS) {
    Elements.clear();

    ElementListConstIter ElementIter = RHS.Elements.begin();
    while (ElementIter != RHS.Elements.end()) {
      Elements.push_back(SparseBitVectorElement<ElementSize>(*ElementIter));
      ++ElementIter;
    }

    CurrElementIter = Elements.begin ();

    return *this;
  }

  // Test, Reset, and Set a bit in the bitmap.
  // Note: non-const because FindLowerBound updates the CurrElementIter cache.
  bool test(unsigned Idx) {
    if (Elements.empty())
      return false;

    unsigned ElementIndex = Idx / ElementSize;
    ElementListIter ElementIter = FindLowerBound(ElementIndex);

    // If we can't find an element that is supposed to contain this bit, there
    // is nothing more to do.
    if (ElementIter == Elements.end() ||
        ElementIter->index() != ElementIndex)
      return false;
    return ElementIter->test(Idx % ElementSize);
  }

  // Clear bit Idx; erases the containing element when it becomes all-zero.
  void reset(unsigned Idx) {
    if (Elements.empty())
      return;

    unsigned ElementIndex = Idx / ElementSize;
    ElementListIter ElementIter = FindLowerBound(ElementIndex);

    // If we can't find an element that is supposed to contain this bit, there
    // is nothing more to do.
    if (ElementIter == Elements.end() ||
        ElementIter->index() != ElementIndex)
      return;
    ElementIter->reset(Idx % ElementSize);

    // When the element is zeroed out, delete it.
    if (ElementIter->empty()) {
      // Move the cached iterator off the element we are about to erase so it
      // stays valid.
      ++CurrElementIter;
      Elements.erase(ElementIter);
    }
  }

  // Set bit Idx, creating (and inserting in sorted position) the containing
  // element if necessary.
  void set(unsigned Idx) {
    unsigned ElementIndex = Idx / ElementSize;
    SparseBitVectorElement<ElementSize> *Element;
    ElementListIter ElementIter;
    if (Elements.empty()) {
      Element = new SparseBitVectorElement<ElementSize>(ElementIndex);
      ElementIter = Elements.insert(Elements.end(), Element);

    } else {
      ElementIter = FindLowerBound(ElementIndex);

      if (ElementIter == Elements.end() ||
          ElementIter->index() != ElementIndex) {
        Element = new SparseBitVectorElement<ElementSize>(ElementIndex);
        // We may have hit the beginning of our SparseBitVector, in which case,
        // we may need to insert right after this element, which requires moving
        // the current iterator forward one, because insert does insert before.
        if (ElementIter != Elements.end() &&
            ElementIter->index() < ElementIndex)
          ElementIter = Elements.insert(++ElementIter, Element);
        else
          ElementIter = Elements.insert(ElementIter, Element);
      }
    }
    CurrElementIter = ElementIter;

    ElementIter->set(Idx % ElementSize);
  }

  // Set bit Idx; return true if it was previously clear.
  bool test_and_set (unsigned Idx) {
    bool old = test(Idx);
    if (!old) {
      set(Idx);
      return true;
    }
    return false;
  }

  bool operator!=(const SparseBitVector &RHS) const {
    return !(*this == RHS);
  }

  bool operator==(const SparseBitVector &RHS) const {
    ElementListConstIter Iter1 = Elements.begin();
    ElementListConstIter Iter2 = RHS.Elements.begin();

    for (; Iter1 != Elements.end() && Iter2 != RHS.Elements.end();
         ++Iter1, ++Iter2) {
      if (*Iter1 != *Iter2)
        return false;
    }
    return Iter1 == Elements.end() && Iter2 == RHS.Elements.end();
  }

  // Union our bitmap with the RHS and return true if we changed.
  bool operator|=(const SparseBitVector &RHS) {
    bool changed = false;
    ElementListIter Iter1 = Elements.begin();
    ElementListConstIter Iter2 = RHS.Elements.begin();

    // If RHS is empty, we are done
    if (RHS.Elements.empty())
      return false;

    // Merge the two sorted element lists in one pass.
    while (Iter2 != RHS.Elements.end()) {
      if (Iter1 == Elements.end() || Iter1->index() > Iter2->index()) {
        Elements.insert(Iter1,
                        new SparseBitVectorElement<ElementSize>(*Iter2));
        ++Iter2;
        changed = true;
      } else if (Iter1->index() == Iter2->index()) {
        changed |= Iter1->unionWith(*Iter2);
        ++Iter1;
        ++Iter2;
      } else {
        ++Iter1;
      }
    }
    CurrElementIter = Elements.begin();
    return changed;
  }

  // Intersect our bitmap with the RHS and return true if ours changed.
  bool operator&=(const SparseBitVector &RHS) {
    bool changed = false;
    ElementListIter Iter1 = Elements.begin();
    ElementListConstIter Iter2 = RHS.Elements.begin();

    // Check if both bitmaps are empty.
    if (Elements.empty() && RHS.Elements.empty())
      return false;

    // Loop through, intersecting as we go, erasing elements when necessary.
    while (Iter2 != RHS.Elements.end()) {
      if (Iter1 == Elements.end()) {
        CurrElementIter = Elements.begin();
        return changed;
      }

      if (Iter1->index() > Iter2->index()) {
        ++Iter2;
      } else if (Iter1->index() == Iter2->index()) {
        bool BecameZero;
        changed |= Iter1->intersectWith(*Iter2, BecameZero);
        if (BecameZero) {
          ElementListIter IterTmp = Iter1;
          ++Iter1;
          Elements.erase(IterTmp);
        } else {
          ++Iter1;
        }
        ++Iter2;
      } else {
        // Element exists only in LHS: it cannot survive an intersection.
        ElementListIter IterTmp = Iter1;
        ++Iter1;
        Elements.erase(IterTmp);
      }
    }
    // Anything left in LHS has no counterpart in RHS.
    Elements.erase(Iter1, Elements.end());
    CurrElementIter = Elements.begin();
    return changed;
  }

  // Intersect our bitmap with the complement of the RHS and return true
  // if ours changed.
  bool intersectWithComplement(const SparseBitVector &RHS) {
    bool changed = false;
    ElementListIter Iter1 = Elements.begin();
    ElementListConstIter Iter2 = RHS.Elements.begin();

    // If either our bitmap or RHS is empty, we are done
    if (Elements.empty() || RHS.Elements.empty())
      return false;

    // Loop through, intersecting as we go, erasing elements when necessary.
    while (Iter2 != RHS.Elements.end()) {
      if (Iter1 == Elements.end()) {
        CurrElementIter = Elements.begin();
        return changed;
      }

      if (Iter1->index() > Iter2->index()) {
        ++Iter2;
      } else if (Iter1->index() == Iter2->index()) {
        bool BecameZero;
        changed |= Iter1->intersectWithComplement(*Iter2, BecameZero);
        if (BecameZero) {
          ElementListIter IterTmp = Iter1;
          ++Iter1;
          Elements.erase(IterTmp);
        } else {
          ++Iter1;
        }
        ++Iter2;
      } else {
        ++Iter1;
      }
    }
    CurrElementIter = Elements.begin();
    return changed;
  }

  bool intersectWithComplement(const SparseBitVector<ElementSize> *RHS) const {
    return intersectWithComplement(*RHS);
  }

  //  Three argument version of intersectWithComplement.
  //  Result of RHS1 & ~RHS2 is stored into this bitmap.
  void intersectWithComplement(const SparseBitVector<ElementSize> &RHS1,
                               const SparseBitVector<ElementSize> &RHS2)
  {
    Elements.clear();
    CurrElementIter = Elements.begin();
    ElementListConstIter Iter1 = RHS1.Elements.begin();
    ElementListConstIter Iter2 = RHS2.Elements.begin();

    // If RHS1 is empty, we are done
    // If RHS2 is empty, we still have to copy RHS1
    if (RHS1.Elements.empty())
      return;

    // Loop through, intersecting as we go, erasing elements when necessary.
    while (Iter2 != RHS2.Elements.end()) {
      if (Iter1 == RHS1.Elements.end())
        return;

      if (Iter1->index() > Iter2->index()) {
        ++Iter2;
      } else if (Iter1->index() == Iter2->index()) {
        bool BecameZero = false;
        SparseBitVectorElement<ElementSize> *NewElement =
          new SparseBitVectorElement<ElementSize>(Iter1->index());
        NewElement->intersectWithComplement(*Iter1, *Iter2, BecameZero);
        if (!BecameZero) {
          Elements.push_back(NewElement);
        }
        else
          delete NewElement;
        ++Iter1;
        ++Iter2;
      } else {
        // Element only in RHS1: copy it unchanged.
        SparseBitVectorElement<ElementSize> *NewElement =
          new SparseBitVectorElement<ElementSize>(*Iter1);
        Elements.push_back(NewElement);
        ++Iter1;
      }
    }

    // copy the remaining elements
    while (Iter1 != RHS1.Elements.end()) {
      SparseBitVectorElement<ElementSize> *NewElement =
        new SparseBitVectorElement<ElementSize>(*Iter1);
      Elements.push_back(NewElement);
      ++Iter1;
    }

    return;
  }

  void intersectWithComplement(const SparseBitVector<ElementSize> *RHS1,
                               const SparseBitVector<ElementSize> *RHS2) {
    intersectWithComplement(*RHS1, *RHS2);
  }

  bool intersects(const SparseBitVector<ElementSize> *RHS) const {
    return intersects(*RHS);
  }

  // Return true if we share any bits in common with RHS
  bool intersects(const SparseBitVector<ElementSize> &RHS) const {
    ElementListConstIter Iter1 = Elements.begin();
    ElementListConstIter Iter2 = RHS.Elements.begin();

    // Check if both bitmaps are empty.
    if (Elements.empty() && RHS.Elements.empty())
      return false;

    // Loop through, intersecting stopping when we hit bits in common.
    while (Iter2 != RHS.Elements.end()) {
      if (Iter1 == Elements.end())
        return false;

      if (Iter1->index() > Iter2->index()) {
        ++Iter2;
      } else if (Iter1->index() == Iter2->index()) {
        if (Iter1->intersects(*Iter2))
          return true;
        ++Iter1;
        ++Iter2;
      } else {
        ++Iter1;
      }
    }
    return false;
  }

  // Return true iff all bits set in this SparseBitVector are
  // also set in RHS.
  bool contains(const SparseBitVector<ElementSize> &RHS) const {
    SparseBitVector<ElementSize> Result(*this);
    Result &= RHS;
    return (Result == RHS);
  }

  // Return the first set bit in the bitmap.  Return -1 if no bits are set.
  int find_first() const {
    if (Elements.empty())
      return -1;
    const SparseBitVectorElement<ElementSize> &First = *(Elements.begin());
    return (First.index() * ElementSize) + First.find_first();
  }

  // Return true if the SparseBitVector is empty
  bool empty() const {
    return Elements.empty();
  }

  // Return the total number of set bits.
  unsigned count() const {
    unsigned BitCount = 0;
    for (ElementListConstIter Iter = Elements.begin();
         Iter != Elements.end();
         ++Iter)
      BitCount += Iter->count();
    return BitCount;
  }

  iterator begin() const {
    return iterator(this);
  }

  iterator end() const {
    return iterator(this, true);
  }
};

// Convenience functions to allow Or and And without dereferencing in the user
// code.

template <unsigned ElementSize>
inline bool operator |=(SparseBitVector<ElementSize> &LHS,
                        const SparseBitVector<ElementSize> *RHS) {
  return LHS |= *RHS;
}

template <unsigned ElementSize>
inline bool operator |=(SparseBitVector<ElementSize> *LHS,
                        const SparseBitVector<ElementSize> &RHS) {
  return LHS->operator|=(RHS);
}

template <unsigned ElementSize>
inline bool operator &=(SparseBitVector<ElementSize> *LHS,
                        const SparseBitVector<ElementSize> &RHS) {
  return LHS->operator&=(RHS);
}

template <unsigned ElementSize>
inline bool operator &=(SparseBitVector<ElementSize> &LHS,
                        const SparseBitVector<ElementSize> *RHS) {
  return LHS &= *RHS;
}

// Convenience functions for infix union, intersection, difference operators.
template <unsigned ElementSize> inline SparseBitVector<ElementSize> operator|(const SparseBitVector<ElementSize> &LHS, const SparseBitVector<ElementSize> &RHS) { SparseBitVector<ElementSize> Result(LHS); Result |= RHS; return Result; } template <unsigned ElementSize> inline SparseBitVector<ElementSize> operator&(const SparseBitVector<ElementSize> &LHS, const SparseBitVector<ElementSize> &RHS) { SparseBitVector<ElementSize> Result(LHS); Result &= RHS; return Result; } template <unsigned ElementSize> inline SparseBitVector<ElementSize> operator-(const SparseBitVector<ElementSize> &LHS, const SparseBitVector<ElementSize> &RHS) { SparseBitVector<ElementSize> Result; Result.intersectWithComplement(LHS, RHS); return Result; } // Dump a SparseBitVector to a stream template <unsigned ElementSize> void dump(const SparseBitVector<ElementSize> &LHS, raw_ostream &out) { out << "["; typename SparseBitVector<ElementSize>::iterator bi = LHS.begin(), be = LHS.end(); if (bi != be) { out << *bi; for (++bi; bi != be; ++bi) { out << " " << *bi; } } out << "]\n"; } } // end namespace llvm #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/edit_distance.h
//===-- llvm/ADT/edit_distance.h - Array edit distance function --- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines a Levenshtein distance function that works for any two // sequences, with each element of each sequence being analogous to a character // in a string. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_EDIT_DISTANCE_H #define LLVM_ADT_EDIT_DISTANCE_H #include "llvm/ADT/ArrayRef.h" #include <algorithm> #include <memory> namespace llvm { /// \brief Determine the edit distance between two sequences. /// /// \param FromArray the first sequence to compare. /// /// \param ToArray the second sequence to compare. /// /// \param AllowReplacements whether to allow element replacements (change one /// element into another) as a single operation, rather than as two operations /// (an insertion and a removal). /// /// \param MaxEditDistance If non-zero, the maximum edit distance that this /// routine is allowed to compute. If the edit distance will exceed that /// maximum, returns \c MaxEditDistance+1. /// /// \returns the minimum number of element insertions, removals, or (if /// \p AllowReplacements is \c true) replacements needed to transform one of /// the given sequences into the other. If zero, the sequences are identical. 
template<typename T> unsigned ComputeEditDistance(ArrayRef<T> FromArray, ArrayRef<T> ToArray, bool AllowReplacements = true, unsigned MaxEditDistance = 0) { // The algorithm implemented below is the "classic" // dynamic-programming algorithm for computing the Levenshtein // distance, which is described here: // // http://en.wikipedia.org/wiki/Levenshtein_distance // // Although the algorithm is typically described using an m x n // array, only one row plus one element are used at a time, so this // implementation just keeps one vector for the row. To update one entry, // only the entries to the left, top, and top-left are needed. The left // entry is in Row[x-1], the top entry is what's in Row[x] from the last // iteration, and the top-left entry is stored in Previous. typename ArrayRef<T>::size_type m = FromArray.size(); typename ArrayRef<T>::size_type n = ToArray.size(); const unsigned SmallBufferSize = 64; unsigned SmallBuffer[SmallBufferSize]; std::unique_ptr<unsigned[]> Allocated; unsigned *Row = SmallBuffer; if (n + 1 > SmallBufferSize) { Row = new unsigned[n + 1]; Allocated.reset(Row); } for (unsigned i = 1; i <= n; ++i) Row[i] = i; for (typename ArrayRef<T>::size_type y = 1; y <= m; ++y) { Row[0] = y; unsigned BestThisRow = Row[0]; unsigned Previous = y - 1; for (typename ArrayRef<T>::size_type x = 1; x <= n; ++x) { int OldRow = Row[x]; if (AllowReplacements) { Row[x] = std::min( Previous + (FromArray[y-1] == ToArray[x-1] ? 0u : 1u), std::min(Row[x-1], Row[x])+1); } else { if (FromArray[y-1] == ToArray[x-1]) Row[x] = Previous; else Row[x] = std::min(Row[x-1], Row[x]) + 1; } Previous = OldRow; BestThisRow = std::min(BestThisRow, Row[x]); } if (MaxEditDistance && BestThisRow > MaxEditDistance) return MaxEditDistance + 1; } #pragma warning( push ) // HLSL Change - suppress this warning #pragma warning( disable : 28199 ) // 'Using possibly uninitialized memory '*Row': The variable has had its address taken but no assignment to it has been discovered.' 
// n is assigned early on and is never < 1 because it's an array size unsigned Result = Row[n]; #pragma warning( pop ) return Result; } } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/PostOrderIterator.h
//===- llvm/ADT/PostOrderIterator.h - PostOrder iterator --------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file builds on the ADT/GraphTraits.h file to build a generic graph // post order iterator. This should work over any graph type that has a // GraphTraits specialization. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_POSTORDERITERATOR_H #define LLVM_ADT_POSTORDERITERATOR_H #include "llvm/ADT/GraphTraits.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/iterator_range.h" #include <set> #include <vector> namespace llvm { // The po_iterator_storage template provides access to the set of already // visited nodes during the po_iterator's depth-first traversal. // // The default implementation simply contains a set of visited nodes, while // the Extended=true version uses a reference to an external set. // // It is possible to prune the depth-first traversal in several ways: // // - When providing an external set that already contains some graph nodes, // those nodes won't be visited again. This is useful for restarting a // post-order traversal on a graph with nodes that aren't dominated by a // single node. // // - By providing a custom SetType class, unwanted graph nodes can be excluded // by having the insert() function return false. This could for example // confine a CFG traversal to blocks in a specific loop. // // - Finally, by specializing the po_iterator_storage template itself, graph // edges can be pruned by returning false in the insertEdge() function. This // could be used to remove loop back-edges from the CFG seen by po_iterator. // // A specialized po_iterator_storage class can observe both the pre-order and // the post-order. 
// The insertEdge() function is called in a pre-order, while
// the finishPostorder() function is called just before the po_iterator moves
// on to the next node.

/// Default po_iterator_storage implementation with an internal set object.
template<class SetType, bool External>
class po_iterator_storage {
  SetType Visited; // Owned set of nodes already entered by the traversal.
public:
  // Return true if edge destination should be visited.
  template<typename NodeType>
  bool insertEdge(NodeType *From, NodeType *To) {
    return Visited.insert(To).second;
  }

  // Called after all children of BB have been visited.
  template<typename NodeType>
  void finishPostorder(NodeType *BB) {}
};

/// Specialization of po_iterator_storage that references an external set.
template<class SetType>
class po_iterator_storage<SetType, true> {
  SetType &Visited; // Caller-owned set; pre-populated entries are skipped.
public:
  po_iterator_storage(SetType &VSet) : Visited(VSet) {}
  po_iterator_storage(const po_iterator_storage &S) : Visited(S.Visited) {}

  // Return true if edge destination should be visited, called with From = 0 for
  // the root node.
  // Graph edges can be pruned by specializing this function.
  template <class NodeType> bool insertEdge(NodeType *From, NodeType *To) {
    return Visited.insert(To).second;
  }

  // Called after all children of BB have been visited.
  template<class NodeType>
  void finishPostorder(NodeType *BB) {}
};

/// po_iterator - A forward iterator yielding the nodes of a graph in
/// post order (every node is visited after all of its not-yet-visited
/// successors).  The visited set either lives inside the iterator
/// (ExtStorage = false) or is supplied by the caller (ExtStorage = true).
template<class GraphT,
         class SetType =
             llvm::SmallPtrSet<typename GraphTraits<GraphT>::NodeType*, 8>,
         bool ExtStorage = false,
         class GT = GraphTraits<GraphT> >
class po_iterator : public po_iterator_storage<SetType, ExtStorage> {
public:
  using iterator_category = std::forward_iterator_tag;
  using value_type = typename GT::NodeType;
  using difference_type = std::ptrdiff_t;
  using pointer = value_type *;
  using reference = value_type &;
  typedef typename GT::NodeType NodeType;
  typedef typename GT::ChildIteratorType ChildItTy;

  // VisitStack - Used to maintain the ordering.  Top = current block
  // First element is basic block pointer, second is the 'next child' to visit
  std::vector<std::pair<NodeType *, ChildItTy> > VisitStack;

  // Descend from the node on top of the stack until a node with no unvisited
  // children is on top; that node is the one the iterator currently yields.
  void traverseChild() {
    while (VisitStack.back().second != GT::child_end(VisitStack.back().first)) {
      NodeType *BB = *VisitStack.back().second++;
      if (this->insertEdge(VisitStack.back().first, BB)) {
        // If the block is not visited...
        VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
      }
    }
  }

  po_iterator(NodeType *BB) {
    this->insertEdge((NodeType*)nullptr, BB);
    VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
    traverseChild();
  }
  po_iterator() {} // End is when stack is empty.

  // Variant used with external storage: if BB was already in the external
  // set, the iterator starts out at end.
  po_iterator(NodeType *BB, SetType &S)
      : po_iterator_storage<SetType, ExtStorage>(S) {
    if (this->insertEdge((NodeType*)nullptr, BB)) {
      VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
      traverseChild();
    }
  }

  po_iterator(SetType &S)
      : po_iterator_storage<SetType, ExtStorage>(S) {
  } // End is when stack is empty.
public:
  // Provide static "constructors"...
  static po_iterator begin(GraphT G) {
    return po_iterator(GT::getEntryNode(G));
  }
  static po_iterator end(GraphT G) { return po_iterator(); }

  static po_iterator begin(GraphT G, SetType &S) {
    return po_iterator(GT::getEntryNode(G), S);
  }
  static po_iterator end(GraphT G, SetType &S) { return po_iterator(S); }

  // Two iterators are equal when their pending-work stacks are equal; two
  // end iterators both have empty stacks.
  bool operator==(const po_iterator &x) const {
    return VisitStack == x.VisitStack;
  }
  bool operator!=(const po_iterator &x) const { return !(*this == x); }

  pointer operator*() const { return VisitStack.back().first; }

  // This is a nonstandard operator-> that dereferences the pointer an extra
  // time... so that you can actually call methods ON the BasicBlock, because
  // the contained type is a pointer.  This allows BBIt->getTerminator() f.e.
  //
  NodeType *operator->() const { return **this; }

  po_iterator &operator++() { // Preincrement
    this->finishPostorder(VisitStack.back().first);
    VisitStack.pop_back();
    if (!VisitStack.empty())
      traverseChild();
    return *this;
  }

  po_iterator operator++(int) { // Postincrement
    po_iterator tmp = *this;
    ++*this;
    return tmp;
  }
};

// Provide global constructors that automatically figure out correct types...
//
template <class T>
po_iterator<T> po_begin(const T &G) { return po_iterator<T>::begin(G); }
template <class T>
po_iterator<T> po_end  (const T &G) { return po_iterator<T>::end(G); }

template <class T> iterator_range<po_iterator<T>> post_order(const T &G) {
  return make_range(po_begin(G), po_end(G));
}

// Provide global definitions of external postorder iterators...
template<class T, class SetType=std::set<typename GraphTraits<T>::NodeType*> >
struct po_ext_iterator : public po_iterator<T, SetType, true> {
  po_ext_iterator(const po_iterator<T, SetType, true> &V) :
  po_iterator<T, SetType, true>(V) {}
};

template<class T, class SetType>
po_ext_iterator<T, SetType> po_ext_begin(T G, SetType &S) {
  return po_ext_iterator<T, SetType>::begin(G, S);
}

template<class T, class SetType>
po_ext_iterator<T, SetType> po_ext_end(T G, SetType &S) {
  return po_ext_iterator<T, SetType>::end(G, S);
}

template <class T, class SetType>
iterator_range<po_ext_iterator<T, SetType>> post_order_ext(const T &G,
                                                           SetType &S) {
  return make_range(po_ext_begin(G, S), po_ext_end(G, S));
}

// Provide global definitions of inverse post order iterators...
template <class T, class SetType = std::set<typename GraphTraits<T>::NodeType*>, bool External = false> struct ipo_iterator : public po_iterator<Inverse<T>, SetType, External > { ipo_iterator(const po_iterator<Inverse<T>, SetType, External> &V) : po_iterator<Inverse<T>, SetType, External> (V) {} }; template <class T> ipo_iterator<T> ipo_begin(const T &G, bool Reverse = false) { return ipo_iterator<T>::begin(G, Reverse); } template <class T> ipo_iterator<T> ipo_end(const T &G){ return ipo_iterator<T>::end(G); } template <class T> iterator_range<ipo_iterator<T>> inverse_post_order(const T &G, bool Reverse = false) { return make_range(ipo_begin(G, Reverse), ipo_end(G)); } // Provide global definitions of external inverse postorder iterators... template <class T, class SetType = std::set<typename GraphTraits<T>::NodeType*> > struct ipo_ext_iterator : public ipo_iterator<T, SetType, true> { ipo_ext_iterator(const ipo_iterator<T, SetType, true> &V) : ipo_iterator<T, SetType, true>(V) {} ipo_ext_iterator(const po_iterator<Inverse<T>, SetType, true> &V) : ipo_iterator<T, SetType, true>(V) {} }; template <class T, class SetType> ipo_ext_iterator<T, SetType> ipo_ext_begin(const T &G, SetType &S) { return ipo_ext_iterator<T, SetType>::begin(G, S); } template <class T, class SetType> ipo_ext_iterator<T, SetType> ipo_ext_end(const T &G, SetType &S) { return ipo_ext_iterator<T, SetType>::end(G, S); } template <class T, class SetType> iterator_range<ipo_ext_iterator<T, SetType>> inverse_post_order_ext(const T &G, SetType &S) { return make_range(ipo_ext_begin(G, S), ipo_ext_end(G, S)); } //===--------------------------------------------------------------------===// // Reverse Post Order CFG iterator code //===--------------------------------------------------------------------===// // // This is used to visit basic blocks in a method in reverse post order. This // class is awkward to use because I don't know a good incremental algorithm to // computer RPO from a graph. 
Because of this, the construction of the // ReversePostOrderTraversal object is expensive (it must walk the entire graph // with a postorder iterator to build the data structures). The moral of this // story is: Don't create more ReversePostOrderTraversal classes than necessary. // // This class should be used like this: // { // ReversePostOrderTraversal<Function*> RPOT(FuncPtr); // Expensive to create // for (rpo_iterator I = RPOT.begin(); I != RPOT.end(); ++I) { // ... // } // for (rpo_iterator I = RPOT.begin(); I != RPOT.end(); ++I) { // ... // } // } // template<class GraphT, class GT = GraphTraits<GraphT> > class ReversePostOrderTraversal { typedef typename GT::NodeType NodeType; std::vector<NodeType*> Blocks; // Block list in normal PO order void Initialize(NodeType *BB) { std::copy(po_begin(BB), po_end(BB), std::back_inserter(Blocks)); } public: typedef typename std::vector<NodeType*>::reverse_iterator rpo_iterator; ReversePostOrderTraversal(GraphT G) { Initialize(GT::getEntryNode(G)); } // Because we want a reverse post order, use reverse iterators from the vector rpo_iterator begin() { return Blocks.rbegin(); } rpo_iterator end() { return Blocks.rend(); } }; } // End llvm namespace #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/StringMap.h
//===--- StringMap.h - String Hash table map interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the StringMap class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_STRINGMAP_H
#define LLVM_ADT_STRINGMAP_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
#include <cstring>
#include <utility>

namespace llvm {
  template<typename ValueT>
  class StringMapConstIterator;
  template<typename ValueT>
  class StringMapIterator;
  template<typename ValueTy>
  class StringMapEntry;

/// StringMapEntryBase - Shared base class of StringMapEntry instances.
class StringMapEntryBase {
  unsigned StrLen; // Length of the key string, excluding the trailing nul.
public:
  explicit StringMapEntryBase(unsigned Len) : StrLen(Len) {}

  unsigned getKeyLength() const { return StrLen; }
};

/// StringMapImpl - This is the base class of StringMap that is shared among
/// all of its instantiations.
class StringMapImpl {
protected:
  // Array of NumBuckets pointers to entries, null pointers are holes.
  // TheTable[NumBuckets] contains a sentinel value for easy iteration. Followed
  // by an array of the actual hash values as unsigned integers.
  StringMapEntryBase **TheTable;
  unsigned NumBuckets;    // Capacity of TheTable.
  unsigned NumItems;      // Number of live entries.
  unsigned NumTombstones; // Erased-but-not-reclaimed buckets.
  unsigned ItemSize;      // sizeof the derived map's entry type.
protected:
  explicit StringMapImpl(unsigned itemSize)
      : TheTable(nullptr),
        // Initialize the map with zero buckets to defer any allocation until
        // the first insertion.
        NumBuckets(0), NumItems(0), NumTombstones(0), ItemSize(itemSize) {}
  // Move construction steals the table and leaves RHS empty (destructible).
  StringMapImpl(StringMapImpl &&RHS)
      : TheTable(RHS.TheTable), NumBuckets(RHS.NumBuckets),
        NumItems(RHS.NumItems), NumTombstones(RHS.NumTombstones),
        ItemSize(RHS.ItemSize) {
    RHS.TheTable = nullptr;
    RHS.NumBuckets = 0;
    RHS.NumItems = 0;
    RHS.NumTombstones = 0;
  }

  StringMapImpl(unsigned InitSize, unsigned ItemSize);
  unsigned RehashTable(unsigned BucketNo = 0);

  /// LookupBucketFor - Look up the bucket that the specified string should end
  /// up in.  If it already exists as a key in the map, the Item pointer for the
  /// specified bucket will be non-null.  Otherwise, it will be null.  In either
  /// case, the FullHashValue field of the bucket will be set to the hash value
  /// of the string.
  unsigned LookupBucketFor(StringRef Key);

  /// FindKey - Look up the bucket that contains the specified key. If it exists
  /// in the map, return the bucket number of the key.  Otherwise return -1.
  /// This does not modify the map.
  int FindKey(StringRef Key) const;

  /// RemoveKey - Remove the specified StringMapEntry from the table, but do not
  /// delete it.  This aborts if the value isn't in the table.
  void RemoveKey(StringMapEntryBase *V);

  /// RemoveKey - Remove the StringMapEntry for the specified key from the
  /// table, returning it.  If the key is not in the table, this returns null.
  StringMapEntryBase *RemoveKey(StringRef Key);
private:
  void init(unsigned Size);
public:
  static StringMapEntryBase *getTombstoneVal() {
    return (StringMapEntryBase*)-1;
  }

  unsigned getNumBuckets() const { return NumBuckets; }
  unsigned getNumItems() const { return NumItems; }

  bool empty() const { return NumItems == 0; }
  unsigned size() const { return NumItems; }

  void swap(StringMapImpl &Other) {
    std::swap(TheTable, Other.TheTable);
    std::swap(NumBuckets, Other.NumBuckets);
    std::swap(NumItems, Other.NumItems);
    std::swap(NumTombstones, Other.NumTombstones);
  }
};

/// StringMapEntry - This is used to represent one value that is inserted into
/// a StringMap.  It contains the Value itself and the key: the string length
/// and data.
template<typename ValueTy>
class StringMapEntry : public StringMapEntryBase {
  StringMapEntry(StringMapEntry &E) = delete;
public:
  ValueTy second; // The mapped value; the key bytes follow this object.

  explicit StringMapEntry(unsigned strLen)
    : StringMapEntryBase(strLen), second() {}
  template <class InitTy>
  StringMapEntry(unsigned strLen, InitTy &&V)
      : StringMapEntryBase(strLen), second(std::forward<InitTy>(V)) {}

  StringRef getKey() const {
    return StringRef(getKeyData(), getKeyLength());
  }

  const ValueTy &getValue() const { return second; }
  ValueTy &getValue() { return second; }

  void setValue(const ValueTy &V) { second = V; }

  /// getKeyData - Return the start of the string data that is the key for this
  /// value.  The string data is always stored immediately after the
  /// StringMapEntry object.
  const char *getKeyData() const {return reinterpret_cast<const char*>(this+1);}

  StringRef first() const { return StringRef(getKeyData(), getKeyLength()); }

  /// Create - Create a StringMapEntry for the specified key and default
  /// construct the value.
  template <typename AllocatorTy, typename InitType>
  static StringMapEntry *Create(StringRef Key, AllocatorTy &Allocator,
                                InitType &&InitVal) {
    unsigned KeyLength = Key.size();

    // Allocate a new item with space for the string at the end and a null
    // terminator.
    unsigned AllocSize = static_cast<unsigned>(sizeof(StringMapEntry))+
      KeyLength+1;
    unsigned Alignment = alignOf<StringMapEntry>();

    StringMapEntry *NewItem =
      static_cast<StringMapEntry*>(Allocator.Allocate(AllocSize,Alignment));

    // Default construct the value.
    new (NewItem) StringMapEntry(KeyLength, std::forward<InitType>(InitVal));

    // Copy the string information.
    char *StrBuffer = const_cast<char*>(NewItem->getKeyData());
    if (KeyLength > 0)
      memcpy(StrBuffer, Key.data(), KeyLength);
    StrBuffer[KeyLength] = 0;  // Null terminate for convenience of clients.
    return NewItem;
  }

  template<typename AllocatorTy>
  static StringMapEntry *Create(StringRef Key, AllocatorTy &Allocator) {
    return Create(Key, Allocator, ValueTy());
  }

  /// Create - Create a StringMapEntry with normal malloc/free.
  template<typename InitType>
  static StringMapEntry *Create(StringRef Key, InitType &&InitVal) {
    MallocAllocator A;
    return Create(Key, A, std::forward<InitType>(InitVal));
  }

  static StringMapEntry *Create(StringRef Key) {
    return Create(Key, ValueTy());
  }

  /// GetStringMapEntryFromKeyData - Given key data that is known to be embedded
  /// into a StringMapEntry, return the StringMapEntry itself.
  static StringMapEntry &GetStringMapEntryFromKeyData(const char *KeyData) {
    // The key bytes sit directly after the entry object, so back up by one
    // entry size to recover the owning entry.
    char *Ptr = const_cast<char*>(KeyData) - sizeof(StringMapEntry<ValueTy>);
    return *reinterpret_cast<StringMapEntry*>(Ptr);
  }

  /// Destroy - Destroy this StringMapEntry, releasing memory back to the
  /// specified allocator.
  template<typename AllocatorTy>
  void Destroy(AllocatorTy &Allocator) {
    // Free memory referenced by the item.
    unsigned AllocSize =
        static_cast<unsigned>(sizeof(StringMapEntry)) + getKeyLength() + 1;
    this->~StringMapEntry();
    Allocator.Deallocate(static_cast<void *>(this), AllocSize);
  }

  /// Destroy this object, releasing memory back to the malloc allocator.
  void Destroy() {
    MallocAllocator A;
    Destroy(A);
  }
};

/// StringMap - This is an unconventional map that is specialized for handling
/// keys that are "strings", which are basically ranges of bytes. This does some
/// funky memory allocation and hashing things to make it extremely efficient,
/// storing the string data *after* the value in the map.
template<typename ValueTy, typename AllocatorTy = MallocAllocator>
class StringMap : public StringMapImpl {
  AllocatorTy Allocator; // Allocates the variable-sized entry objects.
public:
  typedef StringMapEntry<ValueTy> MapEntryTy;

  StringMap() : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))) {}
  explicit StringMap(unsigned InitialSize)
    : StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))) {}

  explicit StringMap(AllocatorTy A)
    : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))), Allocator(A) {}

  StringMap(unsigned InitialSize, AllocatorTy A)
    : StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))),
      Allocator(A) {}

  StringMap(StringMap &&RHS)
      : StringMapImpl(std::move(RHS)), Allocator(std::move(RHS.Allocator)) {}

  // Copy-and-swap: RHS is taken by value, so this serves as move assignment.
  StringMap &operator=(StringMap RHS) {
    StringMapImpl::swap(RHS);
    std::swap(Allocator, RHS.Allocator);
    return *this;
  }

  // FIXME: Implement copy operations if/when they're needed.

  AllocatorTy &getAllocator() { return Allocator; }
  const AllocatorTy &getAllocator() const { return Allocator; }

  typedef const char* key_type;
  typedef ValueTy mapped_type;
  typedef StringMapEntry<ValueTy> value_type;
  typedef size_t size_type;

  typedef StringMapConstIterator<ValueTy> const_iterator;
  typedef StringMapIterator<ValueTy> iterator;

  iterator begin() {
    return iterator(TheTable, NumBuckets == 0);
  }
  iterator end() {
    return iterator(TheTable+NumBuckets, true);
  }
  const_iterator begin() const {
    return const_iterator(TheTable, NumBuckets == 0);
  }
  const_iterator end() const {
    return const_iterator(TheTable+NumBuckets, true);
  }

  iterator find(StringRef Key) {
    int Bucket = FindKey(Key);
    if (Bucket == -1) return end();
    return iterator(TheTable+Bucket, true);
  }

  const_iterator find(StringRef Key) const {
    int Bucket = FindKey(Key);
    if (Bucket == -1) return end();
    return const_iterator(TheTable+Bucket, true);
  }

  /// lookup - Return the entry for the specified key, or a default
  /// constructed value if no such entry exists.
  ValueTy lookup(StringRef Key) const {
    const_iterator it = find(Key);
    if (it != end())
      return it->second;
    return ValueTy();
  }

  // operator[] inserts a default-constructed value when Key is absent.
  ValueTy &operator[](StringRef Key) {
    return insert(std::make_pair(Key, ValueTy())).first->second;
  }

  /// count - Return 1 if the element is in the map, 0 otherwise.
  size_type count(StringRef Key) const {
    return find(Key) == end() ? 0 : 1;
  }

  /// insert - Insert the specified key/value pair into the map.  If the key
  /// already exists in the map, return false and ignore the request, otherwise
  /// insert it and return true.
  bool insert(MapEntryTy *KeyValue) {
    unsigned BucketNo = LookupBucketFor(KeyValue->getKey());
    StringMapEntryBase *&Bucket = TheTable[BucketNo];
    if (Bucket && Bucket != getTombstoneVal())
      return false;  // Already exists in map.

    if (Bucket == getTombstoneVal())
      --NumTombstones;
    Bucket = KeyValue;
    ++NumItems;
    assert(NumItems + NumTombstones <= NumBuckets);

    RehashTable();
    return true;
  }

  /// insert - Inserts the specified key/value pair into the map if the key
  /// isn't already in the map. The bool component of the returned pair is true
  /// if and only if the insertion takes place, and the iterator component of
  /// the pair points to the element with key equivalent to the key of the pair.
  std::pair<iterator, bool> insert(std::pair<StringRef, ValueTy> KV) {
    unsigned BucketNo = LookupBucketFor(KV.first);
    StringMapEntryBase *&Bucket = TheTable[BucketNo];
    if (Bucket && Bucket != getTombstoneVal())
      return std::make_pair(iterator(TheTable + BucketNo, false),
                            false); // Already exists in map.

    if (Bucket == getTombstoneVal())
      --NumTombstones;
    Bucket = MapEntryTy::Create(KV.first, Allocator, std::move(KV.second));
    ++NumItems;
    assert(NumItems + NumTombstones <= NumBuckets);

    // RehashTable may grow the table and returns the entry's new bucket.
    BucketNo = RehashTable(BucketNo);
    return std::make_pair(iterator(TheTable + BucketNo, false), true);
  }

  // clear - Empties out the StringMap
  void clear() {
    if (empty()) return;

    // Zap all values, resetting the keys back to non-present (not tombstone),
    // which is safe because we're removing all elements.
    for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
      StringMapEntryBase *&Bucket = TheTable[I];
      if (Bucket && Bucket != getTombstoneVal()) {
        static_cast<MapEntryTy*>(Bucket)->Destroy(Allocator);
      }
      Bucket = nullptr;
    }

    NumItems = 0;
    NumTombstones = 0;
  }

  /// remove - Remove the specified key/value pair from the map, but do not
  /// erase it.  This aborts if the key is not in the map.
  void remove(MapEntryTy *KeyValue) {
    RemoveKey(KeyValue);
  }

  void erase(iterator I) {
    MapEntryTy &V = *I;
    remove(&V);
    V.Destroy(Allocator);
  }

  bool erase(StringRef Key) {
    iterator I = find(Key);
    if (I == end()) return false;
    erase(I);
    return true;
  }

  ~StringMap() {
    // Delete all the elements in the map, but don't reset the elements
    // to default values.  This is a copy of clear(), but avoids unnecessary
    // work not required in the destructor.
    if (!empty()) {
      for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
        StringMapEntryBase *Bucket = TheTable[I];
        if (Bucket && Bucket != getTombstoneVal()) {
          static_cast<MapEntryTy*>(Bucket)->Destroy(Allocator);
        }
      }
    }
    ::operator delete(TheTable);  // HLSL Change Begin: Use overridable operator delete
  }
};

/// StringMapConstIterator - Walks the bucket array, skipping empty and
/// tombstone buckets; relies on the sentinel at TheTable[NumBuckets] to stop.
template<typename ValueTy>
class StringMapConstIterator {
protected:
  StringMapEntryBase **Ptr; // Current bucket.
public:
  typedef StringMapEntry<ValueTy> value_type;

  StringMapConstIterator() : Ptr(nullptr) { }

  explicit StringMapConstIterator(StringMapEntryBase **Bucket,
                                  bool NoAdvance = false)
  : Ptr(Bucket) {
    if (!NoAdvance) AdvancePastEmptyBuckets();
  }

  const value_type &operator*() const {
    return *static_cast<StringMapEntry<ValueTy>*>(*Ptr);
  }
  const value_type *operator->() const {
    return static_cast<StringMapEntry<ValueTy>*>(*Ptr);
  }

  bool operator==(const StringMapConstIterator &RHS) const {
    return Ptr == RHS.Ptr;
  }
  bool operator!=(const StringMapConstIterator &RHS) const {
    return Ptr != RHS.Ptr;
  }

  inline StringMapConstIterator& operator++() {   // Preincrement
    ++Ptr;
    AdvancePastEmptyBuckets();
    return *this;
  }
  StringMapConstIterator operator++(int) {        // Postincrement
    StringMapConstIterator tmp = *this; ++*this; return tmp;
  }

private:
  void AdvancePastEmptyBuckets() {
    // The sentinel entry past the last bucket is neither null nor a
    // tombstone, so this loop always terminates.
    while (*Ptr == nullptr || *Ptr == StringMapImpl::getTombstoneVal())
      ++Ptr;
  }
};

template<typename ValueTy>
class StringMapIterator : public StringMapConstIterator<ValueTy> {
public:
  StringMapIterator() {}
  explicit StringMapIterator(StringMapEntryBase **Bucket,
                             bool NoAdvance = false)
    : StringMapConstIterator<ValueTy>(Bucket, NoAdvance) {
  }
  StringMapEntry<ValueTy> &operator*() const {
    return *static_cast<StringMapEntry<ValueTy>*>(*this->Ptr);
  }
  StringMapEntry<ValueTy> *operator->() const {
    return static_cast<StringMapEntry<ValueTy>*>(*this->Ptr);
  }
};

}

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/DAGDeltaAlgorithm.h
//===--- DAGDeltaAlgorithm.h - A DAG Minimization Algorithm ----*- C++ -*--===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_DAGDELTAALGORITHM_H
#define LLVM_ADT_DAGDELTAALGORITHM_H

#include <set>
#include <vector>

namespace llvm {

/// DAGDeltaAlgorithm - Implements a "delta debugging" algorithm for minimizing
/// directed acyclic graphs using a predicate function.
///
/// The result of the algorithm is a subset of the input change set which is
/// guaranteed to satisfy the predicate, assuming that the input set did. For
/// well formed predicates, the result set is guaranteed to be such that
/// removing any single element not required by the dependencies on the other
/// elements would falsify the predicate.
///
/// The DAG should be used to represent dependencies in the changes which are
/// likely to hold across the predicate function. That is, for a particular
/// changeset S and predicate P:
///
///   P(S) => P(S union pred(S))
///
/// The minimization algorithm uses this dependency information to attempt to
/// eagerly prune large subsets of changes. As with \see DeltaAlgorithm, the DAG
/// is not required to satisfy this property, but the algorithm will run
/// substantially fewer tests with appropriate dependencies. \see DeltaAlgorithm
/// for more information on the properties which the predicate function itself
/// should satisfy.
class DAGDeltaAlgorithm {
  virtual void anchor();
public:
  typedef unsigned change_ty;        // A change is identified by an index.
  typedef std::pair<change_ty, change_ty> edge_ty; // Dependency edge (x, y).

  // FIXME: Use a decent data structure.
  typedef std::set<change_ty> changeset_ty;
  typedef std::vector<changeset_ty> changesetlist_ty;

public:
  virtual ~DAGDeltaAlgorithm() {}

  /// Run - Minimize the DAG formed by the \p Changes vertices and the
  /// \p Dependencies edges by executing \see ExecuteOneTest() on subsets of
  /// changes and returning the smallest set which still satisfies the test
  /// predicate and the input \p Dependencies.
  ///
  /// \param Changes The list of changes.
  ///
  /// \param Dependencies The list of dependencies amongst changes. For each
  /// (x,y) in \p Dependencies, both x and y must be in \p Changes. The
  /// minimization algorithm guarantees that for each tested changed set S,
  /// \f$ x \in S \f$ implies \f$ y \in S \f$. It is an error to have cyclic
  /// dependencies.
  changeset_ty Run(const changeset_ty &Changes,
                   const std::vector<edge_ty> &Dependencies);

  /// UpdatedSearchState - Callback used when the search state changes.
  virtual void UpdatedSearchState(const changeset_ty &Changes,
                                  const changesetlist_ty &Sets,
                                  const changeset_ty &Required) {}

  /// ExecuteOneTest - Execute a single test predicate on the change set \p S.
  virtual bool ExecuteOneTest(const changeset_ty &S) = 0;
};

} // end namespace llvm

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/PointerUnion.h
//===- llvm/ADT/PointerUnion.h - Discriminated Union of 2 Ptrs --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the PointerUnion class, which is a discriminated union of
// pointer types.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_POINTERUNION_H
#define LLVM_ADT_POINTERUNION_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/Support/Compiler.h"

namespace llvm {

template <typename T>
struct PointerUnionTypeSelectorReturn {
  typedef T Return;
};

/// \brief Get a type based on whether two types are the same or not. For:
/// @code
/// typedef typename PointerUnionTypeSelector<T1, T2, EQ, NE>::Return Ret;
/// @endcode
/// Ret will be EQ type if T1 is same as T2 or NE type otherwise.
template <typename T1, typename T2, typename RET_EQ, typename RET_NE>
struct PointerUnionTypeSelector {
  typedef typename PointerUnionTypeSelectorReturn<RET_NE>::Return Return;
};

template <typename T, typename RET_EQ, typename RET_NE>
struct PointerUnionTypeSelector<T, T, RET_EQ, RET_NE> {
  typedef typename PointerUnionTypeSelectorReturn<RET_EQ>::Return Return;
};

// Allow nesting selectors: a selector used as the NE/EQ result is itself
// evaluated rather than returned verbatim.
template <typename T1, typename T2, typename RET_EQ, typename RET_NE>
struct PointerUnionTypeSelectorReturn<
                            PointerUnionTypeSelector<T1, T2, RET_EQ, RET_NE> > {
  typedef typename PointerUnionTypeSelector<T1, T2, RET_EQ, RET_NE>::Return
      Return;
};

/// Provide PointerLikeTypeTraits for void* that is used by PointerUnion
/// for the two template arguments.
template <typename PT1, typename PT2>
class PointerUnionUIntTraits {
public:
  static inline void *getAsVoidPointer(void *P) { return P; }
  static inline void *getFromVoidPointer(void *P) { return P; }
  enum {
    PT1BitsAv = (int)(PointerLikeTypeTraits<PT1>::NumLowBitsAvailable),
    PT2BitsAv = (int)(PointerLikeTypeTraits<PT2>::NumLowBitsAvailable),
    // Only bits free in *both* pointee types may be used by the union.
    NumLowBitsAvailable = PT1BitsAv < PT2BitsAv ? PT1BitsAv : PT2BitsAv
  };
};

/// PointerUnion - This implements a discriminated union of two pointer types,
/// and keeps the discriminator bit-mangled into the low bits of the pointer.
/// This allows the implementation to be extremely efficient in space, but
/// permits a very natural and type-safe API.
///
/// Common use patterns would be something like this:
///    PointerUnion<int*, float*> P;
///    P = (int*)0;
///    printf("%d %d", P.is<int*>(), P.is<float*>());  // prints "1 0"
///    X = P.get<int*>();     // ok.
///    Y = P.get<float*>();   // runtime assertion failure.
///    Z = P.get<double*>();  // compile time failure.
///    P = (float*)0;
///    Y = P.get<float*>();   // ok.
///    X = P.get<int*>();     // runtime assertion failure.
template <typename PT1, typename PT2>
class PointerUnion {
public:
  // The pointer value plus one discriminator bit (0 = PT1, 1 = PT2).
  typedef PointerIntPair<void*, 1, bool,
                         PointerUnionUIntTraits<PT1,PT2> > ValTy;
private:
  ValTy Val;

  struct IsPT1 {
    static const int Num = 0;
  };
  struct IsPT2 {
    static const int Num = 1;
  };
  // Selected when is<T>() is instantiated with a type the union cannot hold;
  // has no Num member, so the error names this struct.
  template <typename T>
  struct UNION_DOESNT_CONTAIN_TYPE { };

public:
  PointerUnion() {}

  PointerUnion(PT1 V) : Val(
    const_cast<void *>(PointerLikeTypeTraits<PT1>::getAsVoidPointer(V))) {
  }
  PointerUnion(PT2 V) : Val(
    const_cast<void *>(PointerLikeTypeTraits<PT2>::getAsVoidPointer(V)), 1) {
  }

  /// isNull - Return true if the pointer held in the union is null,
  /// regardless of which type it is.
  bool isNull() const {
    // Convert from the void* to one of the pointer types, to make sure that
    // we recursively strip off low bits if we have a nested PointerUnion.
    return !PointerLikeTypeTraits<PT1>::getFromVoidPointer(Val.getPointer());
  }
  explicit operator bool() const { return !isNull(); }

  /// is<T>() return true if the Union currently holds the type matching T.
  template<typename T>
  int is() const {
    // Map T to its tag: IsPT1 if T == PT1, IsPT2 if T == PT2, otherwise a
    // compile error via UNION_DOESNT_CONTAIN_TYPE.
    typedef typename
      ::llvm::PointerUnionTypeSelector<PT1, T, IsPT1,
        ::llvm::PointerUnionTypeSelector<PT2, T, IsPT2,
                                  UNION_DOESNT_CONTAIN_TYPE<T> > >::Return Ty;
    int TyNo = Ty::Num;
    return static_cast<int>(Val.getInt()) == TyNo;
  }

  /// get<T>() - Return the value of the specified pointer type. If the
  /// specified pointer type is incorrect, assert.
  template<typename T>
  T get() const {
    assert(is<T>() && "Invalid accessor called");
    return PointerLikeTypeTraits<T>::getFromVoidPointer(Val.getPointer());
  }

  /// dyn_cast<T>() - If the current value is of the specified pointer type,
  /// return it, otherwise return null.
  template<typename T>
  T dyn_cast() const {
    if (is<T>()) return get<T>();
    return T();
  }

  /// \brief If the union is set to the first pointer type get an address
  /// pointing to it.
  PT1 const *getAddrOfPtr1() const {
    return const_cast<PointerUnion *>(this)->getAddrOfPtr1();
  }

  /// \brief If the union is set to the first pointer type get an address
  /// pointing to it.
  PT1 *getAddrOfPtr1() {
    assert(is<PT1>() && "Val is not the first pointer");
    assert(get<PT1>() == Val.getPointer() &&
       "Can't get the address because PointerLikeTypeTraits changes the ptr");
    return const_cast<PT1 *>(
        reinterpret_cast<const PT1 *>(Val.getAddrOfPointer()));
  }

  /// \brief Assignment from nullptr which just clears the union.
  const PointerUnion &operator=(std::nullptr_t) {
    Val.initWithPointer(nullptr);
    return *this;
  }

  /// Assignment operators - Allow assigning into this union from either
  /// pointer type, setting the discriminator to remember what it came from.
  const PointerUnion &operator=(const PT1 &RHS) {
    Val.initWithPointer(
       const_cast<void *>(PointerLikeTypeTraits<PT1>::getAsVoidPointer(RHS)));
    return *this;
  }
  const PointerUnion &operator=(const PT2 &RHS) {
    Val.setPointerAndInt(
      const_cast<void *>(PointerLikeTypeTraits<PT2>::getAsVoidPointer(RHS)),
      1);
    return *this;
  }

  void *getOpaqueValue() const { return Val.getOpaqueValue(); }
  static inline PointerUnion getFromOpaqueValue(void *VP) {
    PointerUnion V;
    V.Val = ValTy::getFromOpaqueValue(VP);
    return V;
  }
};

// Comparisons operate on the opaque (tag-included) representation, so two
// unions compare equal only if both the tag and the pointer match.
template<typename PT1, typename PT2>
static bool operator==(PointerUnion<PT1, PT2> lhs,
                       PointerUnion<PT1, PT2> rhs) {
  return lhs.getOpaqueValue() == rhs.getOpaqueValue();
}

template<typename PT1, typename PT2>
static bool operator!=(PointerUnion<PT1, PT2> lhs,
                       PointerUnion<PT1, PT2> rhs) {
  return lhs.getOpaqueValue() != rhs.getOpaqueValue();
}

template<typename PT1, typename PT2>
static bool operator<(PointerUnion<PT1, PT2> lhs,
                      PointerUnion<PT1, PT2> rhs) {
  return lhs.getOpaqueValue() < rhs.getOpaqueValue();
}

// Teach SmallPtrSet that PointerUnion is "basically a pointer", that has
// # low bits available = min(PT1bits,PT2bits)-1.
template<typename PT1, typename PT2>
class PointerLikeTypeTraits<PointerUnion<PT1, PT2> > {
public:
  static inline void *
  getAsVoidPointer(const PointerUnion<PT1, PT2> &P) {
    return P.getOpaqueValue();
  }
  static inline PointerUnion<PT1, PT2>
  getFromVoidPointer(void *P) {
    return PointerUnion<PT1, PT2>::getFromOpaqueValue(P);
  }

  // The number of bits available are the min of the two pointer types.
  enum {
    NumLowBitsAvailable =
      PointerLikeTypeTraits<typename PointerUnion<PT1,PT2>::ValTy>
        ::NumLowBitsAvailable
  };
};

/// PointerUnion3 - This is a pointer union of three pointer types. See
/// documentation for PointerUnion for usage.
// Implemented by nesting: ((PT1, PT2), PT3). PT1/PT2 queries go through the
// inner union; PT3 queries hit the outer union directly.
template <typename PT1, typename PT2, typename PT3> class PointerUnion3 {
public:
  typedef PointerUnion<PT1, PT2> InnerUnion;
  typedef PointerUnion<InnerUnion, PT3> ValTy;

private:
  ValTy Val;

  // Adapter used when T is PT1 or PT2: descend into the inner union.
  struct IsInnerUnion {
    ValTy Val;
    IsInnerUnion(ValTy val) : Val(val) { }
    template<typename T>
    int is() const {
      return Val.template is<InnerUnion>() &&
             Val.template get<InnerUnion>().template is<T>();
    }
    template<typename T>
    T get() const {
      return Val.template get<InnerUnion>().template get<T>();
    }
  };

  // Adapter used when T is PT3: query the outer union directly.
  struct IsPT3 {
    ValTy Val;
    IsPT3(ValTy val) : Val(val) { }
    template<typename T>
    int is() const {
      return Val.template is<T>();
    }
    template<typename T>
    T get() const {
      return Val.template get<T>();
    }
  };

public:
  PointerUnion3() {}

  PointerUnion3(PT1 V) {
    Val = InnerUnion(V);
  }
  PointerUnion3(PT2 V) {
    Val = InnerUnion(V);
  }
  PointerUnion3(PT3 V) {
    Val = V;
  }

  /// isNull - Return true if the pointer held in the union is null,
  /// regardless of which type it is.
  bool isNull() const { return Val.isNull(); }
  explicit operator bool() const { return !isNull(); }

  /// is<T>() return true if the Union currently holds the type matching T.
  template<typename T>
  int is() const {
    // If T is PT1/PT2 choose IsInnerUnion otherwise choose IsPT3.
    typedef typename
      ::llvm::PointerUnionTypeSelector<PT1, T, IsInnerUnion,
        ::llvm::PointerUnionTypeSelector<PT2, T, IsInnerUnion, IsPT3 >
                                                               >::Return Ty;
    return Ty(Val).template is<T>();
  }

  /// get<T>() - Return the value of the specified pointer type. If the
  /// specified pointer type is incorrect, assert.
  template<typename T>
  T get() const {
    assert(is<T>() && "Invalid accessor called");
    // If T is PT1/PT2 choose IsInnerUnion otherwise choose IsPT3.
    typedef typename
      ::llvm::PointerUnionTypeSelector<PT1, T, IsInnerUnion,
        ::llvm::PointerUnionTypeSelector<PT2, T, IsInnerUnion, IsPT3 >
                                                               >::Return Ty;
    return Ty(Val).template get<T>();
  }

  /// dyn_cast<T>() - If the current value is of the specified pointer type,
  /// return it, otherwise return null.
  template<typename T>
  T dyn_cast() const {
    if (is<T>()) return get<T>();
    return T();
  }

  /// \brief Assignment from nullptr which just clears the union.
  const PointerUnion3 &operator=(std::nullptr_t) {
    Val = nullptr;
    return *this;
  }

  /// Assignment operators - Allow assigning into this union from either
  /// pointer type, setting the discriminator to remember what it came from.
  const PointerUnion3 &operator=(const PT1 &RHS) {
    Val = InnerUnion(RHS);
    return *this;
  }
  const PointerUnion3 &operator=(const PT2 &RHS) {
    Val = InnerUnion(RHS);
    return *this;
  }
  const PointerUnion3 &operator=(const PT3 &RHS) {
    Val = RHS;
    return *this;
  }

  void *getOpaqueValue() const { return Val.getOpaqueValue(); }
  static inline PointerUnion3 getFromOpaqueValue(void *VP) {
    PointerUnion3 V;
    V.Val = ValTy::getFromOpaqueValue(VP);
    return V;
  }
};

// Teach SmallPtrSet that PointerUnion3 is "basically a pointer", that has
// # low bits available = min(PT1bits,PT2bits,PT3bits)-2 (two levels of
// nesting each consume a discriminator bit).
template<typename PT1, typename PT2, typename PT3>
class PointerLikeTypeTraits<PointerUnion3<PT1, PT2, PT3> > {
public:
  static inline void *
  getAsVoidPointer(const PointerUnion3<PT1, PT2, PT3> &P) {
    return P.getOpaqueValue();
  }
  static inline PointerUnion3<PT1, PT2, PT3>
  getFromVoidPointer(void *P) {
    return PointerUnion3<PT1, PT2, PT3>::getFromOpaqueValue(P);
  }

  // The number of bits available are the min of the member pointer types,
  // delegated to the nested unions' traits.
  enum {
    NumLowBitsAvailable =
        PointerLikeTypeTraits<typename PointerUnion3<PT1, PT2, PT3>::ValTy>
            ::NumLowBitsAvailable
  };
};

/// PointerUnion4 - This is a pointer union of four pointer types. See
/// documentation for PointerUnion for usage.
// Implemented as a balanced pair of pairs: ((PT1, PT2), (PT3, PT4)), so each
// query descends exactly one inner union.
template <typename PT1, typename PT2, typename PT3, typename PT4>
class PointerUnion4 {
public:
  typedef PointerUnion<PT1, PT2> InnerUnion1;
  typedef PointerUnion<PT3, PT4> InnerUnion2;
  typedef PointerUnion<InnerUnion1, InnerUnion2> ValTy;

private:
  ValTy Val;

public:
  PointerUnion4() {}

  PointerUnion4(PT1 V) {
    Val = InnerUnion1(V);
  }
  PointerUnion4(PT2 V) {
    Val = InnerUnion1(V);
  }
  PointerUnion4(PT3 V) {
    Val = InnerUnion2(V);
  }
  PointerUnion4(PT4 V) {
    Val = InnerUnion2(V);
  }

  /// isNull - Return true if the pointer held in the union is null,
  /// regardless of which type it is.
  bool isNull() const { return Val.isNull(); }
  explicit operator bool() const { return !isNull(); }

  /// is<T>() return true if the Union currently holds the type matching T.
  template<typename T>
  int is() const {
    // If T is PT1/PT2 choose InnerUnion1 otherwise choose InnerUnion2.
    typedef typename
      ::llvm::PointerUnionTypeSelector<PT1, T, InnerUnion1,
        ::llvm::PointerUnionTypeSelector<PT2, T, InnerUnion1, InnerUnion2 >
                                                               >::Return Ty;
    return Val.template is<Ty>() &&
           Val.template get<Ty>().template is<T>();
  }

  /// get<T>() - Return the value of the specified pointer type. If the
  /// specified pointer type is incorrect, assert.
  template<typename T>
  T get() const {
    assert(is<T>() && "Invalid accessor called");
    // If T is PT1/PT2 choose InnerUnion1 otherwise choose InnerUnion2.
    typedef typename
      ::llvm::PointerUnionTypeSelector<PT1, T, InnerUnion1,
        ::llvm::PointerUnionTypeSelector<PT2, T, InnerUnion1, InnerUnion2 >
                                                               >::Return Ty;
    return Val.template get<Ty>().template get<T>();
  }

  /// dyn_cast<T>() - If the current value is of the specified pointer type,
  /// return it, otherwise return null.
  template<typename T>
  T dyn_cast() const {
    if (is<T>()) return get<T>();
    return T();
  }

  /// \brief Assignment from nullptr which just clears the union.
  const PointerUnion4 &operator=(std::nullptr_t) {
    Val = nullptr;
    return *this;
  }

  /// Assignment operators - Allow assigning into this union from either
  /// pointer type, setting the discriminator to remember what it came from.
  const PointerUnion4 &operator=(const PT1 &RHS) {
    Val = InnerUnion1(RHS);
    return *this;
  }
  const PointerUnion4 &operator=(const PT2 &RHS) {
    Val = InnerUnion1(RHS);
    return *this;
  }
  const PointerUnion4 &operator=(const PT3 &RHS) {
    Val = InnerUnion2(RHS);
    return *this;
  }
  const PointerUnion4 &operator=(const PT4 &RHS) {
    Val = InnerUnion2(RHS);
    return *this;
  }

  void *getOpaqueValue() const { return Val.getOpaqueValue(); }
  static inline PointerUnion4 getFromOpaqueValue(void *VP) {
    PointerUnion4 V;
    V.Val = ValTy::getFromOpaqueValue(VP);
    return V;
  }
};

// Teach SmallPtrSet that PointerUnion4 is "basically a pointer", that has
// # low bits available = min(PT1bits,PT2bits,PT3bits,PT4bits)-2 (the outer
// and inner unions each consume one discriminator bit).
template<typename PT1, typename PT2, typename PT3, typename PT4>
class PointerLikeTypeTraits<PointerUnion4<PT1, PT2, PT3, PT4> > {
public:
  static inline void *
  getAsVoidPointer(const PointerUnion4<PT1, PT2, PT3, PT4> &P) {
    return P.getOpaqueValue();
  }
  static inline PointerUnion4<PT1, PT2, PT3, PT4>
  getFromVoidPointer(void *P) {
    return PointerUnion4<PT1, PT2, PT3, PT4>::getFromOpaqueValue(P);
  }

  // The number of bits available are the min of the member pointer types,
  // delegated to the nested unions' traits.
  enum {
    NumLowBitsAvailable =
        PointerLikeTypeTraits<typename PointerUnion4<PT1, PT2, PT3, PT4>::ValTy>
            ::NumLowBitsAvailable
  };
};

// Teach DenseMap how to use PointerUnions as keys.
template<typename T, typename U> struct DenseMapInfo<PointerUnion<T, U> > { typedef PointerUnion<T, U> Pair; typedef DenseMapInfo<T> FirstInfo; typedef DenseMapInfo<U> SecondInfo; static inline Pair getEmptyKey() { return Pair(FirstInfo::getEmptyKey()); } static inline Pair getTombstoneKey() { return Pair(FirstInfo::getTombstoneKey()); } static unsigned getHashValue(const Pair &PairVal) { intptr_t key = (intptr_t)PairVal.getOpaqueValue(); return DenseMapInfo<intptr_t>::getHashValue(key); } static bool isEqual(const Pair &LHS, const Pair &RHS) { return LHS.template is<T>() == RHS.template is<T>() && (LHS.template is<T>() ? FirstInfo::isEqual(LHS.template get<T>(), RHS.template get<T>()) : SecondInfo::isEqual(LHS.template get<U>(), RHS.template get<U>())); } }; } #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/ImmutableSet.h
//===--- ImmutableSet.h - Immutable (functional) set interface --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ImutAVLTree and ImmutableSet classes.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_IMMUTABLESET_H
#define LLVM_ADT_IMMUTABLESET_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <functional>
#include <vector>

namespace llvm {

//===----------------------------------------------------------------------===//
// Immutable AVL-Tree Definition.
//===----------------------------------------------------------------------===//

template <typename ImutInfo> class ImutAVLFactory;
template <typename ImutInfo> class ImutIntervalAVLFactory;
template <typename ImutInfo> class ImutAVLTreeInOrderIterator;
template <typename ImutInfo> class ImutAVLTreeGenericIterator;

/// A persistent (functional) AVL tree node. Nodes are created and owned by
/// an ImutAVLFactory; once published to clients they are immutable, so
/// subtrees can be shared freely between trees.
template <typename ImutInfo >
class ImutAVLTree {
public:
  typedef typename ImutInfo::key_type_ref   key_type_ref;
  typedef typename ImutInfo::value_type     value_type;
  typedef typename ImutInfo::value_type_ref value_type_ref;

  typedef ImutAVLFactory<ImutInfo>          Factory;
  friend class ImutAVLFactory<ImutInfo>;
  friend class ImutIntervalAVLFactory<ImutInfo>;

  friend class ImutAVLTreeGenericIterator<ImutInfo>;

  typedef ImutAVLTreeInOrderIterator<ImutInfo>  iterator;

  //===----------------------------------------------------===//
  // Public Interface.
  //===----------------------------------------------------===//

  /// Return a pointer to the left subtree.  This value
  ///  is NULL if there is no left subtree.
  ImutAVLTree *getLeft() const { return left; }

  /// Return a pointer to the right subtree.  This value is
  ///  NULL if there is no right subtree.
  ImutAVLTree *getRight() const { return right; }

  /// getHeight - Returns the height of the tree.  A tree with no subtrees
  ///  has a height of 1.
  unsigned getHeight() const { return height; }

  /// getValue - Returns the data value associated with the tree node.
  const value_type& getValue() const { return value; }

  /// find - Finds the subtree associated with the specified key value.
  ///  This method returns NULL if no matching subtree is found.
  ImutAVLTree* find(key_type_ref K) {
    // Standard iterative BST descent driven by ImutInfo's key ordering.
    ImutAVLTree *T = this;
    while (T) {
      key_type_ref CurrentKey = ImutInfo::KeyOfValue(T->getValue());
      if (ImutInfo::isEqual(K,CurrentKey))
        return T;
      else if (ImutInfo::isLess(K,CurrentKey))
        T = T->getLeft();
      else
        T = T->getRight();
    }
    return nullptr;
  }

  /// getMaxElement - Find the subtree associated with the highest ranged
  ///  key value.
  ImutAVLTree* getMaxElement() {
    // The maximum lives at the end of the rightmost spine.
    ImutAVLTree *T = this;
    ImutAVLTree *Right = T->getRight();
    while (Right) { T = Right; Right = T->getRight(); }
    return T;
  }

  /// size - Returns the number of nodes in the tree, which includes
  ///  both leaves and non-leaf nodes.  O(n) — the count is not cached.
  unsigned size() const {
    unsigned n = 1;
    if (const ImutAVLTree* L = getLeft())
      n += L->size();
    if (const ImutAVLTree* R = getRight())
      n += R->size();
    return n;
  }

  /// begin - Returns an iterator that iterates over the nodes of the tree
  ///  in an inorder traversal.  The returned iterator thus refers to the
  ///  tree node with the minimum data element.
  iterator begin() const { return iterator(this); }

  /// end - Returns an iterator for the tree that denotes the end of an
  ///  inorder traversal.
  iterator end() const { return iterator(); }

  /// Returns true if this node's element has the same key and the same
  /// data as V, per ImutInfo's comparators.
  bool isElementEqual(value_type_ref V) const {
    // Compare the keys.
    if (!ImutInfo::isEqual(ImutInfo::KeyOfValue(getValue()),
                           ImutInfo::KeyOfValue(V)))
      return false;

    // Also compare the data values.
    if (!ImutInfo::isDataEqual(ImutInfo::DataOfValue(getValue()),
                               ImutInfo::DataOfValue(V)))
      return false;

    return true;
  }

  bool isElementEqual(const ImutAVLTree* RHS) const {
    return isElementEqual(RHS->getValue());
  }

  /// isEqual - Compares two trees for structural equality and returns true
  ///   if they are equal.  The worst case performance of this operation is
  ///   linear in the sizes of the trees.
  bool isEqual(const ImutAVLTree& RHS) const {
    if (&RHS == this)
      return true;

    iterator LItr = begin(), LEnd = end();
    iterator RItr = RHS.begin(), REnd = RHS.end();

    while (LItr != LEnd && RItr != REnd) {
      // Physically shared subtrees are equal by construction; skip them
      // wholesale instead of walking their elements.
      if (&*LItr == &*RItr) {
        LItr.skipSubTree();
        RItr.skipSubTree();
        continue;
      }

      if (!LItr->isElementEqual(&*RItr))
        return false;

      ++LItr;
      ++RItr;
    }

    // Equal only if both traversals were exhausted together.
    return LItr == LEnd && RItr == REnd;
  }

  /// isNotEqual - Compares two trees for structural inequality.  Performance
  ///  is the same as isEqual.
  bool isNotEqual(const ImutAVLTree& RHS) const { return !isEqual(RHS); }

  /// contains - Returns true if this tree contains a subtree (node) that
  ///  has a data element that matches the specified key.  Complexity
  ///  is logarithmic in the size of the tree.
  bool contains(key_type_ref K) { return (bool) find(K); }

  /// foreach - A member template that invokes operator() of a functor
  ///  object (specified by Callback) for every node/subtree in the tree.
  ///  Nodes are visited using an inorder traversal.
  template <typename Callback>
  void foreach(Callback& C) {
    if (ImutAVLTree* L = getLeft())
      L->foreach(C);

    C(value);

    if (ImutAVLTree* R = getRight())
      R->foreach(C);
  }

  /// validateTree - A utility method that checks that the balancing and
  ///  ordering invariants of the tree are satisfied.  It is a recursive
  ///  method that returns the height of the tree, which is then consumed
  ///  by the enclosing validateTree call.  External callers should ignore the
  ///  return value.  An invalid tree will cause an assertion to fire in
  ///  a debug build.
  unsigned validateTree() const {
    unsigned HL = getLeft() ?
getLeft()->validateTree() : 0; unsigned HR = getRight() ? getRight()->validateTree() : 0; (void) HL; (void) HR; assert(getHeight() == ( HL > HR ? HL : HR ) + 1 && "Height calculation wrong"); assert((HL > HR ? HL-HR : HR-HL) <= 2 && "Balancing invariant violated"); assert((!getLeft() || ImutInfo::isLess(ImutInfo::KeyOfValue(getLeft()->getValue()), ImutInfo::KeyOfValue(getValue()))) && "Value in left child is not less that current value"); assert(!(getRight() || ImutInfo::isLess(ImutInfo::KeyOfValue(getValue()), ImutInfo::KeyOfValue(getRight()->getValue()))) && "Current value is not less that value of right child"); return getHeight(); } //===----------------------------------------------------===// // Internal values. //===----------------------------------------------------===// private: Factory *factory; ImutAVLTree *left; ImutAVLTree *right; ImutAVLTree *prev; ImutAVLTree *next; unsigned height : 28; unsigned IsMutable : 1; unsigned IsDigestCached : 1; unsigned IsCanonicalized : 1; value_type value; uint32_t digest; uint32_t refCount; //===----------------------------------------------------===// // Internal methods (node manipulation; used by Factory). //===----------------------------------------------------===// private: /// ImutAVLTree - Internal constructor that is only called by /// ImutAVLFactory. ImutAVLTree(Factory *f, ImutAVLTree* l, ImutAVLTree* r, value_type_ref v, unsigned height) : factory(f), left(l), right(r), prev(nullptr), next(nullptr), height(height), IsMutable(true), IsDigestCached(false), IsCanonicalized(0), value(v), digest(0), refCount(0) { if (left) left->retain(); if (right) right->retain(); } /// isMutable - Returns true if the left and right subtree references /// (as well as height) can be changed. If this method returns false, /// the tree is truly immutable. Trees returned from an ImutAVLFactory /// object should always have this method return true. 
  /// Further, if this
  ///  method returns false for an instance of ImutAVLTree, all subtrees
  ///  will also have this method return false.
  bool isMutable() const { return IsMutable; }

  /// hasCachedDigest - Returns true if the digest for this tree is cached.
  ///  This can only be true if the tree is immutable.
  bool hasCachedDigest() const { return IsDigestCached; }

  //===----------------------------------------------------===//
  // Mutating operations.  A tree root can be manipulated as
  // long as its reference has not "escaped" from internal
  // methods of a factory object (see below).  When a tree
  // pointer is externally viewable by client code, the
  // internal "mutable bit" is cleared to mark the tree
  // immutable.  Note that a tree that still has its mutable
  // bit set may have children (subtrees) that are themselves
  // immutable.
  //===----------------------------------------------------===//

  /// markImmutable - Clears the mutable flag for a tree.  After this happens,
  ///   it is an error to call setLeft(), setRight(), and setHeight().
  void markImmutable() {
    assert(isMutable() && "Mutable flag already removed.");
    IsMutable = false;
  }

  /// markedCachedDigest - Clears the NoCachedDigest flag for a tree.
  void markedCachedDigest() {
    assert(!hasCachedDigest() && "NoCachedDigest flag already removed.");
    IsDigestCached = true;
  }

  /// setHeight - Changes the height of the tree.  Used internally by
  ///  ImutAVLFactory.
  void setHeight(unsigned h) {
    assert(isMutable() && "Only a mutable tree can have its height changed.");
    height = h;
  }

  // Structural digest: sum of both subtrees' digests plus the hash of this
  // node's profiled value.  Used by the factory's canonicalization cache.
  static uint32_t computeDigest(ImutAVLTree *L, ImutAVLTree *R,
                                value_type_ref V) {
    uint32_t digest = 0;

    if (L)
      digest += L->computeDigest();

    // Compute digest of stored data.
    FoldingSetNodeID ID;
    ImutInfo::Profile(ID,V);
    digest += ID.ComputeHash();

    if (R)
      digest += R->computeDigest();

    return digest;
  }

  uint32_t computeDigest() {
    // Check the lowest bit to determine if digest has actually been
    // pre-computed.
    if (hasCachedDigest())
      return digest;

    uint32_t X = computeDigest(getLeft(), getRight(), getValue());
    digest = X;
    markedCachedDigest();
    return X;
  }

  //===----------------------------------------------------===//
  // Reference count operations.
  //===----------------------------------------------------===//

public:
  void retain() { ++refCount; }

  void release() {
    assert(refCount > 0);
    if (--refCount == 0)
      destroy();
  }

  void destroy() {
    // Release children first; this may cascade further destroys.
    if (left)
      left->release();
    if (right)
      right->release();
    if (IsCanonicalized) {
      // Unlink from the factory's digest-cache chain.
      if (next)
        next->prev = prev;

      if (prev)
        prev->next = next;
      else
        // Head of the chain: repoint the cache bucket at our successor.
        factory->Cache[factory->maskCacheIndex(computeDigest())] = next;
    }

    // We need to clear the mutability bit in case we are
    // destroying the node as part of a sweep in ImutAVLFactory::recoverNodes().
    IsMutable = false;
    factory->freeNodes.push_back(this);
  }
};

//===----------------------------------------------------------------------===//
// Immutable AVL-Tree Factory class.
//===----------------------------------------------------------------------===//

template <typename ImutInfo >
class ImutAVLFactory {
  friend class ImutAVLTree<ImutInfo>;
  typedef ImutAVLTree<ImutInfo> TreeTy;
  typedef typename TreeTy::value_type_ref value_type_ref;
  typedef typename TreeTy::key_type_ref   key_type_ref;

  typedef DenseMap<unsigned, TreeTy*> CacheTy;

  CacheTy Cache;                      // digest -> chain of canonical trees
  uintptr_t Allocator;                // BumpPtrAllocator*; low bit set when
                                      // the allocator is externally owned
  std::vector<TreeTy*> createdNodes;  // scratch nodes made by one operation
  std::vector<TreeTy*> freeNodes;     // recycled nodes available for reuse

  bool ownsAllocator() const {
    return Allocator & 0x1 ? false : true;
  }

  BumpPtrAllocator& getAllocator() const {
    return *reinterpret_cast<BumpPtrAllocator*>(Allocator & ~0x1);
  }

  //===--------------------------------------------------===//
  // Public interface.
  //===--------------------------------------------------===//

public:
  ImutAVLFactory()
    : Allocator(reinterpret_cast<uintptr_t>(new BumpPtrAllocator())) {}

  // External allocator: tag the low bit so the destructor knows not to
  // delete it.
  ImutAVLFactory(BumpPtrAllocator& Alloc)
    : Allocator(reinterpret_cast<uintptr_t>(&Alloc) | 0x1) {}

  ~ImutAVLFactory() {
    if (ownsAllocator()) delete &getAllocator();
  }

  /// Returns a (possibly new) tree equal to T plus the element V.  The
  /// returned root is marked immutable; scratch nodes are recycled.
  TreeTy* add(TreeTy* T, value_type_ref V) {
    T = add_internal(V,T);
    markImmutable(T);
    recoverNodes();
    return T;
  }

  /// Returns a (possibly new) tree equal to T minus the element keyed by V.
  TreeTy* remove(TreeTy* T, key_type_ref V) {
    T = remove_internal(V,T);
    markImmutable(T);
    recoverNodes();
    return T;
  }

  // The empty tree is represented by the null pointer.
  TreeTy* getEmptyTree() const { return nullptr; }

protected:

  //===--------------------------------------------------===//
  // A bunch of quick helper functions used for reasoning
  // about the properties of trees and their children.
  // These have succinct names so that the balancing code
  // is as terse (and readable) as possible.
  //===--------------------------------------------------===//

  bool            isEmpty(TreeTy* T) const { return !T; }
  unsigned        getHeight(TreeTy* T) const { return T ? T->getHeight() : 0; }
  TreeTy*         getLeft(TreeTy* T) const { return T->getLeft(); }
  TreeTy*         getRight(TreeTy* T) const { return T->getRight(); }
  value_type_ref  getValue(TreeTy* T) const { return T->value; }

  // Make sure the index is not the Tombstone or Entry key of the DenseMap.
  static unsigned maskCacheIndex(unsigned I) { return (I & ~0x02); }

  // Height of a node whose children are L and R.
  unsigned incrementHeight(TreeTy* L, TreeTy* R) const {
    unsigned hl = getHeight(L);
    unsigned hr = getHeight(R);
    return (hl > hr ? hl : hr) + 1;
  }

  // Walks T's elements against the iterator range [TI, TE), advancing TI as
  // it goes; returns false on the first mismatch or if the range runs out.
  static bool compareTreeWithSection(TreeTy* T,
                                     typename TreeTy::iterator& TI,
                                     typename TreeTy::iterator& TE) {
    typename TreeTy::iterator I = T->begin(), E = T->end();
    for ( ; I!=E ; ++I, ++TI) {
      if (TI == TE || !I->isElementEqual(&*TI))
        return false;
    }
    return true;
  }

  //===--------------------------------------------------===//
  // "createNode" is used to generate new tree roots that link
  // to other trees.  The function may also simply move links
  // in an existing root if that root is still marked mutable.
  // This is necessary because otherwise our balancing code
  // would leak memory as it would create nodes that are
  // then discarded later before the finished tree is
  // returned to the caller.
  //===--------------------------------------------------===//

  TreeTy* createNode(TreeTy* L, value_type_ref V, TreeTy* R) {
    BumpPtrAllocator& A = getAllocator();
    TreeTy* T;
    if (!freeNodes.empty()) {
      // Reuse a previously destroyed node rather than allocating.
      T = freeNodes.back();
      freeNodes.pop_back();
      assert(T != L);
      assert(T != R);
    }
    else {
      T = (TreeTy*) A.Allocate<TreeTy>();
    }
    // Placement-new over fresh or recycled storage.
    new (T) TreeTy(this, L, R, V, incrementHeight(L,R));
    createdNodes.push_back(T);
    return T;
  }

  TreeTy* createNode(TreeTy* newLeft, TreeTy* oldTree, TreeTy* newRight) {
    return createNode(newLeft, getValue(oldTree), newRight);
  }

  /// Sweeps the scratch list: any node from this operation that is still
  /// mutable and unreferenced was a balancing intermediate — reclaim it.
  void recoverNodes() {
    for (unsigned i = 0, n = createdNodes.size(); i < n; ++i) {
      TreeTy *N = createdNodes[i];
      if (N->isMutable() && N->refCount == 0)
        N->destroy();
    }
    createdNodes.clear();
  }

  /// balanceTree - Used by add_internal and remove_internal to
  ///  balance a newly created tree.
  // Classic AVL rebalancing: single or double rotation depending on which
  // grandchild is taller.  Inputs are the would-be children and root value.
  TreeTy* balanceTree(TreeTy* L, value_type_ref V, TreeTy* R) {
    unsigned hl = getHeight(L);
    unsigned hr = getHeight(R);

    if (hl > hr + 2) {
      // Left-heavy.
      assert(!isEmpty(L) && "Left tree cannot be empty to have a height >= 2");

      TreeTy *LL = getLeft(L);
      TreeTy *LR = getRight(L);

      if (getHeight(LL) >= getHeight(LR))
        // Single right rotation.
        return createNode(LL, L, createNode(LR,V,R));

      assert(!isEmpty(LR) && "LR cannot be empty because it has a height >= 1");

      TreeTy *LRL = getLeft(LR);
      TreeTy *LRR = getRight(LR);

      // Double (left-right) rotation.
      return createNode(createNode(LL,L,LRL), LR, createNode(LRR,V,R));
    }

    if (hr > hl + 2) {
      // Right-heavy: mirror image of the cases above.
      assert(!isEmpty(R) && "Right tree cannot be empty to have a height >= 2");

      TreeTy *RL = getLeft(R);
      TreeTy *RR = getRight(R);

      if (getHeight(RR) >= getHeight(RL))
        return createNode(createNode(L,V,RL), R, RR);

      assert(!isEmpty(RL) && "RL cannot be empty because it has a height >= 1");

      TreeTy *RLL = getLeft(RL);
      TreeTy *RLR = getRight(RL);

      return createNode(createNode(L,V,RLL), RL, createNode(RLR,R,RR));
    }

    // Already within tolerance: just link the pieces together.
    return createNode(L,V,R);
  }

  /// add_internal - Creates a new tree that includes the specified
  ///  data and the data from the original tree.  If the original tree
  ///  already contained the data item, the original tree is returned.
  TreeTy* add_internal(value_type_ref V, TreeTy* T) {
    if (isEmpty(T))
      return createNode(T, V, T);
    assert(!T->isMutable());

    key_type_ref K = ImutInfo::KeyOfValue(V);
    key_type_ref KCurrent = ImutInfo::KeyOfValue(getValue(T));

    if (ImutInfo::isEqual(K,KCurrent))
      // Same key: replace the stored value at this node.
      return createNode(getLeft(T), V, getRight(T));
    else if (ImutInfo::isLess(K,KCurrent))
      return balanceTree(add_internal(V, getLeft(T)), getValue(T), getRight(T));
    else
      return balanceTree(getLeft(T), getValue(T), add_internal(V, getRight(T)));
  }

  /// remove_internal - Creates a new tree that includes all the data
  ///  from the original tree except the specified data.  If the
  ///  specified data did not exist in the original tree, the
  ///  original tree is returned.
  TreeTy* remove_internal(key_type_ref K, TreeTy* T) {
    if (isEmpty(T))
      return T;

    assert(!T->isMutable());

    key_type_ref KCurrent = ImutInfo::KeyOfValue(getValue(T));

    if (ImutInfo::isEqual(K,KCurrent)) {
      return combineTrees(getLeft(T), getRight(T));
    } else if (ImutInfo::isLess(K,KCurrent)) {
      return balanceTree(remove_internal(K, getLeft(T)),
                                            getValue(T), getRight(T));
    } else {
      return balanceTree(getLeft(T), getValue(T),
                         remove_internal(K, getRight(T)));
    }
  }

  // Merges two subtrees whose parent was deleted: the minimum of the right
  // subtree becomes the new root.
  TreeTy* combineTrees(TreeTy* L, TreeTy* R) {
    if (isEmpty(L))
      return R;
    if (isEmpty(R))
      return L;
    TreeTy* OldNode;
    TreeTy* newRight = removeMinBinding(R,OldNode);
    return balanceTree(L, getValue(OldNode), newRight);
  }

  // Removes the minimum node of T, reporting it through Noderemoved, and
  // returns the rebalanced remainder.
  TreeTy* removeMinBinding(TreeTy* T, TreeTy*& Noderemoved) {
    assert(!isEmpty(T));
    if (isEmpty(getLeft(T))) {
      Noderemoved = T;
      return getRight(T);
    }
    return balanceTree(removeMinBinding(getLeft(T), Noderemoved),
                       getValue(T), getRight(T));
  }

  /// markImmutable - Clears the mutable bits of a root and all of its
  ///  descendants.  Stops descending at the first already-immutable node,
  ///  since its whole subtree is guaranteed immutable.
  void markImmutable(TreeTy* T) {
    if (!T || !T->isMutable())
      return;
    T->markImmutable();
    markImmutable(getLeft(T));
    markImmutable(getRight(T));
  }

public:
  /// Returns the canonical representative for TNew: an existing tree with
  /// identical contents if one is cached, otherwise TNew itself (which is
  /// then registered in the cache).
  TreeTy *getCanonicalTree(TreeTy *TNew) {
    if (!TNew)
      return nullptr;

    if (TNew->IsCanonicalized)
      return TNew;

    // Search the hashtable for another tree with the same digest, and
    // if find a collision compare those trees by their contents.
    unsigned digest = TNew->computeDigest();
    TreeTy *&entry = Cache[maskCacheIndex(digest)];
    do {
      if (!entry)
        break;
      for (TreeTy *T = entry ; T != nullptr; T = T->next) {
        // Compare the Contents('T') with Contents('TNew')
        typename TreeTy::iterator TI = T->begin(), TE = T->end();
        if (!compareTreeWithSection(TNew, TI, TE))
          continue;
        if (TI != TE)
          continue; // T has more contents than TNew.
        // Trees did match!  Return 'T'.
if (TNew->refCount == 0) TNew->destroy(); return T; } entry->prev = TNew; TNew->next = entry; } while (false); entry = TNew; TNew->IsCanonicalized = true; return TNew; } }; //===----------------------------------------------------------------------===// // Immutable AVL-Tree Iterators. //===----------------------------------------------------------------------===// template <typename ImutInfo> class ImutAVLTreeGenericIterator { SmallVector<uintptr_t,20> stack; public: using iterator_category = std::bidirectional_iterator_tag; using value_type = ImutAVLTree<ImutInfo>; using difference_type = std::ptrdiff_t; using pointer = value_type *; using reference = value_type &; enum VisitFlag { VisitedNone=0x0, VisitedLeft=0x1, VisitedRight=0x3, Flags=0x3 }; typedef ImutAVLTree<ImutInfo> TreeTy; ImutAVLTreeGenericIterator() {} ImutAVLTreeGenericIterator(const TreeTy *Root) { if (Root) stack.push_back(reinterpret_cast<uintptr_t>(Root)); } TreeTy &operator*() const { assert(!stack.empty()); return *reinterpret_cast<TreeTy *>(stack.back() & ~Flags); } TreeTy *operator->() const { return &*this; } uintptr_t getVisitState() const { assert(!stack.empty()); return stack.back() & Flags; } bool atEnd() const { return stack.empty(); } bool atBeginning() const { return stack.size() == 1 && getVisitState() == VisitedNone; } void skipToParent() { assert(!stack.empty()); stack.pop_back(); if (stack.empty()) return; switch (getVisitState()) { case VisitedNone: stack.back() |= VisitedLeft; break; case VisitedLeft: stack.back() |= VisitedRight; break; default: llvm_unreachable("Unreachable."); } } bool operator==(const ImutAVLTreeGenericIterator &x) const { return stack == x.stack; } bool operator!=(const ImutAVLTreeGenericIterator &x) const { return !(*this == x); } ImutAVLTreeGenericIterator &operator++() { assert(!stack.empty()); TreeTy* Current = reinterpret_cast<TreeTy*>(stack.back() & ~Flags); assert(Current); switch (getVisitState()) { case VisitedNone: if (TreeTy* L = 
Current->getLeft()) stack.push_back(reinterpret_cast<uintptr_t>(L)); else stack.back() |= VisitedLeft; break; case VisitedLeft: if (TreeTy* R = Current->getRight()) stack.push_back(reinterpret_cast<uintptr_t>(R)); else stack.back() |= VisitedRight; break; case VisitedRight: skipToParent(); break; default: llvm_unreachable("Unreachable."); } return *this; } ImutAVLTreeGenericIterator &operator--() { assert(!stack.empty()); TreeTy* Current = reinterpret_cast<TreeTy*>(stack.back() & ~Flags); assert(Current); switch (getVisitState()) { case VisitedNone: stack.pop_back(); break; case VisitedLeft: stack.back() &= ~Flags; // Set state to "VisitedNone." if (TreeTy* L = Current->getLeft()) stack.push_back(reinterpret_cast<uintptr_t>(L) | VisitedRight); break; case VisitedRight: stack.back() &= ~Flags; stack.back() |= VisitedLeft; if (TreeTy* R = Current->getRight()) stack.push_back(reinterpret_cast<uintptr_t>(R) | VisitedRight); break; default: llvm_unreachable("Unreachable."); } return *this; } }; template <typename ImutInfo> class ImutAVLTreeInOrderIterator { typedef ImutAVLTreeGenericIterator<ImutInfo> InternalIteratorTy; InternalIteratorTy InternalItr; public: using iterator_category = std::bidirectional_iterator_tag; using value_type = ImutAVLTree<ImutInfo>; using difference_type = std::ptrdiff_t; using pointer = value_type *; using reference = value_type &; typedef ImutAVLTree<ImutInfo> TreeTy; ImutAVLTreeInOrderIterator(const TreeTy* Root) : InternalItr(Root) { if (Root) ++*this; // Advance to first element. 
} ImutAVLTreeInOrderIterator() : InternalItr() {} bool operator==(const ImutAVLTreeInOrderIterator &x) const { return InternalItr == x.InternalItr; } bool operator!=(const ImutAVLTreeInOrderIterator &x) const { return !(*this == x); } TreeTy &operator*() const { return *InternalItr; } TreeTy *operator->() const { return &*InternalItr; } ImutAVLTreeInOrderIterator &operator++() { do ++InternalItr; while (!InternalItr.atEnd() && InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft); return *this; } ImutAVLTreeInOrderIterator &operator--() { do --InternalItr; while (!InternalItr.atBeginning() && InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft); return *this; } void skipSubTree() { InternalItr.skipToParent(); while (!InternalItr.atEnd() && InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft) ++InternalItr; } }; /// Generic iterator that wraps a T::TreeTy::iterator and exposes /// iterator::getValue() on dereference. template <typename T> struct ImutAVLValueIterator : iterator_adaptor_base< ImutAVLValueIterator<T>, typename T::TreeTy::iterator, typename std::iterator_traits< typename T::TreeTy::iterator>::iterator_category, const typename T::value_type> { ImutAVLValueIterator() = default; explicit ImutAVLValueIterator(typename T::TreeTy *Tree) : ImutAVLValueIterator::iterator_adaptor_base(Tree) {} typename ImutAVLValueIterator::reference operator*() const { return this->I->getValue(); } }; //===----------------------------------------------------------------------===// // Trait classes for Profile information. //===----------------------------------------------------------------------===// /// Generic profile template. The default behavior is to invoke the /// profile method of an object. Specializations for primitive integers /// and generic handling of pointers is done below. 
template <typename T> struct ImutProfileInfo {
  typedef const T  value_type;
  typedef const T& value_type_ref;

  static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
    // Note the (X, ID) argument order expected by FoldingSetTrait::Profile.
    FoldingSetTrait<T>::Profile(X,ID);
  }
};

/// Profile traits for integers.  Folds the raw integer value into the ID
/// rather than going through a FoldingSetTrait specialization.
template <typename T> struct ImutProfileInteger {
  typedef const T  value_type;
  typedef const T& value_type_ref;

  static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
    ID.AddInteger(X);
  }
};

// Specialize ImutProfileInfo for every built-in integer type so they all
// profile via ImutProfileInteger above.
#define PROFILE_INTEGER_INFO(X)\
template<> struct ImutProfileInfo<X> : ImutProfileInteger<X> {};

PROFILE_INTEGER_INFO(char)
PROFILE_INTEGER_INFO(unsigned char)
PROFILE_INTEGER_INFO(short)
PROFILE_INTEGER_INFO(unsigned short)
PROFILE_INTEGER_INFO(unsigned)
PROFILE_INTEGER_INFO(signed)
PROFILE_INTEGER_INFO(long)
PROFILE_INTEGER_INFO(unsigned long)
PROFILE_INTEGER_INFO(long long)
PROFILE_INTEGER_INFO(unsigned long long)

#undef PROFILE_INTEGER_INFO

/// Profile traits for booleans.
template <>
struct ImutProfileInfo<bool> {
  typedef const bool  value_type;
  typedef const bool& value_type_ref;

  static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
    ID.AddBoolean(X);
  }
};

/// Generic profile trait for pointer types. We treat pointers as
/// references to unique objects.
template <typename T>
struct ImutProfileInfo<T*> {
  typedef const T*   value_type;
  typedef value_type value_type_ref;

  static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
    ID.AddPointer(X);
  }
};

//===----------------------------------------------------------------------===//
// Trait classes that contain element comparison operators and type
// definitions used by ImutAVLTree, ImmutableSet, and ImmutableMap. These
// inherit from the profile traits (ImutProfileInfo) to include operations
// for element profiling.
//===----------------------------------------------------------------------===// /// ImutContainerInfo - Generic definition of comparison operations for /// elements of immutable containers that defaults to using /// std::equal_to<> and std::less<> to perform comparison of elements. template <typename T> struct ImutContainerInfo : public ImutProfileInfo<T> { typedef typename ImutProfileInfo<T>::value_type value_type; typedef typename ImutProfileInfo<T>::value_type_ref value_type_ref; typedef value_type key_type; typedef value_type_ref key_type_ref; typedef bool data_type; typedef bool data_type_ref; static key_type_ref KeyOfValue(value_type_ref D) { return D; } static data_type_ref DataOfValue(value_type_ref) { return true; } static bool isEqual(key_type_ref LHS, key_type_ref RHS) { return std::equal_to<key_type>()(LHS,RHS); } static bool isLess(key_type_ref LHS, key_type_ref RHS) { return std::less<key_type>()(LHS,RHS); } static bool isDataEqual(data_type_ref, data_type_ref) { return true; } }; /// ImutContainerInfo - Specialization for pointer values to treat pointers /// as references to unique objects. Pointers are thus compared by /// their addresses. 
template <typename T>
struct ImutContainerInfo<T*> : public ImutProfileInfo<T*> {
  typedef typename ImutProfileInfo<T*>::value_type      value_type;
  typedef typename ImutProfileInfo<T*>::value_type_ref  value_type_ref;
  typedef value_type      key_type;
  typedef value_type_ref  key_type_ref;
  typedef bool            data_type;
  typedef bool            data_type_ref;

  static key_type_ref KeyOfValue(value_type_ref D) { return D; }
  static data_type_ref DataOfValue(value_type_ref) { return true; }

  // Pointers are compared by address identity, not by pointee value.
  static bool isEqual(key_type_ref LHS, key_type_ref RHS) { return LHS == RHS; }
  static bool isLess(key_type_ref LHS, key_type_ref RHS) { return LHS < RHS; }

  static bool isDataEqual(data_type_ref, data_type_ref) { return true; }
};

//===----------------------------------------------------------------------===//
// Immutable Set
//===----------------------------------------------------------------------===//

template <typename ValT, typename ValInfo = ImutContainerInfo<ValT> >
class ImmutableSet {
public:
  typedef typename ValInfo::value_type      value_type;
  typedef typename ValInfo::value_type_ref  value_type_ref;
  typedef ImutAVLTree<ValInfo> TreeTy;

private:
  // Reference-counted root of the underlying AVL tree; null means the
  // empty set.  Retained/released by the ctors/dtor below.
  TreeTy *Root;

public:
  /// Constructs a set from a pointer to a tree root. In general one
  /// should use a Factory object to create sets instead of directly
  /// invoking the constructor, but there are cases where make this
  /// constructor public is useful.
explicit ImmutableSet(TreeTy* R) : Root(R) { if (Root) { Root->retain(); } } ImmutableSet(const ImmutableSet &X) : Root(X.Root) { if (Root) { Root->retain(); } } ImmutableSet &operator=(const ImmutableSet &X) { if (Root != X.Root) { if (X.Root) { X.Root->retain(); } if (Root) { Root->release(); } Root = X.Root; } return *this; } ~ImmutableSet() { if (Root) { Root->release(); } } class Factory { typename TreeTy::Factory F; const bool Canonicalize; public: Factory(bool canonicalize = true) : Canonicalize(canonicalize) {} Factory(BumpPtrAllocator& Alloc, bool canonicalize = true) : F(Alloc), Canonicalize(canonicalize) {} /// getEmptySet - Returns an immutable set that contains no elements. ImmutableSet getEmptySet() { return ImmutableSet(F.getEmptyTree()); } /// add - Creates a new immutable set that contains all of the values /// of the original set with the addition of the specified value. If /// the original set already included the value, then the original set is /// returned and no memory is allocated. The time and space complexity /// of this operation is logarithmic in the size of the original set. /// The memory allocated to represent the set is released when the /// factory object that created the set is destroyed. ImmutableSet add(ImmutableSet Old, value_type_ref V) { TreeTy *NewT = F.add(Old.Root, V); return ImmutableSet(Canonicalize ? F.getCanonicalTree(NewT) : NewT); } /// remove - Creates a new immutable set that contains all of the values /// of the original set with the exception of the specified value. If /// the original set did not contain the value, the original set is /// returned and no memory is allocated. The time and space complexity /// of this operation is logarithmic in the size of the original set. /// The memory allocated to represent the set is released when the /// factory object that created the set is destroyed. 
ImmutableSet remove(ImmutableSet Old, value_type_ref V) { TreeTy *NewT = F.remove(Old.Root, V); return ImmutableSet(Canonicalize ? F.getCanonicalTree(NewT) : NewT); } BumpPtrAllocator& getAllocator() { return F.getAllocator(); } typename TreeTy::Factory *getTreeFactory() const { return const_cast<typename TreeTy::Factory *>(&F); } private: Factory(const Factory& RHS) = delete; void operator=(const Factory& RHS) = delete; }; friend class Factory; /// Returns true if the set contains the specified value. bool contains(value_type_ref V) const { return Root ? Root->contains(V) : false; } bool operator==(const ImmutableSet &RHS) const { return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root; } bool operator!=(const ImmutableSet &RHS) const { return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root; } TreeTy *getRoot() { if (Root) { Root->retain(); } return Root; } TreeTy *getRootWithoutRetain() const { return Root; } /// isEmpty - Return true if the set contains no elements. bool isEmpty() const { return !Root; } /// isSingleton - Return true if the set contains exactly one element. /// This method runs in constant time. bool isSingleton() const { return getHeight() == 1; } template <typename Callback> void foreach(Callback& C) { if (Root) Root->foreach(C); } template <typename Callback> void foreach() { if (Root) { Callback C; Root->foreach(C); } } //===--------------------------------------------------===// // Iterators. //===--------------------------------------------------===// typedef ImutAVLValueIterator<ImmutableSet> iterator; iterator begin() const { return iterator(Root); } iterator end() const { return iterator(); } //===--------------------------------------------------===// // Utility methods. //===--------------------------------------------------===// unsigned getHeight() const { return Root ? 
Root->getHeight() : 0; } static void Profile(FoldingSetNodeID &ID, const ImmutableSet &S) { ID.AddPointer(S.Root); } void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); } //===--------------------------------------------------===// // For testing. //===--------------------------------------------------===// void validateTree() const { if (Root) Root->validateTree(); } }; // NOTE: This may some day replace the current ImmutableSet. template <typename ValT, typename ValInfo = ImutContainerInfo<ValT> > class ImmutableSetRef { public: typedef typename ValInfo::value_type value_type; typedef typename ValInfo::value_type_ref value_type_ref; typedef ImutAVLTree<ValInfo> TreeTy; typedef typename TreeTy::Factory FactoryTy; private: TreeTy *Root; FactoryTy *Factory; public: /// Constructs a set from a pointer to a tree root. In general one /// should use a Factory object to create sets instead of directly /// invoking the constructor, but there are cases where make this /// constructor public is useful. explicit ImmutableSetRef(TreeTy* R, FactoryTy *F) : Root(R), Factory(F) { if (Root) { Root->retain(); } } ImmutableSetRef(const ImmutableSetRef &X) : Root(X.Root), Factory(X.Factory) { if (Root) { Root->retain(); } } ImmutableSetRef &operator=(const ImmutableSetRef &X) { if (Root != X.Root) { if (X.Root) { X.Root->retain(); } if (Root) { Root->release(); } Root = X.Root; Factory = X.Factory; } return *this; } ~ImmutableSetRef() { if (Root) { Root->release(); } } static ImmutableSetRef getEmptySet(FactoryTy *F) { return ImmutableSetRef(0, F); } ImmutableSetRef add(value_type_ref V) { return ImmutableSetRef(Factory->add(Root, V), Factory); } ImmutableSetRef remove(value_type_ref V) { return ImmutableSetRef(Factory->remove(Root, V), Factory); } /// Returns true if the set contains the specified value. bool contains(value_type_ref V) const { return Root ? 
Root->contains(V) : false; } ImmutableSet<ValT> asImmutableSet(bool canonicalize = true) const { return ImmutableSet<ValT>(canonicalize ? Factory->getCanonicalTree(Root) : Root); } TreeTy *getRootWithoutRetain() const { return Root; } bool operator==(const ImmutableSetRef &RHS) const { return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root; } bool operator!=(const ImmutableSetRef &RHS) const { return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root; } /// isEmpty - Return true if the set contains no elements. bool isEmpty() const { return !Root; } /// isSingleton - Return true if the set contains exactly one element. /// This method runs in constant time. bool isSingleton() const { return getHeight() == 1; } //===--------------------------------------------------===// // Iterators. //===--------------------------------------------------===// typedef ImutAVLValueIterator<ImmutableSetRef> iterator; iterator begin() const { return iterator(Root); } iterator end() const { return iterator(); } //===--------------------------------------------------===// // Utility methods. //===--------------------------------------------------===// unsigned getHeight() const { return Root ? Root->getHeight() : 0; } static void Profile(FoldingSetNodeID &ID, const ImmutableSetRef &S) { ID.AddPointer(S.Root); } void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); } //===--------------------------------------------------===// // For testing. //===--------------------------------------------------===// void validateTree() const { if (Root) Root->validateTree(); } }; } // end namespace llvm #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/StringSwitch.h
//===--- StringSwitch.h - Switch-on-literal-string Construct --------------===/ // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. //===----------------------------------------------------------------------===/ // // This file implements the StringSwitch template, which mimics a switch() // statement whose cases are string literals. // //===----------------------------------------------------------------------===/ #ifndef LLVM_ADT_STRINGSWITCH_H #define LLVM_ADT_STRINGSWITCH_H #include "llvm/ADT/StringRef.h" #include <cassert> #include <cstring> namespace llvm { /// \brief A switch()-like statement whose cases are string literals. /// /// The StringSwitch class is a simple form of a switch() statement that /// determines whether the given string matches one of the given string /// literals. The template type parameter \p T is the type of the value that /// will be returned from the string-switch expression. For example, /// the following code switches on the name of a color in \c argv[i]: /// /// \code /// Color color = StringSwitch<Color>(argv[i]) /// .Case("red", Red) /// .Case("orange", Orange) /// .Case("yellow", Yellow) /// .Case("green", Green) /// .Case("blue", Blue) /// .Case("indigo", Indigo) /// .Cases("violet", "purple", Violet) /// .Default(UnknownColor); /// \endcode template<typename T, typename R = T> class StringSwitch { /// \brief The string we are matching. StringRef Str; /// \brief The pointer to the result of this switch statement, once known, /// null before that. 
const T *Result; public: explicit StringSwitch(StringRef S) : Str(S), Result(nullptr) { } template<unsigned N> StringSwitch& Case(const char (&S)[N], const T& Value) { if (!Result && N-1 == Str.size() && (std::memcmp(S, Str.data(), N-1) == 0)) { Result = &Value; } return *this; } template<unsigned N> StringSwitch& EndsWith(const char (&S)[N], const T &Value) { if (!Result && Str.size() >= N-1 && std::memcmp(S, Str.data() + Str.size() + 1 - N, N-1) == 0) { Result = &Value; } return *this; } template<unsigned N> StringSwitch& StartsWith(const char (&S)[N], const T &Value) { if (!Result && Str.size() >= N-1 && std::memcmp(S, Str.data(), N-1) == 0) { Result = &Value; } return *this; } template<unsigned N0, unsigned N1> StringSwitch& Cases(const char (&S0)[N0], const char (&S1)[N1], const T& Value) { return Case(S0, Value).Case(S1, Value); } template<unsigned N0, unsigned N1, unsigned N2> StringSwitch& Cases(const char (&S0)[N0], const char (&S1)[N1], const char (&S2)[N2], const T& Value) { return Case(S0, Value).Case(S1, Value).Case(S2, Value); } template<unsigned N0, unsigned N1, unsigned N2, unsigned N3> StringSwitch& Cases(const char (&S0)[N0], const char (&S1)[N1], const char (&S2)[N2], const char (&S3)[N3], const T& Value) { return Case(S0, Value).Case(S1, Value).Case(S2, Value).Case(S3, Value); } template<unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4> StringSwitch& Cases(const char (&S0)[N0], const char (&S1)[N1], const char (&S2)[N2], const char (&S3)[N3], const char (&S4)[N4], const T& Value) { return Case(S0, Value).Case(S1, Value).Case(S2, Value).Case(S3, Value) .Case(S4, Value); } R Default(const T& Value) const { if (Result) return *Result; return Value; } operator R() const { assert(Result && "Fell off the end of a string-switch"); return *Result; } }; } // end namespace llvm #endif // LLVM_ADT_STRINGSWITCH_H
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/Hashing.h
//===-- llvm/ADT/Hashing.h - Utilities for hashing --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the newly proposed standard C++ interfaces for hashing // arbitrary data and building hash functions for user-defined types. This // interface was originally proposed in N3333[1] and is currently under review // for inclusion in a future TR and/or standard. // // The primary interfaces provide are comprised of one type and three functions: // // -- 'hash_code' class is an opaque type representing the hash code for some // data. It is the intended product of hashing, and can be used to implement // hash tables, checksumming, and other common uses of hashes. It is not an // integer type (although it can be converted to one) because it is risky // to assume much about the internals of a hash_code. In particular, each // execution of the program has a high probability of producing a different // hash_code for a given input. Thus their values are not stable to save or // persist, and should only be used during the execution for the // construction of hashing datastructures. // // -- 'hash_value' is a function designed to be overloaded for each // user-defined type which wishes to be used within a hashing context. It // should be overloaded within the user-defined type's namespace and found // via ADL. Overloads for primitive types are provided by this library. // // -- 'hash_combine' and 'hash_combine_range' are functions designed to aid // programmers in easily and intuitively combining a set of data into // a single hash_code for their object. They should only logically be used // within the implementation of a 'hash_value' routine or similar context. 
// // Note that 'hash_combine_range' contains very special logic for hashing // a contiguous array of integers or pointers. This logic is *extremely* fast, // on a modern Intel "Gainestown" Xeon (Nehalem uarch) @2.2 GHz, these were // benchmarked at over 6.5 GiB/s for large keys, and <20 cycles/hash for keys // under 32-bytes. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_HASHING_H #define LLVM_ADT_HASHING_H #include "llvm/Support/DataTypes.h" #include "llvm/Support/Host.h" #include "llvm/Support/SwapByteOrder.h" #include "llvm/Support/type_traits.h" #include <algorithm> #include <cassert> #include <cstring> #include <iterator> #include <string> #include <utility> namespace llvm { /// \brief An opaque object representing a hash code. /// /// This object represents the result of hashing some entity. It is intended to /// be used to implement hashtables or other hashing-based data structures. /// While it wraps and exposes a numeric value, this value should not be /// trusted to be stable or predictable across processes or executions. /// /// In order to obtain the hash_code for an object 'x': /// \code /// using llvm::hash_value; /// llvm::hash_code code = hash_value(x); /// \endcode class hash_code { size_t value; public: /// \brief Default construct a hash_code. /// Note that this leaves the value uninitialized. hash_code() = default; /// \brief Form a hash code directly from a numerical value. hash_code(size_t value) : value(value) {} /// \brief Convert the hash code to its numerical value for use. /*explicit*/ operator size_t() const { return value; } friend bool operator==(const hash_code &lhs, const hash_code &rhs) { return lhs.value == rhs.value; } friend bool operator!=(const hash_code &lhs, const hash_code &rhs) { return lhs.value != rhs.value; } /// \brief Allow a hash_code to be directly run through hash_value. 
friend size_t hash_value(const hash_code &code) { return code.value; } }; /// \brief Compute a hash_code for any integer value. /// /// Note that this function is intended to compute the same hash_code for /// a particular value without regard to the pre-promotion type. This is in /// contrast to hash_combine which may produce different hash_codes for /// differing argument types even if they would implicit promote to a common /// type without changing the value. template <typename T> typename std::enable_if<is_integral_or_enum<T>::value, hash_code>::type hash_value(T value); /// \brief Compute a hash_code for a pointer's address. /// /// N.B.: This hashes the *address*. Not the value and not the type. template <typename T> hash_code hash_value(const T *ptr); /// \brief Compute a hash_code for a pair of objects. template <typename T, typename U> hash_code hash_value(const std::pair<T, U> &arg); /// \brief Compute a hash_code for a standard string. template <typename T> hash_code hash_value(const std::basic_string<T> &arg); /// \brief Override the execution seed with a fixed value. /// /// This hashing library uses a per-execution seed designed to change on each /// run with high probability in order to ensure that the hash codes are not /// attackable and to ensure that output which is intended to be stable does /// not rely on the particulars of the hash codes produced. /// /// That said, there are use cases where it is important to be able to /// reproduce *exactly* a specific behavior. To that end, we provide a function /// which will forcibly set the seed to a fixed value. This must be done at the /// start of the program, before any hashes are computed. Also, it cannot be /// undone. This makes it thread-hostile and very hard to use outside of /// immediately on start of a simple program designed for reproducible /// behavior. 
void set_fixed_execution_hash_seed(size_t fixed_value);


// All of the implementation details of actually computing the various hash
// code values are held within this namespace. These routines are included in
// the header file mainly to allow inlining and constant propagation.
namespace hashing {
namespace detail {

/// Read 8 bytes starting at \p p as a uint64_t, byte-swapping on big-endian
/// hosts so hashing always operates on a little-endian view of the data.
inline uint64_t fetch64(const char *p) {
  uint64_t result;
  memcpy(&result, p, sizeof(result));
  if (sys::IsBigEndianHost)
    sys::swapByteOrder(result);
  return result;
}

/// Read 4 bytes starting at \p p (see fetch64 for byte-order handling).
inline uint32_t fetch32(const char *p) {
  uint32_t result;
  memcpy(&result, p, sizeof(result));
  if (sys::IsBigEndianHost)
    sys::swapByteOrder(result);
  return result;
}

/// Some primes between 2^63 and 2^64 for various uses.
static const uint64_t k0 = 0xc3a5c85c97cb3127ULL;
static const uint64_t k1 = 0xb492b66fbe98f273ULL;
static const uint64_t k2 = 0x9ae16a3b2f90404fULL;
static const uint64_t k3 = 0xc949d7c7509e6557ULL;

/// \brief Bitwise right rotate.
/// Normally this will compile to a single instruction, especially if the
/// shift is a manifest constant.
inline uint64_t rotate(uint64_t val, size_t shift) {
  // Avoid shifting by 64: doing so yields an undefined result.
  return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
}

/// Fold the high 17 bits of \p val back into its low bits.
inline uint64_t shift_mix(uint64_t val) {
  return val ^ (val >> 47);
}

/// Combine two 64-bit values into a single 64-bit hash value.
inline uint64_t hash_16_bytes(uint64_t low, uint64_t high) {
  // Murmur-inspired hashing.
  const uint64_t kMul = 0x9ddfea08eb382d69ULL;
  uint64_t a = (low ^ high) * kMul;
  a ^= (a >> 47);
  uint64_t b = (high ^ a) * kMul;
  b ^= (b >> 47);
  b *= kMul;
  return b;
}

/// Hash 1-3 bytes: the reads of s[0], s[len >> 1], and s[len - 1] cover all
/// three possible lengths without branching.
inline uint64_t hash_1to3_bytes(const char *s, size_t len, uint64_t seed) {
  uint8_t a = s[0];
  uint8_t b = s[len >> 1];
  uint8_t c = s[len - 1];
  uint32_t y = static_cast<uint32_t>(a) + (static_cast<uint32_t>(b) << 8);
  uint32_t z = len + (static_cast<uint32_t>(c) << 2);
  return shift_mix(y * k2 ^ z * k3 ^ seed) * k2;
}

/// Hash 4-8 bytes using two (possibly overlapping) 4-byte reads from the
/// front and back of the buffer.
inline uint64_t hash_4to8_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t a = fetch32(s);
  return hash_16_bytes(len + (a << 3), seed ^ fetch32(s + len - 4));
}

/// Hash 9-16 bytes using two (possibly overlapping) 8-byte reads.
inline uint64_t hash_9to16_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t a = fetch64(s);
  uint64_t b = fetch64(s + len - 8);
  return hash_16_bytes(seed ^ a, rotate(b + len, len)) ^ b;
}

/// Hash 17-32 bytes of data.
inline uint64_t hash_17to32_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t a = fetch64(s) * k1;
  uint64_t b = fetch64(s + 8);
  uint64_t c = fetch64(s + len - 8) * k2;
  uint64_t d = fetch64(s + len - 16) * k0;
  return hash_16_bytes(rotate(a - b, 43) + rotate(c ^ seed, 30) + d,
                       a + rotate(b ^ k3, 20) - c + len + seed);
}

/// Hash 33-64 bytes of data.
inline uint64_t hash_33to64_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t z = fetch64(s + 24);
  uint64_t a = fetch64(s) + (len + fetch64(s + len - 16)) * k0;
  uint64_t b = rotate(a + z, 52);
  uint64_t c = rotate(a, 37);
  a += fetch64(s + 8);
  c += rotate(a, 7);
  a += fetch64(s + 16);
  uint64_t vf = a + z;
  uint64_t vs = b + rotate(a, 31) + c;
  a = fetch64(s + 16) + fetch64(s + len - 32);
  z = fetch64(s + len - 8);
  b = rotate(a + z, 52);
  c = rotate(a, 37);
  a += fetch64(s + len - 24);
  c += rotate(a, 7);
  a += fetch64(s + len - 16);
  uint64_t wf = a + z;
  uint64_t ws = b + rotate(a, 31) + c;
  uint64_t r = shift_mix((vf + ws) * k2 + (wf + vs) * k0);
  return shift_mix((seed ^ (r * k0)) + vs) * k2;
}

/// Dispatch an input of at most 64 bytes to the appropriate fixed-size hash
/// routine above; length 0 hashes to a pure function of the seed.
/// NOTE(review): the 4-8 byte case is tested first — presumably because it
/// is the most common key size.
inline uint64_t hash_short(const char *s, size_t length, uint64_t seed) {
  if (length >= 4 && length <= 8)
    return hash_4to8_bytes(s, length, seed);
  if (length > 8 && length <= 16)
    return hash_9to16_bytes(s, length, seed);
  if (length > 16 && length <= 32)
    return hash_17to32_bytes(s, length, seed);
  if (length > 32)
    return hash_33to64_bytes(s, length, seed);
  if (length != 0)
    return hash_1to3_bytes(s, length, seed);

  return k2 ^ seed;
}

/// \brief The intermediate state used during hashing.
/// Currently, the algorithm for computing hash codes is based on CityHash and
/// keeps 56 bytes of arbitrary state.
struct hash_state {
  uint64_t h0, h1, h2, h3, h4, h5, h6;

  /// \brief Create a new hash_state structure and initialize it based on the
  /// seed and the first 64-byte chunk.
  /// This effectively performs the initial mix.
  static hash_state create(const char *s, uint64_t seed) {
    hash_state state =
        { 0, seed, hash_16_bytes(seed, k1), rotate(seed ^ k1, 49),
          seed * k1, shift_mix(seed), 0 };
    state.h6 = hash_16_bytes(state.h4, state.h5);
    state.mix(s);
    return state;
  }

  /// \brief Mix 32-bytes from the input sequence into the 16-bytes of 'a'
  /// and 'b', including whatever is already in 'a' and 'b'.
  static void mix_32_bytes(const char *s, uint64_t &a, uint64_t &b) {
    a += fetch64(s);
    uint64_t c = fetch64(s + 24);
    b = rotate(b + a + c, 21);
    uint64_t d = a;
    a += fetch64(s + 8) + fetch64(s + 16);
    b += rotate(a, 44) + d;
    a += c;
  }

  /// \brief Mix in a 64-byte buffer of data.
  /// We mix all 64 bytes even when the chunk length is smaller, but we
  /// record the actual length.
  void mix(const char *s) {
    h0 = rotate(h0 + h1 + h3 + fetch64(s + 8), 37) * k1;
    h1 = rotate(h1 + h4 + fetch64(s + 48), 42) * k1;
    h0 ^= h6;
    h1 += h3 + fetch64(s + 40);
    h2 = rotate(h2 + h5, 33) * k1;
    h3 = h4 * k1;
    h4 = h0 + h5;
    mix_32_bytes(s, h3, h4);
    h5 = h2 + h6;
    h6 = h1 + fetch64(s + 16);
    mix_32_bytes(s + 32, h5, h6);
    std::swap(h2, h0);
  }

  /// \brief Compute the final 64-bit hash code value based on the current
  /// state and the length of bytes hashed.
  uint64_t finalize(size_t length) {
    return hash_16_bytes(hash_16_bytes(h3, h5) + shift_mix(h1) * k1 + h2,
                         hash_16_bytes(h4, h6) + shift_mix(length) * k1 + h0);
  }
};

/// \brief A global, fixed seed-override variable.
///
/// This variable can be set using the \see llvm::set_fixed_execution_seed
/// function. See that function for details. Do not, under any circumstances,
/// set or read this variable.
extern size_t fixed_seed_override;

/// Return the seed mixed into every hash: a fixed prime, unless a non-zero
/// override was installed via set_fixed_execution_hash_seed.
inline size_t get_execution_seed() {
  // FIXME: This needs to be a per-execution seed. This is just a placeholder
  // implementation. Switching to a per-execution seed is likely to flush out
  // instability bugs and so will happen as its own commit.
  //
  // However, if there is a fixed seed override set the first time this is
  // called, return that instead of the per-execution seed.
  const uint64_t seed_prime = 0xff51afd7ed558ccdULL;
  size_t seed = fixed_seed_override ? fixed_seed_override
                                    : (size_t)seed_prime;
  return seed;
}


/// \brief Trait to indicate whether a type's bits can be hashed directly.
///
/// A type trait which is true if we want to combine values for hashing by
/// reading the underlying data. It is false if values of this type must
/// first be passed to hash_value, and the resulting hash_codes combined.
//
// FIXME: We want to replace is_integral_or_enum and is_pointer here with
// a predicate which asserts that comparing the underlying storage of two
// values of the type for equality is equivalent to comparing the two values
// for equality. For all the platforms we care about, this holds for integers
// and pointers, but there are platforms where it doesn't and we would like to
// support user-defined types which happen to satisfy this property.
// The "64 % sizeof(T) == 0" clause ensures a whole number of T objects fits
// exactly into the 64-byte mixing buffer used by the hashing routines.
template <typename T> struct is_hashable_data
  : std::integral_constant<bool, ((is_integral_or_enum<T>::value ||
                                   std::is_pointer<T>::value) &&
                                  64 % sizeof(T) == 0)> {};

// Special case std::pair to detect when both types are viable and when there
// is no alignment-derived padding in the pair. This is a bit of a lie because
// std::pair isn't truly POD, but it's close enough in all reasonable
// implementations for our use case of hashing the underlying data.
template <typename T, typename U> struct is_hashable_data<std::pair<T, U> >
  : std::integral_constant<bool, (is_hashable_data<T>::value &&
                                  is_hashable_data<U>::value &&
                                  (sizeof(T) + sizeof(U)) ==
                                   sizeof(std::pair<T, U>))> {};

/// \brief Helper to get the hashable data representation for a type.
/// This variant is enabled when the type itself can be used.
template <typename T>
typename std::enable_if<is_hashable_data<T>::value, T>::type
get_hashable_data(const T &value) {
  return value;
}
/// \brief Helper to get the hashable data representation for a type.
/// This variant is enabled when we must first call hash_value and use the
/// result as our data.
template <typename T>
typename std::enable_if<!is_hashable_data<T>::value, size_t>::type
get_hashable_data(const T &value) {
  // The using-declaration enables ADL so user-provided hash_value overloads
  // in other namespaces are found alongside llvm's.
  using ::llvm::hash_value;
  return hash_value(value);
}
template <typename T> bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T& value, size_t offset = 0) { size_t store_size = sizeof(value) - offset; if (buffer_ptr + store_size > buffer_end) return false; const char *value_data = reinterpret_cast<const char *>(&value); memcpy(buffer_ptr, value_data + offset, store_size); buffer_ptr += store_size; return true; } /// \brief Implement the combining of integral values into a hash_code. /// /// This overload is selected when the value type of the iterator is /// integral. Rather than computing a hash_code for each object and then /// combining them, this (as an optimization) directly combines the integers. template <typename InputIteratorT> hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) { const size_t seed = get_execution_seed(); char buffer[64], *buffer_ptr = buffer; char *const buffer_end = std::end(buffer); while (first != last && store_and_advance(buffer_ptr, buffer_end, get_hashable_data(*first))) ++first; if (first == last) return hash_short(buffer, buffer_ptr - buffer, seed); assert(buffer_ptr == buffer_end); hash_state state = state.create(buffer, seed); size_t length = 64; while (first != last) { // Fill up the buffer. We don't clear it, which re-mixes the last round // when only a partial 64-byte chunk is left. buffer_ptr = buffer; while (first != last && store_and_advance(buffer_ptr, buffer_end, get_hashable_data(*first))) ++first; // Rotate the buffer if we did a partial fill in order to simulate doing // a mix of the last 64-bytes. That is how the algorithm works when we // have a contiguous byte sequence, and we want to emulate that here. std::rotate(buffer, buffer_ptr, buffer_end); // Mix this chunk into the current state. state.mix(buffer); length += buffer_ptr - buffer; }; return state.finalize(length); } /// \brief Implement the combining of integral values into a hash_code. 
/// /// This overload is selected when the value type of the iterator is integral /// and when the input iterator is actually a pointer. Rather than computing /// a hash_code for each object and then combining them, this (as an /// optimization) directly combines the integers. Also, because the integers /// are stored in contiguous memory, this routine avoids copying each value /// and directly reads from the underlying memory. template <typename ValueT> typename std::enable_if<is_hashable_data<ValueT>::value, hash_code>::type hash_combine_range_impl(ValueT *first, ValueT *last) { const size_t seed = get_execution_seed(); const char *s_begin = reinterpret_cast<const char *>(first); const char *s_end = reinterpret_cast<const char *>(last); const size_t length = std::distance(s_begin, s_end); if (length <= 64) return hash_short(s_begin, length, seed); const char *s_aligned_end = s_begin + (length & ~63); hash_state state = state.create(s_begin, seed); s_begin += 64; while (s_begin != s_aligned_end) { state.mix(s_begin); s_begin += 64; } if (length & 63) state.mix(s_end - 64); return state.finalize(length); } } // namespace detail } // namespace hashing /// \brief Compute a hash_code for a sequence of values. /// /// This hashes a sequence of values. It produces the same hash_code as /// 'hash_combine(a, b, c, ...)', but can run over arbitrary sized sequences /// and is significantly faster given pointers and types which can be hashed as /// a sequence of bytes. template <typename InputIteratorT> hash_code hash_combine_range(InputIteratorT first, InputIteratorT last) { return ::llvm::hashing::detail::hash_combine_range_impl(first, last); } // Implementation details for hash_combine. namespace hashing { namespace detail { /// \brief Helper class to manage the recursive combining of hash_combine /// arguments. /// /// This class exists to manage the state and various calls involved in the /// recursive combining of arguments used in hash_combine. 
/// It is particularly useful at minimizing the code in the recursive calls
/// to ease the pain caused by a lack of variadic functions.
struct hash_combine_recursive_helper {
  // 64-byte staging buffer; arguments are packed here until it fills, then the
  // whole buffer is mixed into `state`.
  char buffer[64];
  hash_state state;
  const size_t seed;

public:
  /// \brief Construct a recursive hash combining helper.
  ///
  /// This sets up the state for a recursive hash combine, including getting
  /// the seed and buffer setup.
  hash_combine_recursive_helper()
    : seed(get_execution_seed()) {}

  /// \brief Combine one chunk of data into the current in-flight hash.
  ///
  /// This merges one chunk of data into the hash. First it tries to buffer
  /// the data. If the buffer is full, it hashes the buffer into its
  /// hash_state, empties it, and then merges the new chunk in. This also
  /// handles cases where the data straddles the end of the buffer.
  template <typename T>
  char *combine_data(size_t &length, char *buffer_ptr, char *buffer_end,
                     T data) {
    if (!store_and_advance(buffer_ptr, buffer_end, data)) {
      // Check for skew which prevents the buffer from being packed, and do
      // a partial store into the buffer to fill it. This is only a concern
      // with the variadic combine because that formation can have varying
      // argument types.
      size_t partial_store_size = buffer_end - buffer_ptr;
      memcpy(buffer_ptr, &data, partial_store_size);

      // If the store fails, our buffer is full and ready to hash. We have to
      // either initialize the hash state (on the first full buffer) or mix
      // this buffer into the existing hash state. Length tracks the *hashed*
      // length, not the buffered length.
      if (length == 0) {
        // `state` is a constructed member here, so calling the static
        // create() through it is safe (unlike the free-function paths).
        state = state.create(buffer, seed);
        length = 64;
      } else {
        // Mix this chunk into the current state and bump length up by 64.
        state.mix(buffer);
        length += 64;
      }
      // Reset the buffer_ptr to the head of the buffer for the next chunk of
      // data.
      buffer_ptr = buffer;

      // Try again to store into the buffer -- this cannot fail as we only
      // store types smaller than the buffer.
      if (!store_and_advance(buffer_ptr, buffer_end, data,
                             partial_store_size))
        abort();
    }
    return buffer_ptr;
  }

  /// \brief Recursive, variadic combining method.
  ///
  /// This function recurses through each argument, combining that argument
  /// into a single hash.
  template <typename T, typename ...Ts>
  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
                    const T &arg, const Ts &...args) {
    buffer_ptr = combine_data(length, buffer_ptr, buffer_end,
                              get_hashable_data(arg));

    // Recurse to the next argument.
    return combine(length, buffer_ptr, buffer_end, args...);
  }

  /// \brief Base case for recursive, variadic combining.
  ///
  /// The base case when combining arguments recursively is reached when all
  /// arguments have been handled. It flushes the remaining buffer and
  /// constructs a hash_code.
  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end) {
    // Check whether the entire set of values fit in the buffer. If so, we'll
    // use the optimized short hashing routine and skip state entirely.
    // (length == 0 means the buffer was never flushed, i.e. everything fit.)
    if (length == 0)
      return hash_short(buffer, buffer_ptr - buffer, seed);

    // Mix the final buffer, rotating it if we did a partial fill in order to
    // simulate doing a mix of the last 64-bytes. That is how the algorithm
    // works when we have a contiguous byte sequence, and we want to emulate
    // that here.
    std::rotate(buffer, buffer_ptr, buffer_end);

    // Mix this chunk into the current state.
    state.mix(buffer);
    length += buffer_ptr - buffer;

    return state.finalize(length);
  }
};

} // namespace detail
} // namespace hashing

/// \brief Combine values into a single hash_code.
///
/// This routine accepts a varying number of arguments of any type. It will
/// attempt to combine them into a single hash_code. For user-defined types it
/// attempts to call a \see hash_value overload (via ADL) for the type. For
/// integer and pointer types it directly combines their data into the
/// resulting hash_code.
/// /// The result is suitable for returning from a user's hash_value /// *implementation* for their user-defined type. Consumers of a type should /// *not* call this routine, they should instead call 'hash_value'. template <typename ...Ts> hash_code hash_combine(const Ts &...args) { // Recursively hash each argument using a helper class. ::llvm::hashing::detail::hash_combine_recursive_helper helper; return helper.combine(0, helper.buffer, helper.buffer + 64, args...); } // Implementation details for implementations of hash_value overloads provided // here. namespace hashing { namespace detail { /// \brief Helper to hash the value of a single integer. /// /// Overloads for smaller integer types are not provided to ensure consistent /// behavior in the presence of integral promotions. Essentially, /// "hash_value('4')" and "hash_value('0' + 4)" should be the same. inline hash_code hash_integer_value(uint64_t value) { // Similar to hash_4to8_bytes but using a seed instead of length. const uint64_t seed = get_execution_seed(); const char *s = reinterpret_cast<const char *>(&value); const uint64_t a = fetch32(s); return hash_16_bytes(seed + (a << 3), fetch32(s + 4)); } } // namespace detail } // namespace hashing // Declared and documented above, but defined here so that any of the hashing // infrastructure is available. template <typename T> typename std::enable_if<is_integral_or_enum<T>::value, hash_code>::type hash_value(T value) { return ::llvm::hashing::detail::hash_integer_value(value); } // Declared and documented above, but defined here so that any of the hashing // infrastructure is available. template <typename T> hash_code hash_value(const T *ptr) { return ::llvm::hashing::detail::hash_integer_value( reinterpret_cast<uintptr_t>(ptr)); } // Declared and documented above, but defined here so that any of the hashing // infrastructure is available. 
template <typename T, typename U> hash_code hash_value(const std::pair<T, U> &arg) { return hash_combine(arg.first, arg.second); } // Declared and documented above, but defined here so that any of the hashing // infrastructure is available. template <typename T> hash_code hash_value(const std::basic_string<T> &arg) { return hash_combine_range(arg.begin(), arg.end()); } } // namespace llvm #endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/SmallPtrSet.h
//===- llvm/ADT/SmallPtrSet.h - 'Normally small' pointer set ----*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the SmallPtrSet class. See the doxygen comment for // SmallPtrSetImplBase for more details on the algorithm used. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_SMALLPTRSET_H #define LLVM_ADT_SMALLPTRSET_H #include "llvm/Support/Compiler.h" #include "llvm/Support/DataTypes.h" #include "llvm/Support/PointerLikeTypeTraits.h" #include <cassert> #include <cstddef> #include <cstring> #include <iterator> #include <utility> namespace llvm { class SmallPtrSetIteratorImpl; /// SmallPtrSetImplBase - This is the common code shared among all the /// SmallPtrSet<>'s, which is almost everything. SmallPtrSet has two modes, one /// for small and one for large sets. /// /// Small sets use an array of pointers allocated in the SmallPtrSet object, /// which is treated as a simple array of pointers. When a pointer is added to /// the set, the array is scanned to see if the element already exists, if not /// the element is 'pushed back' onto the array. If we run out of space in the /// array, we grow into the 'large set' case. SmallSet should be used when the /// sets are often small. In this case, no memory allocation is used, and only /// light-weight and cache-efficient scanning is used. /// /// Large sets use a classic exponentially-probed hash table. Empty buckets are /// represented with an illegal pointer value (-1) to allow null pointers to be /// inserted. Tombstones are represented with another illegal pointer value /// (-2), to allow deletion. The hash table is resized when the table is 3/4 or /// more. When this happens, the table is doubled in size. 
///
class SmallPtrSetImplBase {
  friend class SmallPtrSetIteratorImpl;
protected:
  /// SmallArray - Points to a fixed size set of buckets, used in 'small mode'.
  const void **SmallArray;
  /// CurArray - This is the current set of buckets.  If equal to SmallArray,
  /// then the set is in 'small mode'.
  const void **CurArray;
  /// CurArraySize - The allocated size of CurArray, always a power of two.
  unsigned CurArraySize;

  /// Number of elements in CurArray that contain a value or are a tombstone.
  /// If small, all these elements are at the beginning of CurArray and the rest
  /// is uninitialized.
  unsigned NumNonEmpty;
  /// Number of tombstones in CurArray.
  unsigned NumTombstones;

  // Helpers to copy and move construct a SmallPtrSet.
  SmallPtrSetImplBase(const void **SmallStorage,
                      const SmallPtrSetImplBase &that);
  SmallPtrSetImplBase(const void **SmallStorage, unsigned SmallSize,
                      SmallPtrSetImplBase &&that);
  explicit SmallPtrSetImplBase(const void **SmallStorage, unsigned SmallSize)
      : SmallArray(SmallStorage), CurArray(SmallStorage),
        CurArraySize(SmallSize), NumNonEmpty(0), NumTombstones(0) {
    assert(SmallSize && (SmallSize & (SmallSize-1)) == 0 &&
           "Initial size must be a power of two!");
  }
  ~SmallPtrSetImplBase();

public:
  typedef unsigned size_type;
  bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const { return size() == 0; }
  // Live element count: tombstones are counted in NumNonEmpty, so subtract.
  size_type size() const { return NumNonEmpty - NumTombstones; }

  void clear() {
    // If the capacity of the array is huge, and the # elements used is small,
    // shrink the array.
    if (!isSmall()) {
      if (size() * 4 < CurArraySize && CurArraySize > 32)
        return shrink_and_clear();
      // Fill the array with empty markers.
      memset(CurArray, -1, CurArraySize * sizeof(void *));
    }

    NumNonEmpty = 0;
    NumTombstones = 0;
  }

protected:
  static void *getTombstoneMarker() { return reinterpret_cast<void*>(-2); }
  static void *getEmptyMarker() {
    // Note that -1 is chosen to make clear() efficiently implementable with
    // memset and because it's not a valid pointer value.
    return reinterpret_cast<void*>(-1);
  }

  // One-past-the-end of the occupied region: in small mode only the first
  // NumNonEmpty slots are initialized; in big mode the whole table is valid.
  const void **EndPointer() const {
    return isSmall() ? CurArray + NumNonEmpty : CurArray + CurArraySize;
  }

  /// insert_imp - This returns true if the pointer was new to the set, false if
  /// it was already in the set. This is hidden from the client so that the
  /// derived class can check that the right type of pointer is passed in.
  std::pair<const void *const *, bool> insert_imp(const void *Ptr) {
    if (isSmall()) {
      // Check to see if it is already in the set.
      const void **LastTombstone = nullptr;
      for (const void **APtr = SmallArray, **E = SmallArray + NumNonEmpty;
           APtr != E; ++APtr) {
        const void *Value = *APtr;
        if (Value == Ptr)
          return std::make_pair(APtr, false);
        if (Value == getTombstoneMarker())
          LastTombstone = APtr;
      }

      // Did we find any tombstone marker?
      if (LastTombstone != nullptr) {
        // Reuse the tombstone slot instead of growing the occupied region.
        *LastTombstone = Ptr;
        --NumTombstones;
        return std::make_pair(LastTombstone, true);
      }

      // Nope, there isn't.  If we stay small, just 'pushback' now.
      if (NumNonEmpty < CurArraySize) {
        SmallArray[NumNonEmpty++] = Ptr;
        return std::make_pair(SmallArray + (NumNonEmpty - 1), true);
      }
      // Otherwise, hit the big set case, which will call grow.
    }
    return insert_imp_big(Ptr);
  }

  /// erase_imp - If the set contains the specified pointer, remove it and
  /// return true, otherwise return false.  This is hidden from the client so
  /// that the derived class can check that the right type of pointer is passed
  /// in.
  bool erase_imp(const void * Ptr);

  bool count_imp(const void * Ptr) const {
    if (isSmall()) {
      // Linear search for the item.
      for (const void *const *APtr = SmallArray,
                      *const *E = SmallArray + NumNonEmpty;
           APtr != E; ++APtr)
        if (*APtr == Ptr)
          return true;
      return false;
    }

    // Big set case.
    return *FindBucketFor(Ptr) == Ptr;
  }

private:
  bool isSmall() const { return CurArray == SmallArray; }

  std::pair<const void *const *, bool> insert_imp_big(const void *Ptr);

  const void * const *FindBucketFor(const void *Ptr) const;
  void shrink_and_clear();

  /// Grow - Allocate a larger backing store for the buckets and move it over.
  void Grow(unsigned NewSize);

  void operator=(const SmallPtrSetImplBase &RHS) = delete;
protected:
  /// swap - Swaps the elements of two sets.
  /// Note: This method assumes that both sets have the same small size.
  void swap(SmallPtrSetImplBase &RHS);

  void CopyFrom(const SmallPtrSetImplBase &RHS);
  void MoveFrom(unsigned SmallSize, SmallPtrSetImplBase &&RHS);

private:
  /// Code shared by MoveFrom() and move constructor.
  void MoveHelper(unsigned SmallSize, SmallPtrSetImplBase &&RHS);
  /// Code shared by CopyFrom() and copy constructor.
  void CopyHelper(const SmallPtrSetImplBase &RHS);
};

/// SmallPtrSetIteratorImpl - This is the common base class shared between all
/// instances of SmallPtrSetIterator.
class SmallPtrSetIteratorImpl {
protected:
  const void *const *Bucket;
  const void *const *End;
public:
  explicit SmallPtrSetIteratorImpl(const void *const *BP, const void*const *E)
    : Bucket(BP), End(E) {
    // Skip empty/tombstone slots so the iterator starts on a real element.
    AdvanceIfNotValid();
  }

  bool operator==(const SmallPtrSetIteratorImpl &RHS) const {
    return Bucket == RHS.Bucket;
  }
  bool operator!=(const SmallPtrSetIteratorImpl &RHS) const {
    return Bucket != RHS.Bucket;
  }

protected:
  /// AdvanceIfNotValid - If the current bucket isn't valid, advance to a bucket
  /// that is.  This is guaranteed to stop because the end() bucket is marked
  /// valid.
  void AdvanceIfNotValid() {
    assert(Bucket <= End);
    while (Bucket != End &&
           (*Bucket == SmallPtrSetImplBase::getEmptyMarker() ||
            *Bucket == SmallPtrSetImplBase::getTombstoneMarker()))
      ++Bucket;
  }
};

/// SmallPtrSetIterator - This implements a const_iterator for SmallPtrSet.
template<typename PtrTy>
class SmallPtrSetIterator : public SmallPtrSetIteratorImpl {
  typedef PointerLikeTypeTraits<PtrTy> PtrTraits;
public:
  typedef PtrTy                     value_type;
  typedef PtrTy                     reference;
  typedef PtrTy                     pointer;
  typedef std::ptrdiff_t            difference_type;
  typedef std::forward_iterator_tag iterator_category;

  explicit SmallPtrSetIterator(const void *const *BP, const void *const *E)
    : SmallPtrSetIteratorImpl(BP, E) {}

  // Most methods provided by baseclass.

  // Dereference: decode the stored void* back into the user's pointer type.
  const PtrTy operator*() const {
    assert(Bucket < End);
    return PtrTraits::getFromVoidPointer(const_cast<void*>(*Bucket));
  }

  inline SmallPtrSetIterator& operator++() {          // Preincrement
    ++Bucket;
    // Skip over empty/tombstone buckets to the next live element.
    AdvanceIfNotValid();
    return *this;
  }

  SmallPtrSetIterator operator++(int) {        // Postincrement
    SmallPtrSetIterator tmp = *this; ++*this; return tmp;
  }
};

/// RoundUpToPowerOfTwo - This is a helper template that rounds N up to the next
/// power of two (which means N itself if N is already a power of two).
template<unsigned N>
struct RoundUpToPowerOfTwo;

/// RoundUpToPowerOfTwoH - If N is not a power of two, increase it.  This is a
/// helper template used to implement RoundUpToPowerOfTwo.
template<unsigned N, bool isPowerTwo>
struct RoundUpToPowerOfTwoH {
  enum { Val = N };
};
template<unsigned N>
struct RoundUpToPowerOfTwoH<N, false> {
  enum {
    // We could just use NextVal = N+1, but this converges faster.  N|(N-1) sets
    // the right-most zero bits to one all at once, e.g. 0b0011000 -> 0b0011111.
    Val = RoundUpToPowerOfTwo<(N|(N-1)) + 1>::Val
  };
};
template<unsigned N>
struct RoundUpToPowerOfTwo {
  enum { Val = RoundUpToPowerOfTwoH<N, (N&(N-1)) == 0>::Val };
};

/// \brief A templated base class for \c SmallPtrSet which provides the
/// typesafe interface that is common across all small sizes.
///
/// This is particularly useful for passing around between interface boundaries
/// to avoid encoding a particular small size in the interface boundary.
template <typename PtrType>
class SmallPtrSetImpl : public SmallPtrSetImplBase {
  typedef PointerLikeTypeTraits<PtrType> PtrTraits;

  SmallPtrSetImpl(const SmallPtrSetImpl&) = delete;
protected:
  // Constructors that forward to the base.
  SmallPtrSetImpl(const void **SmallStorage, const SmallPtrSetImpl &that)
      : SmallPtrSetImplBase(SmallStorage, that) {}
  SmallPtrSetImpl(const void **SmallStorage, unsigned SmallSize,
                  SmallPtrSetImpl &&that)
      : SmallPtrSetImplBase(SmallStorage, SmallSize, std::move(that)) {}
  explicit SmallPtrSetImpl(const void **SmallStorage, unsigned SmallSize)
      : SmallPtrSetImplBase(SmallStorage, SmallSize) {}

public:
  typedef SmallPtrSetIterator<PtrType> iterator;
  typedef SmallPtrSetIterator<PtrType> const_iterator;

  /// Inserts Ptr if and only if there is no element in the container equal to
  /// Ptr. The bool component of the returned pair is true if and only if the
  /// insertion takes place, and the iterator component of the pair points to
  /// the element equal to Ptr.
  std::pair<iterator, bool> insert(PtrType Ptr) {
    // Encode the typed pointer as a void* for the type-erased base class.
    auto p = insert_imp(PtrTraits::getAsVoidPointer(Ptr));
    return std::make_pair(iterator(p.first, EndPointer()), p.second);
  }

  /// erase - If the set contains the specified pointer, remove it and return
  /// true, otherwise return false.
  bool erase(PtrType Ptr) {
    return erase_imp(PtrTraits::getAsVoidPointer(Ptr));
  }

  /// count - Return 1 if the specified pointer is in the set, 0 otherwise.
  size_type count(PtrType Ptr) const {
    return count_imp(PtrTraits::getAsVoidPointer(Ptr)) ? 1 : 0;
  }

  /// Insert every element in the range [I, E).
  template <typename IterT>
  void insert(IterT I, IterT E) {
    for (; I != E; ++I)
      insert(*I);
  }

  inline iterator begin() const {
    return iterator(CurArray, EndPointer());
  }
  inline iterator end() const {
    const void *const *End = EndPointer();
    return iterator(End, End);
  }
};

/// SmallPtrSet - This class implements a set which is optimized for holding
/// SmallSize or less elements.
/// This internally rounds up SmallSize to the next
/// power of two if it is not already a power of two.  See the comments above
/// SmallPtrSetImplBase for details of the algorithm.
template<class PtrType, unsigned SmallSize>
class SmallPtrSet : public SmallPtrSetImpl<PtrType> {
  typedef SmallPtrSetImpl<PtrType> BaseT;

  // Make sure that SmallSize is a power of two, round up if not.
  enum { SmallSizePowTwo = RoundUpToPowerOfTwo<SmallSize>::Val };
  /// SmallStorage - Fixed size storage used in 'small mode'.
  const void *SmallStorage[SmallSizePowTwo];
public:
  SmallPtrSet() : BaseT(SmallStorage, SmallSizePowTwo) {}
  SmallPtrSet(const SmallPtrSet &that) : BaseT(SmallStorage, that) {}
  SmallPtrSet(SmallPtrSet &&that)
      : BaseT(SmallStorage, SmallSizePowTwo, std::move(that)) {}

  // Construct from an iterator range.
  template<typename It>
  SmallPtrSet(It I, It E) : BaseT(SmallStorage, SmallSizePowTwo) {
    this->insert(I, E);
  }

  SmallPtrSet<PtrType, SmallSize> &
  operator=(const SmallPtrSet<PtrType, SmallSize> &RHS) {
    if (&RHS != this)
      this->CopyFrom(RHS);
    return *this;
  }

  SmallPtrSet<PtrType, SmallSize>&
  operator=(SmallPtrSet<PtrType, SmallSize> &&RHS) {
    if (&RHS != this)
      this->MoveFrom(SmallSizePowTwo, std::move(RHS));
    return *this;
  }

  /// swap - Swaps the elements of two sets.
  void swap(SmallPtrSet<PtrType, SmallSize> &RHS) {
    SmallPtrSetImplBase::swap(RHS);
  }
};

}

namespace std {
  /// Implement std::swap in terms of SmallPtrSet swap.
  template<class T, unsigned N>
  inline void swap(llvm::SmallPtrSet<T, N> &LHS, llvm::SmallPtrSet<T, N> &RHS) {
    LHS.swap(RHS);
  }
}

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/FoldingSet.h
//===-- llvm/ADT/FoldingSet.h - Uniquing Hash Set ---------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines a hash set that can be used to remove duplication of nodes // in a graph. This code was originally created by Chris Lattner for use with // SelectionDAGCSEMap, but was isolated to provide use across the llvm code set. // //===----------------------------------------------------------------------===// #ifndef LLVM_ADT_FOLDINGSET_H #define LLVM_ADT_FOLDINGSET_H #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/iterator.h" #include "llvm/Support/Allocator.h" #include "llvm/Support/DataTypes.h" namespace llvm { /// This folding set used for two purposes: /// 1. Given information about a node we want to create, look up the unique /// instance of the node in the set. If the node already exists, return /// it, otherwise return the bucket it should be inserted into. /// 2. Given a node that has already been created, remove it from the set. /// /// This class is implemented as a single-link chained hash table, where the /// "buckets" are actually the nodes themselves (the next pointer is in the /// node). The last node points back to the bucket to simplify node removal. /// /// Any node that is to be included in the folding set must be a subclass of /// FoldingSetNode. The node class must also define a Profile method used to /// establish the unique bits of data for the node. The Profile method is /// passed a FoldingSetNodeID object which is used to gather the bits. Just /// call one of the Add* functions defined in the FoldingSetImpl::NodeID class. /// NOTE: That the folding set does not own the nodes and it is the /// responsibility of the user to dispose of the nodes. /// /// Eg. 
/// class MyNode : public FoldingSetNode { /// private: /// std::string Name; /// unsigned Value; /// public: /// MyNode(const char *N, unsigned V) : Name(N), Value(V) {} /// ... /// void Profile(FoldingSetNodeID &ID) const { /// ID.AddString(Name); /// ID.AddInteger(Value); /// } /// ... /// }; /// /// To define the folding set itself use the FoldingSet template; /// /// Eg. /// FoldingSet<MyNode> MyFoldingSet; /// /// Four public methods are available to manipulate the folding set; /// /// 1) If you have an existing node that you want add to the set but unsure /// that the node might already exist then call; /// /// MyNode *M = MyFoldingSet.GetOrInsertNode(N); /// /// If The result is equal to the input then the node has been inserted. /// Otherwise, the result is the node existing in the folding set, and the /// input can be discarded (use the result instead.) /// /// 2) If you are ready to construct a node but want to check if it already /// exists, then call FindNodeOrInsertPos with a FoldingSetNodeID of the bits to /// check; /// /// FoldingSetNodeID ID; /// ID.AddString(Name); /// ID.AddInteger(Value); /// void *InsertPoint; /// /// MyNode *M = MyFoldingSet.FindNodeOrInsertPos(ID, InsertPoint); /// /// If found then M with be non-NULL, else InsertPoint will point to where it /// should be inserted using InsertNode. /// /// 3) If you get a NULL result from FindNodeOrInsertPos then you can as a new /// node with FindNodeOrInsertPos; /// /// InsertNode(N, InsertPoint); /// /// 4) Finally, if you want to remove a node from the folding set call; /// /// bool WasRemoved = RemoveNode(N); /// /// The result indicates whether the node existed in the folding set. class FoldingSetNodeID; //===----------------------------------------------------------------------===// /// FoldingSetImpl - Implements the folding set functionality. The main /// structure is an array of buckets. Each bucket is indexed by the hash of /// the nodes it contains. 
/// The bucket itself points to the nodes contained
/// in the bucket via a singly linked list.  The last node in the list points
/// back to the bucket to facilitate node removal.
///
class FoldingSetImpl {
  virtual void anchor(); // Out of line virtual method.

protected:
  /// Buckets - Array of bucket chains.
  ///
  void **Buckets;

  /// NumBuckets - Length of the Buckets array.  Always a power of 2.
  ///
  unsigned NumBuckets;

  /// NumNodes - Number of nodes in the folding set. Growth occurs when NumNodes
  /// is greater than twice the number of buckets.
  unsigned NumNodes;

  ~FoldingSetImpl();

  explicit FoldingSetImpl(unsigned Log2InitSize = 6);

public:
  //===--------------------------------------------------------------------===//
  /// Node - This class is used to maintain the singly linked bucket list in
  /// a folding set.
  ///
  class Node {
  private:
    // NextInFoldingSetBucket - next link in the bucket list.
    void *NextInFoldingSetBucket;

  public:
    Node() : NextInFoldingSetBucket(nullptr) {}

    // Accessors
    void *getNextInBucket() const { return NextInFoldingSetBucket; }
    void SetNextInBucket(void *N) { NextInFoldingSetBucket = N; }
  };

  /// clear - Remove all nodes from the folding set.
  void clear();

  /// RemoveNode - Remove a node from the folding set, returning true if one
  /// was removed or false if the node was not in the folding set.
  bool RemoveNode(Node *N);

  /// GetOrInsertNode - If there is an existing simple Node exactly
  /// equal to the specified node, return it.  Otherwise, insert 'N' and return
  /// it instead.
  Node *GetOrInsertNode(Node *N);

  /// FindNodeOrInsertPos - Look up the node specified by ID.  If it exists,
  /// return it.  If not, return the insertion token that will make insertion
  /// faster.
  Node *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos);

  /// InsertNode - Insert the specified node into the folding set, knowing that
  /// it is not already in the folding set.  InsertPos must be obtained from
  /// FindNodeOrInsertPos.
  void InsertNode(Node *N, void *InsertPos);

  /// InsertNode - Insert the specified node into the folding set, knowing that
  /// it is not already in the folding set.
  void InsertNode(Node *N) {
    Node *Inserted = GetOrInsertNode(N);
    (void)Inserted;
    // GetOrInsertNode returns the pre-existing node when there is one, so a
    // mismatch means the caller violated the "not already present" contract.
    assert(Inserted == N && "Node already inserted!");
  }

  /// size - Returns the number of nodes in the folding set.
  unsigned size() const { return NumNodes; }

  /// empty - Returns true if there are no nodes in the folding set.
  bool empty() const { return NumNodes == 0; }

private:
  /// GrowHashTable - Double the size of the hash table and rehash everything.
  ///
  void GrowHashTable();

protected:
  /// GetNodeProfile - Instantiations of the FoldingSet template implement
  /// this function to gather data bits for the given node.
  virtual void GetNodeProfile(Node *N, FoldingSetNodeID &ID) const = 0;

  /// NodeEquals - Instantiations of the FoldingSet template implement
  /// this function to compare the given node with the given ID.
  virtual bool NodeEquals(Node *N, const FoldingSetNodeID &ID, unsigned IDHash,
                          FoldingSetNodeID &TempID) const=0;

  /// ComputeNodeHash - Instantiations of the FoldingSet template implement
  /// this function to compute a hash value for the given node.
  virtual unsigned ComputeNodeHash(Node *N, FoldingSetNodeID &TempID) const = 0;
};

//===----------------------------------------------------------------------===//

template<typename T> struct FoldingSetTrait;

/// DefaultFoldingSetTrait - This class provides default implementations
/// for FoldingSetTrait implementations.
///
template<typename T> struct DefaultFoldingSetTrait {
  static void Profile(const T &X, FoldingSetNodeID &ID) {
    X.Profile(ID);
  }
  static void Profile(T &X, FoldingSetNodeID &ID) {
    X.Profile(ID);
  }

  // Equals - Test if the profile for X would match ID, using TempID
  // to compute a temporary ID if necessary. The default implementation
  // just calls Profile and does a regular comparison. Implementations
  // can override this to provide more efficient implementations.
  static inline bool Equals(T &X, const FoldingSetNodeID &ID, unsigned IDHash,
                            FoldingSetNodeID &TempID);

  // ComputeHash - Compute a hash value for X, using TempID to
  // compute a temporary ID if necessary. The default implementation
  // just calls Profile and does a regular hash computation.
  // Implementations can override this to provide more efficient
  // implementations.
  static inline unsigned ComputeHash(T &X, FoldingSetNodeID &TempID);
};

/// FoldingSetTrait - This trait class is used to define behavior of how
/// to "profile" (in the FoldingSet parlance) an object of a given type.
/// The default behavior is to invoke a 'Profile' method on an object, but
/// through template specialization the behavior can be tailored for specific
/// types.  Combined with the FoldingSetNodeWrapper class, one can add objects
/// to FoldingSets that were not originally designed to have that behavior.
template<typename T> struct FoldingSetTrait
  : public DefaultFoldingSetTrait<T> {};

template<typename T, typename Ctx> struct ContextualFoldingSetTrait;

/// DefaultContextualFoldingSetTrait - Like DefaultFoldingSetTrait, but
/// for ContextualFoldingSets.
template<typename T, typename Ctx>
struct DefaultContextualFoldingSetTrait {
  static void Profile(T &X, FoldingSetNodeID &ID, Ctx Context) {
    X.Profile(ID, Context);
  }
  static inline bool Equals(T &X, const FoldingSetNodeID &ID, unsigned IDHash,
                            FoldingSetNodeID &TempID, Ctx Context);
  static inline unsigned ComputeHash(T &X, FoldingSetNodeID &TempID,
                                     Ctx Context);
};

/// ContextualFoldingSetTrait - Like FoldingSetTrait, but for
/// ContextualFoldingSets.
template<typename T, typename Ctx> struct ContextualFoldingSetTrait
  : public DefaultContextualFoldingSetTrait<T, Ctx> {};

//===--------------------------------------------------------------------===//
/// FoldingSetNodeIDRef - This class describes a reference to an interned
/// FoldingSetNodeID, which can be a useful to store node id data rather
/// than using plain FoldingSetNodeIDs, since the 32-element SmallVector
/// is often much larger than necessary, and the possibility of heap
/// allocation means it requires a non-trivial destructor call.
class FoldingSetNodeIDRef {
  // Non-owning view over an interned run of profile words.
  const unsigned *Data;
  size_t Size;
public:
  FoldingSetNodeIDRef() : Data(nullptr), Size(0) {}
  FoldingSetNodeIDRef(const unsigned *D, size_t S) : Data(D), Size(S) {}

  /// ComputeHash - Compute a strong hash value for this FoldingSetNodeIDRef,
  /// used to lookup the node in the FoldingSetImpl.
  unsigned ComputeHash() const;

  bool operator==(FoldingSetNodeIDRef) const;

  bool operator!=(FoldingSetNodeIDRef RHS) const { return !(*this == RHS); }

  /// Used to compare the "ordering" of two nodes as defined by the
  /// profiled bits and their ordering defined by memcmp().
  bool operator<(FoldingSetNodeIDRef) const;

  const unsigned *getData() const { return Data; }
  size_t getSize() const { return Size; }
};

//===--------------------------------------------------------------------===//
/// FoldingSetNodeID - This class is used to gather all the unique data bits of
/// a node.  When all the bits are gathered this class is used to produce a
/// hash value for the node.
///
class FoldingSetNodeID {
  /// Bits - Vector of all the data bits that make the node unique.
  /// Use a SmallVector to avoid a heap allocation in the common case.
  SmallVector<unsigned, 32> Bits;

public:
  FoldingSetNodeID() {}

  FoldingSetNodeID(FoldingSetNodeIDRef Ref)
    : Bits(Ref.getData(), Ref.getData() + Ref.getSize()) {}

  /// Add* - Add various data types to Bit data.
  ///
  void AddPointer(const void *Ptr);
  void AddInteger(signed I);
  void AddInteger(unsigned I);
  void AddInteger(long I);
  void AddInteger(unsigned long I);
  void AddInteger(long long I);
  void AddInteger(unsigned long long I);
  void AddBoolean(bool B) { AddInteger(B ? 1U : 0U); }
  void AddString(StringRef String);
  void AddNodeID(const FoldingSetNodeID &ID);

  // Profile an arbitrary type through its FoldingSetTrait specialization.
  template <typename T>
  inline void Add(const T &x) { FoldingSetTrait<T>::Profile(x, *this); }

  /// clear - Clear the accumulated profile, allowing this FoldingSetNodeID
  /// object to be used to compute a new profile.
  inline void clear() { Bits.clear(); }

  /// ComputeHash - Compute a strong hash value for this FoldingSetNodeID, used
  /// to lookup the node in the FoldingSetImpl.
  unsigned ComputeHash() const;

  /// operator== - Used to compare two nodes to each other.
  ///
  bool operator==(const FoldingSetNodeID &RHS) const;
  bool operator==(const FoldingSetNodeIDRef RHS) const;

  bool operator!=(const FoldingSetNodeID &RHS) const { return !(*this == RHS); }
  bool operator!=(const FoldingSetNodeIDRef RHS) const {
    return !(*this == RHS);
  }

  /// Used to compare the "ordering" of two nodes as defined by the
  /// profiled bits and their ordering defined by memcmp().
  bool operator<(const FoldingSetNodeID &RHS) const;
  bool operator<(const FoldingSetNodeIDRef RHS) const;

  /// Intern - Copy this node's data to a memory region allocated from the
  /// given allocator and return a FoldingSetNodeIDRef describing the
  /// interned data.
  FoldingSetNodeIDRef Intern(BumpPtrAllocator &Allocator) const;
};

// Convenience type to hide the implementation of the folding set.
typedef FoldingSetImpl::Node FoldingSetNode;
template<class T> class FoldingSetIterator;
template<class T> class FoldingSetBucketIterator;

// Definitions of FoldingSetTrait and ContextualFoldingSetTrait functions, which
// require the definition of FoldingSetNodeID.
// Default Equals: re-profile X into TempID and compare the accumulated bits.
template<typename T>
inline bool
DefaultFoldingSetTrait<T>::Equals(T &X, const FoldingSetNodeID &ID,
                                  unsigned /*IDHash*/,
                                  FoldingSetNodeID &TempID) {
  FoldingSetTrait<T>::Profile(X, TempID);
  return TempID == ID;
}

// Default ComputeHash: profile X into TempID, then hash the profile.
template<typename T>
inline unsigned
DefaultFoldingSetTrait<T>::ComputeHash(T &X, FoldingSetNodeID &TempID) {
  FoldingSetTrait<T>::Profile(X, TempID);
  return TempID.ComputeHash();
}

// Contextual variants of the above; identical except the Context argument is
// threaded through to the contextual Profile call.
template<typename T, typename Ctx>
inline bool
DefaultContextualFoldingSetTrait<T, Ctx>::Equals(T &X,
                                                 const FoldingSetNodeID &ID,
                                                 unsigned /*IDHash*/,
                                                 FoldingSetNodeID &TempID,
                                                 Ctx Context) {
  ContextualFoldingSetTrait<T, Ctx>::Profile(X, TempID, Context);
  return TempID == ID;
}

template<typename T, typename Ctx>
inline unsigned
DefaultContextualFoldingSetTrait<T, Ctx>::ComputeHash(T &X,
                                                      FoldingSetNodeID &TempID,
                                                      Ctx Context) {
  ContextualFoldingSetTrait<T, Ctx>::Profile(X, TempID, Context);
  return TempID.ComputeHash();
}

//===----------------------------------------------------------------------===//
/// FoldingSet - This template class is used to instantiate a specialized
/// implementation of the folding set to the node class T.  T must be a
/// subclass of FoldingSetNode and implement a Profile function.
///
template <class T> class FoldingSet final : public FoldingSetImpl {
private:
  /// GetNodeProfile - Each instantiatation of the FoldingSet needs to provide a
  /// way to convert nodes into a unique specifier.
  void GetNodeProfile(Node *N, FoldingSetNodeID &ID) const override {
    T *TN = static_cast<T *>(N);
    FoldingSetTrait<T>::Profile(*TN, ID);
  }

  /// NodeEquals - Instantiations may optionally provide a way to compare a
  /// node with a specified ID.
  bool NodeEquals(Node *N, const FoldingSetNodeID &ID, unsigned IDHash,
                  FoldingSetNodeID &TempID) const override {
    T *TN = static_cast<T *>(N);
    return FoldingSetTrait<T>::Equals(*TN, ID, IDHash, TempID);
  }

  /// ComputeNodeHash - Instantiations may optionally provide a way to compute a
  /// hash value directly from a node.
  unsigned ComputeNodeHash(Node *N, FoldingSetNodeID &TempID) const override {
    T *TN = static_cast<T *>(N);
    return FoldingSetTrait<T>::ComputeHash(*TN, TempID);
  }

public:
  explicit FoldingSet(unsigned Log2InitSize = 6)
      : FoldingSetImpl(Log2InitSize) {}

  typedef FoldingSetIterator<T> iterator;
  iterator begin() { return iterator(Buckets); }
  iterator end() { return iterator(Buckets+NumBuckets); }

  typedef FoldingSetIterator<const T> const_iterator;
  const_iterator begin() const { return const_iterator(Buckets); }
  const_iterator end() const { return const_iterator(Buckets+NumBuckets); }

  typedef FoldingSetBucketIterator<T> bucket_iterator;

  // Bucket iteration: NumBuckets is a power of two, so (hash & (NumBuckets-1))
  // selects the bucket for this hash.
  bucket_iterator bucket_begin(unsigned hash) {
    return bucket_iterator(Buckets + (hash & (NumBuckets-1)));
  }

  bucket_iterator bucket_end(unsigned hash) {
    return bucket_iterator(Buckets + (hash & (NumBuckets-1)), true);
  }

  /// GetOrInsertNode - If there is an existing simple Node exactly
  /// equal to the specified node, return it.  Otherwise, insert 'N' and
  /// return it instead.
  T *GetOrInsertNode(Node *N) {
    return static_cast<T *>(FoldingSetImpl::GetOrInsertNode(N));
  }

  /// FindNodeOrInsertPos - Look up the node specified by ID.  If it exists,
  /// return it.  If not, return the insertion token that will make insertion
  /// faster.
  T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
    return static_cast<T *>(FoldingSetImpl::FindNodeOrInsertPos(ID, InsertPos));
  }
};

//===----------------------------------------------------------------------===//
/// ContextualFoldingSet - This template class is a further refinement
/// of FoldingSet which provides a context argument when calling
/// Profile on its nodes.  Currently, that argument is fixed at
/// initialization time.
///
/// T must be a subclass of FoldingSetNode and implement a Profile
/// function with signature
///   void Profile(llvm::FoldingSetNodeID &, Ctx);
template <class T, class Ctx>
class ContextualFoldingSet final : public FoldingSetImpl {
  // Unfortunately, this can't derive from FoldingSet<T> because the
  // construction vtable for FoldingSet<T> requires
  // FoldingSet<T>::GetNodeProfile to be instantiated, which in turn
  // requires a single-argument T::Profile().

private:
  Ctx Context; // Extra argument forwarded to every contextual Profile call.

  /// GetNodeProfile - Each instantiatation of the FoldingSet needs to provide a
  /// way to convert nodes into a unique specifier.
  void GetNodeProfile(FoldingSetImpl::Node *N,
                      FoldingSetNodeID &ID) const override {
    T *TN = static_cast<T *>(N);
    ContextualFoldingSetTrait<T, Ctx>::Profile(*TN, ID, Context);
  }

  bool NodeEquals(FoldingSetImpl::Node *N, const FoldingSetNodeID &ID,
                  unsigned IDHash, FoldingSetNodeID &TempID) const override {
    T *TN = static_cast<T *>(N);
    return ContextualFoldingSetTrait<T, Ctx>::Equals(*TN, ID, IDHash, TempID,
                                                     Context);
  }

  unsigned ComputeNodeHash(FoldingSetImpl::Node *N,
                           FoldingSetNodeID &TempID) const override {
    T *TN = static_cast<T *>(N);
    return ContextualFoldingSetTrait<T, Ctx>::ComputeHash(*TN, TempID, Context);
  }

public:
  explicit ContextualFoldingSet(Ctx Context, unsigned Log2InitSize = 6)
      : FoldingSetImpl(Log2InitSize), Context(Context) {}

  Ctx getContext() const { return Context; }

  typedef FoldingSetIterator<T> iterator;
  iterator begin() { return iterator(Buckets); }
  iterator end() { return iterator(Buckets+NumBuckets); }

  typedef FoldingSetIterator<const T> const_iterator;
  const_iterator begin() const { return const_iterator(Buckets); }
  const_iterator end() const { return const_iterator(Buckets+NumBuckets); }

  typedef FoldingSetBucketIterator<T> bucket_iterator;

  // Bucket iteration; NumBuckets is a power of two, so masking selects the
  // bucket for this hash.
  bucket_iterator bucket_begin(unsigned hash) {
    return bucket_iterator(Buckets + (hash & (NumBuckets-1)));
  }

  bucket_iterator bucket_end(unsigned hash) {
    return bucket_iterator(Buckets + (hash & (NumBuckets-1)), true);
  }

  /// GetOrInsertNode - If there is an existing simple Node exactly
  /// equal to the specified node, return it.  Otherwise, insert 'N'
  /// and return it instead.
  T *GetOrInsertNode(Node *N) {
    return static_cast<T *>(FoldingSetImpl::GetOrInsertNode(N));
  }

  /// FindNodeOrInsertPos - Look up the node specified by ID.  If it
  /// exists, return it.  If not, return the insertion token that will
  /// make insertion faster.
  T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
    return static_cast<T *>(FoldingSetImpl::FindNodeOrInsertPos(ID, InsertPos));
  }
};

//===----------------------------------------------------------------------===//
/// FoldingSetVector - This template class combines a FoldingSet and a vector
/// to provide the interface of FoldingSet but with deterministic iteration
/// order based on the insertion order.  T must be a subclass of FoldingSetNode
/// and implement a Profile function.
template <class T, class VectorT = SmallVector<T*, 8> >
class FoldingSetVector {
  FoldingSet<T> Set; // Provides the uniquing.
  VectorT Vector;    // Records insertion order for deterministic iteration.

public:
  explicit FoldingSetVector(unsigned Log2InitSize = 6)
      : Set(Log2InitSize) {
  }

  // Iteration goes through the vector, not the set, so it is deterministic.
  typedef pointee_iterator<typename VectorT::iterator> iterator;
  iterator begin() { return Vector.begin(); }
  iterator end()   { return Vector.end(); }

  typedef pointee_iterator<typename VectorT::const_iterator> const_iterator;
  const_iterator begin() const { return Vector.begin(); }
  const_iterator end()   const { return Vector.end(); }

  /// clear - Remove all nodes from the folding set.
  void clear() { Set.clear(); Vector.clear(); }

  /// FindNodeOrInsertPos - Look up the node specified by ID.  If it exists,
  /// return it.  If not, return the insertion token that will make insertion
  /// faster.
  T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
    return Set.FindNodeOrInsertPos(ID, InsertPos);
  }

  /// GetOrInsertNode - If there is an existing simple Node exactly
  /// equal to the specified node, return it.  Otherwise, insert 'N' and
  /// return it instead.
  T *GetOrInsertNode(T *N) {
    T *Result = Set.GetOrInsertNode(N);
    // Only record N in the vector when it was actually inserted (i.e. no
    // pre-existing equal node was found).
    if (Result == N) Vector.push_back(N);
    return Result;
  }

  /// InsertNode - Insert the specified node into the folding set, knowing that
  /// it is not already in the folding set.  InsertPos must be obtained from
  /// FindNodeOrInsertPos.
  void InsertNode(T *N, void *InsertPos) {
    Set.InsertNode(N, InsertPos);
    Vector.push_back(N);
  }

  /// InsertNode - Insert the specified node into the folding set, knowing that
  /// it is not already in the folding set.
  void InsertNode(T *N) {
    Set.InsertNode(N);
    Vector.push_back(N);
  }

  /// size - Returns the number of nodes in the folding set.
  unsigned size() const { return Set.size(); }

  /// empty - Returns true if there are no nodes in the folding set.
  bool empty() const { return Set.empty(); }
};

//===----------------------------------------------------------------------===//
/// FoldingSetIteratorImpl - This is the common iterator support shared by all
/// folding sets, which knows how to walk the folding set hash table.
class FoldingSetIteratorImpl {
protected:
  FoldingSetNode *NodePtr; // Node the iterator currently points at.

  // Constructor and advance() are defined out of line (they need the hash
  // table walking logic from the FoldingSet implementation file).
  FoldingSetIteratorImpl(void **Bucket);

  void advance();

public:
  bool operator==(const FoldingSetIteratorImpl &RHS) const {
    return NodePtr == RHS.NodePtr;
  }
  bool operator!=(const FoldingSetIteratorImpl &RHS) const {
    return NodePtr != RHS.NodePtr;
  }
};

// Typed facade over FoldingSetIteratorImpl; only adds casts to T.
template<class T> class FoldingSetIterator : public FoldingSetIteratorImpl {
public:
  explicit FoldingSetIterator(void **Bucket) : FoldingSetIteratorImpl(Bucket) {}

  T &operator*() const {
    return *static_cast<T*>(NodePtr);
  }

  T *operator->() const {
    return static_cast<T*>(NodePtr);
  }

  inline FoldingSetIterator &operator++() {          // Preincrement
    advance();
    return *this;
  }
  FoldingSetIterator operator++(int) {        // Postincrement
    FoldingSetIterator tmp = *this; ++*this; return tmp;
  }
};

//===----------------------------------------------------------------------===//
/// FoldingSetBucketIteratorImpl - This is the common bucket iterator support
/// shared by all folding sets, which knows how to walk a particular bucket
/// of a folding set hash table.
class FoldingSetBucketIteratorImpl {
protected:
  void *Ptr; // Current position in the bucket chain.

  explicit FoldingSetBucketIteratorImpl(void **Bucket);

  // The bool parameter only selects this "end" overload; it points Ptr at the
  // bucket head itself rather than the first node.
  FoldingSetBucketIteratorImpl(void **Bucket, bool) : Ptr(Bucket) {}

  void advance() {
    void *Probe = static_cast<FoldingSetNode*>(Ptr)->getNextInBucket();
    // The low bit of a next-in-bucket link is used as a tag; mask it off to
    // recover the actual pointer.
    uintptr_t x = reinterpret_cast<uintptr_t>(Probe) & ~0x1;
    Ptr = reinterpret_cast<void*>(x);
  }

public:
  bool operator==(const FoldingSetBucketIteratorImpl &RHS) const {
    return Ptr == RHS.Ptr;
  }
  bool operator!=(const FoldingSetBucketIteratorImpl &RHS) const {
    return Ptr != RHS.Ptr;
  }
};

// Typed facade over FoldingSetBucketIteratorImpl; only adds casts to T.
template<class T>
class FoldingSetBucketIterator : public FoldingSetBucketIteratorImpl {
public:
  explicit FoldingSetBucketIterator(void **Bucket) :
    FoldingSetBucketIteratorImpl(Bucket) {}

  FoldingSetBucketIterator(void **Bucket, bool) :
    FoldingSetBucketIteratorImpl(Bucket, true) {}

  T &operator*() const { return *static_cast<T*>(Ptr); }
  T *operator->() const { return static_cast<T*>(Ptr); }

  inline FoldingSetBucketIterator &operator++() { // Preincrement
    advance();
    return *this;
  }
  FoldingSetBucketIterator operator++(int) {      // Postincrement
    FoldingSetBucketIterator tmp = *this; ++*this; return tmp;
  }
};

//===----------------------------------------------------------------------===//
/// FoldingSetNodeWrapper - This template class is used to "wrap" arbitrary
/// types in an enclosing object so that they can be inserted into FoldingSets.
template <typename T>
class FoldingSetNodeWrapper : public FoldingSetNode {
  T data;

public:
  // Forward any constructor arguments to the wrapped value.
  template <typename... Ts>
  explicit FoldingSetNodeWrapper(Ts &&... Args)
      : data(std::forward<Ts>(Args)...) {}

  void Profile(FoldingSetNodeID &ID) { FoldingSetTrait<T>::Profile(data, ID); }

  T &getValue() { return data; }
  const T &getValue() const { return data; }

  operator T&() { return data; }
  operator const T&() const { return data; }
};

//===----------------------------------------------------------------------===//
/// FastFoldingSetNode - This is a subclass of FoldingSetNode which stores
/// a FoldingSetNodeID value rather than requiring the node to recompute it
/// each time it is needed.  This trades space for speed (which can be
/// significant if the ID is long), and it also permits nodes to drop
/// information that would otherwise only be required for recomputing an ID.
class FastFoldingSetNode : public FoldingSetNode {
  FoldingSetNodeID FastID; // Cached profile, captured at construction time.

protected:
  explicit FastFoldingSetNode(const FoldingSetNodeID &ID) : FastID(ID) {}

public:
  void Profile(FoldingSetNodeID &ID) const { ID.AddNodeID(FastID); }
};

//===----------------------------------------------------------------------===//
// Partial specializations of FoldingSetTrait.

// Pointers profile as the pointer value itself.
template<typename T> struct FoldingSetTrait<T*> {
  static inline void Profile(T *X, FoldingSetNodeID &ID) {
    ID.AddPointer(X);
  }
};

// Pairs profile as the concatenation of both members' profiles.
template <typename T1, typename T2>
struct FoldingSetTrait<std::pair<T1, T2>> {
  static inline void Profile(const std::pair<T1, T2> &P,
                             llvm::FoldingSetNodeID &ID) {
    ID.Add(P.first);
    ID.Add(P.second);
  }
};

} // End of namespace llvm.

#endif
0
repos/DirectXShaderCompiler/include/llvm
repos/DirectXShaderCompiler/include/llvm/ADT/APSInt.h
//===-- llvm/ADT/APSInt.h - Arbitrary Precision Signed Int -----*- C++ -*--===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the APSInt class, which is a simple class that
// represents an arbitrary sized integer that knows its signedness.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_APSINT_H
#define LLVM_ADT_APSINT_H

#include "llvm/ADT/APInt.h"

namespace llvm {

/// An arbitrary-precision integer paired with a signedness flag.  All of the
/// arithmetic and comparison operators dispatch to the signed or unsigned
/// APInt operation according to that flag, and (except where noted) assert
/// that both operands agree on signedness.
class APSInt : public APInt {
  bool IsUnsigned; // True if this value is interpreted as unsigned.

public:
  /// Default constructor that creates an uninitialized APInt.
  explicit APSInt() : IsUnsigned(false) {}

  /// APSInt ctor - Create an APSInt with the specified width, default to
  /// unsigned.
  explicit APSInt(uint32_t BitWidth, bool isUnsigned = true)
      : APInt(BitWidth, 0), IsUnsigned(isUnsigned) {}

  explicit APSInt(APInt I, bool isUnsigned = true)
      : APInt(std::move(I)), IsUnsigned(isUnsigned) {}

  /// Construct an APSInt from a string representation.
  ///
  /// This constructor interprets the string \p Str using the radix of 10.
  /// The interpretation stops at the end of the string. The bit width of the
  /// constructed APSInt is determined automatically.
  ///
  /// \param Str the string to be interpreted.
  explicit APSInt(StringRef Str);

  APSInt &operator=(APInt RHS) {
    // Retain our current sign.
    APInt::operator=(std::move(RHS));
    return *this;
  }

  APSInt &operator=(uint64_t RHS) {
    // Retain our current sign.
    APInt::operator=(RHS);
    return *this;
  }

  // Query sign information.
  bool isSigned() const { return !IsUnsigned; }
  bool isUnsigned() const { return IsUnsigned; }
  void setIsUnsigned(bool Val) { IsUnsigned = Val; }
  void setIsSigned(bool Val) { IsUnsigned = !Val; }

  /// toString - Append this APSInt to the specified SmallString.
  void toString(SmallVectorImpl<char> &Str, unsigned Radix = 10) const {
    APInt::toString(Str, Radix, isSigned());
  }
  /// toString - Converts an APInt to a std::string.  This is an inefficient
  /// method; you should prefer passing in a SmallString instead.
  std::string toString(unsigned Radix) const {
    return APInt::toString(Radix, isSigned());
  }
  using APInt::toString;

  /// \brief Get the correctly-extended \c int64_t value.
  int64_t getExtValue() const {
    assert(getMinSignedBits() <= 64 && "Too many bits for int64_t");
    return isSigned() ? getSExtValue() : getZExtValue();
  }

  APSInt LLVM_ATTRIBUTE_UNUSED_RESULT trunc(uint32_t width) const {
    return APSInt(APInt::trunc(width), IsUnsigned);
  }

  // extend/extOrTrunc: zero-extend for unsigned values, sign-extend for
  // signed ones, preserving the signedness flag.
  APSInt LLVM_ATTRIBUTE_UNUSED_RESULT extend(uint32_t width) const {
    if (IsUnsigned)
      return APSInt(zext(width), IsUnsigned);
    else
      return APSInt(sext(width), IsUnsigned);
  }

  APSInt LLVM_ATTRIBUTE_UNUSED_RESULT extOrTrunc(uint32_t width) const {
    if (IsUnsigned)
      return APSInt(zextOrTrunc(width), IsUnsigned);
    else
      return APSInt(sextOrTrunc(width), IsUnsigned);
  }

  // Division and remainder pick the u/s variant of the APInt operation.
  const APSInt &operator%=(const APSInt &RHS) {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    if (IsUnsigned)
      *this = urem(RHS);
    else
      *this = srem(RHS);
    return *this;
  }
  const APSInt &operator/=(const APSInt &RHS) {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    if (IsUnsigned)
      *this = udiv(RHS);
    else
      *this = sdiv(RHS);
    return *this;
  }
  APSInt operator%(const APSInt &RHS) const {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    return IsUnsigned ? APSInt(urem(RHS), true) : APSInt(srem(RHS), false);
  }
  APSInt operator/(const APSInt &RHS) const {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    return IsUnsigned ? APSInt(udiv(RHS), true) : APSInt(sdiv(RHS), false);
  }

  // Right shift is logical for unsigned values, arithmetic for signed ones.
  APSInt operator>>(unsigned Amt) const {
    return IsUnsigned ?
      APSInt(lshr(Amt), true) :
      APSInt(ashr(Amt), false);
  }
  APSInt& operator>>=(unsigned Amt) {
    *this = *this >> Amt;
    return *this;
  }

  // Comparisons dispatch to the u/s predicate matching the signedness.
  inline bool operator<(const APSInt& RHS) const {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    return IsUnsigned ? ult(RHS) : slt(RHS);
  }
  inline bool operator>(const APSInt& RHS) const {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    return IsUnsigned ? ugt(RHS) : sgt(RHS);
  }
  inline bool operator<=(const APSInt& RHS) const {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    return IsUnsigned ? ule(RHS) : sle(RHS);
  }
  inline bool operator>=(const APSInt& RHS) const {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    return IsUnsigned ? uge(RHS) : sge(RHS);
  }
  inline bool operator==(const APSInt& RHS) const {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    return eq(RHS);
  }
  inline bool operator!=(const APSInt& RHS) const {
    return !((*this) == RHS);
  }

  // Mixed comparisons against int64_t go through compareValues, which
  // handles bit-width and signedness mismatches.
  bool operator==(int64_t RHS) const {
    return compareValues(*this, get(RHS)) == 0;
  }
  bool operator!=(int64_t RHS) const {
    return compareValues(*this, get(RHS)) != 0;
  }
  bool operator<=(int64_t RHS) const {
    return compareValues(*this, get(RHS)) <= 0;
  }
  bool operator>=(int64_t RHS) const {
    return compareValues(*this, get(RHS)) >= 0;
  }
  bool operator<(int64_t RHS) const {
    return compareValues(*this, get(RHS)) < 0;
  }
  bool operator>(int64_t RHS) const {
    return compareValues(*this, get(RHS)) > 0;
  }

  // The remaining operators just wrap the logic of APInt, but retain the
  // signedness information.

  APSInt operator<<(unsigned Bits) const {
    return APSInt(static_cast<const APInt&>(*this) << Bits, IsUnsigned);
  }
  APSInt& operator<<=(unsigned Amt) {
    *this = *this << Amt;
    return *this;
  }

  APSInt& operator++() {
    ++(static_cast<APInt&>(*this));
    return *this;
  }
  APSInt& operator--() {
    --(static_cast<APInt&>(*this));
    return *this;
  }
  APSInt operator++(int) {
    return APSInt(++static_cast<APInt&>(*this), IsUnsigned);
  }
  APSInt operator--(int) {
    return APSInt(--static_cast<APInt&>(*this), IsUnsigned);
  }
  APSInt operator-() const {
    return APSInt(-static_cast<const APInt&>(*this), IsUnsigned);
  }
  APSInt& operator+=(const APSInt& RHS) {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    static_cast<APInt&>(*this) += RHS;
    return *this;
  }
  APSInt& operator-=(const APSInt& RHS) {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    static_cast<APInt&>(*this) -= RHS;
    return *this;
  }
  APSInt& operator*=(const APSInt& RHS) {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    static_cast<APInt&>(*this) *= RHS;
    return *this;
  }
  APSInt& operator&=(const APSInt& RHS) {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    static_cast<APInt&>(*this) &= RHS;
    return *this;
  }
  APSInt& operator|=(const APSInt& RHS) {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    static_cast<APInt&>(*this) |= RHS;
    return *this;
  }
  APSInt& operator^=(const APSInt& RHS) {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    static_cast<APInt&>(*this) ^= RHS;
    return *this;
  }

  APSInt operator&(const APSInt& RHS) const {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    return APSInt(static_cast<const APInt&>(*this) & RHS, IsUnsigned);
  }
  APSInt LLVM_ATTRIBUTE_UNUSED_RESULT And(const APSInt& RHS) const {
    return this->operator&(RHS);
  }

  APSInt operator|(const APSInt& RHS) const {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    return APSInt(static_cast<const APInt&>(*this) | RHS, IsUnsigned);
  }
  APSInt LLVM_ATTRIBUTE_UNUSED_RESULT Or(const APSInt& RHS) const {
    return this->operator|(RHS);
  }

  APSInt operator^(const APSInt& RHS) const {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    return APSInt(static_cast<const APInt&>(*this) ^ RHS, IsUnsigned);
  }
  APSInt LLVM_ATTRIBUTE_UNUSED_RESULT Xor(const APSInt& RHS) const {
    return this->operator^(RHS);
  }

  APSInt operator*(const APSInt& RHS) const {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    return APSInt(static_cast<const APInt&>(*this) * RHS, IsUnsigned);
  }
  APSInt operator+(const APSInt& RHS) const {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    return APSInt(static_cast<const APInt&>(*this) + RHS, IsUnsigned);
  }
  APSInt operator-(const APSInt& RHS) const {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    return APSInt(static_cast<const APInt&>(*this) - RHS, IsUnsigned);
  }
  APSInt operator~() const {
    return APSInt(~static_cast<const APInt&>(*this), IsUnsigned);
  }

  /// getMaxValue - Return the APSInt representing the maximum integer value
  /// with the given bit width and signedness.
  static APSInt getMaxValue(uint32_t numBits, bool Unsigned) {
    return APSInt(Unsigned ? APInt::getMaxValue(numBits)
                           : APInt::getSignedMaxValue(numBits), Unsigned);
  }

  /// getMinValue - Return the APSInt representing the minimum integer value
  /// with the given bit width and signedness.
  static APSInt getMinValue(uint32_t numBits, bool Unsigned) {
    return APSInt(Unsigned ? APInt::getMinValue(numBits)
                           : APInt::getSignedMinValue(numBits), Unsigned);
  }

  /// \brief Determine if two APSInts have the same value, zero- or
  /// sign-extending as needed.
  static bool isSameValue(const APSInt &I1, const APSInt &I2) {
    return !compareValues(I1, I2);
  }

  /// \brief Compare underlying values of two numbers.  Returns <0, 0, or >0
  /// (like memcmp), normalizing bit-width mismatches by extension first.
  static int compareValues(const APSInt &I1, const APSInt &I2) {
    // Same width and signedness: the ordinary operators apply directly.
    if (I1.getBitWidth() == I2.getBitWidth() && I1.isSigned() == I2.isSigned())
      return I1 == I2 ? 0 : I1 > I2 ? 1 : -1;

    // Check for a bit-width mismatch.
    if (I1.getBitWidth() > I2.getBitWidth())
      return compareValues(I1, I2.extend(I1.getBitWidth()));
    else if (I2.getBitWidth() > I1.getBitWidth())
      return compareValues(I1.extend(I2.getBitWidth()), I2);

    // We have a signedness mismatch. Check for negative values and do an
    // unsigned compare if both are positive.
    if (I1.isSigned()) {
      assert(!I2.isSigned() && "Expected signed mismatch");
      if (I1.isNegative())
        return -1;
    } else {
      assert(I2.isSigned() && "Expected signed mismatch");
      if (I2.isNegative())
        return 1;
    }

    return I1.eq(I2) ? 0 : I1.ugt(I2) ? 1 : -1;
  }

  // get/getUnsigned - Build a 64-bit APSInt from a native integer.
  static APSInt get(int64_t X) { return APSInt(APInt(64, X), false); }
  static APSInt getUnsigned(uint64_t X) { return APSInt(APInt(64, X), true); }

  /// Profile - Used to insert APSInt objects, or objects that contain APSInt
  /// objects, into FoldingSets.
  void Profile(FoldingSetNodeID& ID) const;
};

// Symmetric int64_t-on-the-left comparisons, expressed via the member
// operators with the arguments flipped.
inline bool operator==(int64_t V1, const APSInt &V2) { return V2 == V1; }
inline bool operator!=(int64_t V1, const APSInt &V2) { return V2 != V1; }
inline bool operator<=(int64_t V1, const APSInt &V2) { return V2 >= V1; }
inline bool operator>=(int64_t V1, const APSInt &V2) { return V2 <= V1; }
inline bool operator<(int64_t V1, const APSInt &V2) { return V2 > V1; }
inline bool operator>(int64_t V1, const APSInt &V2) { return V2 < V1; }

inline raw_ostream &operator<<(raw_ostream &OS, const APSInt &I) {
  I.print(OS, I.isSigned());
  return OS;
}

} // end namespace llvm

#endif