Unnamed: 0 | repo_id | file_path | content
---|---|---|---|
0 | repos/DirectXShaderCompiler/include/llvm/ExecutionEngine | repos/DirectXShaderCompiler/include/llvm/ExecutionEngine/Orc/NullResolver.h | //===------ NullResolver.h - Reject symbol lookup requests ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Defines a RuntimeDyld::SymbolResolver subclass that rejects all symbol
// resolution requests, for clients that have no cross-object fixups.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_ORC_NULLRESOLVER_H
#define LLVM_EXECUTIONENGINE_ORC_NULLRESOLVER_H
#include "llvm/ExecutionEngine/RuntimeDyld.h"
namespace llvm {
namespace orc {
/// SymbolResolver implementation that rejects all resolution requests.
/// Useful for clients that have no cross-object fixups.
class NullResolver : public RuntimeDyld::SymbolResolver {
public:
RuntimeDyld::SymbolInfo findSymbol(const std::string &Name) final;
RuntimeDyld::SymbolInfo
findSymbolInLogicalDylib(const std::string &Name) final;
};
} // End namespace orc.
} // End namespace llvm.
#endif // LLVM_EXECUTIONENGINE_ORC_NULLRESOLVER_H
|
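A minimal sketch of how NullResolver is typically used: it is passed as the Resolver argument to a layer's addObjectSet/addModuleSet when the code being added has no external references, so any lookup reaching it is an error by construction. The helper name below is illustrative.

```cpp
#include "llvm/ADT/STLExtras.h"                      // llvm::make_unique
#include "llvm/ExecutionEngine/Orc/NullResolver.h"
#include <memory>

using namespace llvm;
using namespace llvm::orc;

// Illustrative helper: a resolver for fully self-contained code. Any
// findSymbol/findSymbolInLogicalDylib call on it signals a lookup that
// should never have happened.
std::unique_ptr<RuntimeDyld::SymbolResolver> makeNullResolver() {
  return llvm::make_unique<NullResolver>();
}
```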
0 | repos/DirectXShaderCompiler/include/llvm/ExecutionEngine | repos/DirectXShaderCompiler/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h | //===----- IRTransformLayer.h - Run all IR through a functor ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Run all IR passed in through a user supplied functor.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_ORC_IRTRANSFORMLAYER_H
#define LLVM_EXECUTIONENGINE_ORC_IRTRANSFORMLAYER_H
#include "JITSymbol.h"
namespace llvm {
namespace orc {
/// @brief IR mutating layer.
///
/// This layer accepts sets of LLVM IR Modules (via addModuleSet). It
/// immediately applies the user supplied functor to each module, then adds
/// the set of transformed modules to the layer below.
template <typename BaseLayerT, typename TransformFtor>
class IRTransformLayer {
public:
/// @brief Handle to a set of added modules.
typedef typename BaseLayerT::ModuleSetHandleT ModuleSetHandleT;
/// @brief Construct an IRTransformLayer with the given BaseLayer
IRTransformLayer(BaseLayerT &BaseLayer,
TransformFtor Transform = TransformFtor())
: BaseLayer(BaseLayer), Transform(std::move(Transform)) {}
/// @brief Apply the transform functor to each module in the module set, then
/// add the resulting set of modules to the base layer, along with the
/// memory manager and symbol resolver.
///
/// @return A handle for the added modules.
template <typename ModuleSetT, typename MemoryManagerPtrT,
typename SymbolResolverPtrT>
ModuleSetHandleT addModuleSet(ModuleSetT Ms,
MemoryManagerPtrT MemMgr,
SymbolResolverPtrT Resolver) {
for (auto I = Ms.begin(), E = Ms.end(); I != E; ++I)
*I = Transform(std::move(*I));
return BaseLayer.addModuleSet(std::move(Ms), std::move(MemMgr),
std::move(Resolver));
}
/// @brief Remove the module set associated with the handle H.
void removeModuleSet(ModuleSetHandleT H) { BaseLayer.removeModuleSet(H); }
/// @brief Search for the given named symbol.
/// @param Name The name of the symbol to search for.
/// @param ExportedSymbolsOnly If true, search only for exported symbols.
/// @return A handle for the given named symbol, if it exists.
JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
}
/// @brief Get the address of the given symbol in the context of the set of
/// modules represented by the handle H. This call is forwarded to the
/// base layer's implementation.
/// @param H The handle for the module set to search in.
/// @param Name The name of the symbol to search for.
/// @param ExportedSymbolsOnly If true, search only for exported symbols.
/// @return A handle for the given named symbol, if it is found in the
/// given module set.
JITSymbol findSymbolIn(ModuleSetHandleT H, const std::string &Name,
bool ExportedSymbolsOnly) {
return BaseLayer.findSymbolIn(H, Name, ExportedSymbolsOnly);
}
/// @brief Immediately emit and finalize the module set represented by the
/// given handle.
/// @param H Handle for module set to emit/finalize.
void emitAndFinalize(ModuleSetHandleT H) {
BaseLayer.emitAndFinalize(H);
}
/// @brief Access the transform functor directly.
TransformFtor& getTransform() { return Transform; }
/// @brief Access the transform functor directly.
const TransformFtor& getTransform() const { return Transform; }
private:
BaseLayerT &BaseLayer;
TransformFtor Transform;
};
} // End namespace orc.
} // End namespace llvm.
#endif // LLVM_EXECUTIONENGINE_ORC_IRTRANSFORMLAYER_H
|
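A minimal sketch of the intended use of IRTransformLayer: an "optimize layer" stacked on a compile layer, assuming the two-layer stack shown below (ObjectLinkingLayer + IRCompileLayer) and a couple of standard scalar passes; the typedef and function names are illustrative.

```cpp
#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
#include "llvm/ExecutionEngine/Orc/IRTransformLayer.h"
#include "llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Scalar.h"
#include <functional>
#include <memory>

using namespace llvm;
using namespace llvm::orc;

typedef ObjectLinkingLayer<> ObjLayerT;
typedef IRCompileLayer<ObjLayerT> CompileLayerT;
typedef std::function<std::unique_ptr<Module>(std::unique_ptr<Module>)>
    OptimizeFtor;
typedef IRTransformLayer<CompileLayerT, OptimizeFtor> OptimizeLayerT;

// Build an "optimize layer" on top of an existing compile layer: every module
// added through the returned layer is run through a few scalar passes before
// being handed down to CompileLayer.
OptimizeLayerT makeOptimizeLayer(CompileLayerT &CompileLayer) {
  return OptimizeLayerT(CompileLayer, [](std::unique_ptr<Module> M) {
    legacy::FunctionPassManager FPM(M.get());
    FPM.add(createReassociatePass());
    FPM.add(createCFGSimplificationPass());
    FPM.doInitialization();
    for (auto &F : *M)
      FPM.run(F);
    FPM.doFinalization();
    return M;
  });
}
```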
0 | repos/DirectXShaderCompiler/include/llvm/ExecutionEngine | repos/DirectXShaderCompiler/include/llvm/ExecutionEngine/Orc/CompileUtils.h | //===-- CompileUtils.h - Utilities for compiling IR in the JIT --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Contains utilities for compiling IR to object files.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_ORC_COMPILEUTILS_H
#define LLVM_EXECUTIONENGINE_ORC_COMPILEUTILS_H
#include "llvm/ExecutionEngine/ObjectMemoryBuffer.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Target/TargetMachine.h"
namespace llvm {
namespace orc {
/// @brief Simple compile functor: Takes a single IR module and returns an
/// ObjectFile.
class SimpleCompiler {
public:
/// @brief Construct a simple compile functor with the given target.
SimpleCompiler(TargetMachine &TM) : TM(TM) {}
/// @brief Compile a Module to an ObjectFile.
object::OwningBinary<object::ObjectFile> operator()(Module &M) const {
SmallVector<char, 0> ObjBufferSV;
raw_svector_ostream ObjStream(ObjBufferSV);
legacy::PassManager PM;
MCContext *Ctx;
if (TM.addPassesToEmitMC(PM, Ctx, ObjStream))
llvm_unreachable("Target does not support MC emission.");
PM.run(M);
ObjStream.flush();
std::unique_ptr<MemoryBuffer> ObjBuffer(
new ObjectMemoryBuffer(std::move(ObjBufferSV)));
ErrorOr<std::unique_ptr<object::ObjectFile>> Obj =
object::ObjectFile::createObjectFile(ObjBuffer->getMemBufferRef());
// TODO: Actually report errors helpfully.
typedef object::OwningBinary<object::ObjectFile> OwningObj;
if (Obj)
return OwningObj(std::move(*Obj), std::move(ObjBuffer));
return OwningObj(nullptr, nullptr);
}
private:
TargetMachine &TM;
};
} // End namespace orc.
} // End namespace llvm.
#endif // LLVM_EXECUTIONENGINE_ORC_COMPILEUTILS_H
|
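A minimal sketch of driving SimpleCompiler by hand, assuming an already-populated Module targeting the host; EngineBuilder::selectTarget() is used here only as a convenient way to obtain a host TargetMachine.

```cpp
#include "llvm/ExecutionEngine/ExecutionEngine.h"   // EngineBuilder
#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/TargetSelect.h"
#include <memory>

using namespace llvm;
using namespace llvm::orc;

// Compile a module for the host and return the in-memory object file.
object::OwningBinary<object::ObjectFile> compileForHost(Module &M) {
  InitializeNativeTarget();
  InitializeNativeTargetAsmPrinter();
  std::unique_ptr<TargetMachine> TM(EngineBuilder().selectTarget());
  SimpleCompiler Compile(*TM);     // codegen runs via a legacy::PassManager
  return Compile(M);
}
```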
0 | repos/DirectXShaderCompiler/include/llvm/ExecutionEngine | repos/DirectXShaderCompiler/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h | //===------ IRCompileLayer.h -- Eagerly compile IR for JIT ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Contains the definition for a basic, eagerly compiling layer of the JIT.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_ORC_IRCOMPILELAYER_H
#define LLVM_EXECUTIONENGINE_ORC_IRCOMPILELAYER_H
#include "JITSymbol.h"
#include "llvm/ExecutionEngine/ObjectCache.h"
#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
#include "llvm/Object/ObjectFile.h"
#include <memory>
namespace llvm {
namespace orc {
/// @brief Eager IR compiling layer.
///
/// This layer accepts sets of LLVM IR Modules (via addModuleSet). It
/// immediately compiles each IR module to an object file (each IR Module is
/// compiled separately). The resulting set of object files is then added to
/// the layer below, which must implement the object layer concept.
template <typename BaseLayerT> class IRCompileLayer {
public:
typedef std::function<object::OwningBinary<object::ObjectFile>(Module &)>
CompileFtor;
private:
typedef typename BaseLayerT::ObjSetHandleT ObjSetHandleT;
typedef std::vector<std::unique_ptr<object::ObjectFile>> OwningObjectVec;
typedef std::vector<std::unique_ptr<MemoryBuffer>> OwningBufferVec;
public:
/// @brief Handle to a set of compiled modules.
typedef ObjSetHandleT ModuleSetHandleT;
/// @brief Construct an IRCompileLayer with the given BaseLayer, which must
/// implement the ObjectLayer concept.
IRCompileLayer(BaseLayerT &BaseLayer, CompileFtor Compile)
: BaseLayer(BaseLayer), Compile(std::move(Compile)), ObjCache(nullptr) {}
/// @brief Set an ObjectCache to query before compiling.
void setObjectCache(ObjectCache *NewCache) { ObjCache = NewCache; }
/// @brief Compile each module in the given module set, then add the resulting
/// set of objects to the base layer along with the memory manager and
/// symbol resolver.
///
/// @return A handle for the added modules.
template <typename ModuleSetT, typename MemoryManagerPtrT,
typename SymbolResolverPtrT>
ModuleSetHandleT addModuleSet(ModuleSetT Ms,
MemoryManagerPtrT MemMgr,
SymbolResolverPtrT Resolver) {
OwningObjectVec Objects;
OwningBufferVec Buffers;
for (const auto &M : Ms) {
std::unique_ptr<object::ObjectFile> Object;
std::unique_ptr<MemoryBuffer> Buffer;
if (ObjCache)
std::tie(Object, Buffer) = tryToLoadFromObjectCache(*M).takeBinary();
if (!Object) {
std::tie(Object, Buffer) = Compile(*M).takeBinary();
if (ObjCache)
ObjCache->notifyObjectCompiled(&*M, Buffer->getMemBufferRef());
}
Objects.push_back(std::move(Object));
Buffers.push_back(std::move(Buffer));
}
ModuleSetHandleT H =
BaseLayer.addObjectSet(Objects, std::move(MemMgr), std::move(Resolver));
BaseLayer.takeOwnershipOfBuffers(H, std::move(Buffers));
return H;
}
/// @brief Remove the module set associated with the handle H.
void removeModuleSet(ModuleSetHandleT H) { BaseLayer.removeObjectSet(H); }
/// @brief Search for the given named symbol.
/// @param Name The name of the symbol to search for.
/// @param ExportedSymbolsOnly If true, search only for exported symbols.
/// @return A handle for the given named symbol, if it exists.
JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
}
/// @brief Get the address of the given symbol in the context of the set of
/// compiled modules represented by the handle H. This call is
/// forwarded to the base layer's implementation.
/// @param H The handle for the module set to search in.
/// @param Name The name of the symbol to search for.
/// @param ExportedSymbolsOnly If true, search only for exported symbols.
/// @return A handle for the given named symbol, if it is found in the
/// given module set.
JITSymbol findSymbolIn(ModuleSetHandleT H, const std::string &Name,
bool ExportedSymbolsOnly) {
return BaseLayer.findSymbolIn(H, Name, ExportedSymbolsOnly);
}
/// @brief Immediately emit and finalize the module set represented by the
/// given handle.
/// @param H Handle for module set to emit/finalize.
void emitAndFinalize(ModuleSetHandleT H) {
BaseLayer.emitAndFinalize(H);
}
private:
object::OwningBinary<object::ObjectFile>
tryToLoadFromObjectCache(const Module &M) {
std::unique_ptr<MemoryBuffer> ObjBuffer = ObjCache->getObject(&M);
if (!ObjBuffer)
return object::OwningBinary<object::ObjectFile>();
ErrorOr<std::unique_ptr<object::ObjectFile>> Obj =
object::ObjectFile::createObjectFile(ObjBuffer->getMemBufferRef());
if (!Obj)
return object::OwningBinary<object::ObjectFile>();
return object::OwningBinary<object::ObjectFile>(std::move(*Obj),
std::move(ObjBuffer));
}
BaseLayerT &BaseLayer;
CompileFtor Compile;
ObjectCache *ObjCache;
};
} // End namespace orc.
} // End namespace llvm.
#endif // LLVM_EXECUTIONENGINE_ORC_IRCOMPILELAYER_H
|
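A minimal sketch of a two-layer stack built around IRCompileLayer, assuming a self-contained module (hence NullResolver) and a symbol name that is already mangled for the target's data layout; the class and method names below are illustrative. The layers are members so that they outlive any use of the JIT'd code.

```cpp
#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
#include "llvm/ExecutionEngine/Orc/NullResolver.h"
#include "llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h"
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/IR/Module.h"
#include <memory>
#include <string>
#include <vector>

using namespace llvm;
using namespace llvm::orc;

class SimpleLayerStack {
public:
  SimpleLayerStack(TargetMachine &TM)
      : CompileLayer(ObjLayer, SimpleCompiler(TM)) {}

  // Add a self-contained module and look up one of its symbols. `Name` must
  // already be mangled for the target data layout.
  JITSymbol addModuleAndFind(std::unique_ptr<Module> M,
                             const std::string &Name) {
    std::vector<std::unique_ptr<Module>> Ms;
    Ms.push_back(std::move(M));
    CompileLayer.addModuleSet(std::move(Ms),
                              llvm::make_unique<SectionMemoryManager>(),
                              llvm::make_unique<NullResolver>());
    // getAddress() on the returned symbol finalizes the set lazily.
    return CompileLayer.findSymbol(Name, /*ExportedSymbolsOnly=*/true);
  }

private:
  ObjectLinkingLayer<> ObjLayer;
  IRCompileLayer<ObjectLinkingLayer<>> CompileLayer;
};
```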
0 | repos/DirectXShaderCompiler/include/llvm/ExecutionEngine | repos/DirectXShaderCompiler/include/llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h | //===- ObjectLinkingLayer.h - Add object files to a JIT process -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Contains the definition for the object layer of the JIT.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_ORC_OBJECTLINKINGLAYER_H
#define LLVM_EXECUTIONENGINE_ORC_OBJECTLINKINGLAYER_H
#include "JITSymbol.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include <list>
#include <memory>
namespace llvm {
namespace orc {
class ObjectLinkingLayerBase {
protected:
/// @brief Holds a set of objects to be allocated/linked as a unit in the JIT.
///
/// An instance of this class will be created for each set of objects added
/// via ObjectLinkingLayer::addObjectSet. Deleting the instance (via
/// removeObjectSet) frees its memory, removing all symbol definitions that
/// had been provided by this instance. Higher level layers are responsible
/// for taking any action required to handle the missing symbols.
class LinkedObjectSet {
LinkedObjectSet(const LinkedObjectSet&) = delete;
void operator=(const LinkedObjectSet&) = delete;
public:
LinkedObjectSet(RuntimeDyld::MemoryManager &MemMgr,
RuntimeDyld::SymbolResolver &Resolver)
: RTDyld(llvm::make_unique<RuntimeDyld>(MemMgr, Resolver)),
State(Raw) {}
virtual ~LinkedObjectSet() {}
std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
addObject(const object::ObjectFile &Obj) {
return RTDyld->loadObject(Obj);
}
RuntimeDyld::SymbolInfo getSymbol(StringRef Name) const {
return RTDyld->getSymbol(Name);
}
bool NeedsFinalization() const { return (State == Raw); }
virtual void Finalize() = 0;
void mapSectionAddress(const void *LocalAddress, TargetAddress TargetAddr) {
assert((State != Finalized) &&
"Attempting to remap sections for finalized objects.");
RTDyld->mapSectionAddress(LocalAddress, TargetAddr);
}
void takeOwnershipOfBuffer(std::unique_ptr<MemoryBuffer> B) {
OwnedBuffers.push_back(std::move(B));
}
protected:
std::unique_ptr<RuntimeDyld> RTDyld;
enum { Raw, Finalizing, Finalized } State;
// FIXME: This ownership hack only exists because RuntimeDyldELF still
// wants to be able to inspect the original object when resolving
// relocations. As soon as that can be fixed this should be removed.
std::vector<std::unique_ptr<MemoryBuffer>> OwnedBuffers;
};
typedef std::list<std::unique_ptr<LinkedObjectSet>> LinkedObjectSetListT;
public:
/// @brief Handle to a set of loaded objects.
typedef LinkedObjectSetListT::iterator ObjSetHandleT;
// Ownership hack.
// FIXME: Remove this as soon as RuntimeDyldELF can apply relocations without
// referencing the original object.
template <typename OwningMBSet>
void takeOwnershipOfBuffers(ObjSetHandleT H, OwningMBSet MBs) {
for (auto &MB : MBs)
(*H)->takeOwnershipOfBuffer(std::move(MB));
}
};
/// @brief Default (no-op) action to perform when loading objects.
class DoNothingOnNotifyLoaded {
public:
template <typename ObjSetT, typename LoadResult>
void operator()(ObjectLinkingLayerBase::ObjSetHandleT, const ObjSetT &,
const LoadResult &) {}
};
/// @brief Bare bones object linking layer.
///
/// This class is intended to be used as the base layer for a JIT. It allows
/// object files to be loaded into memory, linked, and the addresses of their
/// symbols queried. All objects added to this layer can see each other's
/// symbols.
template <typename NotifyLoadedFtor = DoNothingOnNotifyLoaded>
class ObjectLinkingLayer : public ObjectLinkingLayerBase {
private:
template <typename MemoryManagerPtrT, typename SymbolResolverPtrT>
class ConcreteLinkedObjectSet : public LinkedObjectSet {
public:
ConcreteLinkedObjectSet(MemoryManagerPtrT MemMgr,
SymbolResolverPtrT Resolver)
: LinkedObjectSet(*MemMgr, *Resolver), MemMgr(std::move(MemMgr)),
Resolver(std::move(Resolver)) { }
void Finalize() override {
State = Finalizing;
RTDyld->resolveRelocations();
RTDyld->registerEHFrames();
MemMgr->finalizeMemory();
OwnedBuffers.clear();
State = Finalized;
}
private:
MemoryManagerPtrT MemMgr;
SymbolResolverPtrT Resolver;
};
template <typename MemoryManagerPtrT, typename SymbolResolverPtrT>
std::unique_ptr<LinkedObjectSet>
createLinkedObjectSet(MemoryManagerPtrT MemMgr, SymbolResolverPtrT Resolver) {
typedef ConcreteLinkedObjectSet<MemoryManagerPtrT, SymbolResolverPtrT> LOS;
return llvm::make_unique<LOS>(std::move(MemMgr), std::move(Resolver));
}
public:
/// @brief LoadedObjectInfo list. Contains a list of owning pointers to
/// RuntimeDyld::LoadedObjectInfo instances.
typedef std::vector<std::unique_ptr<RuntimeDyld::LoadedObjectInfo>>
LoadedObjInfoList;
/// @brief Functor for receiving finalization notifications.
typedef std::function<void(ObjSetHandleT)> NotifyFinalizedFtor;
/// @brief Construct an ObjectLinkingLayer with the given NotifyLoaded,
/// and NotifyFinalized functors.
ObjectLinkingLayer(
NotifyLoadedFtor NotifyLoaded = NotifyLoadedFtor(),
NotifyFinalizedFtor NotifyFinalized = NotifyFinalizedFtor())
: NotifyLoaded(std::move(NotifyLoaded)),
NotifyFinalized(std::move(NotifyFinalized)) {}
/// @brief Add a set of objects (or archives) that will be treated as a unit
/// for the purposes of symbol lookup and memory management.
///
/// @return A pair containing (1) A handle that can be used to free the memory
/// allocated for the objects, and (2) a LoadedObjInfoList containing
/// one LoadedObjInfo instance for each object at the corresponding
/// index in the Objects list.
///
/// This version of this method allows the client to pass in an
/// RTDyldMemoryManager instance that will be used to allocate memory and look
/// up external symbol addresses for the given objects.
template <typename ObjSetT,
typename MemoryManagerPtrT,
typename SymbolResolverPtrT>
ObjSetHandleT addObjectSet(const ObjSetT &Objects,
MemoryManagerPtrT MemMgr,
SymbolResolverPtrT Resolver) {
ObjSetHandleT Handle =
LinkedObjSetList.insert(
LinkedObjSetList.end(),
createLinkedObjectSet(std::move(MemMgr), std::move(Resolver)));
LinkedObjectSet &LOS = **Handle;
LoadedObjInfoList LoadedObjInfos;
for (auto &Obj : Objects)
LoadedObjInfos.push_back(LOS.addObject(*Obj));
NotifyLoaded(Handle, Objects, LoadedObjInfos);
return Handle;
}
/// @brief Remove the set of objects associated with handle H.
///
/// All memory allocated for the objects will be freed, and the sections and
/// symbols they provided will no longer be available. No attempt is made to
/// re-emit the missing symbols, and any use of these symbols (directly or
/// indirectly) will result in undefined behavior. If dependence tracking is
/// required to detect or resolve such issues it should be added at a higher
/// layer.
void removeObjectSet(ObjSetHandleT H) {
// How do we invalidate the symbols in H?
LinkedObjSetList.erase(H);
}
/// @brief Search for the given named symbol.
/// @param Name The name of the symbol to search for.
/// @param ExportedSymbolsOnly If true, search only for exported symbols.
/// @return A handle for the given named symbol, if it exists.
JITSymbol findSymbol(StringRef Name, bool ExportedSymbolsOnly) {
for (auto I = LinkedObjSetList.begin(), E = LinkedObjSetList.end(); I != E;
++I)
if (auto Symbol = findSymbolIn(I, Name, ExportedSymbolsOnly))
return Symbol;
return nullptr;
}
/// @brief Search for the given named symbol in the context of the set of
/// loaded objects represented by the handle H.
/// @param H The handle for the object set to search in.
/// @param Name The name of the symbol to search for.
/// @param ExportedSymbolsOnly If true, search only for exported symbols.
/// @return A handle for the given named symbol, if it is found in the
/// given object set.
JITSymbol findSymbolIn(ObjSetHandleT H, StringRef Name,
bool ExportedSymbolsOnly) {
if (auto Sym = (*H)->getSymbol(Name)) {
if (Sym.isExported() || !ExportedSymbolsOnly) {
auto Addr = Sym.getAddress();
auto Flags = Sym.getFlags();
if (!(*H)->NeedsFinalization()) {
// If this instance has already been finalized then we can just return
// the address.
return JITSymbol(Addr, Flags);
} else {
// If this instance needs finalization return a functor that will do
// it. The functor still needs to double-check whether finalization is
// required, in case someone else finalizes this set before the
// functor is called.
auto GetAddress =
[this, Addr, H]() {
if ((*H)->NeedsFinalization()) {
(*H)->Finalize();
if (NotifyFinalized)
NotifyFinalized(H);
}
return Addr;
};
return JITSymbol(std::move(GetAddress), Flags);
}
}
}
return nullptr;
}
/// @brief Map section addresses for the objects associated with the handle H.
void mapSectionAddress(ObjSetHandleT H, const void *LocalAddress,
TargetAddress TargetAddr) {
(*H)->mapSectionAddress(LocalAddress, TargetAddr);
}
/// @brief Immediately emit and finalize the object set represented by the
/// given handle.
/// @param H Handle for object set to emit/finalize.
void emitAndFinalize(ObjSetHandleT H) {
(*H)->Finalize();
if (NotifyFinalized)
NotifyFinalized(H);
}
private:
LinkedObjectSetListT LinkedObjSetList;
NotifyLoadedFtor NotifyLoaded;
NotifyFinalizedFtor NotifyFinalized;
};
} // End namespace orc.
} // End namespace llvm
#endif // LLVM_EXECUTIONENGINE_ORC_OBJECTLINKINGLAYER_H
|
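A minimal sketch of using ObjectLinkingLayer directly with a pre-compiled, self-contained object file: add it as a one-element set, finalize eagerly, then look up a symbol. The function name is illustrative and NullResolver stands in for a real resolver.

```cpp
#include "llvm/ADT/StringRef.h"
#include "llvm/ExecutionEngine/Orc/NullResolver.h"
#include "llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h"
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/Object/ObjectFile.h"
#include <vector>

using namespace llvm;
using namespace llvm::orc;

JITSymbol linkObjectAndFind(ObjectLinkingLayer<> &ObjLayer,
                            object::ObjectFile &Obj, StringRef SymName) {
  std::vector<object::ObjectFile *> Objs;
  Objs.push_back(&Obj);
  auto H = ObjLayer.addObjectSet(Objs,
                                 llvm::make_unique<SectionMemoryManager>(),
                                 llvm::make_unique<NullResolver>());
  ObjLayer.emitAndFinalize(H);   // resolve relocations, register EH frames
  return ObjLayer.findSymbolIn(H, SymName, /*ExportedSymbolsOnly=*/true);
}
```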
0 | repos/DirectXShaderCompiler/include/llvm/ExecutionEngine | repos/DirectXShaderCompiler/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h | //===- CompileOnDemandLayer.h - Compile each function on demand -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// JIT layer for breaking up modules and inserting callbacks to allow
// individual functions to be compiled on demand.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_ORC_COMPILEONDEMANDLAYER_H
#define LLVM_EXECUTIONENGINE_ORC_COMPILEONDEMANDLAYER_H
#include "IndirectionUtils.h"
#include "LambdaResolver.h"
#include "LogicalDylib.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include <list>
#include <set>
#include "llvm/Support/Debug.h"
namespace llvm {
namespace orc {
/// @brief Compile-on-demand layer.
///
/// When a module is added to this layer a stub is created for each of its
/// function definitions. The stubs and other global values are immediately
/// added to the layer below. When a stub is called it triggers the extraction
/// of the function body from the original module. The extracted body is then
/// compiled and executed.
template <typename BaseLayerT, typename CompileCallbackMgrT,
typename PartitioningFtor =
std::function<std::set<Function*>(Function&)>>
class CompileOnDemandLayer {
private:
// Utility class for MapValue. Only materializes declarations for global
// variables.
class GlobalDeclMaterializer : public ValueMaterializer {
public:
typedef std::set<const Function*> StubSet;
GlobalDeclMaterializer(Module &Dst, const StubSet *StubsToClone = nullptr)
: Dst(Dst), StubsToClone(StubsToClone) {}
Value* materializeValueFor(Value *V) final {
if (auto *GV = dyn_cast<GlobalVariable>(V))
return cloneGlobalVariableDecl(Dst, *GV);
else if (auto *F = dyn_cast<Function>(V)) {
auto *ClonedF = cloneFunctionDecl(Dst, *F);
if (StubsToClone && StubsToClone->count(F)) {
GlobalVariable *FnBodyPtr =
createImplPointer(*ClonedF->getType(), *ClonedF->getParent(),
ClonedF->getName() + "$orc_addr", nullptr);
makeStub(*ClonedF, *FnBodyPtr);
ClonedF->setLinkage(GlobalValue::AvailableExternallyLinkage);
ClonedF->addFnAttr(Attribute::AlwaysInline);
}
return ClonedF;
}
// Else.
return nullptr;
}
private:
Module &Dst;
const StubSet *StubsToClone;
};
typedef typename BaseLayerT::ModuleSetHandleT BaseLayerModuleSetHandleT;
struct LogicalModuleResources {
std::shared_ptr<Module> SourceModule;
std::set<const Function*> StubsToClone;
};
struct LogicalDylibResources {
typedef std::function<RuntimeDyld::SymbolInfo(const std::string&)>
SymbolResolverFtor;
SymbolResolverFtor ExternalSymbolResolver;
PartitioningFtor Partitioner;
};
typedef LogicalDylib<BaseLayerT, LogicalModuleResources,
LogicalDylibResources> CODLogicalDylib;
typedef typename CODLogicalDylib::LogicalModuleHandle LogicalModuleHandle;
typedef std::list<CODLogicalDylib> LogicalDylibList;
public:
/// @brief Handle to a set of loaded modules.
typedef typename LogicalDylibList::iterator ModuleSetHandleT;
/// @brief Construct a compile-on-demand layer instance.
CompileOnDemandLayer(BaseLayerT &BaseLayer, CompileCallbackMgrT &CallbackMgr,
bool CloneStubsIntoPartitions)
: BaseLayer(BaseLayer), CompileCallbackMgr(CallbackMgr),
CloneStubsIntoPartitions(CloneStubsIntoPartitions) {}
/// @brief Add a module to the compile-on-demand layer.
template <typename ModuleSetT, typename MemoryManagerPtrT,
typename SymbolResolverPtrT>
ModuleSetHandleT addModuleSet(ModuleSetT Ms,
MemoryManagerPtrT MemMgr,
SymbolResolverPtrT Resolver) {
assert(MemMgr == nullptr &&
"User supplied memory managers not supported with COD yet.");
LogicalDylibs.push_back(CODLogicalDylib(BaseLayer));
auto &LDResources = LogicalDylibs.back().getDylibResources();
LDResources.ExternalSymbolResolver =
[Resolver](const std::string &Name) {
return Resolver->findSymbol(Name);
};
LDResources.Partitioner =
[](Function &F) {
std::set<Function*> Partition;
Partition.insert(&F);
return Partition;
};
// Process each of the modules in this module set.
for (auto &M : Ms)
addLogicalModule(LogicalDylibs.back(),
std::shared_ptr<Module>(std::move(M)));
return std::prev(LogicalDylibs.end());
}
/// @brief Remove the module represented by the given handle.
///
/// This will remove all modules in the layers below that were derived from
/// the module represented by H.
void removeModuleSet(ModuleSetHandleT H) {
LogicalDylibs.erase(H);
}
/// @brief Search for the given named symbol.
/// @param Name The name of the symbol to search for.
/// @param ExportedSymbolsOnly If true, search only for exported symbols.
/// @return A handle for the given named symbol, if it exists.
JITSymbol findSymbol(StringRef Name, bool ExportedSymbolsOnly) {
return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
}
/// @brief Get the address of a symbol provided by this layer, or some layer
/// below this one.
JITSymbol findSymbolIn(ModuleSetHandleT H, const std::string &Name,
bool ExportedSymbolsOnly) {
return H->findSymbol(Name, ExportedSymbolsOnly);
}
private:
void addLogicalModule(CODLogicalDylib &LD, std::shared_ptr<Module> SrcM) {
// Bump the linkage and rename any anonymous/private members in SrcM to
// ensure that everything will resolve properly after we partition SrcM.
makeAllSymbolsExternallyAccessible(*SrcM);
// Create a logical module handle for SrcM within the logical dylib.
auto LMH = LD.createLogicalModule();
auto &LMResources = LD.getLogicalModuleResources(LMH);
LMResources.SourceModule = SrcM;
// Create the GVs-and-stubs module.
auto GVsAndStubsM = llvm::make_unique<Module>(
(SrcM->getName() + ".globals_and_stubs").str(),
SrcM->getContext());
GVsAndStubsM->setDataLayout(SrcM->getDataLayout());
ValueToValueMapTy VMap;
// Process module and create stubs.
// We create the stubs before copying the global variables, since we know the
// stubs won't refer to any globals (they only refer to their implementation
// pointer), so there are no ordering/value-mapping issues.
for (auto &F : *SrcM) {
// Skip declarations.
if (F.isDeclaration())
continue;
// Record all functions defined by this module.
if (CloneStubsIntoPartitions)
LMResources.StubsToClone.insert(&F);
// For each definition: create a callback, a stub, and a function body
// pointer. Initialize the function body pointer to point at the callback,
// and set the callback to compile the function body.
auto CCInfo = CompileCallbackMgr.getCompileCallback(SrcM->getContext());
Function *StubF = cloneFunctionDecl(*GVsAndStubsM, F, &VMap);
GlobalVariable *FnBodyPtr =
createImplPointer(*StubF->getType(), *StubF->getParent(),
StubF->getName() + "$orc_addr",
createIRTypedAddress(*StubF->getFunctionType(),
CCInfo.getAddress()));
makeStub(*StubF, *FnBodyPtr);
CCInfo.setCompileAction(
[this, &LD, LMH, &F]() {
return this->extractAndCompile(LD, LMH, F);
});
}
// Now clone the global variable declarations.
GlobalDeclMaterializer GDMat(*GVsAndStubsM);
for (auto &GV : SrcM->globals())
if (!GV.isDeclaration())
cloneGlobalVariableDecl(*GVsAndStubsM, GV, &VMap);
// Then clone the initializers.
for (auto &GV : SrcM->globals())
if (!GV.isDeclaration())
moveGlobalVariableInitializer(GV, VMap, &GDMat);
// Build a resolver for the stubs module and add it to the base layer.
auto GVsAndStubsResolver = createLambdaResolver(
[&LD](const std::string &Name) {
return LD.getDylibResources().ExternalSymbolResolver(Name);
},
[](const std::string &Name) {
return RuntimeDyld::SymbolInfo(nullptr);
});
std::vector<std::unique_ptr<Module>> GVsAndStubsMSet;
GVsAndStubsMSet.push_back(std::move(GVsAndStubsM));
auto GVsAndStubsH =
BaseLayer.addModuleSet(std::move(GVsAndStubsMSet),
llvm::make_unique<SectionMemoryManager>(),
std::move(GVsAndStubsResolver));
LD.addToLogicalModule(LMH, GVsAndStubsH);
}
static std::string Mangle(StringRef Name, const DataLayout &DL) {
std::string MangledName;
{
raw_string_ostream MangledNameStream(MangledName);
Mangler::getNameWithPrefix(MangledNameStream, Name, DL);
}
return MangledName;
}
TargetAddress extractAndCompile(CODLogicalDylib &LD,
LogicalModuleHandle LMH,
Function &F) {
Module &SrcM = *LD.getLogicalModuleResources(LMH).SourceModule;
// If F is a declaration we must already have compiled it.
if (F.isDeclaration())
return 0;
// Grab the name of the function being called here.
std::string CalledFnName = Mangle(F.getName(), SrcM.getDataLayout());
auto Partition = LD.getDylibResources().Partitioner(F);
auto PartitionH = emitPartition(LD, LMH, Partition);
TargetAddress CalledAddr = 0;
for (auto *SubF : Partition) {
std::string FName = SubF->getName();
auto FnBodySym =
BaseLayer.findSymbolIn(PartitionH, Mangle(FName, SrcM.getDataLayout()),
false);
auto FnPtrSym =
BaseLayer.findSymbolIn(*LD.moduleHandlesBegin(LMH),
Mangle(FName + "$orc_addr",
SrcM.getDataLayout()),
false);
assert(FnBodySym && "Couldn't find function body.");
assert(FnPtrSym && "Couldn't find function body pointer.");
TargetAddress FnBodyAddr = FnBodySym.getAddress();
void *FnPtrAddr = reinterpret_cast<void*>(
static_cast<uintptr_t>(FnPtrSym.getAddress()));
// If this is the function we're calling record the address so we can
// return it from this function.
if (SubF == &F)
CalledAddr = FnBodyAddr;
memcpy(FnPtrAddr, &FnBodyAddr, sizeof(uintptr_t));
}
return CalledAddr;
}
template <typename PartitionT>
BaseLayerModuleSetHandleT emitPartition(CODLogicalDylib &LD,
LogicalModuleHandle LMH,
const PartitionT &Partition) {
auto &LMResources = LD.getLogicalModuleResources(LMH);
Module &SrcM = *LMResources.SourceModule;
// Create the module.
std::string NewName = SrcM.getName();
for (auto *F : Partition) {
NewName += ".";
NewName += F->getName();
}
auto M = llvm::make_unique<Module>(NewName, SrcM.getContext());
M->setDataLayout(SrcM.getDataLayout());
ValueToValueMapTy VMap;
GlobalDeclMaterializer GDM(*M, &LMResources.StubsToClone);
// Create decls in the new module.
for (auto *F : Partition)
cloneFunctionDecl(*M, *F, &VMap);
// Move the function bodies.
for (auto *F : Partition)
moveFunctionBody(*F, VMap, &GDM);
// Create memory manager and symbol resolver.
auto MemMgr = llvm::make_unique<SectionMemoryManager>();
auto Resolver = createLambdaResolver(
[this, &LD, LMH](const std::string &Name) {
if (auto Symbol = LD.findSymbolInternally(LMH, Name))
return RuntimeDyld::SymbolInfo(Symbol.getAddress(),
Symbol.getFlags());
return LD.getDylibResources().ExternalSymbolResolver(Name);
},
[this, &LD, LMH](const std::string &Name) {
if (auto Symbol = LD.findSymbolInternally(LMH, Name))
return RuntimeDyld::SymbolInfo(Symbol.getAddress(),
Symbol.getFlags());
return RuntimeDyld::SymbolInfo(nullptr);
});
std::vector<std::unique_ptr<Module>> PartMSet;
PartMSet.push_back(std::move(M));
return BaseLayer.addModuleSet(std::move(PartMSet), std::move(MemMgr),
std::move(Resolver));
}
BaseLayerT &BaseLayer;
CompileCallbackMgrT &CompileCallbackMgr;
LogicalDylibList LogicalDylibs;
bool CloneStubsIntoPartitions;
};
} // End namespace orc.
} // End namespace llvm.
#endif // LLVM_EXECUTIONENGINE_ORC_COMPILEONDEMANDLAYER_H
|
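A minimal sketch of stacking CompileOnDemandLayer on a compile layer. The target-specific trampoline emitter (the TargetT parameter of JITCompileCallbackManager, declared in IndirectionUtils.h) is left as a template parameter rather than naming a concrete target-support class; the function name is illustrative.

```cpp
#include "llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h"
#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
#include "llvm/ExecutionEngine/Orc/IndirectionUtils.h"
#include "llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h"

using namespace llvm;
using namespace llvm::orc;

typedef ObjectLinkingLayer<> ObjLayerT;
typedef IRCompileLayer<ObjLayerT> CompileLayerT;

// Wrap CompileLayer in a compile-on-demand layer: each function gets a stub,
// and its body is extracted and compiled on first call. Cloning stubs into
// partitions lets extracted code inline the stubs it calls.
template <typename TargetSupportT>
CompileOnDemandLayer<CompileLayerT,
                     JITCompileCallbackManager<CompileLayerT, TargetSupportT>>
makeCODLayer(CompileLayerT &CompileLayer,
             JITCompileCallbackManager<CompileLayerT, TargetSupportT> &CCMgr) {
  typedef JITCompileCallbackManager<CompileLayerT, TargetSupportT> CCMgrT;
  return CompileOnDemandLayer<CompileLayerT, CCMgrT>(
      CompileLayer, CCMgr, /*CloneStubsIntoPartitions=*/true);
}
```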
0 | repos/DirectXShaderCompiler/include/llvm/ExecutionEngine | repos/DirectXShaderCompiler/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h | //===- ObjectTransformLayer.h - Run all objects through functor -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Run all objects passed in through a user supplied functor.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_ORC_OBJECTTRANSFORMLAYER_H
#define LLVM_EXECUTIONENGINE_ORC_OBJECTTRANSFORMLAYER_H
#include "JITSymbol.h"
namespace llvm {
namespace orc {
/// @brief Object mutating layer.
///
/// This layer accepts sets of ObjectFiles (via addObjectSet). It
/// immediately applies the user supplied functor to each object, then adds
/// the set of transformed objects to the layer below.
template <typename BaseLayerT, typename TransformFtor>
class ObjectTransformLayer {
public:
/// @brief Handle to a set of added objects.
typedef typename BaseLayerT::ObjSetHandleT ObjSetHandleT;
/// @brief Construct an ObjectTransformLayer with the given BaseLayer
ObjectTransformLayer(BaseLayerT &BaseLayer,
TransformFtor Transform = TransformFtor())
: BaseLayer(BaseLayer), Transform(std::move(Transform)) {}
/// @brief Apply the transform functor to each object in the object set, then
/// add the resulting set of objects to the base layer, along with the
/// memory manager and symbol resolver.
///
/// @return A handle for the added objects.
template <typename ObjSetT, typename MemoryManagerPtrT,
typename SymbolResolverPtrT>
ObjSetHandleT addObjectSet(ObjSetT &Objects, MemoryManagerPtrT MemMgr,
SymbolResolverPtrT Resolver) {
for (auto I = Objects.begin(), E = Objects.end(); I != E; ++I)
*I = Transform(std::move(*I));
return BaseLayer.addObjectSet(Objects, std::move(MemMgr),
std::move(Resolver));
}
/// @brief Remove the object set associated with the handle H.
void removeObjectSet(ObjSetHandleT H) { BaseLayer.removeObjectSet(H); }
/// @brief Search for the given named symbol.
/// @param Name The name of the symbol to search for.
/// @param ExportedSymbolsOnly If true, search only for exported symbols.
/// @return A handle for the given named symbol, if it exists.
JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
}
/// @brief Get the address of the given symbol in the context of the set of
/// objects represented by the handle H. This call is forwarded to the
/// base layer's implementation.
/// @param H The handle for the object set to search in.
/// @param Name The name of the symbol to search for.
/// @param ExportedSymbolsOnly If true, search only for exported symbols.
/// @return A handle for the given named symbol, if it is found in the
/// given object set.
JITSymbol findSymbolIn(ObjSetHandleT H, const std::string &Name,
bool ExportedSymbolsOnly) {
return BaseLayer.findSymbolIn(H, Name, ExportedSymbolsOnly);
}
/// @brief Immediately emit and finalize the object set represented by the
/// given handle.
/// @param H Handle for object set to emit/finalize.
void emitAndFinalize(ObjSetHandleT H) { BaseLayer.emitAndFinalize(H); }
/// @brief Map section addresses for the objects associated with the handle H.
void mapSectionAddress(ObjSetHandleT H, const void *LocalAddress,
TargetAddress TargetAddr) {
BaseLayer.mapSectionAddress(H, LocalAddress, TargetAddr);
}
// Ownership hack.
// FIXME: Remove this as soon as RuntimeDyldELF can apply relocations without
// referencing the original object.
template <typename OwningMBSet>
void takeOwnershipOfBuffers(ObjSetHandleT H, OwningMBSet MBs) {
BaseLayer.takeOwnershipOfBuffers(H, std::move(MBs));
}
/// @brief Access the transform functor directly.
TransformFtor &getTransform() { return Transform; }
/// @brief Access the transform functor directly.
const TransformFtor &getTransform() const { return Transform; }
private:
BaseLayerT &BaseLayer;
TransformFtor Transform;
};
} // End namespace orc.
} // End namespace llvm.
#endif // LLVM_EXECUTIONENGINE_ORC_OBJECTTRANSFORMLAYER_H
|
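A minimal sketch of an ObjectTransformLayer whose transform is the identity plus a log line; the struct and typedef names are illustrative.

```cpp
#include "llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h"
#include "llvm/ExecutionEngine/Orc/ObjectTransformLayer.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::orc;

// Identity transform that reports each object on its way to the base layer.
// The element type is kept generic because it depends on the container the
// client passes to addObjectSet.
struct LogObjectTransform {
  template <typename ObjPtrT> ObjPtrT operator()(ObjPtrT Obj) {
    errs() << "object handed to base layer\n";
    return Obj;
  }
};

typedef ObjectTransformLayer<ObjectLinkingLayer<>, LogObjectTransform>
    LoggingObjectLayerT;

// Usage:
//   ObjectLinkingLayer<> BaseObjLayer;
//   LoggingObjectLayerT ObjLayer(BaseObjLayer);
//   // ... use ObjLayer wherever an object layer is expected.
```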
0 | repos/DirectXShaderCompiler/include/llvm/ExecutionEngine | repos/DirectXShaderCompiler/include/llvm/ExecutionEngine/Orc/JITSymbol.h | //===----------- JITSymbol.h - JIT symbol abstraction -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Abstraction for target process addresses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_ORC_JITSYMBOL_H
#define LLVM_EXECUTIONENGINE_ORC_JITSYMBOL_H
#include "llvm/ExecutionEngine/JITSymbolFlags.h"
#include "llvm/Support/DataTypes.h"
#include <cassert>
#include <functional>
namespace llvm {
namespace orc {
/// @brief Represents an address in the target process's address space.
typedef uint64_t TargetAddress;
/// @brief Represents a symbol in the JIT.
class JITSymbol : public JITSymbolBase {
public:
typedef std::function<TargetAddress()> GetAddressFtor;
/// @brief Create a 'null' symbol that represents failure to find a symbol
/// definition.
JITSymbol(std::nullptr_t)
: JITSymbolBase(JITSymbolFlags::None), CachedAddr(0) {}
/// @brief Create a symbol for a definition with a known address.
JITSymbol(TargetAddress Addr, JITSymbolFlags Flags)
: JITSymbolBase(Flags), CachedAddr(Addr) {}
/// @brief Create a symbol for a definition that doesn't have a known address
/// yet.
/// @param GetAddress A functor to materialize a definition (fixing the
/// address) on demand.
///
/// This constructor allows a JIT layer to provide a reference to a symbol
/// definition without actually materializing the definition up front. The
/// user can materialize the definition at any time by calling the getAddress
/// method.
JITSymbol(GetAddressFtor GetAddress, JITSymbolFlags Flags)
: JITSymbolBase(Flags), GetAddress(std::move(GetAddress)), CachedAddr(0) {}
/// @brief Returns true if the symbol exists, false otherwise.
explicit operator bool() const { return CachedAddr || GetAddress; }
/// @brief Get the address of the symbol in the target address space. Returns
/// '0' if the symbol does not exist.
TargetAddress getAddress() {
if (GetAddress) {
CachedAddr = GetAddress();
assert(CachedAddr && "Symbol could not be materialized.");
GetAddress = nullptr;
}
return CachedAddr;
}
private:
GetAddressFtor GetAddress;
TargetAddress CachedAddr;
};
} // End namespace orc.
} // End namespace llvm.
#endif // LLVM_EXECUTIONENGINE_ORC_JITSYMBOL_H
|
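A minimal sketch of the two ways a JITSymbol gets its address: eagerly from a known address, or lazily via a GetAddressFtor that materializes the definition on first use. The constant addresses are stand-ins for whatever a real layer would produce.

```cpp
#include "llvm/ExecutionEngine/Orc/JITSymbol.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::orc;

void jitSymbolSketch() {
  // Eager: the address is already known.
  JITSymbol Known(0x1000, JITSymbolFlags::Exported);
  (void)Known;

  // Lazy: the functor runs the first time getAddress() is called, and the
  // result is cached for later calls.
  JITSymbol Lazy([]() -> TargetAddress {
                   errs() << "materializing definition...\n";
                   return 0x2000;                    // stand-in address
                 },
                 JITSymbolFlags::Exported);

  if (Lazy) {                                        // a definition exists
    TargetAddress First = Lazy.getAddress();         // materializes
    TargetAddress Second = Lazy.getAddress();        // cached
    (void)First; (void)Second;
  }
}
```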
0 | repos/DirectXShaderCompiler/include/llvm/ExecutionEngine | repos/DirectXShaderCompiler/include/llvm/ExecutionEngine/Orc/LogicalDylib.h | //===--- LogicalDylib.h - Simulates dylib-style symbol lookup ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Simulates symbol resolution inside a dylib.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_ORC_LOGICALDYLIB_H
#define LLVM_EXECUTIONENGINE_ORC_LOGICALDYLIB_H
namespace llvm {
namespace orc {
template <typename BaseLayerT,
typename LogicalModuleResources,
typename LogicalDylibResources>
class LogicalDylib {
public:
typedef typename BaseLayerT::ModuleSetHandleT BaseLayerModuleSetHandleT;
private:
typedef std::vector<BaseLayerModuleSetHandleT> BaseLayerHandleList;
struct LogicalModule {
LogicalModuleResources Resources;
BaseLayerHandleList BaseLayerHandles;
};
typedef std::vector<LogicalModule> LogicalModuleList;
public:
typedef typename BaseLayerHandleList::iterator BaseLayerHandleIterator;
typedef typename LogicalModuleList::iterator LogicalModuleHandle;
LogicalDylib(BaseLayerT &BaseLayer) : BaseLayer(BaseLayer) {}
~LogicalDylib() {
for (auto &LM : LogicalModules)
for (auto BLH : LM.BaseLayerHandles)
BaseLayer.removeModuleSet(BLH);
}
LogicalModuleHandle createLogicalModule() {
LogicalModules.push_back(LogicalModule());
return std::prev(LogicalModules.end());
}
void addToLogicalModule(LogicalModuleHandle LMH,
BaseLayerModuleSetHandleT BaseLayerHandle) {
LMH->BaseLayerHandles.push_back(BaseLayerHandle);
}
LogicalModuleResources& getLogicalModuleResources(LogicalModuleHandle LMH) {
return LMH->Resources;
}
BaseLayerHandleIterator moduleHandlesBegin(LogicalModuleHandle LMH) {
return LMH->BaseLayerHandles.begin();
}
BaseLayerHandleIterator moduleHandlesEnd(LogicalModuleHandle LMH) {
return LMH->BaseLayerHandles.end();
}
JITSymbol findSymbolInLogicalModule(LogicalModuleHandle LMH,
const std::string &Name) {
for (auto BLH : LMH->BaseLayerHandles)
if (auto Symbol = BaseLayer.findSymbolIn(BLH, Name, false))
return Symbol;
return nullptr;
}
JITSymbol findSymbolInternally(LogicalModuleHandle LMH,
const std::string &Name) {
if (auto Symbol = findSymbolInLogicalModule(LMH, Name))
return Symbol;
for (auto LMI = LogicalModules.begin(), LME = LogicalModules.end();
LMI != LME; ++LMI) {
if (LMI != LMH)
if (auto Symbol = findSymbolInLogicalModule(LMI, Name))
return Symbol;
}
return nullptr;
}
JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
for (auto &LM : LogicalModules)
for (auto BLH : LM.BaseLayerHandles)
if (auto Symbol =
BaseLayer.findSymbolIn(BLH, Name, ExportedSymbolsOnly))
return Symbol;
return nullptr;
}
LogicalDylibResources& getDylibResources() { return DylibResources; }
protected:
BaseLayerT BaseLayer;
LogicalModuleList LogicalModules;
LogicalDylibResources DylibResources;
};
} // End namespace orc.
} // End namespace llvm.
#endif // LLVM_EXECUTIONENGINE_ORC_LOGICALDYLIB_H
|
0 | repos/DirectXShaderCompiler/include/llvm/ExecutionEngine | repos/DirectXShaderCompiler/include/llvm/ExecutionEngine/Orc/LambdaResolver.h | //===-- LambdaResolver.h - Redirect symbol lookup via a functor -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Defines a RuntimeDyld::SymbolResolver subclass that uses a user-supplied
// functor for symbol resolution.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_ORC_LAMBDARESOLVER_H
#define LLVM_EXECUTIONENGINE_ORC_LAMBDARESOLVER_H
#include "llvm/ADT/STLExtras.h"
#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include <memory>
#include <vector>
namespace llvm {
namespace orc {
template <typename ExternalLookupFtorT, typename DylibLookupFtorT>
class LambdaResolver : public RuntimeDyld::SymbolResolver {
public:
LambdaResolver(ExternalLookupFtorT ExternalLookupFtor,
DylibLookupFtorT DylibLookupFtor)
: ExternalLookupFtor(ExternalLookupFtor),
DylibLookupFtor(DylibLookupFtor) {}
RuntimeDyld::SymbolInfo findSymbol(const std::string &Name) final {
return ExternalLookupFtor(Name);
}
RuntimeDyld::SymbolInfo
findSymbolInLogicalDylib(const std::string &Name) final {
return DylibLookupFtor(Name);
}
private:
ExternalLookupFtorT ExternalLookupFtor;
DylibLookupFtorT DylibLookupFtor;
};
template <typename ExternalLookupFtorT,
typename DylibLookupFtorT>
std::unique_ptr<LambdaResolver<ExternalLookupFtorT, DylibLookupFtorT>>
createLambdaResolver(ExternalLookupFtorT ExternalLookupFtor,
DylibLookupFtorT DylibLookupFtor) {
typedef LambdaResolver<ExternalLookupFtorT, DylibLookupFtorT> LR;
return make_unique<LR>(std::move(ExternalLookupFtor),
std::move(DylibLookupFtor));
}
} // End namespace orc.
} // End namespace llvm.
#endif // LLVM_EXECUTIONENGINE_ORC_LAMBDARESOLVER_H
|
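A minimal sketch of createLambdaResolver in a common configuration: external lookups fall back to the host process's symbol table, and logical-dylib lookups report "not found". Assumes the host's own symbols are visible (e.g. the program was linked with -rdynamic) where that matters.

```cpp
#include "llvm/ExecutionEngine/JITSymbolFlags.h"
#include "llvm/ExecutionEngine/Orc/LambdaResolver.h"
#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
#include <memory>
#include <string>

using namespace llvm;
using namespace llvm::orc;

std::unique_ptr<RuntimeDyld::SymbolResolver> makeProcessSymbolResolver() {
  return createLambdaResolver(
      // External lookups: try the host process.
      [](const std::string &Name) -> RuntimeDyld::SymbolInfo {
        if (auto Addr = RTDyldMemoryManager::getSymbolAddressInProcess(Name))
          return RuntimeDyld::SymbolInfo(Addr, JITSymbolFlags::Exported);
        return RuntimeDyld::SymbolInfo(nullptr);
      },
      // Logical-dylib lookups: nothing to offer.
      [](const std::string &Name) -> RuntimeDyld::SymbolInfo {
        return RuntimeDyld::SymbolInfo(nullptr);
      });
}
```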
0 | repos/DirectXShaderCompiler/include/llvm/ExecutionEngine | repos/DirectXShaderCompiler/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h | //===-- ExecutionUtils.h - Utilities for executing code in Orc --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Contains utilities for executing code in Orc.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_ORC_EXECUTIONUTILS_H
#define LLVM_EXECUTIONENGINE_ORC_EXECUTIONUTILS_H
#include "JITSymbol.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include <vector>
namespace llvm {
class ConstantArray;
class GlobalVariable;
class Function;
class Module;
class Value;
namespace orc {
/// @brief This iterator provides a convenient way to iterate over the elements
/// of an llvm.global_ctors/llvm.global_dtors instance.
///
/// The easiest way to get hold of instances of this class is to use the
/// getConstructors/getDestructors functions.
class CtorDtorIterator {
public:
/// @brief Accessor for an element of the global_ctors/global_dtors array.
///
/// This class provides a read-only view of the element with any casts on
/// the function stripped away.
struct Element {
Element(unsigned Priority, const Function *Func, const Value *Data)
: Priority(Priority), Func(Func), Data(Data) {}
unsigned Priority;
const Function *Func;
const Value *Data;
};
/// @brief Construct an iterator instance. If End is true then this iterator
/// acts as the end of the range, otherwise it is the beginning.
CtorDtorIterator(const GlobalVariable *GV, bool End);
/// @brief Test iterators for equality.
bool operator==(const CtorDtorIterator &Other) const;
/// @brief Test iterators for inequality.
bool operator!=(const CtorDtorIterator &Other) const;
/// @brief Pre-increment iterator.
CtorDtorIterator& operator++();
/// @brief Post-increment iterator.
CtorDtorIterator operator++(int);
/// @brief Dereference iterator. The resulting value provides a read-only view
/// of this element of the global_ctors/global_dtors list.
Element operator*() const;
private:
const ConstantArray *InitList;
unsigned I;
};
/// @brief Create an iterator range over the entries of the llvm.global_ctors
/// array.
iterator_range<CtorDtorIterator> getConstructors(const Module &M);
/// @brief Create an iterator range over the entries of the llvm.global_dtors
/// array.
iterator_range<CtorDtorIterator> getDestructors(const Module &M);
/// @brief Convenience class for recording constructor/destructor names for
/// later execution.
template <typename JITLayerT>
class CtorDtorRunner {
public:
/// @brief Construct a CtorDtorRunner for the given list of (already mangled)
/// constructor/destructor names and module set handle.
CtorDtorRunner(std::vector<std::string> CtorDtorNames,
typename JITLayerT::ModuleSetHandleT H)
: CtorDtorNames(std::move(CtorDtorNames)), H(H) {}
/// @brief Run the recorded constructors/destructors through the given JIT
/// layer.
bool runViaLayer(JITLayerT &JITLayer) const {
typedef void (*CtorDtorTy)();
bool Error = false;
for (const auto &CtorDtorName : CtorDtorNames)
if (auto CtorDtorSym = JITLayer.findSymbolIn(H, CtorDtorName, false)) {
CtorDtorTy CtorDtor =
reinterpret_cast<CtorDtorTy>(
static_cast<uintptr_t>(CtorDtorSym.getAddress()));
CtorDtor();
} else
Error = true;
return !Error;
}
private:
std::vector<std::string> CtorDtorNames;
typename JITLayerT::ModuleSetHandleT H;
};
/// @brief Support class for static dtor execution. For hosted (in-process) JITs
/// only!
///
/// If a __cxa_atexit function isn't found C++ programs that use static
/// destructors will fail to link. However, we don't want to use the host
/// process's __cxa_atexit, because it will schedule JIT'd destructors to run
/// after the JIT has been torn down, which is no good. This class makes it easy
/// to override __cxa_atexit (and the related __dso_handle).
///
/// To use, clients should manually call searchOverrides from their symbol
/// resolver. This should generally be done after attempting symbol resolution
/// inside the JIT, but before searching the host process's symbol table. When
/// the client determines that destructors should be run (generally at JIT
/// teardown or after a return from main), the runDestructors method should be
/// called.
class LocalCXXRuntimeOverrides {
public:
/// Create a runtime-overrides class.
template <typename MangleFtorT>
LocalCXXRuntimeOverrides(const MangleFtorT &Mangle) {
addOverride(Mangle("__dso_handle"), toTargetAddress(&DSOHandleOverride));
addOverride(Mangle("__cxa_atexit"), toTargetAddress(&CXAAtExitOverride));
}
/// Search overridden symbols.
RuntimeDyld::SymbolInfo searchOverrides(const std::string &Name) {
auto I = CXXRuntimeOverrides.find(Name);
if (I != CXXRuntimeOverrides.end())
return RuntimeDyld::SymbolInfo(I->second, JITSymbolFlags::Exported);
return nullptr;
}
/// Run any destructors recorded by the overridden __cxa_atexit function
/// (CXAAtExitOverride).
void runDestructors();
private:
template <typename PtrTy>
TargetAddress toTargetAddress(PtrTy* P) {
return static_cast<TargetAddress>(reinterpret_cast<uintptr_t>(P));
}
void addOverride(const std::string &Name, TargetAddress Addr) {
CXXRuntimeOverrides.insert(std::make_pair(Name, Addr));
}
StringMap<TargetAddress> CXXRuntimeOverrides;
typedef void (*DestructorPtr)(void*);
typedef std::pair<DestructorPtr, void*> CXXDestructorDataPair;
typedef std::vector<CXXDestructorDataPair> CXXDestructorDataPairList;
CXXDestructorDataPairList DSOHandleOverride;
static int CXAAtExitOverride(DestructorPtr Destructor, void *Arg,
void *DSOHandle);
};
} // End namespace orc.
} // End namespace llvm.
#endif // LLVM_EXECUTIONENGINE_ORC_EXECUTIONUTILS_H
|
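A minimal sketch of running a module's static constructors through a JIT layer with CtorDtorRunner, assuming the module set has already been added to the layer as handle H; the function name is illustrative. Names are mangled the same way as in the Mangle helper seen in CompileOnDemandLayer.h above.

```cpp
#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"
#include <string>
#include <vector>

using namespace llvm;
using namespace llvm::orc;

template <typename JITLayerT>
bool runStaticConstructors(JITLayerT &JITLayer,
                           typename JITLayerT::ModuleSetHandleT H, Module &M) {
  // Collect the (mangled) names of the functions in llvm.global_ctors.
  std::vector<std::string> CtorNames;
  for (auto Ctor : getConstructors(M)) {
    if (!Ctor.Func)
      continue;                       // skip null entries
    std::string Mangled;
    raw_string_ostream S(Mangled);
    Mangler::getNameWithPrefix(S, Ctor.Func->getName(), M.getDataLayout());
    CtorNames.push_back(S.str());
  }
  // Look each one up in the already-added module set and call it.
  CtorDtorRunner<JITLayerT> Runner(std::move(CtorNames), H);
  return Runner.runViaLayer(JITLayer); // false if any ctor wasn't found
}
```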
0 | repos/DirectXShaderCompiler/include/llvm/ExecutionEngine | repos/DirectXShaderCompiler/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h | //===-- IndirectionUtils.h - Utilities for adding indirections --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Contains utilities for adding indirections and breaking up modules.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_ORC_INDIRECTIONUTILS_H
#define LLVM_EXECUTIONENGINE_ORC_INDIRECTIONUTILS_H
#include "JITSymbol.h"
#include "LambdaResolver.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <sstream>
namespace llvm {
namespace orc {
/// @brief Base class for JITLayer independent aspects of
/// JITCompileCallbackManager.
class JITCompileCallbackManagerBase {
public:
typedef std::function<TargetAddress()> CompileFtor;
/// @brief Handle to a newly created compile callback. Can be used to get an
/// IR constant representing the address of the trampoline, and to set
/// the compile action for the callback.
class CompileCallbackInfo {
public:
CompileCallbackInfo(TargetAddress Addr, CompileFtor &Compile)
: Addr(Addr), Compile(Compile) {}
TargetAddress getAddress() const { return Addr; }
void setCompileAction(CompileFtor Compile) {
this->Compile = std::move(Compile);
}
private:
TargetAddress Addr;
CompileFtor &Compile;
};
/// @brief Construct a JITCompileCallbackManagerBase.
/// @param ErrorHandlerAddress The address of an error handler in the target
/// process to be used if a compile callback fails.
/// @param NumTrampolinesPerBlock Number of trampolines to emit if there is no
/// available trampoline when getCompileCallback is
/// called.
JITCompileCallbackManagerBase(TargetAddress ErrorHandlerAddress,
unsigned NumTrampolinesPerBlock)
: ErrorHandlerAddress(ErrorHandlerAddress),
NumTrampolinesPerBlock(NumTrampolinesPerBlock) {}
virtual ~JITCompileCallbackManagerBase() {}
/// @brief Execute the callback for the given trampoline id. Called by the JIT
/// to compile functions on demand.
TargetAddress executeCompileCallback(TargetAddress TrampolineAddr) {
auto I = ActiveTrampolines.find(TrampolineAddr);
// FIXME: Also raise an error in the Orc error-handler when we finally have
// one.
if (I == ActiveTrampolines.end())
return ErrorHandlerAddress;
// Found a callback handler. Yank this trampoline out of the active list and
// put it back in the available trampolines list, then try to run the
// handler's compile and update actions.
// Moving the trampoline ID back to the available list first means there's at
// least one available trampoline if the compile action triggers a request for
// a new one.
auto Compile = std::move(I->second);
ActiveTrampolines.erase(I);
AvailableTrampolines.push_back(TrampolineAddr);
if (auto Addr = Compile())
return Addr;
return ErrorHandlerAddress;
}
/// @brief Reserve a compile callback.
virtual CompileCallbackInfo getCompileCallback(LLVMContext &Context) = 0;
/// @brief Get a CompileCallbackInfo for an existing callback.
CompileCallbackInfo getCompileCallbackInfo(TargetAddress TrampolineAddr) {
auto I = ActiveTrampolines.find(TrampolineAddr);
assert(I != ActiveTrampolines.end() && "Not an active trampoline.");
return CompileCallbackInfo(I->first, I->second);
}
/// @brief Release a compile callback.
///
/// Note: Callbacks are auto-released after they execute. This method should
/// only be called to manually release a callback that is not going to
/// execute.
void releaseCompileCallback(TargetAddress TrampolineAddr) {
auto I = ActiveTrampolines.find(TrampolineAddr);
assert(I != ActiveTrampolines.end() && "Not an active trampoline.");
ActiveTrampolines.erase(I);
AvailableTrampolines.push_back(TrampolineAddr);
}
protected:
TargetAddress ErrorHandlerAddress;
unsigned NumTrampolinesPerBlock;
typedef std::map<TargetAddress, CompileFtor> TrampolineMapT;
TrampolineMapT ActiveTrampolines;
std::vector<TargetAddress> AvailableTrampolines;
};
/// @brief Manage compile callbacks.
template <typename JITLayerT, typename TargetT>
class JITCompileCallbackManager : public JITCompileCallbackManagerBase {
public:
/// @brief Construct a JITCompileCallbackManager.
/// @param JIT JIT layer to emit callback trampolines, etc. into.
/// @param Context LLVMContext to use for trampoline & resolve block modules.
/// @param ErrorHandlerAddress The address of an error handler in the target
/// process to be used if a compile callback fails.
/// @param NumTrampolinesPerBlock Number of trampolines to allocate whenever
/// there is no existing callback trampoline.
/// (Trampolines are allocated in blocks for
/// efficiency.)
JITCompileCallbackManager(JITLayerT &JIT, RuntimeDyld::MemoryManager &MemMgr,
LLVMContext &Context,
TargetAddress ErrorHandlerAddress,
unsigned NumTrampolinesPerBlock)
: JITCompileCallbackManagerBase(ErrorHandlerAddress,
NumTrampolinesPerBlock),
JIT(JIT), MemMgr(MemMgr) {
emitResolverBlock(Context);
}
/// @brief Get/create a compile callback with the given signature.
CompileCallbackInfo getCompileCallback(LLVMContext &Context) final {
TargetAddress TrampolineAddr = getAvailableTrampolineAddr(Context);
auto &Compile = this->ActiveTrampolines[TrampolineAddr];
return CompileCallbackInfo(TrampolineAddr, Compile);
}
private:
std::vector<std::unique_ptr<Module>>
SingletonSet(std::unique_ptr<Module> M) {
std::vector<std::unique_ptr<Module>> Ms;
Ms.push_back(std::move(M));
return Ms;
}
void emitResolverBlock(LLVMContext &Context) {
std::unique_ptr<Module> M(new Module("resolver_block_module",
Context));
TargetT::insertResolverBlock(*M, *this);
auto NonResolver =
createLambdaResolver(
[](const std::string &Name) -> RuntimeDyld::SymbolInfo {
llvm_unreachable("External symbols in resolver block?");
},
[](const std::string &Name) -> RuntimeDyld::SymbolInfo {
llvm_unreachable("Dylib symbols in resolver block?");
});
auto H = JIT.addModuleSet(SingletonSet(std::move(M)), &MemMgr,
std::move(NonResolver));
JIT.emitAndFinalize(H);
auto ResolverBlockSymbol =
JIT.findSymbolIn(H, TargetT::ResolverBlockName, false);
assert(ResolverBlockSymbol && "Failed to insert resolver block");
ResolverBlockAddr = ResolverBlockSymbol.getAddress();
}
TargetAddress getAvailableTrampolineAddr(LLVMContext &Context) {
if (this->AvailableTrampolines.empty())
grow(Context);
assert(!this->AvailableTrampolines.empty() &&
"Failed to grow available trampolines.");
TargetAddress TrampolineAddr = this->AvailableTrampolines.back();
this->AvailableTrampolines.pop_back();
return TrampolineAddr;
}
void grow(LLVMContext &Context) {
assert(this->AvailableTrampolines.empty() && "Growing prematurely?");
std::unique_ptr<Module> M(new Module("trampoline_block", Context));
auto GetLabelName =
TargetT::insertCompileCallbackTrampolines(*M, ResolverBlockAddr,
this->NumTrampolinesPerBlock,
this->ActiveTrampolines.size());
auto NonResolver =
createLambdaResolver(
[](const std::string &Name) -> RuntimeDyld::SymbolInfo {
llvm_unreachable("External symbols in trampoline block?");
},
[](const std::string &Name) -> RuntimeDyld::SymbolInfo {
llvm_unreachable("Dylib symbols in trampoline block?");
});
auto H = JIT.addModuleSet(SingletonSet(std::move(M)), &MemMgr,
std::move(NonResolver));
JIT.emitAndFinalize(H);
for (unsigned I = 0; I < this->NumTrampolinesPerBlock; ++I) {
std::string Name = GetLabelName(I);
auto TrampolineSymbol = JIT.findSymbolIn(H, Name, false);
assert(TrampolineSymbol && "Failed to emit trampoline.");
this->AvailableTrampolines.push_back(TrampolineSymbol.getAddress());
}
}
JITLayerT &JIT;
RuntimeDyld::MemoryManager &MemMgr;
TargetAddress ResolverBlockAddr;
};
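// Example (illustrative sketch): requesting a callback from an existing
// manager CCMgr. CompileCallbackInfo is assumed to expose getAddress() and
// setCompileAction() as declared earlier in this header; FnBodyAddr stands in
// for the address an actual compile action would return.
//
// auto CCInfo = CCMgr.getCompileCallback(Context);
// CCInfo.setCompileAction([&]() -> TargetAddress {
// // JIT the real function body here and return its address.
// return FnBodyAddr;
// });
// // Point the function's stub (see makeStub below) at CCInfo.getAddress().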
/// @brief Build a function pointer of FunctionType with the given constant
/// address.
///
/// Usage example: Turn a trampoline address into a function pointer constant
/// for use in a stub.
Constant* createIRTypedAddress(FunctionType &FT, TargetAddress Addr);
/// @brief Create a function pointer with the given type, name, and initializer
/// in the given Module.
GlobalVariable* createImplPointer(PointerType &PT, Module &M,
const Twine &Name, Constant *Initializer);
/// @brief Turn a function declaration into a stub function that makes an
/// indirect call using the given function pointer.
void makeStub(Function &F, GlobalVariable &ImplPointer);
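// Example (illustrative sketch): redirect a function declaration F in module M
// through an implementation pointer that initially targets TrampolineAddr.
//
// Constant *Init = createIRTypedAddress(*F.getFunctionType(), TrampolineAddr);
// GlobalVariable *ImplPtr =
// createImplPointer(*cast<PointerType>(F.getType()), M,
// F.getName() + "$impl", Init);
// makeStub(F, *ImplPtr);
//
// Calls to F now go through ImplPtr, which can later be repointed at the
// compiled implementation.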
/// @brief Raise linkage types and rename as necessary to ensure that all
/// symbols are accessible for other modules.
///
/// This should be called before partitioning a module to ensure that the
/// partitions retain access to each other's symbols.
void makeAllSymbolsExternallyAccessible(Module &M);
/// @brief Clone a function declaration into a new module.
///
/// This function can be used as the first step towards creating a callback
/// stub (see makeStub), or moving a function body (see moveFunctionBody).
///
/// If the VMap argument is non-null, a mapping will be added between F and
/// the new declaration, and between each of F's arguments and the new
/// declaration's arguments. This map can then be passed in to moveFunction to
/// move the function body if required. Note: When moving functions between
/// modules with these utilities, all decls should be cloned (and added to a
/// single VMap) before any bodies are moved. This will ensure that references
/// between functions all refer to the versions in the new module.
Function* cloneFunctionDecl(Module &Dst, const Function &F,
ValueToValueMapTy *VMap = nullptr);
/// @brief Move the body of function 'F' to a cloned function declaration in a
/// different module (See related cloneFunctionDecl).
///
/// If the target function declaration is not supplied via the NewF parameter
/// then it will be looked up via the VMap.
///
/// This will delete the body of function 'F' from its original parent module,
/// but leave its declaration.
void moveFunctionBody(Function &OrigF, ValueToValueMapTy &VMap,
ValueMaterializer *Materializer = nullptr,
Function *NewF = nullptr);
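// Example (illustrative sketch): move the definition of F from SrcM to DstM.
// As described above, clone all needed declarations into a single VMap before
// moving any bodies.
//
// ValueToValueMapTy VMap;
// Function *NewF = cloneFunctionDecl(DstM, F, &VMap);
// moveFunctionBody(F, VMap, /*Materializer=*/nullptr, NewF);
//
// DstM now owns the body of F; SrcM keeps only the declaration.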
/// @brief Clone a global variable declaration into a new module.
GlobalVariable* cloneGlobalVariableDecl(Module &Dst, const GlobalVariable &GV,
ValueToValueMapTy *VMap = nullptr);
/// @brief Move global variable GV from its parent module to cloned global
/// declaration in a different module.
///
/// If the target global declaration is not supplied via the NewGV parameter
/// then it will be looked up via the VMap.
///
/// This will delete the initializer of GV from its original parent module,
/// but leave its declaration.
void moveGlobalVariableInitializer(GlobalVariable &OrigGV,
ValueToValueMapTy &VMap,
ValueMaterializer *Materializer = nullptr,
GlobalVariable *NewGV = nullptr);
} // End namespace orc.
} // End namespace llvm.
#endif // LLVM_EXECUTIONENGINE_ORC_INDIRECTIONUTILS_H
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/LineEditor/LineEditor.h | //===-- llvm/LineEditor/LineEditor.h - line editor --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LINEEDITOR_LINEEDITOR_H
#define LLVM_LINEEDITOR_LINEEDITOR_H
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
#include <cstdio>
#include <memory>
#include <string>
#include <vector>
namespace llvm {
class LineEditor {
public:
/// Create a LineEditor object.
///
/// \param ProgName The name of the current program. Used to form a default
/// prompt.
/// \param HistoryPath Path to the file in which to store history data, if
/// possible.
/// \param In The input stream used by the editor.
/// \param Out The output stream used by the editor.
/// \param Err The error stream used by the editor.
LineEditor(StringRef ProgName, StringRef HistoryPath = "", FILE *In = stdin,
FILE *Out = stdout, FILE *Err = stderr);
~LineEditor();
/// Reads a line.
///
/// \return The line, or llvm::Optional<std::string>() on EOF.
llvm::Optional<std::string> readLine() const;
void saveHistory();
void loadHistory();
static std::string getDefaultHistoryPath(StringRef ProgName);
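// Example (illustrative sketch): a minimal read-eval loop for a tool named
// "mytool".
//
// LineEditor LE("mytool", LineEditor::getDefaultHistoryPath("mytool"));
// LE.loadHistory();
// while (llvm::Optional<std::string> Line = LE.readLine()) {
// // ... process *Line ...
// }
// LE.saveHistory();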
/// The action to perform upon a completion request.
struct CompletionAction {
enum ActionKind {
/// Insert Text at the cursor position.
AK_Insert,
/// Show Completions, or beep if the list is empty.
AK_ShowCompletions
};
ActionKind Kind;
/// The text to insert.
std::string Text;
/// The list of completions to show.
std::vector<std::string> Completions;
};
/// A possible completion at a given cursor position.
struct Completion {
Completion() {}
Completion(const std::string &TypedText, const std::string &DisplayText)
: TypedText(TypedText), DisplayText(DisplayText) {}
/// The text to insert. If the user has already input some of the
/// completion, this should only include the rest of the text.
std::string TypedText;
/// A description of this completion. This may be the completion itself, or
/// maybe a summary of its type or arguments.
std::string DisplayText;
};
/// Set the completer for this LineEditor. A completer is a function object
/// which takes arguments of type StringRef (the string to complete) and
/// size_t (the zero-based cursor position in the StringRef) and returns a
/// CompletionAction.
template <typename T> void setCompleter(T Comp) {
Completer.reset(new CompleterModel<T>(Comp));
}
/// Set the completer for this LineEditor to the given list completer.
/// A list completer is a function object which takes arguments of type
/// StringRef (the string to complete) and size_t (the zero-based cursor
/// position in the StringRef) and returns a std::vector<Completion>.
template <typename T> void setListCompleter(T Comp) {
Completer.reset(new ListCompleterModel<T>(Comp));
}
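// Example (illustrative sketch): complete a fixed set of commands, assuming
// the text before the cursor is the command being typed.
//
// LE.setListCompleter([](StringRef Buffer, size_t Pos) {
// StringRef Prefix = Buffer.substr(0, Pos);
// std::vector<LineEditor::Completion> Comps;
// for (StringRef Cmd : {"help", "history", "quit"})
// if (Cmd.startswith(Prefix))
// Comps.push_back(LineEditor::Completion(
// Cmd.drop_front(Prefix.size()).str(), Cmd.str()));
// return Comps;
// });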
/// Use the current completer to produce a CompletionAction for the given
/// completion request. If the current completer is a list completer, this
/// will return an AK_Insert CompletionAction if each completion has a common
/// prefix, or an AK_ShowCompletions CompletionAction otherwise.
///
/// \param Buffer The string to complete
/// \param Pos The zero-based cursor position in the StringRef
CompletionAction getCompletionAction(StringRef Buffer, size_t Pos) const;
const std::string &getPrompt() const { return Prompt; }
void setPrompt(const std::string &P) { Prompt = P; }
// Public so callbacks in LineEditor.cpp can use it.
struct InternalData;
private:
std::string Prompt;
std::string HistoryPath;
std::unique_ptr<InternalData> Data;
struct CompleterConcept {
virtual ~CompleterConcept();
virtual CompletionAction complete(StringRef Buffer, size_t Pos) const = 0;
};
struct ListCompleterConcept : CompleterConcept {
~ListCompleterConcept() override;
CompletionAction complete(StringRef Buffer, size_t Pos) const override;
static std::string getCommonPrefix(const std::vector<Completion> &Comps);
virtual std::vector<Completion> getCompletions(StringRef Buffer,
size_t Pos) const = 0;
};
template <typename T>
struct CompleterModel : CompleterConcept {
CompleterModel(T Value) : Value(Value) {}
CompletionAction complete(StringRef Buffer, size_t Pos) const override {
return Value(Buffer, Pos);
}
T Value;
};
template <typename T>
struct ListCompleterModel : ListCompleterConcept {
ListCompleterModel(T Value) : Value(Value) {}
std::vector<Completion> getCompletions(StringRef Buffer,
size_t Pos) const override {
return Value(Buffer, Pos);
}
T Value;
};
std::unique_ptr<const CompleterConcept> Completer;
};
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/LibDriver/LibDriver.h | //===- llvm/LibDriver/LibDriver.h - lib.exe-compatible driver ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Defines an interface to a lib.exe-compatible driver that also understands
// bitcode files. Used by llvm-lib and lld-link2 /lib.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIBDRIVER_LIBDRIVER_H
#define LLVM_LIBDRIVER_LIBDRIVER_H
#include "llvm/ADT/ArrayRef.h"
namespace llvm {
int libDriverMain(llvm::ArrayRef<const char*> Args);
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/DivergenceAnalysis.h | //===- llvm/Analysis/DivergenceAnalysis.h - Divergence Analysis -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The divergence analysis is an LLVM pass which can be used to find out
// if a branch instruction in a GPU program is divergent or not. It can help
// branch optimizations such as jump threading and loop unswitching to make
// better decisions.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/DenseSet.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"
namespace llvm {
class Value;
class DivergenceAnalysis : public FunctionPass {
public:
static char ID;
DivergenceAnalysis() : FunctionPass(ID) {
initializeDivergenceAnalysisPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnFunction(Function &F) override;
// Print all divergent branches in the function.
void print(raw_ostream &OS, const Module *) const override;
// Returns true if V is divergent.
bool isDivergent(const Value *V) const { return DivergentValues.count(V); }
// Returns true if V is uniform/non-divergent.
bool isUniform(const Value *V) const { return !isDivergent(V); }
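// Example (illustrative sketch) from a pass that has required this analysis
// via its getAnalysisUsage:
//
// const DivergenceAnalysis &DA = getAnalysis<DivergenceAnalysis>();
// if (auto *BI = dyn_cast<BranchInst>(BB.getTerminator()))
// if (BI->isConditional() && DA.isDivergent(BI->getCondition()))
// ; // The branch may diverge; avoid transforms that assume uniformity.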
private:
// Stores all divergent values.
DenseSet<const Value *> DivergentValues;
};
} // namespace llvm
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/AliasSetTracker.h | //===- llvm/Analysis/AliasSetTracker.h - Build Alias Sets -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines two classes: AliasSetTracker and AliasSet. These interfaces
// are used to classify a collection of pointer references into a maximal number
// of disjoint sets. Each AliasSet object constructed by the AliasSetTracker
// object refers to memory disjoint from the other sets.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_ALIASSETTRACKER_H
#define LLVM_ANALYSIS_ALIASSETTRACKER_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/ValueHandle.h"
#include <vector>
namespace llvm {
class AliasAnalysis;
class LoadInst;
class StoreInst;
class VAArgInst;
class AliasSetTracker;
class AliasSet;
class AliasSet : public ilist_node<AliasSet> {
friend class AliasSetTracker;
class PointerRec {
Value *Val; // The pointer this record corresponds to.
PointerRec **PrevInList, *NextInList;
AliasSet *AS;
uint64_t Size;
AAMDNodes AAInfo;
public:
PointerRec(Value *V)
: Val(V), PrevInList(nullptr), NextInList(nullptr), AS(nullptr), Size(0),
AAInfo(DenseMapInfo<AAMDNodes>::getEmptyKey()) {}
Value *getValue() const { return Val; }
PointerRec *getNext() const { return NextInList; }
bool hasAliasSet() const { return AS != nullptr; }
PointerRec** setPrevInList(PointerRec **PIL) {
PrevInList = PIL;
return &NextInList;
}
void updateSizeAndAAInfo(uint64_t NewSize, const AAMDNodes &NewAAInfo) {
if (NewSize > Size) Size = NewSize;
if (AAInfo == DenseMapInfo<AAMDNodes>::getEmptyKey())
// We don't have an AAInfo yet. Set it to NewAAInfo.
AAInfo = NewAAInfo;
else if (AAInfo != NewAAInfo)
// NewAAInfo conflicts with AAInfo.
AAInfo = DenseMapInfo<AAMDNodes>::getTombstoneKey();
}
uint64_t getSize() const { return Size; }
/// getAAInfo - Return the AAInfo, or null if there is no
/// information or conflicting information.
AAMDNodes getAAInfo() const {
// If we have missing or conflicting AAInfo, return null.
if (AAInfo == DenseMapInfo<AAMDNodes>::getEmptyKey() ||
AAInfo == DenseMapInfo<AAMDNodes>::getTombstoneKey())
return AAMDNodes();
return AAInfo;
}
AliasSet *getAliasSet(AliasSetTracker &AST) {
assert(AS && "No AliasSet yet!");
if (AS->Forward) {
AliasSet *OldAS = AS;
AS = OldAS->getForwardedTarget(AST);
AS->addRef();
OldAS->dropRef(AST);
}
return AS;
}
void setAliasSet(AliasSet *as) {
assert(!AS && "Already have an alias set!");
AS = as;
}
void eraseFromList() {
if (NextInList) NextInList->PrevInList = PrevInList;
*PrevInList = NextInList;
if (AS->PtrListEnd == &NextInList) {
AS->PtrListEnd = PrevInList;
assert(*AS->PtrListEnd == nullptr && "List not terminated right!");
}
delete this;
}
};
PointerRec *PtrList, **PtrListEnd; // Doubly linked list of nodes.
AliasSet *Forward; // Forwarding pointer.
// All instructions without a specific address in this alias set.
std::vector<AssertingVH<Instruction> > UnknownInsts;
// RefCount - Number of nodes pointing to this AliasSet plus the number of
// AliasSets forwarding to it.
unsigned RefCount : 28;
/// The kinds of access this alias set models.
///
/// We keep track of whether this alias set merely refers to the locations of
/// memory (and not any particular access), whether it modifies or references
/// the memory, or whether it does both. The lattice goes from "NoAccess" to
/// either RefAccess or ModAccess, then to ModRefAccess as necessary.
enum AccessLattice {
NoAccess = 0,
RefAccess = 1,
ModAccess = 2,
ModRefAccess = RefAccess | ModAccess
};
unsigned Access : 2;
/// The kind of alias relationship between pointers of the set.
///
/// These represent conservatively correct alias results between any members
/// of the set. We represent these independently of the values of alias
/// results in order to pack it into a single bit. Lattice goes from
/// MustAlias to MayAlias.
enum AliasLattice {
SetMustAlias = 0, SetMayAlias = 1
};
unsigned Alias : 1;
// Volatile - True if this alias set contains volatile loads or stores.
bool Volatile : 1;
void addRef() { ++RefCount; }
void dropRef(AliasSetTracker &AST) {
assert(RefCount >= 1 && "Invalid reference count detected!");
if (--RefCount == 0)
removeFromTracker(AST);
}
Instruction *getUnknownInst(unsigned i) const {
assert(i < UnknownInsts.size());
return UnknownInsts[i];
}
public:
/// Accessors...
bool isRef() const { return Access & RefAccess; }
bool isMod() const { return Access & ModAccess; }
bool isMustAlias() const { return Alias == SetMustAlias; }
bool isMayAlias() const { return Alias == SetMayAlias; }
// isVolatile - Return true if this alias set contains volatile loads or
// stores.
bool isVolatile() const { return Volatile; }
/// isForwardingAliasSet - Return true if this alias set should be ignored as
/// part of the AliasSetTracker object.
bool isForwardingAliasSet() const { return Forward; }
/// mergeSetIn - Merge the specified alias set into this alias set...
///
void mergeSetIn(AliasSet &AS, AliasSetTracker &AST);
// Alias Set iteration - Allow access to all of the pointer which are part of
// this alias set...
class iterator;
iterator begin() const { return iterator(PtrList); }
iterator end() const { return iterator(); }
bool empty() const { return PtrList == nullptr; }
void print(raw_ostream &OS) const;
void dump() const;
/// Define an iterator for alias sets... this is just a forward iterator.
class iterator {
PointerRec *CurNode;
public:
using iterator_category = std::forward_iterator_tag;
using value_type = PointerRec;
using difference_type = std::ptrdiff_t;
using pointer = value_type *;
using reference = value_type &;
explicit iterator(PointerRec *CN = nullptr) : CurNode(CN) {}
bool operator==(const iterator& x) const {
return CurNode == x.CurNode;
}
bool operator!=(const iterator& x) const { return !operator==(x); }
value_type &operator*() const {
assert(CurNode && "Dereferencing AliasSet.end()!");
return *CurNode;
}
value_type *operator->() const { return &operator*(); }
Value *getPointer() const { return CurNode->getValue(); }
uint64_t getSize() const { return CurNode->getSize(); }
AAMDNodes getAAInfo() const { return CurNode->getAAInfo(); }
iterator& operator++() { // Preincrement
assert(CurNode && "Advancing past AliasSet.end()!");
CurNode = CurNode->getNext();
return *this;
}
iterator operator++(int) { // Postincrement
iterator tmp = *this; ++*this; return tmp;
}
};
private:
// Can only be created by AliasSetTracker. Also, ilist creates one
// to serve as a sentinel.
friend struct ilist_sentinel_traits<AliasSet>;
AliasSet()
: PtrList(nullptr), PtrListEnd(&PtrList), Forward(nullptr), RefCount(0),
Access(NoAccess), Alias(SetMustAlias), Volatile(false) {
}
AliasSet(const AliasSet &AS) = delete;
void operator=(const AliasSet &AS) = delete;
PointerRec *getSomePointer() const {
return PtrList;
}
/// getForwardedTarget - Return the real alias set this represents. If this
/// has been merged with another set and is forwarding, return the ultimate
/// destination set. This also implements the union-find collapsing as well.
AliasSet *getForwardedTarget(AliasSetTracker &AST) {
if (!Forward) return this;
AliasSet *Dest = Forward->getForwardedTarget(AST);
if (Dest != Forward) {
Dest->addRef();
Forward->dropRef(AST);
Forward = Dest;
}
return Dest;
}
void removeFromTracker(AliasSetTracker &AST);
void addPointer(AliasSetTracker &AST, PointerRec &Entry, uint64_t Size,
const AAMDNodes &AAInfo,
bool KnownMustAlias = false);
void addUnknownInst(Instruction *I, AliasAnalysis &AA);
void removeUnknownInst(AliasSetTracker &AST, Instruction *I) {
bool WasEmpty = UnknownInsts.empty();
for (size_t i = 0, e = UnknownInsts.size(); i != e; ++i)
if (UnknownInsts[i] == I) {
UnknownInsts[i] = UnknownInsts.back();
UnknownInsts.pop_back();
--i; --e; // Revisit the moved entry.
}
if (!WasEmpty && UnknownInsts.empty())
dropRef(AST);
}
void setVolatile() { Volatile = true; }
public:
/// aliasesPointer - Return true if the specified pointer "may" (or must)
/// alias one of the members in the set.
///
bool aliasesPointer(const Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo,
AliasAnalysis &AA) const;
bool aliasesUnknownInst(const Instruction *Inst, AliasAnalysis &AA) const;
};
inline raw_ostream& operator<<(raw_ostream &OS, const AliasSet &AS) {
AS.print(OS);
return OS;
}
class AliasSetTracker {
/// CallbackVH - A CallbackVH to arrange for AliasSetTracker to be
/// notified whenever a Value is deleted.
class ASTCallbackVH : public CallbackVH {
AliasSetTracker *AST;
void deleted() override;
void allUsesReplacedWith(Value *) override;
public:
ASTCallbackVH(Value *V, AliasSetTracker *AST = nullptr);
ASTCallbackVH &operator=(Value *V);
};
/// ASTCallbackVHDenseMapInfo - Traits that tell DenseMap how to compare and
/// hash the value handle.
struct ASTCallbackVHDenseMapInfo : public DenseMapInfo<Value *> {};
AliasAnalysis &AA;
ilist<AliasSet> AliasSets;
typedef DenseMap<ASTCallbackVH, AliasSet::PointerRec*,
ASTCallbackVHDenseMapInfo>
PointerMapType;
// Map from pointers to their node
PointerMapType PointerMap;
public:
/// AliasSetTracker ctor - Create an empty collection of AliasSets, and use
/// the specified alias analysis object to disambiguate load and store
/// addresses.
explicit AliasSetTracker(AliasAnalysis &aa) : AA(aa) {}
~AliasSetTracker() { clear(); }
/// add methods - These methods are used to add different types of
/// instructions to the alias sets. Adding a new instruction can result in
/// one of three actions happening:
///
/// 1. If the instruction doesn't alias any other sets, create a new set.
/// 2. If the instruction aliases exactly one set, add it to the set
/// 3. If the instruction aliases multiple sets, merge the sets, and add
/// the instruction to the result.
///
/// These methods return true if inserting the instruction resulted in the
/// addition of a new alias set (i.e., the pointer did not alias anything).
///
bool add(Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo); // Add a loc.
bool add(LoadInst *LI);
bool add(StoreInst *SI);
bool add(VAArgInst *VAAI);
bool add(Instruction *I); // Dispatch to one of the other add methods...
void add(BasicBlock &BB); // Add all instructions in basic block
void add(const AliasSetTracker &AST); // Add alias relations from another AST
bool addUnknown(Instruction *I);
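// Example (illustrative sketch): build alias sets for function F with an
// initialized AliasAnalysis AA, then visit the resulting sets.
//
// AliasSetTracker AST(AA);
// for (BasicBlock &BB : F)
// AST.add(BB);
// for (AliasSet &AS : AST)
// if (!AS.isForwardingAliasSet() && AS.isMod())
// ; // Pointers in AS may be written to somewhere in F.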
/// remove methods - These methods are used to remove all entries that might
/// be aliased by the specified instruction. These methods return true if any
/// alias sets were eliminated.
// Remove a location
bool remove(Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo);
bool remove(LoadInst *LI);
bool remove(StoreInst *SI);
bool remove(VAArgInst *VAAI);
bool remove(Instruction *I);
void remove(AliasSet &AS);
bool removeUnknown(Instruction *I);
void clear();
/// getAliasSets - Return the alias sets that are active.
///
const ilist<AliasSet> &getAliasSets() const { return AliasSets; }
/// getAliasSetForPointer - Return the alias set that the specified pointer
/// lives in. If the New argument is non-null, this method sets the value to
/// true if a new alias set is created to contain the pointer (because the
/// pointer didn't alias anything).
AliasSet &getAliasSetForPointer(Value *P, uint64_t Size,
const AAMDNodes &AAInfo,
bool *New = nullptr);
/// getAliasSetForPointerIfExists - Return the alias set containing the
/// location specified if one exists, otherwise return null.
AliasSet *getAliasSetForPointerIfExists(const Value *P, uint64_t Size,
const AAMDNodes &AAInfo) {
return findAliasSetForPointer(P, Size, AAInfo);
}
/// containsPointer - Return true if the specified location is represented by
/// this alias set, false otherwise. This does not modify the AST object or
/// alias sets.
bool containsPointer(const Value *P, uint64_t Size,
const AAMDNodes &AAInfo) const;
/// Return true if the specified instruction "may" (or must) alias one of the
/// members in any of the sets.
bool containsUnknown(const Instruction *I) const;
/// getAliasAnalysis - Return the underlying alias analysis object used by
/// this tracker.
AliasAnalysis &getAliasAnalysis() const { return AA; }
/// deleteValue method - This method is used to remove a pointer value from
/// the AliasSetTracker entirely. It should be used when an instruction is
/// deleted from the program to update the AST. If you don't use this, you
/// would have dangling pointers to deleted instructions.
///
void deleteValue(Value *PtrVal);
/// copyValue - This method should be used whenever a preexisting value in the
/// program is copied or cloned, introducing a new value. Note that it is ok
/// for clients that use this method to introduce the same value multiple
/// times: if the tracker already knows about a value, it will ignore the
/// request.
///
void copyValue(Value *From, Value *To);
typedef ilist<AliasSet>::iterator iterator;
typedef ilist<AliasSet>::const_iterator const_iterator;
const_iterator begin() const { return AliasSets.begin(); }
const_iterator end() const { return AliasSets.end(); }
iterator begin() { return AliasSets.begin(); }
iterator end() { return AliasSets.end(); }
void print(raw_ostream &OS) const;
void dump() const;
private:
friend class AliasSet;
void removeAliasSet(AliasSet *AS);
// getEntryFor - Just like operator[] on the map, except that it creates an
// entry for the pointer if it doesn't already exist.
AliasSet::PointerRec &getEntryFor(Value *V) {
AliasSet::PointerRec *&Entry = PointerMap[ASTCallbackVH(V, this)];
if (!Entry)
Entry = new AliasSet::PointerRec(V);
return *Entry;
}
AliasSet &addPointer(Value *P, uint64_t Size, const AAMDNodes &AAInfo,
AliasSet::AccessLattice E,
bool &NewSet) {
NewSet = false;
AliasSet &AS = getAliasSetForPointer(P, Size, AAInfo, &NewSet);
AS.Access |= E;
return AS;
}
AliasSet *findAliasSetForPointer(const Value *Ptr, uint64_t Size,
const AAMDNodes &AAInfo);
AliasSet *findAliasSetForUnknownInst(Instruction *Inst);
};
inline raw_ostream& operator<<(raw_ostream &OS, const AliasSetTracker &AST) {
AST.print(OS);
return OS;
}
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/LoopInfoImpl.h | //===- llvm/Analysis/LoopInfoImpl.h - Natural Loop Calculator ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the generic implementation of LoopInfo used for both Loops and
// MachineLoops.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_LOOPINFOIMPL_H
#define LLVM_ANALYSIS_LOOPINFOIMPL_H
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Dominators.h"
namespace llvm {
//===----------------------------------------------------------------------===//
// APIs for simple analysis of the loop. See header notes.
/// getExitingBlocks - Return all blocks inside the loop that have successors
/// outside of the loop. These are the blocks _inside of the current loop_
/// which branch out. The returned list is always unique.
///
template<class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::
getExitingBlocks(SmallVectorImpl<BlockT *> &ExitingBlocks) const {
typedef GraphTraits<BlockT*> BlockTraits;
for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI)
for (typename BlockTraits::ChildIteratorType I =
BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI);
I != E; ++I)
if (!contains(*I)) {
// Not in current loop? It must be an exit block.
ExitingBlocks.push_back(*BI);
break;
}
}
/// getExitingBlock - If getExitingBlocks would return exactly one block,
/// return that block. Otherwise return null.
template<class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getExitingBlock() const {
SmallVector<BlockT*, 8> ExitingBlocks;
getExitingBlocks(ExitingBlocks);
if (ExitingBlocks.size() == 1)
return ExitingBlocks[0];
return nullptr;
}
/// getExitBlocks - Return all of the successor blocks of this loop. These
/// are the blocks _outside of the current loop_ which are branched to.
///
template<class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::
getExitBlocks(SmallVectorImpl<BlockT*> &ExitBlocks) const {
typedef GraphTraits<BlockT*> BlockTraits;
for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI)
for (typename BlockTraits::ChildIteratorType I =
BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI);
I != E; ++I)
if (!contains(*I))
// Not in current loop? It must be an exit block.
ExitBlocks.push_back(*I);
}
/// getExitBlock - If getExitBlocks would return exactly one block,
/// return that block. Otherwise return null.
template<class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getExitBlock() const {
SmallVector<BlockT*, 8> ExitBlocks;
getExitBlocks(ExitBlocks);
if (ExitBlocks.size() == 1)
return ExitBlocks[0];
return nullptr;
}
/// getExitEdges - Return all pairs of (_inside_block_,_outside_block_).
template<class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::
getExitEdges(SmallVectorImpl<Edge> &ExitEdges) const {
typedef GraphTraits<BlockT*> BlockTraits;
for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI)
for (typename BlockTraits::ChildIteratorType I =
BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI);
I != E; ++I)
if (!contains(*I))
// Not in current loop? It must be an exit block.
ExitEdges.push_back(Edge(*BI, *I));
}
/// getLoopPreheader - If there is a preheader for this loop, return it. A
/// loop has a preheader if there is only one edge to the header of the loop
/// from outside of the loop. If this is the case, the block branching to the
/// header of the loop is the preheader node.
///
/// This method returns null if there is no preheader for the loop.
///
template<class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getLoopPreheader() const {
// Keep track of nodes outside the loop branching to the header...
BlockT *Out = getLoopPredecessor();
if (!Out) return nullptr;
// Make sure there is only one exit out of the preheader.
typedef GraphTraits<BlockT*> BlockTraits;
typename BlockTraits::ChildIteratorType SI = BlockTraits::child_begin(Out);
++SI;
if (SI != BlockTraits::child_end(Out))
return nullptr; // Multiple exits from the block, must not be a preheader.
// The predecessor has exactly one successor, so it is a preheader.
return Out;
}
/// getLoopPredecessor - If the given loop's header has exactly one unique
/// predecessor outside the loop, return it. Otherwise return null.
/// This is less strict than the loop "preheader" concept, which requires
/// the predecessor to have exactly one successor.
///
template<class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getLoopPredecessor() const {
// Keep track of nodes outside the loop branching to the header...
BlockT *Out = nullptr;
// Loop over the predecessors of the header node...
BlockT *Header = getHeader();
typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
for (typename InvBlockTraits::ChildIteratorType PI =
InvBlockTraits::child_begin(Header),
PE = InvBlockTraits::child_end(Header); PI != PE; ++PI) {
typename InvBlockTraits::NodeType *N = *PI;
if (!contains(N)) { // If the block is not in the loop...
if (Out && Out != N)
return nullptr; // Multiple predecessors outside the loop
Out = N;
}
}
  // The header of a loop must have at least one predecessor from outside the loop.
assert(Out && "Header of loop has no predecessors from outside loop?");
return Out;
}
/// getLoopLatch - If there is a single latch block for this loop, return it.
/// A latch block is a block that contains a branch back to the header.
template<class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getLoopLatch() const {
BlockT *Header = getHeader();
typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
typename InvBlockTraits::ChildIteratorType PI =
InvBlockTraits::child_begin(Header);
typename InvBlockTraits::ChildIteratorType PE =
InvBlockTraits::child_end(Header);
BlockT *Latch = nullptr;
for (; PI != PE; ++PI) {
typename InvBlockTraits::NodeType *N = *PI;
if (contains(N)) {
if (Latch) return nullptr;
Latch = N;
}
}
return Latch;
}
//===----------------------------------------------------------------------===//
// APIs for updating loop information after changing the CFG
//
/// addBasicBlockToLoop - This method is used by other analyses to update loop
/// information. NewBB is set to be a new member of the current loop.
/// Because of this, it is added as a member of all parent loops, and is added
/// to the specified LoopInfo object as being in the current basic block. It
/// is not valid to replace the loop header with this method.
///
template<class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::
addBasicBlockToLoop(BlockT *NewBB, LoopInfoBase<BlockT, LoopT> &LIB) {
assert((Blocks.empty() || LIB[getHeader()] == this) &&
"Incorrect LI specified for this loop!");
assert(NewBB && "Cannot add a null basic block to the loop!");
assert(!LIB[NewBB] && "BasicBlock already in the loop!");
LoopT *L = static_cast<LoopT *>(this);
// Add the loop mapping to the LoopInfo object...
LIB.BBMap[NewBB] = L;
// Add the basic block to this loop and all parent loops...
while (L) {
L->addBlockEntry(NewBB);
L = L->getParentLoop();
}
}
/// replaceChildLoopWith - This is used when splitting loops up. It replaces
/// the OldChild entry in our children list with NewChild, and updates the
/// parent pointer of OldChild to be null and the NewChild to be this loop.
/// This updates the loop depth of the new child.
template<class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::
replaceChildLoopWith(LoopT *OldChild, LoopT *NewChild) {
assert(OldChild->ParentLoop == this && "This loop is already broken!");
assert(!NewChild->ParentLoop && "NewChild already has a parent!");
typename std::vector<LoopT *>::iterator I =
std::find(SubLoops.begin(), SubLoops.end(), OldChild);
assert(I != SubLoops.end() && "OldChild not in loop!");
*I = NewChild;
OldChild->ParentLoop = nullptr;
NewChild->ParentLoop = static_cast<LoopT *>(this);
}
/// verifyLoop - Verify loop structure
template<class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::verifyLoop() const {
#ifndef NDEBUG
assert(!Blocks.empty() && "Loop header is missing");
// Setup for using a depth-first iterator to visit every block in the loop.
SmallVector<BlockT*, 8> ExitBBs;
getExitBlocks(ExitBBs);
llvm::SmallPtrSet<BlockT*, 8> VisitSet;
VisitSet.insert(ExitBBs.begin(), ExitBBs.end());
df_ext_iterator<BlockT*, llvm::SmallPtrSet<BlockT*, 8> >
BI = df_ext_begin(getHeader(), VisitSet),
BE = df_ext_end(getHeader(), VisitSet);
// Keep track of the number of BBs visited.
unsigned NumVisited = 0;
// Check the individual blocks.
for ( ; BI != BE; ++BI) {
BlockT *BB = *BI;
bool HasInsideLoopSuccs = false;
bool HasInsideLoopPreds = false;
SmallVector<BlockT *, 2> OutsideLoopPreds;
typedef GraphTraits<BlockT*> BlockTraits;
for (typename BlockTraits::ChildIteratorType SI =
BlockTraits::child_begin(BB), SE = BlockTraits::child_end(BB);
SI != SE; ++SI)
if (contains(*SI)) {
HasInsideLoopSuccs = true;
break;
}
typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
for (typename InvBlockTraits::ChildIteratorType PI =
InvBlockTraits::child_begin(BB), PE = InvBlockTraits::child_end(BB);
PI != PE; ++PI) {
BlockT *N = *PI;
if (contains(N))
HasInsideLoopPreds = true;
else
OutsideLoopPreds.push_back(N);
}
if (BB == getHeader()) {
assert(!OutsideLoopPreds.empty() && "Loop is unreachable!");
} else if (!OutsideLoopPreds.empty()) {
// A non-header loop shouldn't be reachable from outside the loop,
// though it is permitted if the predecessor is not itself actually
// reachable.
BlockT *EntryBB = BB->getParent()->begin();
for (BlockT *CB : depth_first(EntryBB))
for (unsigned i = 0, e = OutsideLoopPreds.size(); i != e; ++i)
assert(CB != OutsideLoopPreds[i] &&
"Loop has multiple entry points!");
}
assert(HasInsideLoopPreds && "Loop block has no in-loop predecessors!");
assert(HasInsideLoopSuccs && "Loop block has no in-loop successors!");
assert(BB != getHeader()->getParent()->begin() &&
"Loop contains function entry block!");
NumVisited++;
}
assert(NumVisited == getNumBlocks() && "Unreachable block in loop");
// Check the subloops.
for (iterator I = begin(), E = end(); I != E; ++I)
// Each block in each subloop should be contained within this loop.
for (block_iterator BI = (*I)->block_begin(), BE = (*I)->block_end();
BI != BE; ++BI) {
assert(contains(*BI) &&
"Loop does not contain all the blocks of a subloop!");
}
// Check the parent loop pointer.
if (ParentLoop) {
assert(std::find(ParentLoop->begin(), ParentLoop->end(), this) !=
ParentLoop->end() &&
"Loop is not a subloop of its parent!");
}
#endif
}
/// verifyLoop - Verify loop structure of this loop and all nested loops.
template<class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::verifyLoopNest(
DenseSet<const LoopT*> *Loops) const {
Loops->insert(static_cast<const LoopT *>(this));
// Verify this loop.
verifyLoop();
// Verify the subloops.
for (iterator I = begin(), E = end(); I != E; ++I)
(*I)->verifyLoopNest(Loops);
}
template<class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::print(raw_ostream &OS, unsigned Depth) const {
OS.indent(Depth*2) << "Loop at depth " << getLoopDepth()
<< " containing: ";
for (unsigned i = 0; i < getBlocks().size(); ++i) {
if (i) OS << ",";
BlockT *BB = getBlocks()[i];
BB->printAsOperand(OS, false);
if (BB == getHeader()) OS << "<header>";
if (BB == getLoopLatch()) OS << "<latch>";
if (isLoopExiting(BB)) OS << "<exiting>";
}
OS << "\n";
for (iterator I = begin(), E = end(); I != E; ++I)
(*I)->print(OS, Depth+2);
}
//===----------------------------------------------------------------------===//
/// Stable LoopInfo Analysis - Build a loop tree using stable iterators so the
/// result does not depend on use list (block predecessor) order.
///
/// Discover a subloop with the specified backedges such that: All blocks within
/// this loop are mapped to this loop or a subloop. And all subloops within this
/// loop have their parent loop set to this loop or a subloop.
template<class BlockT, class LoopT>
static void discoverAndMapSubloop(LoopT *L, ArrayRef<BlockT*> Backedges,
LoopInfoBase<BlockT, LoopT> *LI,
DominatorTreeBase<BlockT> &DomTree) {
typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
unsigned NumBlocks = 0;
unsigned NumSubloops = 0;
// Perform a backward CFG traversal using a worklist.
std::vector<BlockT *> ReverseCFGWorklist(Backedges.begin(), Backedges.end());
while (!ReverseCFGWorklist.empty()) {
BlockT *PredBB = ReverseCFGWorklist.back();
ReverseCFGWorklist.pop_back();
LoopT *Subloop = LI->getLoopFor(PredBB);
if (!Subloop) {
if (!DomTree.isReachableFromEntry(PredBB))
continue;
// This is an undiscovered block. Map it to the current loop.
LI->changeLoopFor(PredBB, L);
++NumBlocks;
if (PredBB == L->getHeader())
continue;
// Push all block predecessors on the worklist.
ReverseCFGWorklist.insert(ReverseCFGWorklist.end(),
InvBlockTraits::child_begin(PredBB),
InvBlockTraits::child_end(PredBB));
}
else {
// This is a discovered block. Find its outermost discovered loop.
while (LoopT *Parent = Subloop->getParentLoop())
Subloop = Parent;
// If it is already discovered to be a subloop of this loop, continue.
if (Subloop == L)
continue;
// Discover a subloop of this loop.
Subloop->setParentLoop(L);
++NumSubloops;
NumBlocks += Subloop->getBlocks().capacity();
PredBB = Subloop->getHeader();
// Continue traversal along predecessors that are not loop-back edges from
// within this subloop tree itself. Note that a predecessor may directly
// reach another subloop that is not yet discovered to be a subloop of
// this loop, which we must traverse.
for (typename InvBlockTraits::ChildIteratorType PI =
InvBlockTraits::child_begin(PredBB),
PE = InvBlockTraits::child_end(PredBB); PI != PE; ++PI) {
if (LI->getLoopFor(*PI) != Subloop)
ReverseCFGWorklist.push_back(*PI);
}
}
}
L->getSubLoopsVector().reserve(NumSubloops);
L->reserveBlocks(NumBlocks);
}
/// Populate all loop data in a stable order during a single forward DFS.
template<class BlockT, class LoopT>
class PopulateLoopsDFS {
typedef GraphTraits<BlockT*> BlockTraits;
typedef typename BlockTraits::ChildIteratorType SuccIterTy;
LoopInfoBase<BlockT, LoopT> *LI;
public:
PopulateLoopsDFS(LoopInfoBase<BlockT, LoopT> *li):
LI(li) {}
void traverse(BlockT *EntryBlock);
protected:
void insertIntoLoop(BlockT *Block);
};
/// Top-level driver for the forward DFS within the loop.
template<class BlockT, class LoopT>
void PopulateLoopsDFS<BlockT, LoopT>::traverse(BlockT *EntryBlock) {
for (BlockT *BB : post_order(EntryBlock))
insertIntoLoop(BB);
}
/// Add a single Block to its ancestor loops in PostOrder. If the block is a
/// subloop header, add the subloop to its parent in PostOrder, then reverse the
/// Block and Subloop vectors of the now complete subloop to achieve RPO.
template<class BlockT, class LoopT>
void PopulateLoopsDFS<BlockT, LoopT>::insertIntoLoop(BlockT *Block) {
LoopT *Subloop = LI->getLoopFor(Block);
if (Subloop && Block == Subloop->getHeader()) {
// We reach this point once per subloop after processing all the blocks in
// the subloop.
if (Subloop->getParentLoop())
Subloop->getParentLoop()->getSubLoopsVector().push_back(Subloop);
else
LI->addTopLevelLoop(Subloop);
// For convenience, Blocks and Subloops are inserted in postorder. Reverse
// the lists, except for the loop header, which is always at the beginning.
Subloop->reverseBlock(1);
std::reverse(Subloop->getSubLoopsVector().begin(),
Subloop->getSubLoopsVector().end());
Subloop = Subloop->getParentLoop();
}
for (; Subloop; Subloop = Subloop->getParentLoop())
Subloop->addBlockEntry(Block);
}
/// Analyze LoopInfo discovers loops during a postorder DominatorTree traversal
/// interleaved with backward CFG traversals within each subloop
/// (discoverAndMapSubloop). The backward traversal skips inner subloops, so
/// this part of the algorithm is linear in the number of CFG edges. Subloop and
/// Block vectors are then populated during a single forward CFG traversal
/// (PopulateLoopsDFS).
///
/// During the two CFG traversals each block is seen three times:
/// 1) Discovered and mapped by a reverse CFG traversal.
/// 2) Visited during a forward DFS CFG traversal.
/// 3) Reverse-inserted in the loop in postorder following forward DFS.
///
/// The Block vectors are inclusive, so step 3 requires loop-depth number of
/// insertions per block.
template<class BlockT, class LoopT>
void LoopInfoBase<BlockT, LoopT>::
Analyze(DominatorTreeBase<BlockT> &DomTree) {
// Postorder traversal of the dominator tree.
DomTreeNodeBase<BlockT>* DomRoot = DomTree.getRootNode();
for (auto DomNode : post_order(DomRoot)) {
BlockT *Header = DomNode->getBlock();
SmallVector<BlockT *, 4> Backedges;
// Check each predecessor of the potential loop header.
typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
for (typename InvBlockTraits::ChildIteratorType PI =
InvBlockTraits::child_begin(Header),
PE = InvBlockTraits::child_end(Header); PI != PE; ++PI) {
BlockT *Backedge = *PI;
// If Header dominates predBB, this is a new loop. Collect the backedges.
if (DomTree.dominates(Header, Backedge)
&& DomTree.isReachableFromEntry(Backedge)) {
Backedges.push_back(Backedge);
}
}
// Perform a backward CFG traversal to discover and map blocks in this loop.
if (!Backedges.empty()) {
LoopT *L = new LoopT(Header);
discoverAndMapSubloop(L, ArrayRef<BlockT*>(Backedges), this, DomTree);
}
}
// Perform a single forward CFG traversal to populate block and subloop
// vectors for all loops.
PopulateLoopsDFS<BlockT, LoopT> DFS(this);
DFS.traverse(DomRoot->getBlock());
}
// Debugging
template<class BlockT, class LoopT>
void LoopInfoBase<BlockT, LoopT>::print(raw_ostream &OS) const {
for (unsigned i = 0; i < TopLevelLoops.size(); ++i)
TopLevelLoops[i]->print(OS);
#if 0
for (DenseMap<BasicBlock*, LoopT*>::const_iterator I = BBMap.begin(),
E = BBMap.end(); I != E; ++I)
OS << "BB '" << I->first->getName() << "' level = "
<< I->second->getLoopDepth() << "\n";
#endif
}
template<class BlockT, class LoopT>
void LoopInfoBase<BlockT, LoopT>::verify() const {
DenseSet<const LoopT*> Loops;
for (iterator I = begin(), E = end(); I != E; ++I) {
assert(!(*I)->getParentLoop() && "Top-level loop has a parent!");
(*I)->verifyLoopNest(&Loops);
}
// Verify that blocks are mapped to valid loops.
#ifndef NDEBUG
for (auto &Entry : BBMap) {
const BlockT *BB = Entry.first;
LoopT *L = Entry.second;
assert(Loops.count(L) && "orphaned loop");
assert(L->contains(BB) && "orphaned block");
}
#endif
}
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/RegionPrinter.h | //===-- RegionPrinter.h - Region printer external interface -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines external functions that can be called to explicitly
// instantiate the region printer.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_REGIONPRINTER_H
#define LLVM_ANALYSIS_REGIONPRINTER_H
namespace llvm {
class FunctionPass;
FunctionPass *createRegionViewerPass();
FunctionPass *createRegionOnlyViewerPass();
FunctionPass *createRegionPrinterPass();
FunctionPass *createRegionOnlyPrinterPass();
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/ScalarEvolution.h | //===- llvm/Analysis/ScalarEvolution.h - Scalar Evolution -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The ScalarEvolution class is an LLVM pass which can be used to analyze and
// categorize scalar expressions in loops. It specializes in recognizing
// general induction variables, representing them with the abstract and opaque
// SCEV class. Given this analysis, trip counts of loops and other important
// properties can be obtained.
//
// This analysis is primarily useful for induction variable substitution and
// strength reduction.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_SCALAREVOLUTION_H
#define LLVM_ANALYSIS_SCALAREVOLUTION_H
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/DataTypes.h"
#include <map>
namespace llvm {
class APInt;
class AssumptionCache;
class Constant;
class ConstantInt;
class DominatorTree;
class Type;
class ScalarEvolution;
class DataLayout;
class TargetLibraryInfo;
class LLVMContext;
class Loop;
class LoopInfo;
class Operator;
class SCEVUnknown;
class SCEV;
template<> struct FoldingSetTrait<SCEV>;
/// SCEV - This class represents an analyzed expression in the program. These
/// are opaque objects that the client is not allowed to do much with
/// directly.
///
class SCEV : public FoldingSetNode {
friend struct FoldingSetTrait<SCEV>;
/// FastID - A reference to an Interned FoldingSetNodeID for this node.
/// The ScalarEvolution's BumpPtrAllocator holds the data.
FoldingSetNodeIDRef FastID;
// The SCEV baseclass this node corresponds to
const unsigned short SCEVType;
protected:
/// SubclassData - This field is initialized to zero and may be used in
/// subclasses to store miscellaneous information.
unsigned short SubclassData;
private:
SCEV(const SCEV &) = delete;
void operator=(const SCEV &) = delete;
public:
/// NoWrapFlags are bitfield indices into SubclassData.
///
/// Add and Mul expressions may have no-unsigned-wrap <NUW> or
/// no-signed-wrap <NSW> properties, which are derived from the IR
/// operator. NSW is a misnomer that we use to mean no signed overflow or
/// underflow.
///
/// AddRec expressions may have a no-self-wraparound <NW> property if, in
/// the integer domain, abs(step) * max-iteration(loop) <=
/// unsigned-max(bitwidth). This means that the recurrence will never reach
/// its start value if the step is non-zero. Computing the same value on
/// each iteration is not considered wrapping, and recurrences with step = 0
/// are trivially <NW>. <NW> is independent of the sign of step and the
/// value the add recurrence starts with.
///
/// Note that NUW and NSW are also valid properties of a recurrence, and
/// either implies NW. For convenience, NW will be set for a recurrence
/// whenever either NUW or NSW are set.
enum NoWrapFlags { FlagAnyWrap = 0, // No guarantee.
FlagNW = (1 << 0), // No self-wrap.
FlagNUW = (1 << 1), // No unsigned wrap.
FlagNSW = (1 << 2), // No signed wrap.
NoWrapMask = (1 << 3) -1 };
explicit SCEV(const FoldingSetNodeIDRef ID, unsigned SCEVTy) :
FastID(ID), SCEVType(SCEVTy), SubclassData(0) {}
unsigned getSCEVType() const { return SCEVType; }
/// getType - Return the LLVM type of this SCEV expression.
///
Type *getType() const;
/// isZero - Return true if the expression is a constant zero.
///
bool isZero() const;
/// isOne - Return true if the expression is a constant one.
///
bool isOne() const;
/// isAllOnesValue - Return true if the expression is a constant
/// all-ones value.
///
bool isAllOnesValue() const;
/// isNonConstantNegative - Return true if the specified scev is negated,
/// but not a constant.
bool isNonConstantNegative() const;
/// print - Print out the internal representation of this scalar to the
/// specified stream. This should really only be used for debugging
/// purposes.
void print(raw_ostream &OS) const;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// dump - This method is used for debugging.
///
void dump() const;
#endif
};
// Specialize FoldingSetTrait for SCEV to avoid needing to compute
// temporary FoldingSetNodeID values.
template<> struct FoldingSetTrait<SCEV> : DefaultFoldingSetTrait<SCEV> {
static void Profile(const SCEV &X, FoldingSetNodeID& ID) {
ID = X.FastID;
}
static bool Equals(const SCEV &X, const FoldingSetNodeID &ID,
unsigned IDHash, FoldingSetNodeID &TempID) {
return ID == X.FastID;
}
static unsigned ComputeHash(const SCEV &X, FoldingSetNodeID &TempID) {
return X.FastID.ComputeHash();
}
};
inline raw_ostream &operator<<(raw_ostream &OS, const SCEV &S) {
S.print(OS);
return OS;
}
/// SCEVCouldNotCompute - An object of this class is returned by queries that
/// could not be answered. For example, if you ask for the number of
/// iterations of a linked-list traversal loop, you will get one of these.
/// None of the standard SCEV operations are valid on this class, it is just a
/// marker.
struct SCEVCouldNotCompute : public SCEV {
SCEVCouldNotCompute();
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const SCEV *S);
};
/// ScalarEvolution - This class is the main scalar evolution driver. Because
/// client code (intentionally) can't do much with the SCEV objects directly,
/// they must ask this class for services.
///
class ScalarEvolution : public FunctionPass {
public:
/// LoopDisposition - An enum describing the relationship between a
/// SCEV and a loop.
enum LoopDisposition {
LoopVariant, ///< The SCEV is loop-variant (unknown).
LoopInvariant, ///< The SCEV is loop-invariant.
LoopComputable ///< The SCEV varies predictably with the loop.
};
/// BlockDisposition - An enum describing the relationship between a
/// SCEV and a basic block.
enum BlockDisposition {
DoesNotDominateBlock, ///< The SCEV does not dominate the block.
DominatesBlock, ///< The SCEV dominates the block.
ProperlyDominatesBlock ///< The SCEV properly dominates the block.
};
/// Convenient NoWrapFlags manipulation that hides enum casts and is
/// visible in the ScalarEvolution name space.
static SCEV::NoWrapFlags LLVM_ATTRIBUTE_UNUSED_RESULT
maskFlags(SCEV::NoWrapFlags Flags, int Mask) {
return (SCEV::NoWrapFlags)(Flags & Mask);
}
static SCEV::NoWrapFlags LLVM_ATTRIBUTE_UNUSED_RESULT
setFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OnFlags) {
return (SCEV::NoWrapFlags)(Flags | OnFlags);
}
static SCEV::NoWrapFlags LLVM_ATTRIBUTE_UNUSED_RESULT
clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags) {
return (SCEV::NoWrapFlags)(Flags & ~OffFlags);
}
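// Example (illustrative sketch) of composing the helpers above:
//
// SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
// Flags = setFlags(Flags, SCEV::FlagNUW); // add NUW
// Flags = clearFlags(Flags, SCEV::FlagNUW); // drop NUW again
// Flags = maskFlags(Flags, SCEV::FlagNW | SCEV::FlagNSW); // keep only NW/NSW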
private:
/// SCEVCallbackVH - A CallbackVH to arrange for ScalarEvolution to be
/// notified whenever a Value is deleted.
class SCEVCallbackVH : public CallbackVH {
ScalarEvolution *SE;
void deleted() override;
void allUsesReplacedWith(Value *New) override;
public:
SCEVCallbackVH(Value *V, ScalarEvolution *SE = nullptr);
};
friend class SCEVCallbackVH;
friend class SCEVExpander;
friend class SCEVUnknown;
/// F - The function we are analyzing.
///
Function *F;
/// The tracker for @llvm.assume intrinsics in this function.
AssumptionCache *AC;
/// LI - The loop information for the function we are currently analyzing.
///
LoopInfo *LI;
/// TLI - The target library information for the target we are targeting.
///
TargetLibraryInfo *TLI;
/// DT - The dominator tree.
///
DominatorTree *DT;
/// CouldNotCompute - This SCEV is used to represent unknown trip
/// counts and things.
SCEVCouldNotCompute CouldNotCompute;
/// ValueExprMapType - The typedef for ValueExprMap.
///
typedef DenseMap<SCEVCallbackVH, const SCEV *, DenseMapInfo<Value *> >
ValueExprMapType;
/// ValueExprMap - This is a cache of the values we have analyzed so far.
///
ValueExprMapType ValueExprMap;
/// Mark predicate values currently being processed by isImpliedCond.
DenseSet<Value*> PendingLoopPredicates;
/// Set to true by isLoopBackedgeGuardedByCond when we're walking the set of
/// conditions dominating the backedge of a loop.
bool WalkingBEDominatingConds;
/// ExitLimit - Information about the number of loop iterations for which a
/// loop exit's branch condition evaluates to the not-taken path. This is a
/// temporary pair of exact and max expressions that are eventually
/// summarized in ExitNotTakenInfo and BackedgeTakenInfo.
struct ExitLimit {
const SCEV *Exact;
const SCEV *Max;
/*implicit*/ ExitLimit(const SCEV *E) : Exact(E), Max(E) {}
ExitLimit(const SCEV *E, const SCEV *M) : Exact(E), Max(M) {}
/// hasAnyInfo - Test whether this ExitLimit contains any computed
/// information, or whether it's all SCEVCouldNotCompute values.
bool hasAnyInfo() const {
return !isa<SCEVCouldNotCompute>(Exact) ||
!isa<SCEVCouldNotCompute>(Max);
}
};
/// ExitNotTakenInfo - Information about the number of times a particular
/// loop exit may be reached before exiting the loop.
struct ExitNotTakenInfo {
AssertingVH<BasicBlock> ExitingBlock;
const SCEV *ExactNotTaken;
PointerIntPair<ExitNotTakenInfo*, 1> NextExit;
ExitNotTakenInfo() : ExitingBlock(nullptr), ExactNotTaken(nullptr) {}
/// isCompleteList - Return true if all loop exits are computable.
bool isCompleteList() const {
return NextExit.getInt() == 0;
}
void setIncomplete() { NextExit.setInt(1); }
/// getNextExit - Return a pointer to the next exit's not-taken info.
ExitNotTakenInfo *getNextExit() const {
return NextExit.getPointer();
}
void setNextExit(ExitNotTakenInfo *ENT) { NextExit.setPointer(ENT); }
};
/// BackedgeTakenInfo - Information about the backedge-taken count
/// of a loop. This currently includes an exact count and a maximum count.
///
class BackedgeTakenInfo {
/// ExitNotTaken - A list of computable exits and their not-taken counts.
/// Loops almost never have more than one computable exit.
ExitNotTakenInfo ExitNotTaken;
/// Max - An expression indicating the least maximum backedge-taken
/// count of the loop that is known, or a SCEVCouldNotCompute.
const SCEV *Max;
public:
BackedgeTakenInfo() : Max(nullptr) {}
/// Initialize BackedgeTakenInfo from a list of exact exit counts.
BackedgeTakenInfo(
SmallVectorImpl< std::pair<BasicBlock *, const SCEV *> > &ExitCounts,
bool Complete, const SCEV *MaxCount);
/// hasAnyInfo - Test whether this BackedgeTakenInfo contains any
/// computed information, or whether it's all SCEVCouldNotCompute
/// values.
bool hasAnyInfo() const {
return ExitNotTaken.ExitingBlock || !isa<SCEVCouldNotCompute>(Max);
}
/// getExact - Return an expression indicating the exact backedge-taken
/// count of the loop if it is known, or SCEVCouldNotCompute
/// otherwise. This is the number of times the loop header can be
/// guaranteed to execute, minus one.
const SCEV *getExact(ScalarEvolution *SE) const;
/// getExact - Return the number of times this loop exit may fall through
/// to the back edge, or SCEVCouldNotCompute. The loop is guaranteed not
/// to exit via this block before this number of iterations, but may exit
/// via another block.
const SCEV *getExact(BasicBlock *ExitingBlock, ScalarEvolution *SE) const;
/// getMax - Get the max backedge taken count for the loop.
const SCEV *getMax(ScalarEvolution *SE) const;
/// Return true if any backedge taken count expressions refer to the given
/// subexpression.
bool hasOperand(const SCEV *S, ScalarEvolution *SE) const;
/// clear - Invalidate this result and free associated memory.
void clear();
};
/// BackedgeTakenCounts - Cache the backedge-taken count of the loops for
/// this function as they are computed.
DenseMap<const Loop*, BackedgeTakenInfo> BackedgeTakenCounts;
/// ConstantEvolutionLoopExitValue - This map contains entries for all of
/// the PHI instructions that we attempt to compute constant evolutions for.
/// This allows us to avoid potentially expensive recomputation of these
/// properties. An instruction maps to null if we are unable to compute its
/// exit value.
DenseMap<PHINode*, Constant*> ConstantEvolutionLoopExitValue;
/// ValuesAtScopes - This map contains entries for all the expressions
/// that we attempt to compute getSCEVAtScope information for, which can
/// be expensive in extreme cases.
DenseMap<const SCEV *,
SmallVector<std::pair<const Loop *, const SCEV *>, 2> > ValuesAtScopes;
/// LoopDispositions - Memoized computeLoopDisposition results.
DenseMap<const SCEV *,
SmallVector<PointerIntPair<const Loop *, 2, LoopDisposition>, 2>>
LoopDispositions;
/// computeLoopDisposition - Compute a LoopDisposition value.
LoopDisposition computeLoopDisposition(const SCEV *S, const Loop *L);
/// BlockDispositions - Memoized computeBlockDisposition results.
DenseMap<
const SCEV *,
SmallVector<PointerIntPair<const BasicBlock *, 2, BlockDisposition>, 2>>
BlockDispositions;
/// computeBlockDisposition - Compute a BlockDisposition value.
BlockDisposition computeBlockDisposition(const SCEV *S, const BasicBlock *BB);
/// UnsignedRanges - Memoized results from getRange
DenseMap<const SCEV *, ConstantRange> UnsignedRanges;
/// SignedRanges - Memoized results from getRange
DenseMap<const SCEV *, ConstantRange> SignedRanges;
/// RangeSignHint - Used to parameterize getRange
enum RangeSignHint { HINT_RANGE_UNSIGNED, HINT_RANGE_SIGNED };
/// setRange - Set the memoized range for the given SCEV.
const ConstantRange &setRange(const SCEV *S, RangeSignHint Hint,
const ConstantRange &CR) {
DenseMap<const SCEV *, ConstantRange> &Cache =
Hint == HINT_RANGE_UNSIGNED ? UnsignedRanges : SignedRanges;
std::pair<DenseMap<const SCEV *, ConstantRange>::iterator, bool> Pair =
Cache.insert(std::make_pair(S, CR));
if (!Pair.second)
Pair.first->second = CR;
return Pair.first->second;
}
/// getRange - Determine the range for a particular SCEV.
ConstantRange getRange(const SCEV *S, RangeSignHint Hint);
/// createSCEV - We know that there is no SCEV for the specified value.
/// Analyze the expression.
const SCEV *createSCEV(Value *V);
/// createNodeForPHI - Provide the special handling we need to analyze PHI
/// SCEVs.
const SCEV *createNodeForPHI(PHINode *PN);
/// createNodeForGEP - Provide the special handling we need to analyze GEP
/// SCEVs.
const SCEV *createNodeForGEP(GEPOperator *GEP);
/// computeSCEVAtScope - Implementation code for getSCEVAtScope; called
/// at most once for each SCEV+Loop pair.
///
const SCEV *computeSCEVAtScope(const SCEV *S, const Loop *L);
/// ForgetSymbolicName - This looks up computed SCEV values for all
/// instructions that depend on the given instruction and removes them from
/// the ValueExprMap map if they reference SymName. This is used during PHI
/// resolution.
void ForgetSymbolicName(Instruction *I, const SCEV *SymName);
/// getBackedgeTakenInfo - Return the BackedgeTakenInfo for the given
/// loop, lazily computing new values if the loop hasn't been analyzed
/// yet.
const BackedgeTakenInfo &getBackedgeTakenInfo(const Loop *L);
/// ComputeBackedgeTakenCount - Compute the number of times the specified
/// loop will iterate.
BackedgeTakenInfo ComputeBackedgeTakenCount(const Loop *L);
/// ComputeExitLimit - Compute the number of times the backedge of the
/// specified loop will execute if it exits via the specified block.
ExitLimit ComputeExitLimit(const Loop *L, BasicBlock *ExitingBlock);
/// ComputeExitLimitFromCond - Compute the number of times the backedge of
/// the specified loop will execute if its exit condition were a conditional
/// branch of ExitCond, TBB, and FBB.
ExitLimit ComputeExitLimitFromCond(const Loop *L,
Value *ExitCond,
BasicBlock *TBB,
BasicBlock *FBB,
bool IsSubExpr);
/// ComputeExitLimitFromICmp - Compute the number of times the backedge of
/// the specified loop will execute if its exit condition were a conditional
/// branch of the ICmpInst ExitCond, TBB, and FBB.
ExitLimit ComputeExitLimitFromICmp(const Loop *L,
ICmpInst *ExitCond,
BasicBlock *TBB,
BasicBlock *FBB,
bool IsSubExpr);
/// ComputeExitLimitFromSingleExitSwitch - Compute the number of times the
/// backedge of the specified loop will execute if its exit condition were a
/// switch with a single exiting case to ExitingBB.
ExitLimit
ComputeExitLimitFromSingleExitSwitch(const Loop *L, SwitchInst *Switch,
BasicBlock *ExitingBB, bool IsSubExpr);
/// ComputeLoadConstantCompareExitLimit - Given an exit condition
/// of 'icmp op load X, cst', try to see if we can compute the
/// backedge-taken count.
ExitLimit ComputeLoadConstantCompareExitLimit(LoadInst *LI,
Constant *RHS,
const Loop *L,
ICmpInst::Predicate p);
/// ComputeExitCountExhaustively - If the loop is known to execute a
/// constant number of times (the condition evolves only from constants),
/// try to evaluate a few iterations of the loop until the exit condition
/// gets a value of ExitWhen (true or false). If we cannot
/// evaluate the exit count of the loop, return CouldNotCompute.
const SCEV *ComputeExitCountExhaustively(const Loop *L,
Value *Cond,
bool ExitWhen);
/// HowFarToZero - Return the number of times an exit condition comparing
/// the specified value to zero will execute. If not computable, return
/// CouldNotCompute.
ExitLimit HowFarToZero(const SCEV *V, const Loop *L, bool IsSubExpr);
/// HowFarToNonZero - Return the number of times an exit condition checking
/// the specified value for nonzero will execute. If not computable, return
/// CouldNotCompute.
ExitLimit HowFarToNonZero(const SCEV *V, const Loop *L);
/// HowManyLessThans - Return the number of times an exit condition
/// containing the specified less-than comparison will execute. If not
/// computable, return CouldNotCompute. isSigned specifies whether the
/// less-than is signed.
ExitLimit HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
const Loop *L, bool isSigned, bool IsSubExpr);
ExitLimit HowManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
const Loop *L, bool isSigned, bool IsSubExpr);
/// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
/// (which may not be an immediate predecessor) which has exactly one
/// successor from which BB is reachable, or null if no such block is
/// found.
std::pair<BasicBlock *, BasicBlock *>
getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB);
/// isImpliedCond - Test whether the condition described by Pred, LHS, and
/// RHS is true whenever the given FoundCondValue value evaluates to true.
bool isImpliedCond(ICmpInst::Predicate Pred,
const SCEV *LHS, const SCEV *RHS,
Value *FoundCondValue,
bool Inverse);
/// isImpliedCondOperands - Test whether the condition described by Pred,
/// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
/// and FoundRHS is true.
bool isImpliedCondOperands(ICmpInst::Predicate Pred,
const SCEV *LHS, const SCEV *RHS,
const SCEV *FoundLHS, const SCEV *FoundRHS);
/// isImpliedCondOperandsHelper - Test whether the condition described by
/// Pred, LHS, and RHS is true whenever the condition described by Pred,
/// FoundLHS, and FoundRHS is true.
bool isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
const SCEV *LHS, const SCEV *RHS,
const SCEV *FoundLHS,
const SCEV *FoundRHS);
/// isImpliedCondOperandsViaRanges - Test whether the condition described by
/// Pred, LHS, and RHS is true whenever the condition described by Pred,
/// FoundLHS, and FoundRHS is true. Utility function used by
/// isImpliedCondOperands.
bool isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
const SCEV *LHS, const SCEV *RHS,
const SCEV *FoundLHS,
const SCEV *FoundRHS);
/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
/// in the header of its containing loop, we know the loop executes a
/// constant number of times, and the PHI node is just a recurrence
/// involving constants, fold it.
Constant *getConstantEvolutionLoopExitValue(PHINode *PN, const APInt& BEs,
const Loop *L);
/// isKnownPredicateWithRanges - Test if the given expression is known to
/// satisfy the condition described by Pred and the known constant ranges
/// of LHS and RHS.
///
bool isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
const SCEV *LHS, const SCEV *RHS);
/// forgetMemoizedResults - Drop memoized information computed for S.
void forgetMemoizedResults(const SCEV *S);
/// Return false iff the given SCEV contains a SCEVUnknown with a NULL
/// value pointer.
bool checkValidity(const SCEV *S) const;
// Return true if `ExtendOpTy`({`Start`,+,`Step`}) can be proved to be equal
// to {`ExtendOpTy`(`Start`),+,`ExtendOpTy`(`Step`)}. This is equivalent to
// proving no signed (resp. unsigned) wrap in {`Start`,+,`Step`} if
// `ExtendOpTy` is `SCEVSignExtendExpr` (resp. `SCEVZeroExtendExpr`).
//
template<typename ExtendOpTy>
bool proveNoWrapByVaryingStart(const SCEV *Start, const SCEV *Step,
const Loop *L);
public:
static char ID; // Pass identification, replacement for typeid
ScalarEvolution();
LLVMContext &getContext() const { return F->getContext(); }
/// isSCEVable - Test if values of the given type are analyzable within
/// the SCEV framework. This primarily includes integer types, and it
/// can optionally include pointer types if the ScalarEvolution class
/// has access to target-specific information.
bool isSCEVable(Type *Ty) const;
/// getTypeSizeInBits - Return the size in bits of the specified type,
/// for which isSCEVable must return true.
uint64_t getTypeSizeInBits(Type *Ty) const;
/// getEffectiveSCEVType - Return a type with the same bitwidth as
/// the given type and which represents how SCEV will treat the given
/// type, for which isSCEVable must return true. For pointer types,
/// this is the pointer-sized integer type.
Type *getEffectiveSCEVType(Type *Ty) const;
/// getSCEV - Return a SCEV expression for the full generality of the
/// specified expression.
const SCEV *getSCEV(Value *V);
const SCEV *getConstant(ConstantInt *V);
const SCEV *getConstant(const APInt& Val);
const SCEV *getConstant(Type *Ty, uint64_t V, bool isSigned = false);
const SCEV *getTruncateExpr(const SCEV *Op, Type *Ty);
const SCEV *getZeroExtendExpr(const SCEV *Op, Type *Ty);
const SCEV *getSignExtendExpr(const SCEV *Op, Type *Ty);
const SCEV *getAnyExtendExpr(const SCEV *Op, Type *Ty);
const SCEV *getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
const SCEV *getAddExpr(const SCEV *LHS, const SCEV *RHS,
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
SmallVector<const SCEV *, 2> Ops;
Ops.push_back(LHS);
Ops.push_back(RHS);
return getAddExpr(Ops, Flags);
}
const SCEV *getAddExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
SmallVector<const SCEV *, 3> Ops;
Ops.push_back(Op0);
Ops.push_back(Op1);
Ops.push_back(Op2);
return getAddExpr(Ops, Flags);
}
const SCEV *getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
const SCEV *getMulExpr(const SCEV *LHS, const SCEV *RHS,
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap)
{
SmallVector<const SCEV *, 2> Ops;
Ops.push_back(LHS);
Ops.push_back(RHS);
return getMulExpr(Ops, Flags);
}
const SCEV *getMulExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
SmallVector<const SCEV *, 3> Ops;
Ops.push_back(Op0);
Ops.push_back(Op1);
Ops.push_back(Op2);
return getMulExpr(Ops, Flags);
}
const SCEV *getUDivExpr(const SCEV *LHS, const SCEV *RHS);
const SCEV *getUDivExactExpr(const SCEV *LHS, const SCEV *RHS);
const SCEV *getAddRecExpr(const SCEV *Start, const SCEV *Step,
const Loop *L, SCEV::NoWrapFlags Flags);
const SCEV *getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
const Loop *L, SCEV::NoWrapFlags Flags);
const SCEV *getAddRecExpr(const SmallVectorImpl<const SCEV *> &Operands,
const Loop *L, SCEV::NoWrapFlags Flags) {
SmallVector<const SCEV *, 4> NewOp(Operands.begin(), Operands.end());
return getAddRecExpr(NewOp, L, Flags);
}
/// \brief Returns an expression for a GEP
///
/// \p PointeeType The type used as the basis for the pointer arithmetics
/// \p BaseExpr The expression for the pointer operand.
/// \p IndexExprs The expressions for the indices.
/// \p InBounds Whether the GEP is in bounds.
const SCEV *getGEPExpr(Type *PointeeType, const SCEV *BaseExpr,
const SmallVectorImpl<const SCEV *> &IndexExprs,
bool InBounds = false);
const SCEV *getSMaxExpr(const SCEV *LHS, const SCEV *RHS);
const SCEV *getSMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
const SCEV *getUMaxExpr(const SCEV *LHS, const SCEV *RHS);
const SCEV *getUMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
const SCEV *getSMinExpr(const SCEV *LHS, const SCEV *RHS);
const SCEV *getUMinExpr(const SCEV *LHS, const SCEV *RHS);
const SCEV *getUnknown(Value *V);
const SCEV *getCouldNotCompute();
/// getSizeOfExpr - Return an expression for sizeof AllocTy that is of type
/// IntTy.
///
const SCEV *getSizeOfExpr(Type *IntTy, Type *AllocTy);
/// getOffsetOfExpr - Return an expression for offsetof on the given field
/// with type IntTy
///
const SCEV *getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo);
/// getNegativeSCEV - Return the SCEV object corresponding to -V.
///
const SCEV *getNegativeSCEV(const SCEV *V);
/// getNotSCEV - Return the SCEV object corresponding to ~V.
///
const SCEV *getNotSCEV(const SCEV *V);
/// getMinusSCEV - Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
const SCEV *getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion
/// of the input value to the specified type. If the type must be
/// extended, it is zero extended.
const SCEV *getTruncateOrZeroExtend(const SCEV *V, Type *Ty);
/// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion
/// of the input value to the specified type. If the type must be
/// extended, it is sign extended.
const SCEV *getTruncateOrSignExtend(const SCEV *V, Type *Ty);
/// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of
/// the input value to the specified type. If the type must be extended,
/// it is zero extended. The conversion must not be narrowing.
const SCEV *getNoopOrZeroExtend(const SCEV *V, Type *Ty);
/// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of
/// the input value to the specified type. If the type must be extended,
/// it is sign extended. The conversion must not be narrowing.
const SCEV *getNoopOrSignExtend(const SCEV *V, Type *Ty);
/// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
/// the input value to the specified type. If the type must be extended,
/// it is extended with unspecified bits. The conversion must not be
/// narrowing.
const SCEV *getNoopOrAnyExtend(const SCEV *V, Type *Ty);
/// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
/// input value to the specified type. The conversion must not be
/// widening.
const SCEV *getTruncateOrNoop(const SCEV *V, Type *Ty);
/// getUMaxFromMismatchedTypes - Promote the operands to the wider of
/// the types using zero-extension, and then perform a umax operation
/// with them.
const SCEV *getUMaxFromMismatchedTypes(const SCEV *LHS,
const SCEV *RHS);
/// getUMinFromMismatchedTypes - Promote the operands to the wider of
/// the types using zero-extension, and then perform a umin operation
/// with them.
const SCEV *getUMinFromMismatchedTypes(const SCEV *LHS,
const SCEV *RHS);
/// getPointerBase - Transitively follow the chain of pointer-type operands
/// until reaching a SCEV that does not have a single pointer operand. This
/// returns a SCEVUnknown pointer for well-formed pointer-type expressions,
/// but corner cases do exist.
const SCEV *getPointerBase(const SCEV *V);
/// getSCEVAtScope - Return a SCEV expression for the specified value
/// at the specified scope in the program. The L value specifies a loop
/// nest to evaluate the expression at, where null represents the top level
/// and a specified loop represents the scope immediately inside of that loop.
///
/// This method can be used to compute the exit value for a variable defined
/// in a loop by querying what the value will hold in the parent loop.
///
/// In the case that a relevant loop exit value cannot be computed, the
/// original value V is returned.
const SCEV *getSCEVAtScope(const SCEV *S, const Loop *L);
/// getSCEVAtScope - This is a convenience function which does
/// getSCEVAtScope(getSCEV(V), L).
const SCEV *getSCEVAtScope(Value *V, const Loop *L);
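// Illustrative sketch (SE, SumValue and L are hypothetical names): computing
// the value a loop-defined variable holds once its loop has finished.
//
//   const SCEV *S = SE.getSCEV(SumValue);
//   const SCEV *AtParent = SE.getSCEVAtScope(S, L->getParentLoop());
//   if (AtParent != S) {
//     // AtParent describes the value of SumValue as seen in the parent loop,
//     // i.e. its exit value with respect to L.
//   }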
/// isLoopEntryGuardedByCond - Test whether entry to the loop is protected
/// by a conditional between LHS and RHS. This is used to help avoid max
/// expressions in loop trip counts, and to eliminate casts.
bool isLoopEntryGuardedByCond(const Loop *L, ICmpInst::Predicate Pred,
const SCEV *LHS, const SCEV *RHS);
/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS. This is used to
/// eliminate casts.
bool isLoopBackedgeGuardedByCond(const Loop *L, ICmpInst::Predicate Pred,
const SCEV *LHS, const SCEV *RHS);
/// \brief Returns the maximum trip count of the loop if it is a single-exit
/// loop and we can compute a small maximum for that loop.
///
/// Implemented in terms of the \c getSmallConstantTripCount overload with
/// the single exiting block passed to it. See that routine for details.
unsigned getSmallConstantTripCount(Loop *L);
/// getSmallConstantTripCount - Returns the maximum trip count of this loop
/// as a normal unsigned value. Returns 0 if the trip count is unknown or
/// not constant. This "trip count" assumes that control exits via
/// ExitingBlock. More precisely, it is the number of times that control may
/// reach ExitingBlock before taking the branch. For loops with multiple
/// exits, it may not be the number of times that the loop header executes if
/// the loop exits prematurely via another branch.
unsigned getSmallConstantTripCount(Loop *L, BasicBlock *ExitingBlock);
/// \brief Returns the largest constant divisor of the trip count of the
/// loop if it is a single-exit loop and we can compute a small maximum for
/// that loop.
///
/// Implemented in terms of the \c getSmallConstantTripMultiple overload with
/// the single exiting block passed to it. See that routine for details.
unsigned getSmallConstantTripMultiple(Loop *L);
/// getSmallConstantTripMultiple - Returns the largest constant divisor of
/// the trip count of this loop as a normal unsigned value, if
/// possible. This means that the actual trip count is always a multiple of
/// the returned value (don't forget the trip count could very well be zero
/// as well!). As explained in the comments for getSmallConstantTripCount,
/// this assumes that control exits the loop via ExitingBlock.
unsigned getSmallConstantTripMultiple(Loop *L, BasicBlock *ExitingBlock);
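// Illustrative example: if the trip count is known to have the form 4 * %n
// for some unknown %n, getSmallConstantTripMultiple returns 4; when nothing
// is known about divisibility it conservatively returns 1.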
// getExitCount - Get the expression for the number of loop iterations for
// which this loop is guaranteed not to exit via ExitingBlock. Otherwise
// return SCEVCouldNotCompute.
const SCEV *getExitCount(Loop *L, BasicBlock *ExitingBlock);
/// getBackedgeTakenCount - If the specified loop has a predictable
/// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
/// object. The backedge-taken count is the number of times the loop header
/// will be branched to from within the loop. This is one less than the
/// trip count of the loop, since it doesn't count the first iteration,
/// when the header is branched to from outside the loop.
///
/// Note that it is not valid to call this method on a loop without a
/// loop-invariant backedge-taken count (see
/// hasLoopInvariantBackedgeTakenCount).
///
const SCEV *getBackedgeTakenCount(const Loop *L);
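// Illustrative example: for a loop equivalent to "for (i = 0; i != n; ++i)"
// that is known to execute at least once, the trip count is n and the
// backedge-taken count returned here is the SCEV for (n - 1).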
/// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
/// return the least SCEV value that is known never to be less than the
/// actual backedge taken count.
const SCEV *getMaxBackedgeTakenCount(const Loop *L);
/// hasLoopInvariantBackedgeTakenCount - Return true if the specified loop
/// has an analyzable loop-invariant backedge-taken count.
bool hasLoopInvariantBackedgeTakenCount(const Loop *L);
/// forgetLoop - This method should be called by the client when it has
/// changed a loop in a way that may affect ScalarEvolution's ability to
/// compute a trip count, or if the loop is deleted. This call is
/// potentially expensive for large loop bodies.
void forgetLoop(const Loop *L);
/// forgetValue - This method should be called by the client when it has
/// changed a value in a way that may affect its value, or which may
/// disconnect it from a def-use chain linking it to a loop.
void forgetValue(Value *V);
/// \brief Called when the client has changed the disposition of values in
/// this loop.
///
/// We don't have a way to invalidate per-loop dispositions. Clear and
/// recompute is simpler.
void forgetLoopDispositions(const Loop *L) { LoopDispositions.clear(); }
/// GetMinTrailingZeros - Determine the minimum number of zero bits that S
/// is guaranteed to end in (at every loop iteration). It is, at the same
/// time, the minimum number of times S is divisible by 2. For example,
/// given {4,+,8} it returns 2. If S is guaranteed to be 0, it returns the
/// bitwidth of S.
uint32_t GetMinTrailingZeros(const SCEV *S);
/// getUnsignedRange - Determine the unsigned range for a particular SCEV.
///
ConstantRange getUnsignedRange(const SCEV *S) {
return getRange(S, HINT_RANGE_UNSIGNED);
}
/// getSignedRange - Determine the signed range for a particular SCEV.
///
ConstantRange getSignedRange(const SCEV *S) {
return getRange(S, HINT_RANGE_SIGNED);
}
/// isKnownNegative - Test if the given expression is known to be negative.
///
bool isKnownNegative(const SCEV *S);
/// isKnownPositive - Test if the given expression is known to be positive.
///
bool isKnownPositive(const SCEV *S);
/// isKnownNonNegative - Test if the given expression is known to be
/// non-negative.
///
bool isKnownNonNegative(const SCEV *S);
/// isKnownNonPositive - Test if the given expression is known to be
/// non-positive.
///
bool isKnownNonPositive(const SCEV *S);
/// isKnownNonZero - Test if the given expression is known to be
/// non-zero.
///
bool isKnownNonZero(const SCEV *S);
/// isKnownPredicate - Test if the given expression is known to satisfy
/// the condition described by Pred, LHS, and RHS.
///
bool isKnownPredicate(ICmpInst::Predicate Pred,
const SCEV *LHS, const SCEV *RHS);
/// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with
/// predicate Pred. Return true iff any changes were made. If the
/// operands are provably equal or unequal, LHS and RHS are set to
/// the same value and Pred is set to either ICMP_EQ or ICMP_NE.
///
bool SimplifyICmpOperands(ICmpInst::Predicate &Pred,
const SCEV *&LHS,
const SCEV *&RHS,
unsigned Depth = 0);
/// getLoopDisposition - Return the "disposition" of the given SCEV with
/// respect to the given loop.
LoopDisposition getLoopDisposition(const SCEV *S, const Loop *L);
/// isLoopInvariant - Return true if the value of the given SCEV is
/// unchanging in the specified loop.
bool isLoopInvariant(const SCEV *S, const Loop *L);
/// hasComputableLoopEvolution - Return true if the given SCEV changes value
/// in a known way in the specified loop. This property being true implies
/// that the value is variant in the loop AND that we can emit an expression
/// to compute the value of the expression at any particular loop iteration.
bool hasComputableLoopEvolution(const SCEV *S, const Loop *L);
/// getBlockDisposition - Return the "disposition" of the given SCEV with
/// respect to the given block.
BlockDisposition getBlockDisposition(const SCEV *S, const BasicBlock *BB);
/// dominates - Return true if the elements that make up the given SCEV
/// dominate the specified basic block.
bool dominates(const SCEV *S, const BasicBlock *BB);
/// properlyDominates - Return true if the elements that make up the given SCEV
/// properly dominate the specified basic block.
bool properlyDominates(const SCEV *S, const BasicBlock *BB);
/// hasOperand - Test whether the given SCEV has Op as a direct or
/// indirect operand.
bool hasOperand(const SCEV *S, const SCEV *Op) const;
/// Return the size of an element read or written by Inst.
const SCEV *getElementSize(Instruction *Inst);
/// Compute the array dimensions Sizes from the set of Terms extracted from
/// the memory access function of this SCEVAddRecExpr.
void findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
SmallVectorImpl<const SCEV *> &Sizes,
const SCEV *ElementSize) const;
bool runOnFunction(Function &F) override;
void releaseMemory() override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
void print(raw_ostream &OS, const Module* = nullptr) const override;
void verifyAnalysis() const override;
/// Collect parametric terms occurring in step expressions.
void collectParametricTerms(const SCEV *Expr,
SmallVectorImpl<const SCEV *> &Terms);
/// Return in Subscripts the access functions for each dimension in Sizes.
void computeAccessFunctions(const SCEV *Expr,
SmallVectorImpl<const SCEV *> &Subscripts,
SmallVectorImpl<const SCEV *> &Sizes);
/// Split this SCEVAddRecExpr into two vectors of SCEVs representing the
/// subscripts and sizes of an array access.
///
/// The delinearization is a 3 step process: the first two steps compute the
/// sizes of each subscript and the third step computes the access functions
/// for the delinearized array:
///
/// 1. Find the terms in the step functions
/// 2. Compute the array size
/// 3. Compute the access function: divide the SCEV by the array size
/// starting with the innermost dimensions found in step 2. The Quotient
/// is the SCEV to be divided in the next step of the recursion. The
/// Remainder is the subscript of the innermost dimension. Loop over all
/// array dimensions computed in step 2.
///
/// To compute a uniform array size for several memory accesses to the same
/// object, one can collect in step 1 all the step terms for all the memory
/// accesses, and compute in step 2 a unique array shape. This guarantees
/// that the array shape will be the same across all memory accesses.
///
/// FIXME: We could derive the result of steps 1 and 2 from a description of
/// the array shape given in metadata.
///
/// Example:
///
/// A[][n][m]
///
/// for i
/// for j
/// for k
/// A[j+k][2i][5i] =
///
/// The initial SCEV:
///
/// A[{{{0,+,2*m+5}_i, +, n*m}_j, +, n*m}_k]
///
/// 1. Find the different terms in the step functions:
/// -> [2*m, 5, n*m, n*m]
///
/// 2. Compute the array size: sort and unique them
/// -> [n*m, 2*m, 5]
/// find the GCD of all the terms = 1
/// divide by the GCD and erase constant terms
/// -> [n*m, 2*m]
/// GCD = m
/// divide by GCD -> [n, 2]
/// remove constant terms
/// -> [n]
/// size of the array is A[unknown][n][m]
///
/// 3. Compute the access function
/// a. Divide {{{0,+,2*m+5}_i, +, n*m}_j, +, n*m}_k by the innermost size m
/// Quotient: {{{0,+,2}_i, +, n}_j, +, n}_k
/// Remainder: {{{0,+,5}_i, +, 0}_j, +, 0}_k
/// The remainder is the subscript of the innermost array dimension: [5i].
///
/// b. Divide Quotient: {{{0,+,2}_i, +, n}_j, +, n}_k by next outer size n
/// Quotient: {{{0,+,0}_i, +, 1}_j, +, 1}_k
/// Remainder: {{{0,+,2}_i, +, 0}_j, +, 0}_k
/// The Remainder is the subscript of the next array dimension: [2i].
///
/// The subscript of the outermost dimension is the Quotient: [j+k].
///
/// Overall, we have: A[][n][m], and the access function: A[j+k][2i][5i].
void delinearize(const SCEV *Expr,
SmallVectorImpl<const SCEV *> &Subscripts,
SmallVectorImpl<const SCEV *> &Sizes,
const SCEV *ElementSize);
private:
/// Compute the backedge taken count knowing the interval difference, the
/// stride and presence of the equality in the comparison.
const SCEV *computeBECount(const SCEV *Delta, const SCEV *Stride,
bool Equality);
/// Verify if a linear IV with positive stride can overflow when in a
/// less-than comparison, knowing the invariant term of the comparison,
/// the stride and the knowledge of NSW/NUW flags on the recurrence.
bool doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
bool IsSigned, bool NoWrap);
/// Verify if a linear IV with negative stride can overflow when in a
/// greater-than comparison, knowing the invariant term of the comparison,
/// the stride and the knowledge of NSW/NUW flags on the recurrence.
bool doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
bool IsSigned, bool NoWrap);
private:
FoldingSet<SCEV> UniqueSCEVs;
BumpPtrAllocator SCEVAllocator;
/// FirstUnknown - The head of a linked list of all SCEVUnknown
/// values that have been allocated. This is used by releaseMemory
/// to locate them all and call their destructors.
SCEVUnknown *FirstUnknown;
};
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/TargetTransformInfoImpl.h | //===- TargetTransformInfoImpl.h --------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides helpers for the implementation of
/// a TargetTransformInfo-conforming class.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
namespace llvm {
/// \brief Base class for use as a mix-in that aids implementing
/// a TargetTransformInfo-compatible class.
class TargetTransformInfoImplBase {
protected:
typedef TargetTransformInfo TTI;
const DataLayout &DL;
explicit TargetTransformInfoImplBase(const DataLayout &DL) : DL(DL) {}
public:
// Provide value semantics. MSVC requires that we spell all of these out.
TargetTransformInfoImplBase(const TargetTransformInfoImplBase &Arg)
: DL(Arg.DL) {}
TargetTransformInfoImplBase(TargetTransformInfoImplBase &&Arg) : DL(Arg.DL) {}
const DataLayout &getDataLayout() const { return DL; }
unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) {
switch (Opcode) {
default:
// By default, just classify everything as 'basic'.
return TTI::TCC_Basic;
case Instruction::GetElementPtr:
llvm_unreachable("Use getGEPCost for GEP operations!");
case Instruction::BitCast:
assert(OpTy && "Cast instructions must provide the operand type");
if (Ty == OpTy || (Ty->isPointerTy() && OpTy->isPointerTy()))
// Identity and pointer-to-pointer casts are free.
return TTI::TCC_Free;
// Otherwise, the default basic cost is used.
return TTI::TCC_Basic;
case Instruction::IntToPtr: {
// An inttoptr cast is free so long as the input is a legal integer type
// which doesn't contain values outside the range of a pointer.
unsigned OpSize = OpTy->getScalarSizeInBits();
if (DL.isLegalInteger(OpSize) &&
OpSize <= DL.getPointerTypeSizeInBits(Ty))
return TTI::TCC_Free;
// Otherwise it's not a no-op.
return TTI::TCC_Basic;
}
case Instruction::PtrToInt: {
// A ptrtoint cast is free so long as the result is large enough to store
// the pointer, and a legal integer type.
unsigned DestSize = Ty->getScalarSizeInBits();
if (DL.isLegalInteger(DestSize) &&
DestSize >= DL.getPointerTypeSizeInBits(OpTy))
return TTI::TCC_Free;
// Otherwise it's not a no-op.
return TTI::TCC_Basic;
}
case Instruction::Trunc:
// trunc to a native type is free (assuming the target has compare and
// shift-right of the same width).
if (DL.isLegalInteger(DL.getTypeSizeInBits(Ty)))
return TTI::TCC_Free;
return TTI::TCC_Basic;
}
}
unsigned getGEPCost(const Value *Ptr, ArrayRef<const Value *> Operands) {
// In the basic model, we just assume that all-constant GEPs will be folded
// into their uses via addressing modes.
for (unsigned Idx = 0, Size = Operands.size(); Idx != Size; ++Idx)
if (!isa<Constant>(Operands[Idx]))
return TTI::TCC_Basic;
return TTI::TCC_Free;
}
unsigned getCallCost(FunctionType *FTy, int NumArgs) {
assert(FTy && "FunctionType must be provided to this routine.");
// The target-independent implementation just measures the size of the
// function by approximating that each argument will take on average one
// instruction to prepare.
if (NumArgs < 0)
// Set the argument number to the number of explicit arguments in the
// function.
NumArgs = FTy->getNumParams();
return TTI::TCC_Basic * (NumArgs + 1);
}
unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<Type *> ParamTys) {
switch (IID) {
default:
// Intrinsics rarely (if ever) have normal argument setup constraints.
// Model them as having a basic instruction cost.
// FIXME: This is wrong for libc intrinsics.
return TTI::TCC_Basic;
case Intrinsic::annotation:
case Intrinsic::assume:
case Intrinsic::dbg_declare:
case Intrinsic::dbg_value:
case Intrinsic::invariant_start:
case Intrinsic::invariant_end:
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
case Intrinsic::objectsize:
case Intrinsic::ptr_annotation:
case Intrinsic::var_annotation:
case Intrinsic::experimental_gc_result_int:
case Intrinsic::experimental_gc_result_float:
case Intrinsic::experimental_gc_result_ptr:
case Intrinsic::experimental_gc_result:
case Intrinsic::experimental_gc_relocate:
// These intrinsics don't actually represent code after lowering.
return TTI::TCC_Free;
}
}
bool hasBranchDivergence() { return false; }
bool isSourceOfDivergence(const Value *V) { return false; }
bool isLoweredToCall(const Function *F) {
// FIXME: These should almost certainly not be handled here, and instead
// handled with the help of TLI or the target itself. This was largely
// ported from existing analysis heuristics here so that such refactorings
// can take place in the future.
if (F->isIntrinsic())
return false;
if (F->hasLocalLinkage() || !F->hasName())
return true;
StringRef Name = F->getName();
// These will all likely lower to a single selection DAG node.
if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
Name == "fabs" || Name == "fabsf" || Name == "fabsl" || Name == "sin" ||
Name == "fmin" || Name == "fminf" || Name == "fminl" ||
Name == "fmax" || Name == "fmaxf" || Name == "fmaxl" ||
Name == "sinf" || Name == "sinl" || Name == "cos" || Name == "cosf" ||
Name == "cosl" || Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl")
return false;
// These are all likely to be optimized into something smaller.
if (Name == "pow" || Name == "powf" || Name == "powl" || Name == "exp2" ||
Name == "exp2l" || Name == "exp2f" || Name == "floor" ||
Name == "floorf" || Name == "ceil" || Name == "round" ||
Name == "ffs" || Name == "ffsl" || Name == "abs" || Name == "labs" ||
Name == "llabs")
return false;
return true;
}
void getUnrollingPreferences(Loop *, TTI::UnrollingPreferences &) {}
bool isLegalAddImmediate(int64_t Imm) { return false; }
bool isLegalICmpImmediate(int64_t Imm) { return false; }
bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
bool HasBaseReg, int64_t Scale,
unsigned AddrSpace) {
// Guess that only reg and reg+reg addressing is allowed. This heuristic is
// taken from the implementation of LSR.
return !BaseGV && BaseOffset == 0 && (Scale == 0 || Scale == 1);
}
bool isLegalMaskedStore(Type *DataType, int Consecutive) { return false; }
bool isLegalMaskedLoad(Type *DataType, int Consecutive) { return false; }
int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
// Guess that all legal addressing mode are free.
if (isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
Scale, AddrSpace))
return 0;
return -1;
}
bool isTruncateFree(Type *Ty1, Type *Ty2) { return false; }
bool isProfitableToHoist(Instruction *I) { return true; }
bool isTypeLegal(Type *Ty) { return false; }
unsigned getJumpBufAlignment() { return 0; }
unsigned getJumpBufSize() { return 0; }
bool shouldBuildLookupTables() { return true; }
bool enableAggressiveInterleaving(bool LoopHasReductions) { return false; }
TTI::PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) {
return TTI::PSK_Software;
}
bool haveFastSqrt(Type *Ty) { return false; }
unsigned getFPOpCost(Type *Ty) { return TargetTransformInfo::TCC_Basic; }
unsigned getIntImmCost(const APInt &Imm, Type *Ty) { return TTI::TCC_Basic; }
unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
Type *Ty) {
return TTI::TCC_Free;
}
unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
Type *Ty) {
return TTI::TCC_Free;
}
unsigned getNumberOfRegisters(bool Vector) { return 8; }
unsigned getRegisterBitWidth(bool Vector) { return 32; }
unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }
unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
TTI::OperandValueKind Opd1Info,
TTI::OperandValueKind Opd2Info,
TTI::OperandValueProperties Opd1PropInfo,
TTI::OperandValueProperties Opd2PropInfo) {
return 1;
}
unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Ty, int Index,
Type *SubTp) {
return 1;
}
unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) { return 1; }
unsigned getCFInstrCost(unsigned Opcode) { return 1; }
unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
return 1;
}
unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
return 1;
}
unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace) {
return 1;
}
unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace) {
return 1;
}
unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
unsigned Factor,
ArrayRef<unsigned> Indices,
unsigned Alignment,
unsigned AddressSpace) {
return 1;
}
unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
ArrayRef<Type *> Tys) {
return 1;
}
unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) {
return 1;
}
unsigned getNumberOfParts(Type *Tp) { return 0; }
unsigned getAddressComputationCost(Type *Tp, bool) { return 0; }
unsigned getReductionCost(unsigned, Type *, bool) { return 1; }
unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) { return 0; }
bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) {
return false;
}
Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
Type *ExpectedType) {
return nullptr;
}
bool hasCompatibleFunctionAttributes(const Function *Caller,
const Function *Callee) const {
return (Caller->getFnAttribute("target-cpu") ==
Callee->getFnAttribute("target-cpu")) &&
(Caller->getFnAttribute("target-features") ==
Callee->getFnAttribute("target-features"));
}
};
/// \brief CRTP base class for use as a mix-in that aids implementing
/// a TargetTransformInfo-compatible class.
template <typename T>
class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {
private:
typedef TargetTransformInfoImplBase BaseT;
protected:
explicit TargetTransformInfoImplCRTPBase(const DataLayout &DL) : BaseT(DL) {}
public:
// Provide value semantics. MSVC requires that we spell all of these out.
TargetTransformInfoImplCRTPBase(const TargetTransformInfoImplCRTPBase &Arg)
: BaseT(static_cast<const BaseT &>(Arg)) {}
TargetTransformInfoImplCRTPBase(TargetTransformInfoImplCRTPBase &&Arg)
: BaseT(std::move(static_cast<BaseT &>(Arg))) {}
using BaseT::getCallCost;
unsigned getCallCost(const Function *F, int NumArgs) {
assert(F && "A concrete function must be provided to this routine.");
if (NumArgs < 0)
// Set the argument number to the number of explicit arguments in the
// function.
NumArgs = F->arg_size();
if (Intrinsic::ID IID = F->getIntrinsicID()) {
FunctionType *FTy = F->getFunctionType();
SmallVector<Type *, 8> ParamTys(FTy->param_begin(), FTy->param_end());
return static_cast<T *>(this)
->getIntrinsicCost(IID, FTy->getReturnType(), ParamTys);
}
if (!static_cast<T *>(this)->isLoweredToCall(F))
return TTI::TCC_Basic; // Give a basic cost if it will be lowered
// directly.
return static_cast<T *>(this)->getCallCost(F->getFunctionType(), NumArgs);
}
unsigned getCallCost(const Function *F, ArrayRef<const Value *> Arguments) {
// Simply delegate to generic handling of the call.
// FIXME: We should use instsimplify or something else to catch calls which
// will constant fold with these arguments.
return static_cast<T *>(this)->getCallCost(F, Arguments.size());
}
using BaseT::getIntrinsicCost;
unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<const Value *> Arguments) {
// Delegate to the generic intrinsic handling code. This mostly provides an
// opportunity for targets to (for example) special case the cost of
// certain intrinsics based on constants used as arguments.
SmallVector<Type *, 8> ParamTys;
ParamTys.reserve(Arguments.size());
for (unsigned Idx = 0, Size = Arguments.size(); Idx != Size; ++Idx)
ParamTys.push_back(Arguments[Idx]->getType());
return static_cast<T *>(this)->getIntrinsicCost(IID, RetTy, ParamTys);
}
unsigned getUserCost(const User *U) {
if (isa<PHINode>(U))
return TTI::TCC_Free; // Model all PHI nodes as free.
if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U)) {
SmallVector<const Value *, 4> Indices(GEP->idx_begin(), GEP->idx_end());
return static_cast<T *>(this)
->getGEPCost(GEP->getPointerOperand(), Indices);
}
if (auto CS = ImmutableCallSite(U)) {
const Function *F = CS.getCalledFunction();
if (!F) {
// Just use the called value type.
Type *FTy = CS.getCalledValue()->getType()->getPointerElementType();
return static_cast<T *>(this)
->getCallCost(cast<FunctionType>(FTy), CS.arg_size());
}
SmallVector<const Value *, 8> Arguments(CS.arg_begin(), CS.arg_end());
return static_cast<T *>(this)->getCallCost(F, Arguments);
}
if (const CastInst *CI = dyn_cast<CastInst>(U)) {
// Result of a cmp instruction is often extended (to be used by other
// cmp instructions, logical or return instructions). These are usually
// nop on most sane targets.
if (isa<CmpInst>(CI->getOperand(0)))
return TTI::TCC_Free;
}
return static_cast<T *>(this)->getOperationCost(
Operator::getOpcode(U), U->getType(),
U->getNumOperands() == 1 ? U->getOperand(0)->getType() : nullptr);
}
};
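// A minimal sketch of a hypothetical client of this mix-in (the class name is
// an assumption; in-tree targets normally layer additional helpers on top of
// this base rather than deriving from it directly):
//
//   class MyTTIImpl : public TargetTransformInfoImplCRTPBase<MyTTIImpl> {
//     typedef TargetTransformInfoImplCRTPBase<MyTTIImpl> BaseT;
//   public:
//     explicit MyTTIImpl(const DataLayout &DL) : BaseT(DL) {}
//     // Shadow only the hooks whose conservative defaults are wrong for the
//     // target; everything else is picked up from the bases above.
//     unsigned getNumberOfRegisters(bool Vector) { return Vector ? 0 : 16; }
//   };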
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/InstructionSimplify.h | //===-- InstructionSimplify.h - Fold instrs into simpler forms --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares routines for folding instructions into simpler forms
// that do not require creating new instructions. This does constant folding
// ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
// returning a constant ("and i32 %x, 0" -> "0") or an already existing value
// ("and i32 %x, %x" -> "%x"). If the simplification is also an instruction
// then it dominates the original instruction.
//
// These routines implicitly resolve undef uses. The easiest way to be safe when
// using these routines to obtain simplified values for existing instructions is
// to always replace all uses of the instructions with the resulting simplified
// values. This will prevent other code from seeing the same undef uses and
// resolving them to different values.
//
// These routines are designed to tolerate moderately incomplete IR, such as
// instructions that are not connected to basic blocks yet. However, they do
// require that all the IR that they encounter be valid. In particular, they
// require that all non-constant values be defined in the same function, and the
// same call context of that function (and not split between caller and callee
// contexts of a directly recursive call, for example).
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_INSTRUCTIONSIMPLIFY_H
#define LLVM_ANALYSIS_INSTRUCTIONSIMPLIFY_H
#include "llvm/IR/User.h"
namespace llvm {
template<typename T>
class ArrayRef;
class AssumptionCache;
class DominatorTree;
class Instruction;
class DataLayout;
class FastMathFlags;
class TargetLibraryInfo;
class Type;
class Value;
/// SimplifyAddInst - Given operands for an Add, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// SimplifySubInst - Given operands for a Sub, see if we can
/// fold the result. If not, this returns null.
Value *SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// Given operands for an FAdd, see if we can fold the result. If not, this
/// returns null.
Value *SimplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// Given operands for an FSub, see if we can fold the result. If not, this
/// returns null.
Value *SimplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// Given operands for an FMul, see if we can fold the result. If not, this
/// returns null.
Value *SimplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// SimplifyMulInst - Given operands for a Mul, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyMulInst(Value *LHS, Value *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// SimplifySDivInst - Given operands for an SDiv, see if we can
/// fold the result. If not, this returns null.
Value *SimplifySDivInst(Value *LHS, Value *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// SimplifyUDivInst - Given operands for a UDiv, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyUDivInst(Value *LHS, Value *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// SimplifyFDivInst - Given operands for an FDiv, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// SimplifySRemInst - Given operands for an SRem, see if we can
/// fold the result. If not, this returns null.
Value *SimplifySRemInst(Value *LHS, Value *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// SimplifyURemInst - Given operands for a URem, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyURemInst(Value *LHS, Value *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// SimplifyFRemInst - Given operands for an FRem, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// SimplifyShlInst - Given operands for a Shl, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// SimplifyLShrInst - Given operands for an LShr, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// SimplifyAShrInst - Given operands for an AShr, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// SimplifyAndInst - Given operands for an And, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyAndInst(Value *LHS, Value *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// SimplifyOrInst - Given operands for an Or, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyOrInst(Value *LHS, Value *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// SimplifyXorInst - Given operands for a Xor, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyXorInst(Value *LHS, Value *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// SimplifyICmpInst - Given operands for an ICmpInst, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
Instruction *CxtI = nullptr);
/// SimplifyFCmpInst - Given operands for an FCmpInst, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
FastMathFlags FMF, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// SimplifySelectInst - Given operands for a SelectInst, see if we can fold
/// the result. If not, this returns null.
Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// SimplifyGEPInst - Given operands for a GetElementPtrInst, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// SimplifyInsertValueInst - Given operands for an InsertValueInst, see if we
/// can fold the result. If not, this returns null.
Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// \brief Given operands for an ExtractValueInst, see if we can fold the
/// result. If not, this returns null.
Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// \brief Given operands for an ExtractElementInst, see if we can fold the
/// result. If not, this returns null.
Value *SimplifyExtractElementInst(Value *Vec, Value *Idx,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// SimplifyTruncInst - Given operands for a TruncInst, see if we can fold
/// the result. If not, this returns null.
Value *SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
//=== Helper functions for higher up the class hierarchy.
/// SimplifyCmpInst - Given operands for a CmpInst, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// SimplifyBinOp - Given operands for a BinaryOperator, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// SimplifyFPBinOp - Given operands for a BinaryOperator, see if we can
/// fold the result. If not, this returns null.
/// In contrast to SimplifyBinOp, try to use FastMathFlag when folding the
/// result. In case we don't need FastMathFlags, simply fall back to SimplifyBinOp.
Value *SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
const FastMathFlags &FMF, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// \brief Given a function and iterators over arguments, see if we can fold
/// the result.
///
/// If this call could not be simplified returns null.
Value *SimplifyCall(Value *V, User::op_iterator ArgBegin,
User::op_iterator ArgEnd, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
/// \brief Given a function and set of arguments, see if we can fold the
/// result.
///
/// If this call could not be simplified returns null.
Value *SimplifyCall(Value *V, ArrayRef<Value *> Args, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr);
// HLSL Change - Begin
Value *SimplifyCastInst(unsigned CastOpc, Value *Op,
Type *Ty, const DataLayout &DL);
// HLSL Change - End
/// SimplifyInstruction - See if we can compute a simplified version of this
/// instruction. If not, this returns null.
Value *SimplifyInstruction(Instruction *I, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr);
/// \brief Replace all uses of 'I' with 'SimpleV' and simplify the uses
/// recursively.
///
/// This first performs a normal RAUW of I with SimpleV. It then recursively
/// attempts to simplify those users updated by the operation. The 'I'
/// instruction must not be equal to the simplified value 'SimpleV'.
///
/// The function returns true if any simplifications were performed.
bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr);
/// \brief Recursively attempt to simplify an instruction.
///
/// This routine uses SimplifyInstruction to simplify 'I', and if successful
/// replaces uses of 'I' with the simplified value. It then recurses on each
/// of the users impacted. It returns true if any simplifications were
/// performed.
bool recursivelySimplifyInstruction(Instruction *I,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr);
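// Illustrative sketch (hypothetical helper, not part of the original header):
// simplify 'I' and, if a simpler value exists, replace all of I's uses with it
// and recursively simplify the affected users via the routine declared above.
inline bool simplifyAndReplace(Instruction *I, const DataLayout &DL,
                               const TargetLibraryInfo *TLI = nullptr,
                               const DominatorTree *DT = nullptr,
                               AssumptionCache *AC = nullptr) {
  Value *SimpleV = SimplifyInstruction(I, DL, TLI, DT, AC);
  if (!SimpleV)
    return false;
  // SimplifyInstruction never returns 'I' itself, so the precondition of
  // replaceAndRecursivelySimplify (I != SimpleV) holds here.
  replaceAndRecursivelySimplify(I, SimpleV, TLI, DT, AC);
  return true; // At minimum, 'I' itself was simplified and replaced.
}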
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/DomPrinter.h | //===-- DomPrinter.h - Dom printer external interface ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines external functions that can be called to explicitly
// instantiate the dominance tree printer.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_DOMPRINTER_H
#define LLVM_ANALYSIS_DOMPRINTER_H
namespace llvm {
class FunctionPass;
FunctionPass *createDomPrinterPass();
FunctionPass *createDomOnlyPrinterPass();
FunctionPass *createDomViewerPass();
FunctionPass *createDomOnlyViewerPass();
FunctionPass *createPostDomPrinterPass();
FunctionPass *createPostDomOnlyPrinterPass();
FunctionPass *createPostDomViewerPass();
FunctionPass *createPostDomOnlyViewerPass();
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/LibCallSemantics.h | //===- LibCallSemantics.h - Describe library semantics --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines interfaces that can be used to describe language specific
// runtime library interfaces (e.g. libc, libm, etc) to LLVM optimizers.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_LIBCALLSEMANTICS_H
#define LLVM_ANALYSIS_LIBCALLSEMANTICS_H
#include "llvm/Analysis/AliasAnalysis.h"
namespace llvm {
class InvokeInst;
/// LibCallLocationInfo - This struct describes a set of memory locations that
/// are accessed by libcalls. Identification of a location is done with a
/// simple callback function.
///
/// For example, the LibCallInfo may be set up to model the behavior of
/// standard libm functions. The location that they may be interested in is
/// an abstract location that represents errno for the current target. In
/// this case, a location for errno is anything such that the predicate
/// returns true. On Mac OS X, this predicate would return true if the
/// pointer is the result of a call to "__error()".
///
/// Locations can also be defined in a constant-sensitive way. For example,
/// it is possible to define a location that returns true iff it is passed
/// into the call as a specific argument. This is useful for modeling things
/// like "printf", which can store to memory, but only through pointers passed
/// with a '%n' constraint.
///
struct LibCallLocationInfo {
// TODO: Flags: isContextSensitive etc.
/// isLocation - Return a LocResult if the specified pointer refers to this
/// location for the specified call site. This returns "Yes" if we can tell
/// that the pointer *does definitely* refer to the location, "No" if we can
/// tell that the pointer *definitely does not* refer to the location, and
/// returns "Unknown" if we cannot tell for certain.
enum LocResult {
Yes, No, Unknown
};
LocResult (*isLocation)(ImmutableCallSite CS, const MemoryLocation &Loc);
};
/// LibCallFunctionInfo - Each record in the array of FunctionInfo structs
/// records the behavior of one libcall that is known by the optimizer. This
/// captures things like the side effects of the call. Side effects are
/// modeled both universally (in the readnone/readonly) sense, but also
/// potentially against a set of abstract locations defined by the optimizer.
/// This allows an optimizer to define that some libcall (e.g. sqrt) is
/// side-effect free except that it might modify errno (thus, the call is
/// *not* universally readonly). Or it might say that the side effects
/// are unknown other than to say that errno is not modified.
///
struct LibCallFunctionInfo {
/// Name - This is the name of the libcall this describes.
const char *Name;
/// TODO: Constant folding function: Constant* vector -> Constant*.
/// UniversalBehavior - This captures the absolute mod/ref behavior without
/// any specific context knowledge. For example, if the function is known
/// to be readonly, this would be set to 'ref'. If known to be readnone,
/// this is set to NoModRef.
AliasAnalysis::ModRefResult UniversalBehavior;
/// LocationMRInfo - This pair captures info about whether a specific
/// location is modified or referenced by a libcall.
struct LocationMRInfo {
/// LocationID - ID # of the accessed location or ~0U for array end.
unsigned LocationID;
/// MRInfo - Mod/Ref info for this location.
AliasAnalysis::ModRefResult MRInfo;
};
/// DetailsType - Indicate the sense of the LocationDetails array. This
/// controls how the LocationDetails array is interpreted.
enum {
/// DoesOnly - If DetailsType is set to DoesOnly, then we know that the
/// *only* mod/ref behavior of this function is captured by the
/// LocationDetails array. If we are trying to say that 'sqrt' can only
/// modify errno, we'd have the {errnoloc,mod} in the LocationDetails
/// array and have DetailsType set to DoesOnly.
DoesOnly,
/// DoesNot - If DetailsType is set to DoesNot, then the sense of the
/// LocationDetails array is completely inverted. This means that we *do
/// not* know everything about the side effects of this libcall, but we do
/// know things that the libcall cannot do. This is useful for complex
/// functions like 'ctime' which have crazy mod/ref behavior, but are
/// known to never read or write errno. In this case, we'd have
/// {errnoloc,modref} in the LocationDetails array and DetailsType would
/// be set to DoesNot, indicating that ctime does not read or write the
/// errno location.
DoesNot
} DetailsType;
/// LocationDetails - This is a pointer to an array of LocationMRInfo
/// structs which indicates the behavior of the libcall w.r.t. specific
/// locations. For example, if this libcall is known to only modify
/// 'errno', it would have a LocationDetails array with the errno ID and
/// 'mod' in it. See the DetailsType field for how this is interpreted.
///
/// In the "DoesOnly" case, this information is 'may' information for: there
/// is no guarantee that the specified side effect actually does happen,
/// just that it could. In the "DoesNot" case, this is 'must not' info.
///
/// If this pointer is null, no details are known.
///
const LocationMRInfo *LocationDetails;
};
/// LibCallInfo - Abstract interface to query about library call information.
/// Instances of this class return known information about some set of
/// libcalls.
///
class LibCallInfo {
// Implementation details of this object, private.
mutable void *Impl;
mutable const LibCallLocationInfo *Locations;
mutable unsigned NumLocations;
public:
LibCallInfo() : Impl(nullptr), Locations(nullptr), NumLocations(0) {}
virtual ~LibCallInfo();
//===------------------------------------------------------------------===//
// Accessor Methods: Efficient access to contained data.
//===------------------------------------------------------------------===//
/// getLocationInfo - Return information about the specified LocationID.
const LibCallLocationInfo &getLocationInfo(unsigned LocID) const;
/// getFunctionInfo - Return the LibCallFunctionInfo object corresponding to
/// the specified function if we have it. If not, return null.
const LibCallFunctionInfo *getFunctionInfo(const Function *F) const;
//===------------------------------------------------------------------===//
// Implementation Methods: Subclasses should implement these.
//===------------------------------------------------------------------===//
/// getLocationInfo - Return descriptors for the locations referenced by
/// this set of libcalls.
virtual unsigned getLocationInfo(const LibCallLocationInfo *&Array) const {
return 0;
}
/// getFunctionInfoArray - Return an array of descriptors that describe the
/// set of libcalls represented by this LibCallInfo object. This array is
/// terminated by an entry with a NULL name.
virtual const LibCallFunctionInfo *getFunctionInfoArray() const = 0;
};
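// Illustrative sketch (hypothetical subclass, not part of the original header):
// a minimal LibCallInfo that models 'sqrt' as side-effect free except for a
// possible store to an abstract errno location, using the DoesOnly encoding
// described above. The errno predicate is a conservative placeholder; a real
// client would recognize the target's actual errno access pattern.
namespace example_libcall_info {
inline LibCallLocationInfo::LocResult
isErrnoLocation(ImmutableCallSite, const MemoryLocation &) {
  return LibCallLocationInfo::Unknown; // Conservatively unknown.
}

class SqrtOnlyLibCallInfo : public LibCallInfo {
  unsigned getLocationInfo(const LibCallLocationInfo *&Array) const override {
    static const LibCallLocationInfo Locations[] = {{isErrnoLocation}};
    Array = Locations;
    return 1; // Location 0 is the abstract errno location.
  }
  const LibCallFunctionInfo *getFunctionInfoArray() const override {
    // 'sqrt' only mods location 0 (errno); ~0U terminates the detail list and
    // a null Name terminates the function list.
    static const LibCallFunctionInfo::LocationMRInfo SqrtDetails[] = {
        {0, AliasAnalysis::Mod}, {~0U, AliasAnalysis::NoModRef}};
    static const LibCallFunctionInfo Functions[] = {
        {"sqrt", AliasAnalysis::Mod, LibCallFunctionInfo::DoesOnly,
         SqrtDetails},
        {nullptr, AliasAnalysis::NoModRef, LibCallFunctionInfo::DoesOnly,
         nullptr}};
    return Functions;
  }
};
} // end namespace example_libcall_info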
enum class EHPersonality {
Unknown,
GNU_Ada,
GNU_C,
GNU_CXX,
GNU_ObjC,
MSVC_X86SEH,
MSVC_Win64SEH,
MSVC_CXX,
};
/// \brief See if the given exception handling personality function is one
/// that we understand. If so, return a description of it; otherwise return
/// Unknown.
EHPersonality classifyEHPersonality(const Value *Pers);
/// \brief Returns true if this personality function catches asynchronous
/// exceptions.
inline bool isAsynchronousEHPersonality(EHPersonality Pers) {
// The two SEH personality functions can catch asynch exceptions. We assume
// unknown personalities don't catch asynch exceptions.
switch (Pers) {
case EHPersonality::MSVC_X86SEH:
case EHPersonality::MSVC_Win64SEH:
return true;
default: return false;
}
llvm_unreachable("invalid enum");
}
/// \brief Returns true if this is an MSVC personality function.
inline bool isMSVCEHPersonality(EHPersonality Pers) {
  // This covers the MSVC C++ EH personality as well as both MSVC SEH
  // personalities. We assume unknown personalities are not MSVC.
switch (Pers) {
case EHPersonality::MSVC_CXX:
case EHPersonality::MSVC_X86SEH:
case EHPersonality::MSVC_Win64SEH:
return true;
default: return false;
}
llvm_unreachable("invalid enum");
}
/// \brief Return true if this personality may be safely removed if there
/// are no invoke instructions remaining in the current function.
inline bool isNoOpWithoutInvoke(EHPersonality Pers) {
switch (Pers) {
case EHPersonality::Unknown:
return false;
// All known personalities currently have this behavior
default: return true;
}
llvm_unreachable("invalid enum");
}
bool canSimplifyInvokeNoUnwind(const Function *F);
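// Illustrative sketch (hypothetical helper, not part of the original header):
// combine the classification and the asynchronous-EH predicate above. Returns
// false both for unrecognized personalities and for personalities that only
// handle synchronous exceptions.
inline bool isKnownAsynchronousEHPersonality(const Value *Pers) {
  EHPersonality P = classifyEHPersonality(Pers);
  return P != EHPersonality::Unknown && isAsynchronousEHPersonality(P);
}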
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/ConstantFolding.h | //===-- ConstantFolding.h - Fold instructions into constants ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares routines for folding instructions into constants when all
// operands are constants, for example "sub i32 1, 0" -> "1".
//
// Also, to supplement the basic VMCore ConstantExpr simplifications,
// this file declares some additional folding routines that can make use of
// DataLayout information. These functions cannot go in VMCore due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_CONSTANTFOLDING_H
#define LLVM_ANALYSIS_CONSTANTFOLDING_H
#include "dxc/WinAdapter.h" // HLSL Change
namespace llvm {
class Constant;
class ConstantFP;
class ConstantExpr;
class Instruction;
class DataLayout;
class TargetLibraryInfo;
class Function;
class Type;
template<typename T>
class ArrayRef;
/// ConstantFoldInstruction - Try to constant fold the specified instruction.
/// If successful, the constant result is returned, if not, null is returned.
/// Note that this fails if not all of the operands are constant. Otherwise,
/// this function can only fail when attempting to fold instructions like loads
/// and stores, which have no constant expression form.
Constant *ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr);
/// ConstantFoldConstantExpression - Attempt to fold the constant expression
/// using the specified DataLayout. If successful, the constant result is
/// returned; if not, null is returned.
Constant *
ConstantFoldConstantExpression(const ConstantExpr *CE, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr);
/// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
/// specified operands. If successful, the constant result is returned, if not,
/// null is returned. Note that this function can fail when attempting to
/// fold instructions like loads and stores, which have no constant expression
/// form.
///
Constant *ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
ArrayRef<Constant *> Ops,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr);
/// ConstantFoldCompareInstOperands - Attempt to constant fold a compare
/// instruction (icmp/fcmp) with the specified operands. If it fails, it
/// returns a constant expression of the specified operands.
///
Constant *
ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS,
Constant *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr);
/// ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue
/// instruction with the specified operands and indices. The constant result is
/// returned if successful; if not, null is returned.
Constant *ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val,
ArrayRef<unsigned> Idxs);
/// \brief Attempt to constant fold an extractvalue instruction with the
/// specified operands and indices. The constant result is returned if
/// successful; if not, null is returned.
Constant *ConstantFoldExtractValueInstruction(Constant *Agg,
ArrayRef<unsigned> Idxs);
/// \brief Attempt to constant fold an extractelement instruction with the
/// specified operands and indices. The constant result is returned if
/// successful; if not, null is returned.
Constant *ConstantFoldExtractElementInstruction(Constant *Val, Constant *Idx);
/// ConstantFoldLoadFromConstPtr - Return the value that a load from C would
/// produce if it is constant and determinable. If this is not determinable,
/// return null.
Constant *ConstantFoldLoadFromConstPtr(Constant *C, const DataLayout &DL);
/// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a
/// getelementptr constantexpr, return the constant value being addressed by the
/// constant expression, or null if something is funny and we can't decide.
Constant *ConstantFoldLoadThroughGEPConstantExpr(Constant *C, ConstantExpr *CE);
/// ConstantFoldLoadThroughGEPIndices - Given a constant and getelementptr
/// indices (with an *implied* zero pointer index that is not in the list),
/// return the constant value being addressed by a virtual load, or null if
/// something is funny and we can't decide.
Constant *ConstantFoldLoadThroughGEPIndices(Constant *C,
ArrayRef<Constant*> Indices);
/// canConstantFoldCallTo - Return true if it's even possible to fold a call to
/// the specified function.
bool canConstantFoldCallTo(const Function *F);
/// ConstantFoldCall - Attempt to constant fold a call to the specified function
/// with the specified arguments, returning null if unsuccessful.
Constant *ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
const TargetLibraryInfo *TLI = nullptr);
/// HLSL Change - make these functions external so we can call them from
/// DxilConstantFolding.cpp.
Constant *ConstantFoldFP(double(__cdecl *NativeFP)(double), double V, Type *Ty);
double getValueAsDouble(ConstantFP *Op);
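// Illustrative sketch (hypothetical helper, not part of the original header):
// how the two HLSL-exported hooks above compose to fold a unary libm-style
// call, e.g. mapping a constant operand through the native 'cos' or 'sqrt'.
inline Constant *foldUnaryFPCall(double(__cdecl *NativeFP)(double),
                                 ConstantFP *Op, Type *Ty) {
  // Convert the constant operand to double, evaluate natively, and rebuild a
  // constant of the requested type.
  return ConstantFoldFP(NativeFP, getValueAsDouble(Op), Ty);
}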
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/MemoryLocation.h | //===- MemoryLocation.h - Memory location descriptions ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides utility analysis objects describing memory locations.
/// These are used both by the Alias Analysis infrastructure and more
/// specialized memory analysis layers.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_MEMORYLOCATION_H
#define LLVM_ANALYSIS_MEMORYLOCATION_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Metadata.h"
namespace llvm {
class LoadInst;
class StoreInst;
class MemTransferInst;
class MemIntrinsic;
class TargetLibraryInfo;
/// Representation for a specific memory location.
///
/// This abstraction can be used to represent a specific location in memory.
/// The goal of the location is to represent enough information to describe
/// abstract aliasing, modification, and reference behaviors of whatever
/// value(s) are stored in memory at the particular location.
///
/// The primary user of this interface is LLVM's Alias Analysis, but other
/// memory analyses such as MemoryDependence can use it as well.
class MemoryLocation {
public:
/// UnknownSize - This is a special value which can be used with the
/// size arguments in alias queries to indicate that the caller does not
/// know the sizes of the potential memory references.
enum : uint64_t { UnknownSize = ~UINT64_C(0) };
/// The address of the start of the location.
const Value *Ptr;
/// The maximum size of the location, in address-units, or
/// UnknownSize if the size is not known.
///
/// Note that an unknown size does not mean the pointer aliases the entire
/// virtual address space, because there are restrictions on stepping out of
/// one object and into another. See
/// http://llvm.org/docs/LangRef.html#pointeraliasing
uint64_t Size;
/// The metadata nodes which describes the aliasing of the location (each
/// member is null if that kind of information is unavailable).
AAMDNodes AATags;
/// Return a location with information about the memory referenced by the given
/// instruction.
static MemoryLocation get(const LoadInst *LI);
static MemoryLocation get(const StoreInst *SI);
static MemoryLocation get(const VAArgInst *VI);
static MemoryLocation get(const AtomicCmpXchgInst *CXI);
static MemoryLocation get(const AtomicRMWInst *RMWI);
static MemoryLocation get(const Instruction *Inst) {
if (auto *I = dyn_cast<LoadInst>(Inst))
return get(I);
else if (auto *I = dyn_cast<StoreInst>(Inst))
return get(I);
else if (auto *I = dyn_cast<VAArgInst>(Inst))
return get(I);
else if (auto *I = dyn_cast<AtomicCmpXchgInst>(Inst))
return get(I);
else if (auto *I = dyn_cast<AtomicRMWInst>(Inst))
return get(I);
llvm_unreachable("unsupported memory instruction");
}
/// Return a location representing the source of a memory transfer.
static MemoryLocation getForSource(const MemTransferInst *MTI);
/// Return a location representing the destination of a memory set or
/// transfer.
static MemoryLocation getForDest(const MemIntrinsic *MI);
/// Return a location representing a particular argument of a call.
static MemoryLocation getForArgument(ImmutableCallSite CS, unsigned ArgIdx,
const TargetLibraryInfo &TLI);
explicit MemoryLocation(const Value *Ptr = nullptr,
uint64_t Size = UnknownSize,
const AAMDNodes &AATags = AAMDNodes())
: Ptr(Ptr), Size(Size), AATags(AATags) {}
MemoryLocation getWithNewPtr(const Value *NewPtr) const {
MemoryLocation Copy(*this);
Copy.Ptr = NewPtr;
return Copy;
}
MemoryLocation getWithNewSize(uint64_t NewSize) const {
MemoryLocation Copy(*this);
Copy.Size = NewSize;
return Copy;
}
MemoryLocation getWithoutAATags() const {
MemoryLocation Copy(*this);
Copy.AATags = AAMDNodes();
return Copy;
}
bool operator==(const MemoryLocation &Other) const {
return Ptr == Other.Ptr && Size == Other.Size && AATags == Other.AATags;
}
};
// Specialize DenseMapInfo for MemoryLocation.
template <> struct DenseMapInfo<MemoryLocation> {
static inline MemoryLocation getEmptyKey() {
return MemoryLocation(DenseMapInfo<const Value *>::getEmptyKey(), 0);
}
static inline MemoryLocation getTombstoneKey() {
return MemoryLocation(DenseMapInfo<const Value *>::getTombstoneKey(), 0);
}
static unsigned getHashValue(const MemoryLocation &Val) {
return DenseMapInfo<const Value *>::getHashValue(Val.Ptr) ^
DenseMapInfo<uint64_t>::getHashValue(Val.Size) ^
DenseMapInfo<AAMDNodes>::getHashValue(Val.AATags);
}
static bool isEqual(const MemoryLocation &LHS, const MemoryLocation &RHS) {
return LHS == RHS;
}
};
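// Illustrative sketch (hypothetical helper, not part of the original header):
// the DenseMapInfo specialization above lets MemoryLocation serve directly as
// a DenseMap key, e.g. to memoize a per-location boolean property.
inline bool &cachedPropertyFor(DenseMap<MemoryLocation, bool> &Cache,
                               const MemoryLocation &Loc) {
  return Cache[Loc]; // Default-constructed to false on first lookup.
}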
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/MemoryBuiltins.h | //===- llvm/Analysis/MemoryBuiltins.h- Calls to memory builtins -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This family of functions identifies calls to builtin functions that allocate
// or free memory.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_MEMORYBUILTINS_H
#define LLVM_ANALYSIS_MEMORYBUILTINS_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/DataTypes.h"
namespace llvm {
class CallInst;
class PointerType;
class DataLayout;
class TargetLibraryInfo;
class Type;
class Value;
/// \brief Tests if a value is a call or invoke to a library function that
/// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
/// like).
bool isAllocationFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a function that returns a
/// NoAlias pointer (including malloc/calloc/realloc/strdup-like functions).
bool isNoAliasFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory (such as malloc).
bool isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a library function that
/// allocates zero-filled memory (such as calloc).
bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup like).
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a library function that
/// reallocates memory (such as realloc).
bool isReallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a library function that
/// allocates memory and never returns null (such as operator new).
bool isOperatorNewLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
//===----------------------------------------------------------------------===//
// malloc Call Utility Functions.
//
/// extractMallocCall - Returns the corresponding CallInst if the instruction
/// is a malloc call. Since CallInst::CreateMalloc() only creates calls, we
/// ignore InvokeInst here.
const CallInst *extractMallocCall(const Value *I, const TargetLibraryInfo *TLI);
static inline CallInst *extractMallocCall(Value *I,
const TargetLibraryInfo *TLI) {
return const_cast<CallInst*>(extractMallocCall((const Value*)I, TLI));
}
/// getMallocType - Returns the PointerType resulting from the malloc call.
/// The PointerType depends on the number of bitcast uses of the malloc call:
/// 0: PointerType is the malloc calls' return type.
/// 1: PointerType is the bitcast's result type.
/// >1: Unique PointerType cannot be determined, return NULL.
PointerType *getMallocType(const CallInst *CI, const TargetLibraryInfo *TLI);
/// getMallocAllocatedType - Returns the Type allocated by malloc call.
/// The Type depends on the number of bitcast uses of the malloc call:
/// 0: PointerType is the malloc calls' return type.
/// 1: PointerType is the bitcast's result type.
/// >1: Unique PointerType cannot be determined, return NULL.
Type *getMallocAllocatedType(const CallInst *CI, const TargetLibraryInfo *TLI);
/// getMallocArraySize - Returns the array size of a malloc call. If the
/// argument passed to malloc is a multiple of the size of the malloced type,
/// then return that multiple. For non-array mallocs, the multiple is
/// constant 1. Otherwise, return NULL for mallocs whose array size cannot be
/// determined.
Value *getMallocArraySize(CallInst *CI, const DataLayout &DL,
const TargetLibraryInfo *TLI,
bool LookThroughSExt = false);
//===----------------------------------------------------------------------===//
// calloc Call Utility Functions.
//
/// extractCallocCall - Returns the corresponding CallInst if the instruction
/// is a calloc call.
const CallInst *extractCallocCall(const Value *I, const TargetLibraryInfo *TLI);
static inline CallInst *extractCallocCall(Value *I,
const TargetLibraryInfo *TLI) {
return const_cast<CallInst*>(extractCallocCall((const Value*)I, TLI));
}
//===----------------------------------------------------------------------===//
// free Call Utility Functions.
//
/// isFreeCall - Returns non-null if the value is a call to the builtin free()
const CallInst *isFreeCall(const Value *I, const TargetLibraryInfo *TLI);
static inline CallInst *isFreeCall(Value *I, const TargetLibraryInfo *TLI) {
return const_cast<CallInst*>(isFreeCall((const Value*)I, TLI));
}
//===----------------------------------------------------------------------===//
// Utility functions to compute size of objects.
//
/// \brief Compute the size of the object pointed by Ptr. Returns true and the
/// object size in Size if successful, and false otherwise. In this context, by
/// object we mean the region of memory starting at Ptr to the end of the
/// underlying object pointed to by Ptr.
/// If RoundToAlign is true, then Size is rounded up to the alignment of allocas,
/// byval arguments, and global variables.
bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
const TargetLibraryInfo *TLI, bool RoundToAlign = false);
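// Illustrative sketch (hypothetical helper, not part of the original header):
// return the statically known object size for Ptr, or Fallback when the size
// cannot be determined.
inline uint64_t getObjectSizeOr(const Value *Ptr, const DataLayout &DL,
                                const TargetLibraryInfo *TLI,
                                uint64_t Fallback) {
  uint64_t Size;
  return getObjectSize(Ptr, Size, DL, TLI) ? Size : Fallback;
}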
typedef std::pair<APInt, APInt> SizeOffsetType;
/// \brief Evaluate the size and offset of an object pointed to by a Value*
/// statically. Fails if size or offset are not known at compile time.
class ObjectSizeOffsetVisitor
: public InstVisitor<ObjectSizeOffsetVisitor, SizeOffsetType> {
const DataLayout &DL;
const TargetLibraryInfo *TLI;
bool RoundToAlign;
unsigned IntTyBits;
APInt Zero;
SmallPtrSet<Instruction *, 8> SeenInsts;
APInt align(APInt Size, uint64_t Align);
SizeOffsetType unknown() {
return std::make_pair(APInt(), APInt());
}
public:
ObjectSizeOffsetVisitor(const DataLayout &DL, const TargetLibraryInfo *TLI,
LLVMContext &Context, bool RoundToAlign = false);
SizeOffsetType compute(Value *V);
bool knownSize(SizeOffsetType &SizeOffset) {
return SizeOffset.first.getBitWidth() > 1;
}
bool knownOffset(SizeOffsetType &SizeOffset) {
return SizeOffset.second.getBitWidth() > 1;
}
bool bothKnown(SizeOffsetType &SizeOffset) {
return knownSize(SizeOffset) && knownOffset(SizeOffset);
}
// These are "private", except they can't actually be made private. Only
// compute() should be used by external users.
SizeOffsetType visitAllocaInst(AllocaInst &I);
SizeOffsetType visitArgument(Argument &A);
SizeOffsetType visitCallSite(CallSite CS);
SizeOffsetType visitConstantPointerNull(ConstantPointerNull&);
SizeOffsetType visitExtractElementInst(ExtractElementInst &I);
SizeOffsetType visitExtractValueInst(ExtractValueInst &I);
SizeOffsetType visitGEPOperator(GEPOperator &GEP);
SizeOffsetType visitGlobalAlias(GlobalAlias &GA);
SizeOffsetType visitGlobalVariable(GlobalVariable &GV);
SizeOffsetType visitIntToPtrInst(IntToPtrInst&);
SizeOffsetType visitLoadInst(LoadInst &I);
SizeOffsetType visitPHINode(PHINode&);
SizeOffsetType visitSelectInst(SelectInst &I);
SizeOffsetType visitUndefValue(UndefValue&);
SizeOffsetType visitInstruction(Instruction &I);
};
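// Illustrative sketch (hypothetical helper, not part of the original header):
// run the static evaluator above and report success only when both the size
// and the offset were determined at compile time.
inline bool computeObjectSizeAndOffset(Value *V, const DataLayout &DL,
                                       const TargetLibraryInfo *TLI,
                                       LLVMContext &Context, APInt &Size,
                                       APInt &Offset) {
  ObjectSizeOffsetVisitor Visitor(DL, TLI, Context);
  SizeOffsetType Result = Visitor.compute(V);
  if (!Visitor.bothKnown(Result))
    return false;
  Size = Result.first;
  Offset = Result.second;
  return true;
}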
typedef std::pair<Value*, Value*> SizeOffsetEvalType;
/// \brief Evaluate the size and offset of an object pointed to by a Value*.
/// May create code to compute the result at run-time.
class ObjectSizeOffsetEvaluator
: public InstVisitor<ObjectSizeOffsetEvaluator, SizeOffsetEvalType> {
typedef IRBuilder<true, TargetFolder> BuilderTy;
typedef std::pair<WeakTrackingVH, WeakTrackingVH> WeakEvalType;
typedef DenseMap<const Value*, WeakEvalType> CacheMapTy;
typedef SmallPtrSet<const Value*, 8> PtrSetTy;
const DataLayout &DL;
const TargetLibraryInfo *TLI;
LLVMContext &Context;
BuilderTy Builder;
IntegerType *IntTy;
Value *Zero;
CacheMapTy CacheMap;
PtrSetTy SeenVals;
bool RoundToAlign;
SizeOffsetEvalType unknown() {
return std::make_pair(nullptr, nullptr);
}
SizeOffsetEvalType compute_(Value *V);
public:
ObjectSizeOffsetEvaluator(const DataLayout &DL, const TargetLibraryInfo *TLI,
LLVMContext &Context, bool RoundToAlign = false);
SizeOffsetEvalType compute(Value *V);
bool knownSize(SizeOffsetEvalType SizeOffset) {
return SizeOffset.first;
}
bool knownOffset(SizeOffsetEvalType SizeOffset) {
return SizeOffset.second;
}
bool anyKnown(SizeOffsetEvalType SizeOffset) {
return knownSize(SizeOffset) || knownOffset(SizeOffset);
}
bool bothKnown(SizeOffsetEvalType SizeOffset) {
return knownSize(SizeOffset) && knownOffset(SizeOffset);
}
// The individual instruction visitors should be treated as private.
SizeOffsetEvalType visitAllocaInst(AllocaInst &I);
SizeOffsetEvalType visitCallSite(CallSite CS);
SizeOffsetEvalType visitExtractElementInst(ExtractElementInst &I);
SizeOffsetEvalType visitExtractValueInst(ExtractValueInst &I);
SizeOffsetEvalType visitGEPOperator(GEPOperator &GEP);
SizeOffsetEvalType visitIntToPtrInst(IntToPtrInst&);
SizeOffsetEvalType visitLoadInst(LoadInst &I);
SizeOffsetEvalType visitPHINode(PHINode &PHI);
SizeOffsetEvalType visitSelectInst(SelectInst &I);
SizeOffsetEvalType visitInstruction(Instruction &I);
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/DominanceFrontierImpl.h | //===- llvm/Analysis/DominanceFrontier.h - Dominator Frontiers --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the generic implementation of the DominanceFrontier class, which
// calculates and holds the dominance frontier for a function.
//
// This should be considered deprecated, don't add any more uses of this data
// structure.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_DOMINANCEFRONTIERIMPL_H
#define LLVM_ANALYSIS_DOMINANCEFRONTIERIMPL_H
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/DominanceFrontier.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GenericDomTree.h"
namespace llvm {
template <class BlockT>
class DFCalculateWorkObject {
public:
typedef DomTreeNodeBase<BlockT> DomTreeNodeT;
DFCalculateWorkObject(BlockT *B, BlockT *P, const DomTreeNodeT *N,
const DomTreeNodeT *PN)
: currentBB(B), parentBB(P), Node(N), parentNode(PN) {}
BlockT *currentBB;
BlockT *parentBB;
const DomTreeNodeT *Node;
const DomTreeNodeT *parentNode;
};
template <class BlockT>
void DominanceFrontierBase<BlockT>::removeBlock(BlockT *BB) {
assert(find(BB) != end() && "Block is not in DominanceFrontier!");
for (iterator I = begin(), E = end(); I != E; ++I)
I->second.erase(BB);
Frontiers.erase(BB);
}
template <class BlockT>
void DominanceFrontierBase<BlockT>::addToFrontier(iterator I,
BlockT *Node) {
assert(I != end() && "BB is not in DominanceFrontier!");
assert(I->second.count(Node) && "Node is not in DominanceFrontier of BB");
I->second.erase(Node);
}
template <class BlockT>
void DominanceFrontierBase<BlockT>::removeFromFrontier(iterator I,
BlockT *Node) {
assert(I != end() && "BB is not in DominanceFrontier!");
assert(I->second.count(Node) && "Node is not in DominanceFrontier of BB");
I->second.erase(Node);
}
template <class BlockT>
bool DominanceFrontierBase<BlockT>::compareDomSet(DomSetType &DS1,
const DomSetType &DS2) const {
std::set<BlockT *> tmpSet;
for (BlockT *BB : DS2)
tmpSet.insert(BB);
for (typename DomSetType::const_iterator I = DS1.begin(), E = DS1.end();
I != E;) {
BlockT *Node = *I++;
if (tmpSet.erase(Node) == 0)
// Node is in DS1 but not in DS2.
return true;
}
if (!tmpSet.empty()) {
// There are nodes that are in DS2 but not in DS1.
return true;
}
// DS1 and DS2 matches.
return false;
}
template <class BlockT>
bool DominanceFrontierBase<BlockT>::compare(
DominanceFrontierBase<BlockT> &Other) const {
DomSetMapType tmpFrontiers;
for (typename DomSetMapType::const_iterator I = Other.begin(),
E = Other.end();
I != E; ++I)
tmpFrontiers.insert(std::make_pair(I->first, I->second));
for (typename DomSetMapType::iterator I = tmpFrontiers.begin(),
E = tmpFrontiers.end();
I != E;) {
BlockT *Node = I->first;
const_iterator DFI = find(Node);
if (DFI == end())
return true;
if (compareDomSet(I->second, DFI->second))
return true;
++I;
tmpFrontiers.erase(Node);
}
if (!tmpFrontiers.empty())
return true;
return false;
}
template <class BlockT>
void DominanceFrontierBase<BlockT>::print(raw_ostream &OS) const {
for (const_iterator I = begin(), E = end(); I != E; ++I) {
OS << " DomFrontier for BB ";
if (I->first)
I->first->printAsOperand(OS, false);
else
OS << " <<exit node>>";
OS << " is:\t";
const std::set<BlockT *> &BBs = I->second;
for (const BlockT *BB : BBs) {
OS << ' ';
if (BB)
BB->printAsOperand(OS, false);
else
OS << "<<exit node>>";
}
OS << '\n';
}
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
template <class BlockT>
void DominanceFrontierBase<BlockT>::dump() const {
print(dbgs());
}
#endif
template <class BlockT>
const typename ForwardDominanceFrontierBase<BlockT>::DomSetType &
ForwardDominanceFrontierBase<BlockT>::calculate(const DomTreeT &DT,
const DomTreeNodeT *Node) {
BlockT *BB = Node->getBlock();
DomSetType *Result = nullptr;
std::vector<DFCalculateWorkObject<BlockT>> workList;
SmallPtrSet<BlockT *, 32> visited;
workList.push_back(DFCalculateWorkObject<BlockT>(BB, nullptr, Node, nullptr));
do {
DFCalculateWorkObject<BlockT> *currentW = &workList.back();
assert(currentW && "Missing work object.");
BlockT *currentBB = currentW->currentBB;
BlockT *parentBB = currentW->parentBB;
const DomTreeNodeT *currentNode = currentW->Node;
const DomTreeNodeT *parentNode = currentW->parentNode;
assert(currentBB && "Invalid work object. Missing current Basic Block");
assert(currentNode && "Invalid work object. Missing current Node");
DomSetType &S = this->Frontiers[currentBB];
// Visit each block only once.
if (visited.insert(currentBB).second) {
// Loop over CFG successors to calculate DFlocal[currentNode]
for (auto SI = BlockTraits::child_begin(currentBB),
SE = BlockTraits::child_end(currentBB);
SI != SE; ++SI) {
// Does Node immediately dominate this successor?
if (DT[*SI]->getIDom() != currentNode)
S.insert(*SI);
}
}
// At this point, S is DFlocal. Now we union in DFup's of our children...
// Loop through and visit the nodes that Node immediately dominates (Node's
// children in the IDomTree)
bool visitChild = false;
for (typename DomTreeNodeT::const_iterator NI = currentNode->begin(),
NE = currentNode->end();
NI != NE; ++NI) {
DomTreeNodeT *IDominee = *NI;
BlockT *childBB = IDominee->getBlock();
if (visited.count(childBB) == 0) {
workList.push_back(DFCalculateWorkObject<BlockT>(
childBB, currentBB, IDominee, currentNode));
visitChild = true;
}
}
// If no child was pushed onto the worklist (all children have already been
// visited), this block is finished; pop it from the workList.
if (!visitChild) {
if (!parentBB) {
Result = &S;
break;
}
typename DomSetType::const_iterator CDFI = S.begin(), CDFE = S.end();
DomSetType &parentSet = this->Frontiers[parentBB];
for (; CDFI != CDFE; ++CDFI) {
if (!DT.properlyDominates(parentNode, DT[*CDFI]))
parentSet.insert(*CDFI);
}
workList.pop_back();
}
} while (!workList.empty());
return *Result;
}
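// Illustrative sketch (hypothetical helper, not part of the original file):
// query whether SuccBB lies on the dominance frontier of BB once the frontiers
// have been computed.
template <class BlockT>
bool isOnDominanceFrontierOf(DominanceFrontierBase<BlockT> &DF, BlockT *BB,
                             BlockT *SuccBB) {
  typename DominanceFrontierBase<BlockT>::iterator I = DF.find(BB);
  return I != DF.end() && I->second.count(SuccBB) != 0;
}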
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/LoopAccessAnalysis.h | //===- llvm/Analysis/LoopAccessAnalysis.h -----------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interface for the loop memory dependence framework that
// was originally developed for the Loop Vectorizer.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_LOOPACCESSANALYSIS_H
#define LLVM_ANALYSIS_LOOPACCESSANALYSIS_H
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/raw_ostream.h"
namespace llvm {
class Value;
class DataLayout;
class AliasAnalysis;
class ScalarEvolution;
class Loop;
class SCEV;
/// Optimization analysis message produced during vectorization. Messages inform
/// the user why vectorization did not occur.
class LoopAccessReport {
std::string Message;
const Instruction *Instr;
protected:
LoopAccessReport(const Twine &Message, const Instruction *I)
: Message(Message.str()), Instr(I) {}
public:
LoopAccessReport(const Instruction *I = nullptr) : Instr(I) {}
template <typename A> LoopAccessReport &operator<<(const A &Value) {
raw_string_ostream Out(Message);
Out << Value;
return *this;
}
const Instruction *getInstr() const { return Instr; }
std::string &str() { return Message; }
const std::string &str() const { return Message; }
operator Twine() { return Message; }
/// \brief Emit an analysis note for \p PassName with the debug location from
/// the instruction in \p Message if available. Otherwise use the location of
/// \p TheLoop.
static void emitAnalysis(const LoopAccessReport &Message,
const Function *TheFunction,
const Loop *TheLoop,
const char *PassName);
};
/// \brief Collection of parameters shared between the Loop Vectorizer and the
/// Loop Access Analysis.
struct VectorizerParams {
/// \brief Maximum SIMD width.
static const unsigned MaxVectorWidth;
/// \brief VF as overridden by the user.
static unsigned VectorizationFactor;
/// \brief Interleave factor as overridden by the user.
static unsigned VectorizationInterleave;
/// \brief True if force-vector-interleave was specified by the user.
static bool isInterleaveForced();
/// \brief When performing memory disambiguation checks at runtime do not
/// make more than this number of comparisons.
static unsigned RuntimeMemoryCheckThreshold;
};
/// \brief Checks memory dependences among accesses to the same underlying
/// object to determine whether vectorization is legal or not (and at
/// which vectorization factor).
///
/// Note: This class will compute a conservative dependence for access to
/// different underlying pointers. Clients, such as the loop vectorizer, will
/// sometimes deal with these potential dependencies by emitting runtime checks.
///
/// We use the ScalarEvolution framework to symbolically evaluate pairs of
/// access functions. Since we currently don't restructure the loop we can rely
/// on the program order of memory accesses to determine their safety.
/// At the moment we will only deem accesses as safe for:
/// * A negative constant distance assuming program order.
///
/// Safe: tmp = a[i + 1]; OR a[i + 1] = x;
/// a[i] = tmp; y = a[i];
///
/// The latter case is safe because later checks guarantee that there can't
/// be a cycle through a phi node (that is, we check that "x" and "y" is not
/// the same variable: a header phi can only be an induction or a reduction, a
/// reduction can't have a memory sink, an induction can't have a memory
/// source). This is important and must not be violated (or we have to
/// resort to checking for cycles through memory).
///
/// * A positive constant distance assuming program order that is bigger
/// than the biggest memory access.
///
/// tmp = a[i] OR b[i] = x
/// a[i+2] = tmp y = b[i+2];
///
/// Safe distance: 2 x sizeof(a[0]), and 2 x sizeof(b[0]), respectively.
///
/// * Zero distances and all accesses have the same size.
///
class MemoryDepChecker {
public:
typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
typedef SmallPtrSet<MemAccessInfo, 8> MemAccessInfoSet;
/// \brief Set of potential dependent memory accesses.
typedef EquivalenceClasses<MemAccessInfo> DepCandidates;
/// \brief Dependence between memory access instructions.
struct Dependence {
/// \brief The type of the dependence.
enum DepType {
// No dependence.
NoDep,
// We couldn't determine the direction or the distance.
Unknown,
// Lexically forward.
Forward,
// Forward, but if vectorized, is likely to prevent store-to-load
// forwarding.
ForwardButPreventsForwarding,
// Lexically backward.
Backward,
// Backward, but the distance allows a vectorization factor of
// MaxSafeDepDistBytes.
BackwardVectorizable,
// Same, but may prevent store-to-load forwarding.
BackwardVectorizableButPreventsForwarding
};
/// \brief String version of the types.
static const char *DepName[];
/// \brief Index of the source of the dependence in the InstMap vector.
unsigned Source;
/// \brief Index of the destination of the dependence in the InstMap vector.
unsigned Destination;
/// \brief The type of the dependence.
DepType Type;
Dependence(unsigned Source, unsigned Destination, DepType Type)
: Source(Source), Destination(Destination), Type(Type) {}
/// \brief Dependence types that don't prevent vectorization.
static bool isSafeForVectorization(DepType Type);
/// \brief Dependence types that can be queried from the analysis.
static bool isInterestingDependence(DepType Type);
/// \brief Lexically backward dependence types.
bool isPossiblyBackward() const;
/// \brief Print the dependence. \p Instr is used to map the instruction
/// indices to instructions.
void print(raw_ostream &OS, unsigned Depth,
const SmallVectorImpl<Instruction *> &Instrs) const;
};
MemoryDepChecker(ScalarEvolution *Se, const Loop *L)
: SE(Se), InnermostLoop(L), AccessIdx(0),
ShouldRetryWithRuntimeCheck(false), SafeForVectorization(true),
RecordInterestingDependences(true) {}
/// \brief Register the location (instructions are given increasing numbers)
/// of a write access.
void addAccess(StoreInst *SI) {
Value *Ptr = SI->getPointerOperand();
Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
InstMap.push_back(SI);
++AccessIdx;
}
/// \brief Register the location (instructions are given increasing numbers)
/// of a read access.
void addAccess(LoadInst *LI) {
Value *Ptr = LI->getPointerOperand();
Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
InstMap.push_back(LI);
++AccessIdx;
}
/// \brief Check whether the dependencies between the accesses are safe.
///
/// Only checks sets with elements in \p CheckDeps.
bool areDepsSafe(DepCandidates &AccessSets, MemAccessInfoSet &CheckDeps,
const ValueToValueMap &Strides);
/// \brief No memory dependence was encountered that would inhibit
/// vectorization.
bool isSafeForVectorization() const { return SafeForVectorization; }
/// \brief The maximum number of bytes of a vector register we can vectorize
/// the accesses safely with.
unsigned getMaxSafeDepDistBytes() { return MaxSafeDepDistBytes; }
/// \brief In some cases when the dependency check fails we can still
/// vectorize the loop with a dynamic array access check.
bool shouldRetryWithRuntimeCheck() { return ShouldRetryWithRuntimeCheck; }
/// \brief Returns the interesting dependences. If null is returned we
/// exceeded the MaxInterestingDependence threshold and this information is
/// not available.
const SmallVectorImpl<Dependence> *getInterestingDependences() const {
return RecordInterestingDependences ? &InterestingDependences : nullptr;
}
void clearInterestingDependences() { InterestingDependences.clear(); }
/// \brief The vector of memory access instructions. The indices are used as
/// instruction identifiers in the Dependence class.
const SmallVectorImpl<Instruction *> &getMemoryInstructions() const {
return InstMap;
}
/// \brief Find the set of instructions that read or write via \p Ptr.
SmallVector<Instruction *, 4> getInstructionsForAccess(Value *Ptr,
bool isWrite) const;
private:
ScalarEvolution *SE;
const Loop *InnermostLoop;
/// \brief Maps access locations (ptr, read/write) to program order.
DenseMap<MemAccessInfo, std::vector<unsigned> > Accesses;
/// \brief Memory access instructions in program order.
SmallVector<Instruction *, 16> InstMap;
/// \brief The program order index to be used for the next instruction.
unsigned AccessIdx;
// We can access this many bytes in parallel safely.
unsigned MaxSafeDepDistBytes;
/// \brief If we see a non-constant dependence distance we can still try to
/// vectorize this loop with runtime checks.
bool ShouldRetryWithRuntimeCheck;
/// \brief No memory dependence was encountered that would inhibit
/// vectorization.
bool SafeForVectorization;
/// \brief True if InterestingDependences reflects the dependences in the
/// loop. If false we exceeded MaxInterestingDependence and
/// InterestingDependences is invalid.
bool RecordInterestingDependences;
/// \brief Interesting memory dependences collected during the analysis as
/// defined by isInterestingDependence. Only valid if
/// RecordInterestingDependences is true.
SmallVector<Dependence, 8> InterestingDependences;
/// \brief Check whether there is a plausible dependence between the two
/// accesses.
///
/// Access \p A must happen before \p B in program order. The two indices
/// identify the index into the program order map.
///
/// This function checks whether there is a plausible dependence (or the
/// absence of such can't be proved) between the two accesses. If there is a
/// plausible dependence but the dependence distance is bigger than one
/// element access it records this distance in \p MaxSafeDepDistBytes (if this
/// distance is smaller than any other distance encountered so far).
/// Otherwise, this function returns true signaling a possible dependence.
Dependence::DepType isDependent(const MemAccessInfo &A, unsigned AIdx,
const MemAccessInfo &B, unsigned BIdx,
const ValueToValueMap &Strides);
/// \brief Check whether the data dependence could prevent store-load
/// forwarding.
bool couldPreventStoreLoadForward(unsigned Distance, unsigned TypeByteSize);
};
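// Illustrative sketch (hypothetical helper, not part of the original header):
// route a memory instruction to the matching addAccess overload, in program
// order, mirroring how the loop analysis populates the checker.
inline bool recordMemoryAccess(MemoryDepChecker &DepChecker, Instruction *I) {
  if (auto *LD = dyn_cast<LoadInst>(I)) {
    DepChecker.addAccess(LD);
    return true;
  }
  if (auto *ST = dyn_cast<StoreInst>(I)) {
    DepChecker.addAccess(ST);
    return true;
  }
  return false; // Not a simple load or store.
}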
/// \brief Holds information about the memory runtime legality checks to verify
/// that a group of pointers do not overlap.
class RuntimePointerChecking {
public:
struct PointerInfo {
/// Holds the pointer value that we need to check.
TrackingVH<Value> PointerValue;
/// Holds the pointer value at the beginning of the loop.
const SCEV *Start;
/// Holds the pointer value at the end of the loop.
const SCEV *End;
/// Holds the information if this pointer is used for writing to memory.
bool IsWritePtr;
/// Holds the id of the set of pointers that could be dependent because of a
/// shared underlying object.
unsigned DependencySetId;
/// Holds the id of the disjoint alias set to which this pointer belongs.
unsigned AliasSetId;
/// SCEV for the access.
const SCEV *Expr;
PointerInfo(Value *PointerValue, const SCEV *Start, const SCEV *End,
bool IsWritePtr, unsigned DependencySetId, unsigned AliasSetId,
const SCEV *Expr)
: PointerValue(PointerValue), Start(Start), End(End),
IsWritePtr(IsWritePtr), DependencySetId(DependencySetId),
AliasSetId(AliasSetId), Expr(Expr) {}
};
RuntimePointerChecking(ScalarEvolution *SE) : Need(false), SE(SE) {}
/// Reset the state of the pointer runtime information.
void reset() {
Need = false;
Pointers.clear();
}
/// Insert a pointer and calculate the start and end SCEVs.
void insert(Loop *Lp, Value *Ptr, bool WritePtr, unsigned DepSetId,
unsigned ASId, const ValueToValueMap &Strides);
/// \brief No run-time memory checking is necessary.
bool empty() const { return Pointers.empty(); }
/// A grouping of pointers. A single memcheck is required between
/// two groups.
struct CheckingPtrGroup {
/// \brief Create a new pointer checking group containing a single
/// pointer, with index \p Index in RtCheck.
CheckingPtrGroup(unsigned Index, RuntimePointerChecking &RtCheck)
: RtCheck(RtCheck), High(RtCheck.Pointers[Index].End),
Low(RtCheck.Pointers[Index].Start) {
Members.push_back(Index);
}
/// \brief Tries to add the pointer recorded in RtCheck at index
/// \p Index to this pointer checking group. We can only add a pointer
/// to a checking group if we will still be able to get
/// the upper and lower bounds of the check. Returns true in case
/// of success, false otherwise.
bool addPointer(unsigned Index);
/// Constitutes the context of this pointer checking group. For each
/// pointer that is a member of this group we will retain the index
/// at which it appears in RtCheck.
RuntimePointerChecking &RtCheck;
/// The SCEV expression which represents the upper bound of all the
/// pointers in this group.
const SCEV *High;
/// The SCEV expression which represents the lower bound of all the
/// pointers in this group.
const SCEV *Low;
/// Indices of all the pointers that constitute this grouping.
SmallVector<unsigned, 2> Members;
};
/// \brief Groups pointers such that a single memcheck is required
/// between two different groups. This will clear the CheckingGroups vector
/// and re-compute it. We will only group dependencies if \p UseDependencies
/// is true, otherwise we will create a separate group for each pointer.
void groupChecks(MemoryDepChecker::DepCandidates &DepCands,
bool UseDependencies);
/// \brief Decide if we need to add a check between two groups of pointers,
/// according to needsChecking.
bool needsChecking(const CheckingPtrGroup &M, const CheckingPtrGroup &N,
const SmallVectorImpl<int> *PtrPartition) const;
/// \brief Return true if any pointer requires run-time checking according
/// to needsChecking.
bool needsAnyChecking(const SmallVectorImpl<int> *PtrPartition) const;
/// \brief Returns the number of run-time checks required according to
/// needsChecking.
unsigned getNumberOfChecks(const SmallVectorImpl<int> *PtrPartition) const;
/// \brief Print the list of run-time memory checks necessary.
///
/// If \p PtrPartition is set, it contains the partition number for
/// pointers (-1 if the pointer belongs to multiple partitions). In this
/// case omit checks between pointers belonging to the same partition.
void print(raw_ostream &OS, unsigned Depth = 0,
const SmallVectorImpl<int> *PtrPartition = nullptr) const;
/// This flag indicates if we need to add the runtime check.
bool Need;
/// Information about the pointers that may require checking.
SmallVector<PointerInfo, 2> Pointers;
/// Holds a partitioning of pointers into "check groups".
SmallVector<CheckingPtrGroup, 2> CheckingGroups;
private:
/// \brief Decide whether we need to issue a run-time check for pointer at
/// index \p I and \p J to prove their independence.
///
/// If \p PtrPartition is set, it contains the partition number for
/// pointers (-1 if the pointer belongs to multiple partitions). In this
/// case omit checks between pointers belonging to the same partition.
bool needsChecking(unsigned I, unsigned J,
const SmallVectorImpl<int> *PtrPartition) const;
/// Holds a pointer to the ScalarEvolution analysis.
ScalarEvolution *SE;
};
/// \brief Drive the analysis of memory accesses in the loop
///
/// This class is responsible for analyzing the memory accesses of a loop. It
/// collects the accesses and then its main helper the AccessAnalysis class
/// finds and categorizes the dependences in buildDependenceSets.
///
/// For memory dependences that can be analyzed at compile time, it determines
/// whether the dependence is part of cycle inhibiting vectorization. This work
/// is delegated to the MemoryDepChecker class.
///
/// For memory dependences that cannot be determined at compile time, it
/// generates run-time checks to prove independence. This is done by
/// AccessAnalysis::canCheckPtrAtRT and the checks are maintained by the
/// RuntimePointerCheck class.
class LoopAccessInfo {
public:
LoopAccessInfo(Loop *L, ScalarEvolution *SE, const DataLayout &DL,
const TargetLibraryInfo *TLI, AliasAnalysis *AA,
DominatorTree *DT, LoopInfo *LI,
const ValueToValueMap &Strides);
/// Return true we can analyze the memory accesses in the loop and there are
/// no memory dependence cycles.
bool canVectorizeMemory() const { return CanVecMem; }
const RuntimePointerChecking *getRuntimePointerChecking() const {
return &PtrRtChecking;
}
/// \brief Number of memchecks required to prove independence of otherwise
/// may-alias pointers.
unsigned getNumRuntimePointerChecks(
const SmallVectorImpl<int> *PtrPartition = nullptr) const {
return PtrRtChecking.getNumberOfChecks(PtrPartition);
}
/// Return true if the block BB needs to be predicated in order for the loop
/// to be vectorized.
static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
DominatorTree *DT);
/// Returns true if the value V is uniform within the loop.
bool isUniform(Value *V) const;
unsigned getMaxSafeDepDistBytes() const { return MaxSafeDepDistBytes; }
unsigned getNumStores() const { return NumStores; }
unsigned getNumLoads() const { return NumLoads;}
/// \brief Add code that checks at runtime if the accessed arrays overlap.
///
/// Returns a pair of instructions where the first element is the first
/// instruction generated in possibly a sequence of instructions and the
/// second value is the final comparator value or NULL if no check is needed.
///
/// If \p PtrPartition is set, it contains the partition number for pointers
/// (-1 if the pointer belongs to multiple partitions). In this case omit
/// checks between pointers belonging to the same partition.
std::pair<Instruction *, Instruction *>
addRuntimeCheck(Instruction *Loc,
const SmallVectorImpl<int> *PtrPartition = nullptr) const;
/// \brief The diagnostics report generated for the analysis. E.g. why we
/// couldn't analyze the loop.
const Optional<LoopAccessReport> &getReport() const { return Report; }
/// \brief the Memory Dependence Checker which can determine the
/// loop-independent and loop-carried dependences between memory accesses.
const MemoryDepChecker &getDepChecker() const { return DepChecker; }
/// \brief Return the list of instructions that use \p Ptr to read or write
/// memory.
SmallVector<Instruction *, 4> getInstructionsForAccess(Value *Ptr,
bool isWrite) const {
return DepChecker.getInstructionsForAccess(Ptr, isWrite);
}
/// \brief Print the information about the memory accesses in the loop.
void print(raw_ostream &OS, unsigned Depth = 0) const;
/// \brief Used to ensure that if the analysis was run while speculating the
/// values of symbolic strides, the client queries it with the same assumption.
/// Only used in DEBUG build but we don't want NDEBUG-dependent ABI.
unsigned NumSymbolicStrides;
/// \brief Checks whether the loop contains a store to an invariant address.
/// Returns true if the loop has any store to a loop-invariant address,
/// false otherwise.
bool hasStoreToLoopInvariantAddress() const {
return StoreToLoopInvariantAddress;
}
private:
/// \brief Analyze the loop. Substitute symbolic strides using Strides.
void analyzeLoop(const ValueToValueMap &Strides);
/// \brief Check if the structure of the loop allows it to be analyzed by this
/// pass.
bool canAnalyzeLoop();
void emitAnalysis(LoopAccessReport &Message);
/// We need to check that all of the pointers in this list are disjoint
/// at runtime.
RuntimePointerChecking PtrRtChecking;
/// \brief The Memory Dependence Checker, which can determine the
/// loop-independent and loop-carried dependences between memory accesses.
MemoryDepChecker DepChecker;
Loop *TheLoop;
ScalarEvolution *SE;
const DataLayout &DL;
const TargetLibraryInfo *TLI;
AliasAnalysis *AA;
DominatorTree *DT;
LoopInfo *LI;
unsigned NumLoads;
unsigned NumStores;
unsigned MaxSafeDepDistBytes;
/// \brief Cache the result of analyzeLoop.
bool CanVecMem;
/// \brief Indicator for stores to uniform (loop-invariant) addresses.
/// True if the loop contains a write to a loop-invariant address.
bool StoreToLoopInvariantAddress;
/// \brief The diagnostics report generated for the analysis. E.g. why we
/// couldn't analyze the loop.
Optional<LoopAccessReport> Report;
};
Value *stripIntegerCast(Value *V);
/// \brief Return the SCEV corresponding to a pointer with the symbolic stride
/// replaced with constant one.
///
/// If \p OrigPtr is not null, use it to look up the stride value instead of \p
/// Ptr. \p PtrToStride provides the mapping between the pointer value and its
/// stride as collected by LoopVectorizationLegality::collectStridedAccess.
const SCEV *replaceSymbolicStrideSCEV(ScalarEvolution *SE,
const ValueToValueMap &PtrToStride,
Value *Ptr, Value *OrigPtr = nullptr);
/// \brief Check the stride of the pointer and ensure that it does not wrap in
/// the address space.
int isStridedPtr(ScalarEvolution *SE, Value *Ptr, const Loop *Lp,
const ValueToValueMap &StridesMap);
/// \brief This analysis provides dependence information for the memory accesses
/// of a loop.
///
/// It runs the analysis for a loop on demand. This can be initiated by
/// querying the loop access info via LAA::getInfo. getInfo returns a
/// LoopAccessInfo object. See this class for the specifics of what information
/// is provided.
class LoopAccessAnalysis : public FunctionPass {
public:
static char ID;
LoopAccessAnalysis() : FunctionPass(ID) {
initializeLoopAccessAnalysisPass(*PassRegistry::getPassRegistry());
}
bool runOnFunction(Function &F) override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
/// \brief Query the result of the loop access information for the loop \p L.
///
/// If the client speculates (and then issues run-time checks) for the values
/// of symbolic strides, \p Strides provides the mapping (see
/// replaceSymbolicStrideSCEV). If there is no cached result available run
/// the analysis.
const LoopAccessInfo &getInfo(Loop *L, const ValueToValueMap &Strides);
void releaseMemory() override {
// Invalidate the cache when the pass is freed.
LoopAccessInfoMap.clear();
}
/// \brief Print the result of the analysis when invoked with -analyze.
void print(raw_ostream &OS, const Module *M = nullptr) const override;
private:
/// \brief The cache.
DenseMap<Loop *, std::unique_ptr<LoopAccessInfo>> LoopAccessInfoMap;
// The used analysis passes.
ScalarEvolution *SE;
const TargetLibraryInfo *TLI;
AliasAnalysis *AA;
DominatorTree *DT;
LoopInfo *LI;
};
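// Illustrative usage sketch (not part of the original header): a client loop
// pass that requires LoopAccessAnalysis might query the cached result as in
// the snippet below. The pass name MyLoopPass and the empty strides map are
// assumptions made purely for this example.
//
//   bool MyLoopPass::runOnLoop(Loop *L, LPPassManager &) {
//     LoopAccessAnalysis &LAA = getAnalysis<LoopAccessAnalysis>();
//     ValueToValueMap Strides;                 // no speculated symbolic strides
//     const LoopAccessInfo &LAI = LAA.getInfo(L, Strides);
//     if (!LAI.canVectorizeMemory())
//       return false;                          // dependence cycle: give up
//     unsigned NumChecks = LAI.getNumRuntimePointerChecks();
//     (void)NumChecks;                         // e.g. compare against a threshold
//     return false;
//   }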
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/AliasAnalysis.h | //===- llvm/Analysis/AliasAnalysis.h - Alias Analysis Interface -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the generic AliasAnalysis interface, which is used as the
// common interface used by all clients of alias analysis information, and
// implemented by all alias analysis implementations. Mod/Ref information is
// also captured by this interface.
//
// Implementations of this interface must implement the various virtual methods,
// which automatically provides functionality for the entire suite of client
// APIs.
//
// This API identifies memory regions with the MemoryLocation class. The pointer
// component specifies the base memory address of the region. The Size specifies
// the maximum size (in address units) of the memory region, or
// MemoryLocation::UnknownSize if the size is not known. The TBAA tag
// identifies the "type" of the memory reference; see the
// TypeBasedAliasAnalysis class for details.
//
// Some non-obvious details include:
// - Pointers that point to two completely different objects in memory never
// alias, regardless of the value of the Size component.
// - NoAlias doesn't imply unequal pointers. The most obvious example of this
// is two pointers to constant memory. Even if they are equal, constant
// memory is never stored to, so there will never be any dependencies.
// In this and other situations, the pointers may be both NoAlias and
// MustAlias at the same time. The current API can only return one result,
// though this is rarely a problem in practice.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_ALIASANALYSIS_H
#define LLVM_ANALYSIS_ALIASANALYSIS_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Analysis/MemoryLocation.h"
namespace llvm {
class LoadInst;
class StoreInst;
class VAArgInst;
class DataLayout;
class TargetLibraryInfo;
class Pass;
class AnalysisUsage;
class MemTransferInst;
class MemIntrinsic;
class DominatorTree;
/// The possible results of an alias query.
///
/// These results are always computed between two MemoryLocation objects as
/// a query to some alias analysis.
///
/// Note that these are unscoped enumerations because we would like to support
/// implicitly testing a result for the existence of any possible aliasing with
/// a conversion to bool, but an "enum class" doesn't support this. The
/// canonical names from the literature are suffixed and unique anyways, and so
/// they serve as global constants in LLVM for these results.
///
/// See docs/AliasAnalysis.html for more information on the specific meanings
/// of these values.
enum AliasResult {
/// The two locations do not alias at all.
///
/// This value is arranged to convert to false, while all other values
/// convert to true. This allows a boolean context to convert the result to
/// a binary flag indicating whether there is the possibility of aliasing.
NoAlias = 0,
/// The two locations may or may not alias. This is the least precise result.
MayAlias,
/// The two locations alias, but only due to a partial overlap.
PartialAlias,
/// The two locations precisely alias each other.
MustAlias,
};
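// Illustrative example (not part of the original header): because NoAlias is
// 0, an AliasResult converts to false exactly when no aliasing is possible,
// so it can be tested directly in a boolean context. AA, LocA and LocB are
// assumed to be an AliasAnalysis reference and two MemoryLocations in scope.
//
//   AliasResult AR = AA.alias(LocA, LocB);
//   if (!AR) {
//     // Proven NoAlias: accesses to LocA and LocB are independent.
//   } else if (AR == MustAlias) {
//     // The two locations are known to be exactly the same memory.
//   } else {
//     // MayAlias or PartialAlias: be conservative.
//   }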
class AliasAnalysis {
protected:
const DataLayout *DL;
const TargetLibraryInfo *TLI;
private:
AliasAnalysis *AA; // Previous Alias Analysis to chain to.
protected:
/// InitializeAliasAnalysis - Subclasses must call this method to initialize
/// the AliasAnalysis interface before any other methods are called. This is
/// typically called by the run* methods of these subclasses. This may be
/// called multiple times.
///
void InitializeAliasAnalysis(Pass *P, const DataLayout *DL);
/// getAnalysisUsage - All alias analysis implementations should invoke this
/// directly (using AliasAnalysis::getAnalysisUsage(AU)).
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
public:
static char ID; // Class identification, replacement for typeinfo
AliasAnalysis() : DL(nullptr), TLI(nullptr), AA(nullptr) {}
virtual ~AliasAnalysis(); // We want to be subclassed
/// getTargetLibraryInfo - Return a pointer to the current TargetLibraryInfo
/// object, or null if no TargetLibraryInfo object is available.
///
const TargetLibraryInfo *getTargetLibraryInfo() const { return TLI; }
/// getTypeStoreSize - Return the DataLayout store size for the given type,
/// if known, or a conservative value otherwise.
///
uint64_t getTypeStoreSize(Type *Ty);
//===--------------------------------------------------------------------===//
/// Alias Queries...
///
/// alias - The main low level interface to the alias analysis implementation.
/// Returns an AliasResult indicating whether the two pointers are aliased to
/// each other. This is the interface that must be implemented by specific
/// alias analysis implementations.
virtual AliasResult alias(const MemoryLocation &LocA,
const MemoryLocation &LocB);
/// alias - A convenience wrapper.
AliasResult alias(const Value *V1, uint64_t V1Size,
const Value *V2, uint64_t V2Size) {
return alias(MemoryLocation(V1, V1Size), MemoryLocation(V2, V2Size));
}
/// alias - A convenience wrapper.
AliasResult alias(const Value *V1, const Value *V2) {
return alias(V1, MemoryLocation::UnknownSize, V2,
MemoryLocation::UnknownSize);
}
/// isNoAlias - A trivial helper function to check to see if the specified
/// pointers are no-alias.
bool isNoAlias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
return alias(LocA, LocB) == NoAlias;
}
/// isNoAlias - A convenience wrapper.
bool isNoAlias(const Value *V1, uint64_t V1Size,
const Value *V2, uint64_t V2Size) {
return isNoAlias(MemoryLocation(V1, V1Size), MemoryLocation(V2, V2Size));
}
/// isNoAlias - A convenience wrapper.
bool isNoAlias(const Value *V1, const Value *V2) {
return isNoAlias(MemoryLocation(V1), MemoryLocation(V2));
}
/// isMustAlias - A convenience wrapper.
bool isMustAlias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
return alias(LocA, LocB) == MustAlias;
}
/// isMustAlias - A convenience wrapper.
bool isMustAlias(const Value *V1, const Value *V2) {
return alias(V1, 1, V2, 1) == MustAlias;
}
/// pointsToConstantMemory - If the specified memory location is
/// known to be constant, return true. If OrLocal is true and the
/// specified memory location is known to be "local" (derived from
/// an alloca), return true. Otherwise return false.
virtual bool pointsToConstantMemory(const MemoryLocation &Loc,
bool OrLocal = false);
/// pointsToConstantMemory - A convenient wrapper.
bool pointsToConstantMemory(const Value *P, bool OrLocal = false) {
return pointsToConstantMemory(MemoryLocation(P), OrLocal);
}
//===--------------------------------------------------------------------===//
/// Simple mod/ref information...
///
/// ModRefResult - Represent the result of a mod/ref query. Mod and Ref are
/// bits which may be or'd together.
///
enum ModRefResult { NoModRef = 0, Ref = 1, Mod = 2, ModRef = 3 };
/// These values define additional bits used to define the
/// ModRefBehavior values.
enum { Nowhere = 0, ArgumentPointees = 4, Anywhere = 8 | ArgumentPointees };
/// ModRefBehavior - Summary of how a function affects memory in the program.
/// Loads from constant globals are not considered memory accesses for this
/// interface. Also, functions may freely modify stack space local to their
/// invocation without having to report it through these interfaces.
enum ModRefBehavior {
/// DoesNotAccessMemory - This function does not perform any non-local loads
/// or stores to memory.
///
/// This property corresponds to the GCC 'const' attribute.
/// This property corresponds to the LLVM IR 'readnone' attribute.
/// This property corresponds to the IntrNoMem LLVM intrinsic flag.
DoesNotAccessMemory = Nowhere | NoModRef,
/// OnlyReadsArgumentPointees - The only memory references in this function
/// (if it has any) are non-volatile loads from objects pointed to by its
/// pointer-typed arguments, with arbitrary offsets.
///
/// This property corresponds to the LLVM IR 'argmemonly' attribute combined
/// with 'readonly' attribute.
/// This property corresponds to the IntrReadArgMem LLVM intrinsic flag.
OnlyReadsArgumentPointees = ArgumentPointees | Ref,
/// OnlyAccessesArgumentPointees - The only memory references in this
/// function (if it has any) are non-volatile loads and stores from objects
/// pointed to by its pointer-typed arguments, with arbitrary offsets.
///
/// This property corresponds to the LLVM IR 'argmemonly' attribute.
/// This property corresponds to the IntrReadWriteArgMem LLVM intrinsic flag.
OnlyAccessesArgumentPointees = ArgumentPointees | ModRef,
/// OnlyReadsMemory - This function does not perform any non-local stores or
/// volatile loads, but may read from any memory location.
///
/// This property corresponds to the GCC 'pure' attribute.
/// This property corresponds to the LLVM IR 'readonly' attribute.
/// This property corresponds to the IntrReadMem LLVM intrinsic flag.
OnlyReadsMemory = Anywhere | Ref,
/// UnknownModRefBehavior - This indicates that the function could not be
/// classified into one of the behaviors above.
UnknownModRefBehavior = Anywhere | ModRef
};
/// Get the ModRef info associated with a pointer argument of a callsite. The
/// result's bits are set to indicate the allowed aliasing ModRef kinds. Note
/// that these bits do not necessarily account for the overall behavior of
/// the function, but rather only provide additional per-argument
/// information.
virtual ModRefResult getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx);
/// getModRefBehavior - Return the behavior when calling the given call site.
virtual ModRefBehavior getModRefBehavior(ImmutableCallSite CS);
/// getModRefBehavior - Return the behavior when calling the given function.
/// For use when the call site is not known.
virtual ModRefBehavior getModRefBehavior(const Function *F);
/// doesNotAccessMemory - If the specified call is known to never read or
/// write memory, return true. If the call only reads from known-constant
/// memory, it is also legal to return true. Calls that unwind the stack
/// are legal for this predicate.
///
/// Many optimizations (such as CSE and LICM) can be performed on such calls
/// without worrying about aliasing properties, and many calls have this
/// property (e.g. calls to 'sin' and 'cos').
///
/// This property corresponds to the GCC 'const' attribute.
///
bool doesNotAccessMemory(ImmutableCallSite CS) {
return getModRefBehavior(CS) == DoesNotAccessMemory;
}
/// doesNotAccessMemory - If the specified function is known to never read or
/// write memory, return true. For use when the call site is not known.
///
bool doesNotAccessMemory(const Function *F) {
return getModRefBehavior(F) == DoesNotAccessMemory;
}
/// onlyReadsMemory - If the specified call is known to only read from
/// non-volatile memory (or not access memory at all), return true. Calls
/// that unwind the stack are legal for this predicate.
///
/// This property allows many common optimizations to be performed in the
/// absence of interfering store instructions, such as CSE of strlen calls.
///
/// This property corresponds to the GCC 'pure' attribute.
///
bool onlyReadsMemory(ImmutableCallSite CS) {
return onlyReadsMemory(getModRefBehavior(CS));
}
/// onlyReadsMemory - If the specified function is known to only read from
/// non-volatile memory (or not access memory at all), return true. For use
/// when the call site is not known.
///
bool onlyReadsMemory(const Function *F) {
return onlyReadsMemory(getModRefBehavior(F));
}
/// onlyReadsMemory - Return true if functions with the specified behavior are
/// known to only read from non-volatile memory (or not access memory at all).
///
static bool onlyReadsMemory(ModRefBehavior MRB) {
return !(MRB & Mod);
}
/// onlyAccessesArgPointees - Return true if functions with the specified
/// behavior are known to read and write at most from objects pointed to by
/// their pointer-typed arguments (with arbitrary offsets).
///
static bool onlyAccessesArgPointees(ModRefBehavior MRB) {
return !(MRB & Anywhere & ~ArgumentPointees);
}
/// doesAccessArgPointees - Return true if functions with the specified
/// behavior are known to potentially read or write from objects pointed
/// to by their pointer-typed arguments (with arbitrary offsets).
///
static bool doesAccessArgPointees(ModRefBehavior MRB) {
return (MRB & ModRef) && (MRB & ArgumentPointees);
}
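// Worked example of the bit encoding above (added for illustration): with
// NoModRef = 0, Ref = 1, Mod = 2, ArgumentPointees = 4 and Anywhere = 12,
// OnlyReadsArgumentPointees == (4 | 1) == 5, so
//   onlyReadsMemory(5)         == !(5 & 2)           == true
//   onlyAccessesArgPointees(5) == !(5 & 12 & ~4)     == !(5 & 8) == true
//   doesAccessArgPointees(5)   == (5 & 3) && (5 & 4) == true
// while for UnknownModRefBehavior == (12 | 3) == 15 the same three helpers
// yield false, false and true respectively.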
/// getModRefInfo - Return information about whether or not an
/// instruction may read or write memory (without regard to a
/// specific location)
ModRefResult getModRefInfo(const Instruction *I) {
if (auto CS = ImmutableCallSite(I)) {
auto MRB = getModRefBehavior(CS);
if (MRB & ModRef)
return ModRef;
else if (MRB & Ref)
return Ref;
else if (MRB & Mod)
return Mod;
return NoModRef;
}
return getModRefInfo(I, MemoryLocation());
}
/// getModRefInfo - Return information about whether or not an instruction may
/// read or write the specified memory location. An instruction
/// that doesn't read or write memory may be trivially LICM'd for example.
ModRefResult getModRefInfo(const Instruction *I, const MemoryLocation &Loc) {
switch (I->getOpcode()) {
case Instruction::VAArg: return getModRefInfo((const VAArgInst*)I, Loc);
case Instruction::Load: return getModRefInfo((const LoadInst*)I, Loc);
case Instruction::Store: return getModRefInfo((const StoreInst*)I, Loc);
case Instruction::Fence: return getModRefInfo((const FenceInst*)I, Loc);
case Instruction::AtomicCmpXchg:
return getModRefInfo((const AtomicCmpXchgInst*)I, Loc);
case Instruction::AtomicRMW:
return getModRefInfo((const AtomicRMWInst*)I, Loc);
case Instruction::Call: return getModRefInfo((const CallInst*)I, Loc);
case Instruction::Invoke: return getModRefInfo((const InvokeInst*)I,Loc);
default: return NoModRef;
}
}
/// getModRefInfo - A convenience wrapper.
ModRefResult getModRefInfo(const Instruction *I,
const Value *P, uint64_t Size) {
return getModRefInfo(I, MemoryLocation(P, Size));
}
/// getModRefInfo (for call sites) - Return information about whether
/// a particular call site modifies or reads the specified memory location.
virtual ModRefResult getModRefInfo(ImmutableCallSite CS,
const MemoryLocation &Loc);
/// getModRefInfo (for call sites) - A convenience wrapper.
ModRefResult getModRefInfo(ImmutableCallSite CS,
const Value *P, uint64_t Size) {
return getModRefInfo(CS, MemoryLocation(P, Size));
}
/// getModRefInfo (for calls) - Return information about whether
/// a particular call modifies or reads the specified memory location.
ModRefResult getModRefInfo(const CallInst *C, const MemoryLocation &Loc) {
return getModRefInfo(ImmutableCallSite(C), Loc);
}
/// getModRefInfo (for calls) - A convenience wrapper.
ModRefResult getModRefInfo(const CallInst *C, const Value *P, uint64_t Size) {
return getModRefInfo(C, MemoryLocation(P, Size));
}
/// getModRefInfo (for invokes) - Return information about whether
/// a particular invoke modifies or reads the specified memory location.
ModRefResult getModRefInfo(const InvokeInst *I, const MemoryLocation &Loc) {
return getModRefInfo(ImmutableCallSite(I), Loc);
}
/// getModRefInfo (for invokes) - A convenience wrapper.
ModRefResult getModRefInfo(const InvokeInst *I,
const Value *P, uint64_t Size) {
return getModRefInfo(I, MemoryLocation(P, Size));
}
/// getModRefInfo (for loads) - Return information about whether
/// a particular load modifies or reads the specified memory location.
ModRefResult getModRefInfo(const LoadInst *L, const MemoryLocation &Loc);
/// getModRefInfo (for loads) - A convenience wrapper.
ModRefResult getModRefInfo(const LoadInst *L, const Value *P, uint64_t Size) {
return getModRefInfo(L, MemoryLocation(P, Size));
}
/// getModRefInfo (for stores) - Return information about whether
/// a particular store modifies or reads the specified memory location.
ModRefResult getModRefInfo(const StoreInst *S, const MemoryLocation &Loc);
/// getModRefInfo (for stores) - A convenience wrapper.
ModRefResult getModRefInfo(const StoreInst *S, const Value *P, uint64_t Size){
return getModRefInfo(S, MemoryLocation(P, Size));
}
/// getModRefInfo (for fences) - Return information about whether
/// a particular store modifies or reads the specified memory location.
ModRefResult getModRefInfo(const FenceInst *S, const MemoryLocation &Loc) {
// Conservatively correct. (We could possibly be a bit smarter if
// Loc is an alloca that doesn't escape.)
return ModRef;
}
/// getModRefInfo (for fences) - A convenience wrapper.
ModRefResult getModRefInfo(const FenceInst *S, const Value *P, uint64_t Size){
return getModRefInfo(S, MemoryLocation(P, Size));
}
/// getModRefInfo (for cmpxchges) - Return information about whether
/// a particular cmpxchg modifies or reads the specified memory location.
ModRefResult getModRefInfo(const AtomicCmpXchgInst *CX,
const MemoryLocation &Loc);
/// getModRefInfo (for cmpxchges) - A convenience wrapper.
ModRefResult getModRefInfo(const AtomicCmpXchgInst *CX,
const Value *P, unsigned Size) {
return getModRefInfo(CX, MemoryLocation(P, Size));
}
/// getModRefInfo (for atomicrmws) - Return information about whether
/// a particular atomicrmw modifies or reads the specified memory location.
ModRefResult getModRefInfo(const AtomicRMWInst *RMW,
const MemoryLocation &Loc);
/// getModRefInfo (for atomicrmws) - A convenience wrapper.
ModRefResult getModRefInfo(const AtomicRMWInst *RMW,
const Value *P, unsigned Size) {
return getModRefInfo(RMW, MemoryLocation(P, Size));
}
/// getModRefInfo (for va_args) - Return information about whether
/// a particular va_arg modifies or reads the specified memory location.
ModRefResult getModRefInfo(const VAArgInst *I, const MemoryLocation &Loc);
/// getModRefInfo (for va_args) - A convenience wrapper.
ModRefResult getModRefInfo(const VAArgInst* I, const Value* P, uint64_t Size){
return getModRefInfo(I, MemoryLocation(P, Size));
}
/// getModRefInfo - Return information about whether a call and an instruction
/// may refer to the same memory locations.
ModRefResult getModRefInfo(Instruction *I,
ImmutableCallSite Call);
/// getModRefInfo - Return information about whether two call sites may refer
/// to the same set of memory locations. See
/// http://llvm.org/docs/AliasAnalysis.html#ModRefInfo
/// for details.
virtual ModRefResult getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2);
/// callCapturesBefore - Return information about whether a particular call
/// site modifies or reads the specified memory location.
ModRefResult callCapturesBefore(const Instruction *I,
const MemoryLocation &MemLoc,
DominatorTree *DT);
/// callCapturesBefore - A convenience wrapper.
ModRefResult callCapturesBefore(const Instruction *I, const Value *P,
uint64_t Size, DominatorTree *DT) {
return callCapturesBefore(I, MemoryLocation(P, Size), DT);
}
//===--------------------------------------------------------------------===//
/// Higher level methods for querying mod/ref information.
///
/// canBasicBlockModify - Return true if it is possible for execution of the
/// specified basic block to modify the location Loc.
bool canBasicBlockModify(const BasicBlock &BB, const MemoryLocation &Loc);
/// canBasicBlockModify - A convenience wrapper.
bool canBasicBlockModify(const BasicBlock &BB, const Value *P, uint64_t Size){
return canBasicBlockModify(BB, MemoryLocation(P, Size));
}
/// canInstructionRangeModRef - Return true if it is possible for the
/// execution of the specified instructions to mod/ref (according to the
/// mode) the location Loc. The instructions to consider are all
/// of the instructions in the range of [I1,I2] INCLUSIVE.
/// I1 and I2 must be in the same basic block.
bool canInstructionRangeModRef(const Instruction &I1, const Instruction &I2,
const MemoryLocation &Loc,
const ModRefResult Mode);
/// canInstructionRangeModRef - A convenience wrapper.
bool canInstructionRangeModRef(const Instruction &I1,
const Instruction &I2, const Value *Ptr,
uint64_t Size, const ModRefResult Mode) {
return canInstructionRangeModRef(I1, I2, MemoryLocation(Ptr, Size), Mode);
}
//===--------------------------------------------------------------------===//
/// Methods that clients should call when they transform the program to allow
/// alias analyses to update their internal data structures. Note that these
/// methods may be called on any instruction, regardless of whether or not
/// they have pointer-analysis implications.
///
/// deleteValue - This method should be called whenever an LLVM Value is
/// deleted from the program, for example when an instruction is found to be
/// redundant and is eliminated.
///
virtual void deleteValue(Value *V);
/// addEscapingUse - This method should be used whenever an escaping use is
/// added to a pointer value. Analysis implementations may either return
/// conservative responses for that value in the future, or may recompute
/// some or all internal state to continue providing precise responses.
///
/// Escaping uses are considered by anything _except_ the following:
/// - GEPs or bitcasts of the pointer
/// - Loads through the pointer
/// - Stores through (but not of) the pointer
virtual void addEscapingUse(Use &U);
/// replaceWithNewValue - This method is the obvious combination of the two
/// above, and is provided as a helper to simplify client code.
///
void replaceWithNewValue(Value *Old, Value *New) {
deleteValue(Old);
}
};
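// Illustrative usage sketch (not part of the original interface): a
// transformation pass typically obtains the chained analysis with
// getAnalysis<AliasAnalysis>() and then issues location-based queries. The
// values Call, PtrA, PtrB and Size below are assumptions for this example.
//
//   AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
//   if (AA.isNoAlias(PtrA, PtrB)) {
//     // Accesses through PtrA and PtrB may be freely reordered.
//   }
//   if (AA.getModRefInfo(Call, MemoryLocation(PtrA, Size)) ==
//       AliasAnalysis::NoModRef) {
//     // The call neither reads nor writes the Size bytes at PtrA.
//   }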
/// isNoAliasCall - Return true if this pointer is returned by a noalias
/// function.
bool isNoAliasCall(const Value *V);
/// isNoAliasArgument - Return true if this is an argument with the noalias
/// attribute.
bool isNoAliasArgument(const Value *V);
/// isIdentifiedObject - Return true if this pointer refers to a distinct and
/// identifiable object. This returns true for:
/// Global Variables and Functions (but not Global Aliases)
/// Allocas
/// ByVal and NoAlias Arguments
/// NoAlias returns (e.g. calls to malloc)
///
bool isIdentifiedObject(const Value *V);
/// isIdentifiedFunctionLocal - Return true if V is unambiguously identified
/// at the function-level. Different IdentifiedFunctionLocals can't alias.
/// Further, an IdentifiedFunctionLocal can not alias with any function
/// arguments other than itself, which is not necessarily true for
/// IdentifiedObjects.
bool isIdentifiedFunctionLocal(const Value *V);
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/DxilValueCache.h | //===--------- DxilValueCache.h - Dxil Constant Value Cache --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_DXILVALUECACHE_H
#define LLVM_ANALYSIS_DXILVALUECACHE_H
#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
namespace llvm {
class Module;
class DominatorTree;
class Constant;
class ConstantInt;
class PHINode;
struct DxilValueCache : public ImmutablePass {
static char ID;
// Special Weak Value to Weak Value map.
struct WeakValueMap {
struct ValueVH : public CallbackVH {
ValueVH(Value *V) : CallbackVH(V) {}
void allUsesReplacedWith(Value *) override { setValPtr(nullptr); }
};
struct ValueEntry {
WeakTrackingVH Value;
ValueVH Self;
ValueEntry() : Value(nullptr), Self(nullptr) {}
inline void Set(llvm::Value *Key, llvm::Value *V) {
Self = Key;
Value = V;
}
inline bool IsStale() const { return Self == nullptr; }
};
ValueMap<const Value *, ValueEntry> Map;
Value *Get(Value *V);
void Set(Value *Key, Value *V);
bool Seen(Value *v);
void SetSentinel(Value *V);
void ResetUnknowns();
void ResetAll();
void dump() const;
private:
Value *GetSentinel(LLVMContext &Ctx);
std::unique_ptr<PHINode> Sentinel;
};
private:
WeakValueMap Map;
bool (*ShouldSkipCallback)(Value *V) = nullptr;
void MarkUnreachable(BasicBlock *BB);
bool IsUnreachable_(BasicBlock *BB);
bool MayBranchTo(BasicBlock *A, BasicBlock *B);
Value *TryGetCachedValue(Value *V);
Value *ProcessValue(Value *V, DominatorTree *DT);
Value *ProcessAndSimplify_PHI(Instruction *I, DominatorTree *DT);
Value *ProcessAndSimplify_Br(Instruction *I, DominatorTree *DT);
Value *ProcessAndSimplify_Switch(Instruction *I, DominatorTree *DT);
Value *ProcessAndSimplify_Load(Instruction *LI, DominatorTree *DT);
Value *SimplifyAndCacheResult(Instruction *I, DominatorTree *DT);
public:
StringRef getPassName() const override;
DxilValueCache();
void getAnalysisUsage(AnalysisUsage &) const override;
void dump() const;
Value *GetValue(Value *V, DominatorTree *DT = nullptr);
Constant *GetConstValue(Value *V, DominatorTree *DT = nullptr);
ConstantInt *GetConstInt(Value *V, DominatorTree *DT = nullptr);
void ResetUnknowns() { Map.ResetUnknowns(); }
void ResetAll() { Map.ResetAll(); }
bool IsUnreachable(BasicBlock *BB, DominatorTree *DT = nullptr);
void SetShouldSkipCallback(bool (*Callback)(Value *V)) {
ShouldSkipCallback = Callback;
};
};
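// Illustrative usage sketch (not part of the original header): a client pass
// would add DxilValueCache to its analysis usage and then ask the cache for
// folded constants. The instruction I below is an assumption for the example.
//
//   DxilValueCache *DVC = &getAnalysis<DxilValueCache>();
//   if (ConstantInt *C = DVC->GetConstInt(I)) {
//     // I is known to evaluate to the constant integer C; it may be
//     // replaced by C and the original instruction cleaned up.
//   }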
void initializeDxilValueCachePass(class llvm::PassRegistry &);
Pass *createDxilValueCachePass();
} // namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/DominanceFrontier.h | //===- llvm/Analysis/DominanceFrontier.h - Dominator Frontiers --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the DominanceFrontier class, which calculates and holds the
// dominance frontier for a function.
//
// This should be considered deprecated, don't add any more uses of this data
// structure.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_DOMINANCEFRONTIER_H
#define LLVM_ANALYSIS_DOMINANCEFRONTIER_H
#include "llvm/IR/Dominators.h"
#include <map>
#include <set>
namespace llvm {
/// DominanceFrontierBase - Common base class for computing forward and inverse
/// dominance frontiers for a function.
///
template <class BlockT>
class DominanceFrontierBase {
public:
typedef std::set<BlockT *> DomSetType; // Dom set for a bb
typedef std::map<BlockT *, DomSetType> DomSetMapType; // Dom set map
protected:
typedef GraphTraits<BlockT *> BlockTraits;
DomSetMapType Frontiers;
std::vector<BlockT *> Roots;
const bool IsPostDominators;
public:
DominanceFrontierBase(bool isPostDom) : IsPostDominators(isPostDom) {}
/// getRoots - Return the root blocks of the current CFG. This may include
/// multiple blocks if we are computing post dominators. For forward
/// dominators, this will always be a single block (the entry node).
///
inline const std::vector<BlockT *> &getRoots() const {
return Roots;
}
BlockT *getRoot() const {
assert(Roots.size() == 1 && "Should always have entry node!");
return Roots[0];
}
/// isPostDominator - Returns true if analysis based on postdoms
///
bool isPostDominator() const {
return IsPostDominators;
}
void releaseMemory() {
Frontiers.clear();
}
// Accessor interface:
typedef typename DomSetMapType::iterator iterator;
typedef typename DomSetMapType::const_iterator const_iterator;
iterator begin() { return Frontiers.begin(); }
const_iterator begin() const { return Frontiers.begin(); }
iterator end() { return Frontiers.end(); }
const_iterator end() const { return Frontiers.end(); }
iterator find(BlockT *B) { return Frontiers.find(B); }
const_iterator find(BlockT *B) const { return Frontiers.find(B); }
iterator addBasicBlock(BlockT *BB, const DomSetType &frontier) {
assert(find(BB) == end() && "Block already in DominanceFrontier!");
return Frontiers.insert(std::make_pair(BB, frontier)).first;
}
/// removeBlock - Remove basic block BB's frontier.
void removeBlock(BlockT *BB);
void addToFrontier(iterator I, BlockT *Node);
void removeFromFrontier(iterator I, BlockT *Node);
/// compareDomSet - Return false if two domsets match. Otherwise
/// return true.
bool compareDomSet(DomSetType &DS1, const DomSetType &DS2) const;
/// compare - Return true if the other dominance frontier base matches
/// this dominance frontier base. Otherwise return false.
bool compare(DominanceFrontierBase<BlockT> &Other) const;
/// print - Convert to human readable form
///
void print(raw_ostream &OS) const;
/// dump - Dump the dominance frontier to dbgs().
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void dump() const;
#endif
};
//===-------------------------------------
/// DominanceFrontier Class - Concrete subclass of DominanceFrontierBase that is
/// used to compute forward dominance frontiers.
///
template <class BlockT>
class ForwardDominanceFrontierBase : public DominanceFrontierBase<BlockT> {
private:
typedef GraphTraits<BlockT *> BlockTraits;
public:
typedef DominatorTreeBase<BlockT> DomTreeT;
typedef DomTreeNodeBase<BlockT> DomTreeNodeT;
typedef typename DominanceFrontierBase<BlockT>::DomSetType DomSetType;
ForwardDominanceFrontierBase() : DominanceFrontierBase<BlockT>(false) {}
void analyze(DomTreeT &DT) {
this->Roots = DT.getRoots();
assert(this->Roots.size() == 1 &&
"Only one entry block for forward domfronts!");
calculate(DT, DT[this->Roots[0]]);
}
const DomSetType &calculate(const DomTreeT &DT, const DomTreeNodeT *Node);
};
class DominanceFrontier : public FunctionPass {
ForwardDominanceFrontierBase<BasicBlock> Base;
public:
typedef DominatorTreeBase<BasicBlock> DomTreeT;
typedef DomTreeNodeBase<BasicBlock> DomTreeNodeT;
typedef DominanceFrontierBase<BasicBlock>::DomSetType DomSetType;
typedef DominanceFrontierBase<BasicBlock>::iterator iterator;
typedef DominanceFrontierBase<BasicBlock>::const_iterator const_iterator;
static char ID; // Pass ID, replacement for typeid
DominanceFrontier();
ForwardDominanceFrontierBase<BasicBlock> &getBase() { return Base; }
inline const std::vector<BasicBlock *> &getRoots() const {
return Base.getRoots();
}
BasicBlock *getRoot() const { return Base.getRoot(); }
bool isPostDominator() const { return Base.isPostDominator(); }
iterator begin() { return Base.begin(); }
const_iterator begin() const { return Base.begin(); }
iterator end() { return Base.end(); }
const_iterator end() const { return Base.end(); }
iterator find(BasicBlock *B) { return Base.find(B); }
const_iterator find(BasicBlock *B) const { return Base.find(B); }
iterator addBasicBlock(BasicBlock *BB, const DomSetType &frontier) {
return Base.addBasicBlock(BB, frontier);
}
void removeBlock(BasicBlock *BB) { return Base.removeBlock(BB); }
void addToFrontier(iterator I, BasicBlock *Node) {
return Base.addToFrontier(I, Node);
}
void removeFromFrontier(iterator I, BasicBlock *Node) {
return Base.removeFromFrontier(I, Node);
}
bool compareDomSet(DomSetType &DS1, const DomSetType &DS2) const {
return Base.compareDomSet(DS1, DS2);
}
bool compare(DominanceFrontierBase<BasicBlock> &Other) const {
return Base.compare(Other);
}
void releaseMemory() override;
bool runOnFunction(Function &) override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
void print(raw_ostream &OS, const Module * = nullptr) const override;
void dump() const;
};
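// Illustrative usage sketch (not part of the original header): iterating the
// frontier computed for one block. DF is assumed to come from a required
// DominanceFrontier pass and BB to be a block of the analyzed function.
//
//   DominanceFrontier &DF = getAnalysis<DominanceFrontier>();
//   DominanceFrontier::iterator It = DF.find(BB);
//   if (It != DF.end())
//     for (BasicBlock *FrontierBB : It->second) {
//       // FrontierBB is a block where BB's dominance ends.
//       (void)FrontierBB;
//     }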
extern template class DominanceFrontierBase<BasicBlock>;
extern template class ForwardDominanceFrontierBase<BasicBlock>;
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/ReducibilityAnalysis.h | ///////////////////////////////////////////////////////////////////////////////
// //
// ReducibilityAnalysis.h //
// Copyright (C) Microsoft Corporation. All rights reserved. //
// This file is distributed under the University of Illinois Open Source //
// License. See LICENSE.TXT for details. //
// //
// Implements reducibility analysis pass. //
// //
///////////////////////////////////////////////////////////////////////////////
#pragma once
namespace llvm {
class Module;
class Function;
class PassRegistry;
class FunctionPass;
enum class IrreducibilityAction {
ThrowException,
PrintLog,
Ignore,
};
extern char &ReducibilityAnalysisID;
llvm::FunctionPass *createReducibilityAnalysisPass(
IrreducibilityAction Action = IrreducibilityAction::ThrowException);
void initializeReducibilityAnalysisPass(llvm::PassRegistry &);
bool IsReducible(
const llvm::Module &M,
IrreducibilityAction Action = IrreducibilityAction::ThrowException);
bool IsReducible(
const llvm::Function &F,
IrreducibilityAction Action = IrreducibilityAction::ThrowException);
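// Illustrative example (not part of the original header): checking a
// function's CFG for reducibility without throwing on failure. F is assumed
// to be a valid llvm::Function.
//
//   if (!IsReducible(F, IrreducibilityAction::Ignore)) {
//     // The CFG contains an irreducible region (a loop with multiple entries).
//   }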
} // namespace llvm
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/TargetTransformInfo.h | //===- TargetTransformInfo.h ------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This pass exposes codegen information to IR-level passes. Every
/// transformation that uses codegen information is broken into three parts:
/// 1. The IR-level analysis pass.
/// 2. The IR-level transformation interface which provides the needed
/// information.
/// 3. Codegen-level implementation which uses target-specific hooks.
///
/// This file defines #2, which is the interface that IR-level transformations
/// use for querying the codegen.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
#include "llvm/ADT/Optional.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Pass.h"
#include "llvm/Support/DataTypes.h"
#include <functional>
namespace llvm {
class Function;
class GlobalValue;
class Loop;
class PreservedAnalyses;
class Type;
class User;
class Value;
/// \brief Information about a load/store intrinsic defined by the target.
struct MemIntrinsicInfo {
MemIntrinsicInfo()
: ReadMem(false), WriteMem(false), Vol(false), MatchingId(0),
NumMemRefs(0), PtrVal(nullptr) {}
bool ReadMem;
bool WriteMem;
bool Vol;
// Same Id is set by the target for corresponding load/store intrinsics.
unsigned short MatchingId;
int NumMemRefs;
Value *PtrVal;
};
/// \brief This pass provides access to the codegen interfaces that are needed
/// for IR-level transformations.
class TargetTransformInfo {
public:
/// \brief Construct a TTI object using a type implementing the \c Concept
/// API below.
///
/// This is used by targets to construct a TTI wrapping their target-specific
/// implementation that encodes appropriate costs for their target.
template <typename T> TargetTransformInfo(T Impl);
/// \brief Construct a baseline TTI object using a minimal implementation of
/// the \c Concept API below.
///
/// The TTI implementation will reflect the information in the DataLayout
/// provided if non-null.
explicit TargetTransformInfo(const DataLayout &DL);
// Provide move semantics.
TargetTransformInfo(TargetTransformInfo &&Arg);
TargetTransformInfo &operator=(TargetTransformInfo &&RHS);
// We need to define the destructor out-of-line to define our sub-classes
// out-of-line.
~TargetTransformInfo();
/// \brief Handle the invalidation of this information.
///
/// When used as a result of \c TargetIRAnalysis this method will be called
/// when the function this was computed for changes. When it returns false,
/// the information is preserved across those changes.
bool invalidate(Function &, const PreservedAnalyses &) {
// FIXME: We should probably in some way ensure that the subtarget
// information for a function hasn't changed.
return false;
}
/// \name Generic Target Information
/// @{
/// \brief Underlying constants for 'cost' values in this interface.
///
/// Many APIs in this interface return a cost. This enum defines the
/// fundamental values that should be used to interpret (and produce) those
/// costs. The costs are returned as an unsigned rather than a member of this
/// enumeration because it is expected that the cost of one IR instruction
/// may have a multiplicative factor to it or otherwise won't fit directly
/// into the enum. Moreover, it is common to sum or average costs which works
/// better as simple integral values. Thus this enum only provides constants.
///
/// Note that these costs should usually reflect the intersection of code-size
/// cost and execution cost. A free instruction is typically one that folds
/// into another instruction. For example, reg-to-reg moves can often be
/// skipped by renaming the registers in the CPU, but they still are encoded
/// and thus wouldn't be considered 'free' here.
enum TargetCostConstants {
TCC_Free = 0, ///< Expected to fold away in lowering.
TCC_Basic = 1, ///< The cost of a typical 'add' instruction.
TCC_Expensive = 4 ///< The cost of a 'div' instruction on x86.
};
/// \brief Estimate the cost of a specific operation when lowered.
///
/// Note that this is designed to work on an arbitrary synthetic opcode, and
/// thus work for hypothetical queries before an instruction has even been
/// formed. However, this does *not* work for GEPs, and must not be called
/// for a GEP instruction. Instead, use the dedicated getGEPCost interface as
/// analyzing a GEP's cost requires more information.
///
/// Typically only the result type is required, and the operand type can be
/// omitted. However, if the opcode is one of the cast instructions, the
/// operand type is required.
///
/// The returned cost is defined in terms of \c TargetCostConstants, see its
/// comments for a detailed explanation of the cost values.
unsigned getOperationCost(unsigned Opcode, Type *Ty,
Type *OpTy = nullptr) const;
/// \brief Estimate the cost of a GEP operation when lowered.
///
/// The contract for this function is the same as \c getOperationCost except
/// that it supports an interface that provides extra information specific to
/// the GEP operation.
unsigned getGEPCost(const Value *Ptr, ArrayRef<const Value *> Operands) const;
/// \brief Estimate the cost of a function call when lowered.
///
/// The contract for this is the same as \c getOperationCost except that it
/// supports an interface that provides extra information specific to call
/// instructions.
///
/// This is the most basic query for estimating call cost: it only knows the
/// function type and (potentially) the number of arguments at the call site.
/// The latter is only interesting for varargs function types.
unsigned getCallCost(FunctionType *FTy, int NumArgs = -1) const;
/// \brief Estimate the cost of calling a specific function when lowered.
///
/// This overload adds the ability to reason about the particular function
/// being called in the event it is a library call with special lowering.
unsigned getCallCost(const Function *F, int NumArgs = -1) const;
/// \brief Estimate the cost of calling a specific function when lowered.
///
/// This overload allows specifying a set of candidate argument values.
unsigned getCallCost(const Function *F,
ArrayRef<const Value *> Arguments) const;
/// \brief Estimate the cost of an intrinsic when lowered.
///
/// Mirrors the \c getCallCost method but uses an intrinsic identifier.
unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<Type *> ParamTys) const;
/// \brief Estimate the cost of an intrinsic when lowered.
///
/// Mirrors the \c getCallCost method but uses an intrinsic identifier.
unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<const Value *> Arguments) const;
/// \brief Estimate the cost of a given IR user when lowered.
///
/// This can estimate the cost of either a ConstantExpr or Instruction when
/// lowered. It has two primary advantages over the \c getOperationCost and
/// \c getGEPCost above, and one significant disadvantage: it can only be
/// used when the IR construct has already been formed.
///
/// The advantages are that it can inspect the SSA use graph to reason more
/// accurately about the cost. For example, all-constant-GEPs can often be
/// folded into a load or other instruction, but if they are used in some
/// other context they may not be folded. This routine can distinguish such
/// cases.
///
/// The returned cost is defined in terms of \c TargetCostConstants, see its
/// comments for a detailed explanation of the cost values.
unsigned getUserCost(const User *U) const;
/// \brief Return true if branch divergence exists.
///
/// Branch divergence has a significantly negative impact on GPU performance
/// when threads in the same wavefront take different paths due to conditional
/// branches.
bool hasBranchDivergence() const;
/// \brief Returns whether V is a source of divergence.
///
/// This function provides the target-dependent information for
/// the target-independent DivergenceAnalysis. DivergenceAnalysis first
/// builds the dependency graph, and then runs the reachability algorithm
/// starting with the sources of divergence.
bool isSourceOfDivergence(const Value *V) const;
/// \brief Test whether calls to a function lower to actual program function
/// calls.
///
/// The idea is to test whether the program is likely to require a 'call'
/// instruction or equivalent in order to call the given function.
///
/// FIXME: It's not clear that this is a good or useful query API. Clients
/// should probably move to simpler cost metrics using the above.
/// Alternatively, we could split the cost interface into distinct code-size
/// and execution-speed costs. This would allow modelling the core of this
/// query more accurately as a call is a single small instruction, but
/// incurs significant execution cost.
bool isLoweredToCall(const Function *F) const;
/// Parameters that control the generic loop unrolling transformation.
struct UnrollingPreferences {
/// The cost threshold for the unrolled loop. Should be relative to the
/// getUserCost values returned by this API, and the expectation is that
/// the unrolled loop's instructions when run through that interface should
/// not exceed this cost. However, this is only an estimate. Also, specific
/// loops may be unrolled even with a cost above this threshold if deemed
/// profitable. Set this to UINT_MAX to disable the loop body cost
/// restriction.
unsigned Threshold;
/// If complete unrolling will reduce the cost of the loop below its
/// expected dynamic cost while rolled by this percentage, apply a discount
/// (below) to its unrolled cost.
unsigned PercentDynamicCostSavedThreshold;
/// The discount applied to the unrolled cost when the *dynamic* cost
/// savings of unrolling exceed the \c PercentDynamicCostSavedThreshold.
unsigned DynamicCostSavingsDiscount;
/// The cost threshold for the unrolled loop when optimizing for size (set
/// to UINT_MAX to disable).
unsigned OptSizeThreshold;
/// The cost threshold for the unrolled loop, like Threshold, but used
/// for partial/runtime unrolling (set to UINT_MAX to disable).
unsigned PartialThreshold;
/// The cost threshold for the unrolled loop when optimizing for size, like
/// OptSizeThreshold, but used for partial/runtime unrolling (set to
/// UINT_MAX to disable).
unsigned PartialOptSizeThreshold;
/// A forced unrolling factor (the number of concatenated bodies of the
/// original loop in the unrolled loop body). When set to 0, the unrolling
/// transformation will select an unrolling factor based on the current cost
/// threshold and other factors.
unsigned Count;
// Set the maximum unrolling factor. The unrolling factor may be selected
// using the appropriate cost threshold, but may not exceed this number
// (set to UINT_MAX to disable). This does not apply in cases where the
// loop is being fully unrolled.
unsigned MaxCount;
/// Allow partial unrolling (unrolling of loops to expand the size of the
/// loop body, not only to eliminate small constant-trip-count loops).
bool Partial;
/// Allow runtime unrolling (unrolling of loops to expand the size of the
/// loop body even when the number of loop iterations is not known at
/// compile time).
bool Runtime;
/// Allow emitting expensive instructions (such as divisions) when computing
/// the trip count of a loop for runtime unrolling.
bool AllowExpensiveTripCount;
};
/// \brief Get target-customized preferences for the generic loop unrolling
/// transformation. The caller will initialize UP with the current
/// target-independent defaults.
void getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) const;
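// Illustrative sketch (not part of the original header) of how a loop
// transformation might consult the target before unrolling. TTI and L are
// assumed to be a TargetTransformInfo reference and a Loop pointer; the
// initial values are placeholders, not the real target-independent defaults.
//
//   TargetTransformInfo::UnrollingPreferences UP;
//   UP.Threshold = 150;          // caller-chosen default, may be overridden
//   UP.Partial = false;
//   UP.Runtime = false;
//   TTI.getUnrollingPreferences(L, UP);
//   if (UP.Partial) {
//     // The target allows partial unrolling for this loop.
//   }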
/// @}
/// \name Scalar Target Information
/// @{
/// \brief Flags indicating the kind of support for population count.
///
/// Compared to the SW implementation, HW support is supposed to
/// significantly boost the performance when the population is dense, and it
/// may or may not degrade performance if the population is sparse. A HW
/// support is considered as "Fast" if it can outperform, or is on a par
/// with, SW implementation when the population is sparse; otherwise, it is
/// considered as "Slow".
enum PopcntSupportKind { PSK_Software, PSK_SlowHardware, PSK_FastHardware };
/// \brief Return true if the specified immediate is legal add immediate, that
/// is the target has add instructions which can add a register with the
/// immediate without having to materialize the immediate into a register.
bool isLegalAddImmediate(int64_t Imm) const;
/// \brief Return true if the specified immediate is legal icmp immediate,
/// that is the target has icmp instructions which can compare a register
/// against the immediate without having to materialize the immediate into a
/// register.
bool isLegalICmpImmediate(int64_t Imm) const;
/// \brief Return true if the addressing mode represented by AM is legal for
/// this target, for a load/store of the specified type.
/// The type may be VoidTy, in which case only return true if the addressing
/// mode is legal for a load/store of any legal type.
/// TODO: Handle pre/postinc as well.
bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
bool HasBaseReg, int64_t Scale,
unsigned AddrSpace = 0) const;
/// \brief Return true if the target works with masked instructions.
/// AVX2 allows masks for consecutive load and store for i32 and i64 elements.
/// AVX-512 architecture will also allow masks for non-consecutive memory
/// accesses.
bool isLegalMaskedStore(Type *DataType, int Consecutive) const;
bool isLegalMaskedLoad(Type *DataType, int Consecutive) const;
/// \brief Return the cost of the scaling factor used in the addressing
/// mode represented by AM for this target, for a load/store
/// of the specified type.
/// If the AM is supported, the return value must be >= 0.
/// If the AM is not supported, it returns a negative value.
/// TODO: Handle pre/postinc as well.
int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
bool HasBaseReg, int64_t Scale,
unsigned AddrSpace = 0) const;
/// \brief Return true if it's free to truncate a value of type Ty1 to type
/// Ty2. e.g. On x86 it's free to truncate a i32 value in register EAX to i16
/// by referencing its sub-register AX.
bool isTruncateFree(Type *Ty1, Type *Ty2) const;
/// \brief Return true if it is profitable to hoist instruction in the
/// then/else to before if.
bool isProfitableToHoist(Instruction *I) const;
/// \brief Return true if this type is legal.
bool isTypeLegal(Type *Ty) const;
/// \brief Returns the target's jmp_buf alignment in bytes.
unsigned getJumpBufAlignment() const;
/// \brief Returns the target's jmp_buf size in bytes.
unsigned getJumpBufSize() const;
/// \brief Return true if switches should be turned into lookup tables for the
/// target.
bool shouldBuildLookupTables() const;
/// \brief Don't restrict interleaved unrolling to small loops.
bool enableAggressiveInterleaving(bool LoopHasReductions) const;
/// \brief Return hardware support for population count.
PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const;
/// \brief Return true if the hardware has a fast square-root instruction.
bool haveFastSqrt(Type *Ty) const;
/// \brief Return the expected cost of supporting the floating point operation
/// of the specified type.
unsigned getFPOpCost(Type *Ty) const;
/// \brief Return the expected cost of materializing for the given integer
/// immediate of the specified type.
unsigned getIntImmCost(const APInt &Imm, Type *Ty) const;
/// \brief Return the expected cost of materialization for the given integer
/// immediate of the specified type for a given instruction. The cost can be
/// zero if the immediate can be folded into the specified instruction.
unsigned getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
Type *Ty) const;
unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
Type *Ty) const;
/// @}
/// \name Vector Target Information
/// @{
/// \brief The various kinds of shuffle patterns for vector queries.
enum ShuffleKind {
SK_Broadcast, ///< Broadcast element 0 to all other elements.
SK_Reverse, ///< Reverse the order of the vector.
SK_Alternate, ///< Choose alternate elements from vector.
SK_InsertSubvector, ///< InsertSubvector. Index indicates start offset.
SK_ExtractSubvector ///< ExtractSubvector Index indicates start offset.
};
/// \brief Additional information about an operand's possible values.
enum OperandValueKind {
OK_AnyValue, // Operand can have any value.
OK_UniformValue, // Operand is uniform (splat of a value).
OK_UniformConstantValue, // Operand is uniform constant.
OK_NonUniformConstantValue // Operand is a non uniform constant value.
};
/// \brief Additional properties of an operand's values.
enum OperandValueProperties { OP_None = 0, OP_PowerOf2 = 1 };
/// \return The number of scalar or vector registers that the target has.
/// If 'Vectors' is true, it returns the number of vector registers. If it is
/// set to false, it returns the number of scalar registers.
unsigned getNumberOfRegisters(bool Vector) const;
/// \return The width of the largest scalar or vector register type.
unsigned getRegisterBitWidth(bool Vector) const;
/// \return The maximum interleave factor that any transform should try to
/// perform for this target. This number depends on the level of parallelism
/// and the number of execution units in the CPU.
unsigned getMaxInterleaveFactor(unsigned VF) const;
/// \return The expected cost of arithmetic ops, such as mul, xor, fsub, etc.
unsigned
getArithmeticInstrCost(unsigned Opcode, Type *Ty,
OperandValueKind Opd1Info = OK_AnyValue,
OperandValueKind Opd2Info = OK_AnyValue,
OperandValueProperties Opd1PropInfo = OP_None,
OperandValueProperties Opd2PropInfo = OP_None) const;
/// \return The cost of a shuffle instruction of kind Kind and of type Tp.
/// The index and subtype parameters are used by the subvector insertion and
/// extraction shuffle kinds.
unsigned getShuffleCost(ShuffleKind Kind, Type *Tp, int Index = 0,
Type *SubTp = nullptr) const;
/// \return The expected cost of cast instructions, such as bitcast, trunc,
/// zext, etc.
unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const;
/// \return The expected cost of control-flow related instructions such as
/// Phi, Ret, Br.
unsigned getCFInstrCost(unsigned Opcode) const;
/// \returns The expected cost of compare and select instructions.
unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
Type *CondTy = nullptr) const;
/// \return The expected cost of vector Insert and Extract.
/// Use -1 to indicate that there is no information on the index value.
unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
unsigned Index = -1) const;
/// \return The cost of Load and Store instructions.
unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace) const;
/// \return The cost of masked Load and Store instructions.
unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace) const;
/// \return The cost of the interleaved memory operation.
/// \p Opcode is the memory operation code
/// \p VecTy is the vector type of the interleaved access.
/// \p Factor is the interleave factor
/// \p Indices is the indices for interleaved load members (as interleaved
/// load allows gaps)
/// \p Alignment is the alignment of the memory operation
/// \p AddressSpace is address space of the pointer.
unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
unsigned Factor,
ArrayRef<unsigned> Indices,
unsigned Alignment,
unsigned AddressSpace) const;
/// \brief Calculate the cost of performing a vector reduction.
///
/// This is the cost of reducing the vector value of type \p Ty to a scalar
/// value using the operation denoted by \p Opcode. The form of the reduction
/// can either be a pairwise reduction or a reduction that splits the vector
/// at every reduction level.
///
/// Pairwise:
/// (v0, v1, v2, v3)
/// ((v0+v1), (v2+v3), undef, undef)
/// Split:
/// (v0, v1, v2, v3)
/// ((v0+v2), (v1+v3), undef, undef)
unsigned getReductionCost(unsigned Opcode, Type *Ty,
bool IsPairwiseForm) const;
/// \returns The cost of Intrinsic instructions.
unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
ArrayRef<Type *> Tys) const;
/// \returns The cost of Call instructions.
unsigned getCallInstrCost(Function *F, Type *RetTy,
ArrayRef<Type *> Tys) const;
/// \returns The number of pieces into which the provided type must be
/// split during legalization. Zero is returned when the answer is unknown.
unsigned getNumberOfParts(Type *Tp) const;
/// \returns The cost of the address computation. For most targets this can be
/// merged into the instruction indexing mode. Some targets might want to
/// distinguish between address computation for memory operations on vector
/// types and scalar types. Such targets should override this function.
/// The 'IsComplex' parameter is a hint that the address computation is likely
/// to involve multiple instructions and as such unlikely to be merged into
/// the address indexing mode.
unsigned getAddressComputationCost(Type *Ty, bool IsComplex = false) const;
/// \returns The cost, if any, of keeping values of the given types alive
/// over a callsite.
///
/// Some types may require the use of register classes that do not have
/// any callee-saved registers, so would require a spill and fill.
unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const;
/// \returns True if the intrinsic is a supported memory intrinsic. Info
/// will contain additional information - whether the intrinsic may read or
/// write memory, its volatility, and the pointer operand. Info is undefined
/// if false is returned.
bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const;
/// \returns A value which is the result of the given memory intrinsic. New
/// instructions may be created to extract the result from the given intrinsic
/// memory operation. Returns nullptr if the target cannot create a result
/// from the given intrinsic.
Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
Type *ExpectedType) const;
/// \returns True if the two functions have compatible attributes for inlining
/// purposes.
bool hasCompatibleFunctionAttributes(const Function *Caller,
const Function *Callee) const;
/// @}
private:
/// \brief The abstract base class used to type erase specific TTI
/// implementations.
class Concept;
/// \brief The template model for the base class which wraps a concrete
/// implementation in a type erased interface.
template <typename T> class Model;
std::unique_ptr<Concept> TTIImpl;
};
class TargetTransformInfo::Concept {
public:
virtual ~Concept() = 0;
virtual const DataLayout &getDataLayout() const = 0;
virtual unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) = 0;
virtual unsigned getGEPCost(const Value *Ptr,
ArrayRef<const Value *> Operands) = 0;
virtual unsigned getCallCost(FunctionType *FTy, int NumArgs) = 0;
virtual unsigned getCallCost(const Function *F, int NumArgs) = 0;
virtual unsigned getCallCost(const Function *F,
ArrayRef<const Value *> Arguments) = 0;
virtual unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<Type *> ParamTys) = 0;
virtual unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<const Value *> Arguments) = 0;
virtual unsigned getUserCost(const User *U) = 0;
virtual bool hasBranchDivergence() = 0;
virtual bool isSourceOfDivergence(const Value *V) = 0;
virtual bool isLoweredToCall(const Function *F) = 0;
virtual void getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) = 0;
virtual bool isLegalAddImmediate(int64_t Imm) = 0;
virtual bool isLegalICmpImmediate(int64_t Imm) = 0;
virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
int64_t BaseOffset, bool HasBaseReg,
int64_t Scale,
unsigned AddrSpace) = 0;
virtual bool isLegalMaskedStore(Type *DataType, int Consecutive) = 0;
virtual bool isLegalMaskedLoad(Type *DataType, int Consecutive) = 0;
virtual int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
int64_t BaseOffset, bool HasBaseReg,
int64_t Scale, unsigned AddrSpace) = 0;
virtual bool isTruncateFree(Type *Ty1, Type *Ty2) = 0;
virtual bool isProfitableToHoist(Instruction *I) = 0;
virtual bool isTypeLegal(Type *Ty) = 0;
virtual unsigned getJumpBufAlignment() = 0;
virtual unsigned getJumpBufSize() = 0;
virtual bool shouldBuildLookupTables() = 0;
virtual bool enableAggressiveInterleaving(bool LoopHasReductions) = 0;
virtual PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) = 0;
virtual bool haveFastSqrt(Type *Ty) = 0;
virtual unsigned getFPOpCost(Type *Ty) = 0;
virtual unsigned getIntImmCost(const APInt &Imm, Type *Ty) = 0;
virtual unsigned getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
Type *Ty) = 0;
virtual unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx,
const APInt &Imm, Type *Ty) = 0;
virtual unsigned getNumberOfRegisters(bool Vector) = 0;
virtual unsigned getRegisterBitWidth(bool Vector) = 0;
virtual unsigned getMaxInterleaveFactor(unsigned VF) = 0;
virtual unsigned
getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
OperandValueKind Opd2Info,
OperandValueProperties Opd1PropInfo,
OperandValueProperties Opd2PropInfo) = 0;
virtual unsigned getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
Type *SubTp) = 0;
virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) = 0;
virtual unsigned getCFInstrCost(unsigned Opcode) = 0;
virtual unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
Type *CondTy) = 0;
virtual unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
unsigned Index) = 0;
virtual unsigned getMemoryOpCost(unsigned Opcode, Type *Src,
unsigned Alignment,
unsigned AddressSpace) = 0;
virtual unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
unsigned Alignment,
unsigned AddressSpace) = 0;
virtual unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
unsigned Factor,
ArrayRef<unsigned> Indices,
unsigned Alignment,
unsigned AddressSpace) = 0;
virtual unsigned getReductionCost(unsigned Opcode, Type *Ty,
bool IsPairwiseForm) = 0;
virtual unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
ArrayRef<Type *> Tys) = 0;
virtual unsigned getCallInstrCost(Function *F, Type *RetTy,
ArrayRef<Type *> Tys) = 0;
virtual unsigned getNumberOfParts(Type *Tp) = 0;
virtual unsigned getAddressComputationCost(Type *Ty, bool IsComplex) = 0;
virtual unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) = 0;
virtual bool getTgtMemIntrinsic(IntrinsicInst *Inst,
MemIntrinsicInfo &Info) = 0;
virtual Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
Type *ExpectedType) = 0;
virtual bool hasCompatibleFunctionAttributes(const Function *Caller,
const Function *Callee) const = 0;
};
template <typename T>
class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
T Impl;
public:
Model(T Impl) : Impl(std::move(Impl)) {}
~Model() override {}
const DataLayout &getDataLayout() const override {
return Impl.getDataLayout();
}
unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) override {
return Impl.getOperationCost(Opcode, Ty, OpTy);
}
unsigned getGEPCost(const Value *Ptr,
ArrayRef<const Value *> Operands) override {
return Impl.getGEPCost(Ptr, Operands);
}
unsigned getCallCost(FunctionType *FTy, int NumArgs) override {
return Impl.getCallCost(FTy, NumArgs);
}
unsigned getCallCost(const Function *F, int NumArgs) override {
return Impl.getCallCost(F, NumArgs);
}
unsigned getCallCost(const Function *F,
ArrayRef<const Value *> Arguments) override {
return Impl.getCallCost(F, Arguments);
}
unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<Type *> ParamTys) override {
return Impl.getIntrinsicCost(IID, RetTy, ParamTys);
}
unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<const Value *> Arguments) override {
return Impl.getIntrinsicCost(IID, RetTy, Arguments);
}
unsigned getUserCost(const User *U) override { return Impl.getUserCost(U); }
bool hasBranchDivergence() override { return Impl.hasBranchDivergence(); }
bool isSourceOfDivergence(const Value *V) override {
return Impl.isSourceOfDivergence(V);
}
bool isLoweredToCall(const Function *F) override {
return Impl.isLoweredToCall(F);
}
void getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) override {
return Impl.getUnrollingPreferences(L, UP);
}
bool isLegalAddImmediate(int64_t Imm) override {
return Impl.isLegalAddImmediate(Imm);
}
bool isLegalICmpImmediate(int64_t Imm) override {
return Impl.isLegalICmpImmediate(Imm);
}
bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
bool HasBaseReg, int64_t Scale,
unsigned AddrSpace) override {
return Impl.isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
Scale, AddrSpace);
}
bool isLegalMaskedStore(Type *DataType, int Consecutive) override {
return Impl.isLegalMaskedStore(DataType, Consecutive);
}
bool isLegalMaskedLoad(Type *DataType, int Consecutive) override {
return Impl.isLegalMaskedLoad(DataType, Consecutive);
}
int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
bool HasBaseReg, int64_t Scale,
unsigned AddrSpace) override {
return Impl.getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg,
Scale, AddrSpace);
}
bool isTruncateFree(Type *Ty1, Type *Ty2) override {
return Impl.isTruncateFree(Ty1, Ty2);
}
bool isProfitableToHoist(Instruction *I) override {
return Impl.isProfitableToHoist(I);
}
bool isTypeLegal(Type *Ty) override { return Impl.isTypeLegal(Ty); }
unsigned getJumpBufAlignment() override { return Impl.getJumpBufAlignment(); }
unsigned getJumpBufSize() override { return Impl.getJumpBufSize(); }
bool shouldBuildLookupTables() override {
return Impl.shouldBuildLookupTables();
}
bool enableAggressiveInterleaving(bool LoopHasReductions) override {
return Impl.enableAggressiveInterleaving(LoopHasReductions);
}
PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) override {
return Impl.getPopcntSupport(IntTyWidthInBit);
}
bool haveFastSqrt(Type *Ty) override { return Impl.haveFastSqrt(Ty); }
unsigned getFPOpCost(Type *Ty) override {
return Impl.getFPOpCost(Ty);
}
unsigned getIntImmCost(const APInt &Imm, Type *Ty) override {
return Impl.getIntImmCost(Imm, Ty);
}
unsigned getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
Type *Ty) override {
return Impl.getIntImmCost(Opc, Idx, Imm, Ty);
}
unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
Type *Ty) override {
return Impl.getIntImmCost(IID, Idx, Imm, Ty);
}
unsigned getNumberOfRegisters(bool Vector) override {
return Impl.getNumberOfRegisters(Vector);
}
unsigned getRegisterBitWidth(bool Vector) override {
return Impl.getRegisterBitWidth(Vector);
}
unsigned getMaxInterleaveFactor(unsigned VF) override {
return Impl.getMaxInterleaveFactor(VF);
}
unsigned
getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
OperandValueKind Opd2Info,
OperandValueProperties Opd1PropInfo,
OperandValueProperties Opd2PropInfo) override {
return Impl.getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
Opd1PropInfo, Opd2PropInfo);
}
unsigned getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
Type *SubTp) override {
return Impl.getShuffleCost(Kind, Tp, Index, SubTp);
}
unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) override {
return Impl.getCastInstrCost(Opcode, Dst, Src);
}
unsigned getCFInstrCost(unsigned Opcode) override {
return Impl.getCFInstrCost(Opcode);
}
unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
Type *CondTy) override {
return Impl.getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
unsigned Index) override {
return Impl.getVectorInstrCost(Opcode, Val, Index);
}
unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace) override {
return Impl.getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
}
unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
unsigned AddressSpace) override {
return Impl.getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
}
unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
unsigned Factor,
ArrayRef<unsigned> Indices,
unsigned Alignment,
unsigned AddressSpace) override {
return Impl.getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
Alignment, AddressSpace);
}
unsigned getReductionCost(unsigned Opcode, Type *Ty,
bool IsPairwiseForm) override {
return Impl.getReductionCost(Opcode, Ty, IsPairwiseForm);
}
unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
ArrayRef<Type *> Tys) override {
return Impl.getIntrinsicInstrCost(ID, RetTy, Tys);
}
unsigned getCallInstrCost(Function *F, Type *RetTy,
ArrayRef<Type *> Tys) override {
return Impl.getCallInstrCost(F, RetTy, Tys);
}
unsigned getNumberOfParts(Type *Tp) override {
return Impl.getNumberOfParts(Tp);
}
unsigned getAddressComputationCost(Type *Ty, bool IsComplex) override {
return Impl.getAddressComputationCost(Ty, IsComplex);
}
unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) override {
return Impl.getCostOfKeepingLiveOverCall(Tys);
}
bool getTgtMemIntrinsic(IntrinsicInst *Inst,
MemIntrinsicInfo &Info) override {
return Impl.getTgtMemIntrinsic(Inst, Info);
}
Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
Type *ExpectedType) override {
return Impl.getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
}
bool hasCompatibleFunctionAttributes(const Function *Caller,
const Function *Callee) const override {
return Impl.hasCompatibleFunctionAttributes(Caller, Callee);
}
};
template <typename T>
TargetTransformInfo::TargetTransformInfo(T Impl)
: TTIImpl(new Model<T>(Impl)) {}
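// Illustrative sketch (not part of this header's API surface): the
// Concept/Model pair above type-erases any implementation type T that
// provides the TTI member functions. A hypothetical target implementation
// could be wrapped as:
//
//   struct MyTargetTTIImpl { /* getDataLayout(), getUserCost(), ... */ };
//   TargetTransformInfo TTI(MyTargetTTIImpl(/*...*/));
//
// Cost queries on TTI then dispatch virtually through Concept to the concrete
// Impl stored inside Model<MyTargetTTIImpl>.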
/// \brief Analysis pass providing the \c TargetTransformInfo.
///
/// The core idea of the TargetIRAnalysis is to expose an interface through
/// which LLVM targets can analyze and provide information about the middle
/// end's target-independent IR. This supports use cases such as target-aware
/// cost modeling of IR constructs.
///
/// This is a function analysis because much of the cost modeling for targets
/// is done in a subtarget specific way and LLVM supports compiling different
/// functions targeting different subtargets in order to support runtime
/// dispatch according to the observed subtarget.
class TargetIRAnalysis {
public:
typedef TargetTransformInfo Result;
/// \brief Opaque, unique identifier for this analysis pass.
static void *ID() { return (void *)&PassID; }
/// \brief Provide access to a name for this pass for debugging purposes.
static StringRef name() { return "TargetIRAnalysis"; }
/// \brief Default construct a target IR analysis.
///
/// This will use the module's datalayout to construct a baseline
/// conservative TTI result.
TargetIRAnalysis();
/// \brief Construct an IR analysis pass around a target-provided callback.
///
/// The callback will be called with a particular function for which the TTI
/// is needed and must return a TTI object for that function.
TargetIRAnalysis(std::function<Result(Function &)> TTICallback);
// Value semantics. We spell out the constructors for MSVC.
TargetIRAnalysis(const TargetIRAnalysis &Arg)
: TTICallback(Arg.TTICallback) {}
TargetIRAnalysis(TargetIRAnalysis &&Arg)
: TTICallback(std::move(Arg.TTICallback)) {}
TargetIRAnalysis &operator=(const TargetIRAnalysis &RHS) {
TTICallback = RHS.TTICallback;
return *this;
}
TargetIRAnalysis &operator=(TargetIRAnalysis &&RHS) {
TTICallback = std::move(RHS.TTICallback);
return *this;
}
Result run(Function &F);
private:
static char PassID;
/// \brief The callback used to produce a result.
///
/// We use a completely opaque callback so that targets can provide whatever
/// mechanism they desire for constructing the TTI for a given function.
///
/// FIXME: Should we really use std::function? It's relatively inefficient.
/// It might be possible to arrange for even stateful callbacks to outlive
/// the analysis and thus use a function_ref which would be lighter weight.
/// This may also be less error prone as the callback is likely to reference
/// the external TargetMachine, and that reference needs to never dangle.
std::function<Result(Function &)> TTICallback;
/// \brief Helper function used as the callback in the default constructor.
static Result getDefaultTTI(Function &F);
};
/// \brief Wrapper pass for TargetTransformInfo.
///
/// This pass can be constructed from a TTI object which it stores internally
/// and is queried by passes.
class TargetTransformInfoWrapperPass : public ImmutablePass {
TargetIRAnalysis TIRA;
Optional<TargetTransformInfo> TTI;
virtual void anchor();
public:
static char ID;
/// \brief We must provide a default constructor for the pass but it should
/// never be used.
///
/// Use the constructor below or call one of the creation routines.
TargetTransformInfoWrapperPass();
explicit TargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);
TargetTransformInfo &getTTI(Function &F);
};
/// \brief Create an analysis pass wrapper around a TTI object.
///
/// This analysis pass just holds the TTI instance and makes it available to
/// clients.
ImmutablePass *createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/TargetLibraryInfo.h | //===-- TargetLibraryInfo.h - Library information ---------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_TARGETLIBRARYINFO_H
#define LLVM_ANALYSIS_TARGETLIBRARYINFO_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
namespace llvm {
/// VecDesc - Describes a possible vectorization of a function.
/// Function 'VectorFnName' is equivalent to 'ScalarFnName' vectorized
/// by a factor 'VectorizationFactor'.
struct VecDesc {
const char *ScalarFnName;
const char *VectorFnName;
unsigned VectorizationFactor;
};
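// Illustrative entry (hypothetical names): {"sinf", "vsinf4", 4} would record
// that a 4-wide vector variant of sinf is callable under the name vsinf4.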
class PreservedAnalyses;
namespace LibFunc {
enum Func {
#define TLI_DEFINE_ENUM
#include "llvm/Analysis/TargetLibraryInfo.def"
NumLibFuncs
};
}
/// \brief Implementation of the target library information.
///
/// This class constructs tables that hold the target library information and
/// make it available. However, it is somewhat expensive to compute and only
/// depends on the triple. So users typically interact with the \c
/// TargetLibraryInfo wrapper below.
class TargetLibraryInfoImpl {
friend class TargetLibraryInfo;
unsigned char AvailableArray[(LibFunc::NumLibFuncs+3)/4];
llvm::DenseMap<unsigned, std::string> CustomNames;
static const char *const StandardNames[LibFunc::NumLibFuncs];
enum AvailabilityState {
StandardName = 3, // (memset to all ones)
CustomName = 1,
Unavailable = 0 // (memset to all zeros)
};
void setState(LibFunc::Func F, AvailabilityState State) {
AvailableArray[F/4] &= ~(3 << 2*(F&3));
AvailableArray[F/4] |= State << 2*(F&3);
}
AvailabilityState getState(LibFunc::Func F) const {
return static_cast<AvailabilityState>((AvailableArray[F/4] >> 2*(F&3)) & 3);
}
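  // Worked example of the packing above (illustrative): each LibFunc entry
  // occupies two bits, four entries per byte. For F == 6, F/4 == 1 and
  // F&3 == 2, so its state lives in bits 4..5 of AvailableArray[1]; setState
  // clears those two bits and ORs in the new State shifted into place.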
/// Vectorization descriptors - sorted by ScalarFnName.
std::vector<VecDesc> VectorDescs;
/// Scalarization descriptors - same content as VectorDescs but sorted based
/// on VectorFnName rather than ScalarFnName.
std::vector<VecDesc> ScalarDescs;
public:
/// \brief List of known vector-functions libraries.
///
/// The vector-functions library defines which functions are vectorizable
/// and with which factor. The library can be specified either by the
/// frontend or by a command-line option, and is then used by
/// addVectorizableFunctionsFromVecLib for filling up the tables of
/// vectorizable functions.
enum VectorLibrary {
NoLibrary, // Don't use any vector library.
Accelerate // Use Accelerate framework.
};
TargetLibraryInfoImpl();
explicit TargetLibraryInfoImpl(const Triple &T);
// Provide value semantics.
TargetLibraryInfoImpl(const TargetLibraryInfoImpl &TLI);
TargetLibraryInfoImpl(TargetLibraryInfoImpl &&TLI);
TargetLibraryInfoImpl &operator=(const TargetLibraryInfoImpl &TLI);
TargetLibraryInfoImpl &operator=(TargetLibraryInfoImpl &&TLI);
/// \brief Searches for a particular function name.
///
/// If it is one of the known library functions, return true and set F to the
/// corresponding value.
bool getLibFunc(StringRef funcName, LibFunc::Func &F) const;
/// \brief Forces a function to be marked as unavailable.
void setUnavailable(LibFunc::Func F) {
setState(F, Unavailable);
}
/// \brief Forces a function to be marked as available.
void setAvailable(LibFunc::Func F) {
setState(F, StandardName);
}
/// \brief Forces a function to be marked as available and provide an
/// alternate name that must be used.
void setAvailableWithName(LibFunc::Func F, StringRef Name) {
if (StandardNames[F] != Name) {
setState(F, CustomName);
CustomNames[F] = Name;
assert(CustomNames.find(F) != CustomNames.end());
} else {
setState(F, StandardName);
}
}
/// \brief Disables all builtins.
///
/// This can be used for options like -fno-builtin.
void disableAllFunctions();
/// addVectorizableFunctions - Add a set of scalar -> vector mappings,
/// queryable via getVectorizedFunction and getScalarizedFunction.
void addVectorizableFunctions(ArrayRef<VecDesc> Fns);
/// Calls addVectorizableFunctions with a known preset of functions for the
/// given vector library.
void addVectorizableFunctionsFromVecLib(enum VectorLibrary VecLib);
/// isFunctionVectorizable - Return true if the function F has a
/// vector equivalent with vectorization factor VF.
bool isFunctionVectorizable(StringRef F, unsigned VF) const {
return !getVectorizedFunction(F, VF).empty();
}
/// isFunctionVectorizable - Return true if the function F has a
/// vector equivalent with any vectorization factor.
bool isFunctionVectorizable(StringRef F) const;
/// getVectorizedFunction - Return the name of the equivalent of
/// F, vectorized with factor VF. If no such mapping exists,
/// return the empty string.
StringRef getVectorizedFunction(StringRef F, unsigned VF) const;
/// isFunctionScalarizable - Return true if the function F has a
/// scalar equivalent, and set VF to be the vectorization factor.
bool isFunctionScalarizable(StringRef F, unsigned &VF) const {
return !getScalarizedFunction(F, VF).empty();
}
/// getScalarizedFunction - Return the name of the equivalent of
/// F, scalarized. If no such mapping exists, return the empty string.
///
/// Set VF to the vectorization factor.
StringRef getScalarizedFunction(StringRef F, unsigned &VF) const;
};
/// \brief Provides information about what library functions are available for
/// the current target.
///
/// This both allows optimizations to handle them specially and frontends to
/// disable such optimizations through -fno-builtin etc.
class TargetLibraryInfo {
friend class TargetLibraryAnalysis;
friend class TargetLibraryInfoWrapperPass;
const TargetLibraryInfoImpl *Impl;
public:
explicit TargetLibraryInfo(const TargetLibraryInfoImpl &Impl) : Impl(&Impl) {}
// Provide value semantics.
TargetLibraryInfo(const TargetLibraryInfo &TLI) : Impl(TLI.Impl) {}
TargetLibraryInfo(TargetLibraryInfo &&TLI) : Impl(TLI.Impl) {}
TargetLibraryInfo &operator=(const TargetLibraryInfo &TLI) {
Impl = TLI.Impl;
return *this;
}
TargetLibraryInfo &operator=(TargetLibraryInfo &&TLI) {
Impl = TLI.Impl;
return *this;
}
/// \brief Searches for a particular function name.
///
/// If it is one of the known library functions, return true and set F to the
/// corresponding value.
bool getLibFunc(StringRef funcName, LibFunc::Func &F) const {
return Impl->getLibFunc(funcName, F);
}
/// \brief Tests whether a library function is available.
bool has(LibFunc::Func F) const {
return Impl->getState(F) != TargetLibraryInfoImpl::Unavailable;
}
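  // Illustrative call site (hypothetical Callee variable): recognizing a
  // known library call.
  //   LibFunc::Func F;
  //   if (TLI.getLibFunc(Callee->getName(), F) && TLI.has(F) &&
  //       F == LibFunc::memcpy) {
  //     // safe to treat the call as the standard memcpy
  //   }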
bool isFunctionVectorizable(StringRef F, unsigned VF) const {
return Impl->isFunctionVectorizable(F, VF);
  }
bool isFunctionVectorizable(StringRef F) const {
return Impl->isFunctionVectorizable(F);
  }
StringRef getVectorizedFunction(StringRef F, unsigned VF) const {
return Impl->getVectorizedFunction(F, VF);
  }
/// \brief Tests if the function is both available and a candidate for
/// optimized code generation.
bool hasOptimizedCodeGen(LibFunc::Func F) const {
if (Impl->getState(F) == TargetLibraryInfoImpl::Unavailable)
return false;
switch (F) {
default: break;
case LibFunc::copysign: case LibFunc::copysignf: case LibFunc::copysignl:
case LibFunc::fabs: case LibFunc::fabsf: case LibFunc::fabsl:
case LibFunc::sin: case LibFunc::sinf: case LibFunc::sinl:
case LibFunc::cos: case LibFunc::cosf: case LibFunc::cosl:
case LibFunc::sqrt: case LibFunc::sqrtf: case LibFunc::sqrtl:
case LibFunc::sqrt_finite: case LibFunc::sqrtf_finite:
case LibFunc::sqrtl_finite:
case LibFunc::fmax: case LibFunc::fmaxf: case LibFunc::fmaxl:
case LibFunc::fmin: case LibFunc::fminf: case LibFunc::fminl:
case LibFunc::floor: case LibFunc::floorf: case LibFunc::floorl:
case LibFunc::nearbyint: case LibFunc::nearbyintf: case LibFunc::nearbyintl:
case LibFunc::ceil: case LibFunc::ceilf: case LibFunc::ceill:
case LibFunc::rint: case LibFunc::rintf: case LibFunc::rintl:
case LibFunc::round: case LibFunc::roundf: case LibFunc::roundl:
case LibFunc::trunc: case LibFunc::truncf: case LibFunc::truncl:
case LibFunc::log2: case LibFunc::log2f: case LibFunc::log2l:
case LibFunc::exp2: case LibFunc::exp2f: case LibFunc::exp2l:
case LibFunc::memcmp: case LibFunc::strcmp: case LibFunc::strcpy:
case LibFunc::stpcpy: case LibFunc::strlen: case LibFunc::strnlen:
case LibFunc::memchr:
return true;
}
return false;
}
StringRef getName(LibFunc::Func F) const {
auto State = Impl->getState(F);
if (State == TargetLibraryInfoImpl::Unavailable)
return StringRef();
if (State == TargetLibraryInfoImpl::StandardName)
return Impl->StandardNames[F];
assert(State == TargetLibraryInfoImpl::CustomName);
return Impl->CustomNames.find(F)->second;
}
/// \brief Handle invalidation from the pass manager.
///
/// If we try to invalidate this info, just return false. It cannot become
/// invalid even if the module changes.
bool invalidate(Module &, const PreservedAnalyses &) { return false; }
};
/// \brief Analysis pass providing the \c TargetLibraryInfo.
///
/// Note that this pass's result cannot be invalidated, it is immutable for the
/// life of the module.
class TargetLibraryAnalysis {
public:
typedef TargetLibraryInfo Result;
/// \brief Opaque, unique identifier for this analysis pass.
static void *ID() { return (void *)&PassID; }
/// \brief Default construct the library analysis.
///
/// This will use the module's triple to construct the library info for that
/// module.
TargetLibraryAnalysis() {}
/// \brief Construct a library analysis with preset info.
///
/// This will directly copy the preset info into the result without
/// consulting the module's triple.
TargetLibraryAnalysis(TargetLibraryInfoImpl PresetInfoImpl)
: PresetInfoImpl(std::move(PresetInfoImpl)) {}
// Move semantics. We spell out the constructors for MSVC.
TargetLibraryAnalysis(TargetLibraryAnalysis &&Arg)
: PresetInfoImpl(std::move(Arg.PresetInfoImpl)), Impls(std::move(Arg.Impls)) {}
TargetLibraryAnalysis &operator=(TargetLibraryAnalysis &&RHS) {
PresetInfoImpl = std::move(RHS.PresetInfoImpl);
Impls = std::move(RHS.Impls);
return *this;
}
TargetLibraryInfo run(Module &M);
TargetLibraryInfo run(Function &F);
/// \brief Provide access to a name for this pass for debugging purposes.
static StringRef name() { return "TargetLibraryAnalysis"; }
private:
static char PassID;
Optional<TargetLibraryInfoImpl> PresetInfoImpl;
StringMap<std::unique_ptr<TargetLibraryInfoImpl>> Impls;
TargetLibraryInfoImpl &lookupInfoImpl(Triple T);
};
class TargetLibraryInfoWrapperPass : public ImmutablePass {
TargetLibraryInfoImpl TLIImpl;
TargetLibraryInfo TLI;
virtual void anchor();
public:
static char ID;
TargetLibraryInfoWrapperPass();
explicit TargetLibraryInfoWrapperPass(const Triple &T);
explicit TargetLibraryInfoWrapperPass(const TargetLibraryInfoImpl &TLI);
TargetLibraryInfo &getTLI() { return TLI; }
const TargetLibraryInfo &getTLI() const { return TLI; }
};
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/ValueTracking.h | //===- llvm/Analysis/ValueTracking.h - Walk computations --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_VALUETRACKING_H
#define LLVM_ANALYSIS_VALUETRACKING_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Support/DataTypes.h"
namespace llvm {
class Value;
class Instruction;
class APInt;
class DataLayout;
class StringRef;
class MDNode;
class AssumptionCache;
class DominatorTree;
class TargetLibraryInfo;
class LoopInfo;
/// Determine which bits of V are known to be either zero or one and return
/// them in the KnownZero/KnownOne bit sets.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case
/// where V is a vector, the known zero and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
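// Example (illustrative): for V = or(and(X, 255), 1) with a 32-bit X, bits
// 8..31 end up set in KnownZero and bit 0 in KnownOne; bits 1..7 remain
// unknown (clear in both sets).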
/// Compute known bits from the range metadata.
/// \p KnownZero the set of bits that are known to be zero
void computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
APInt &KnownZero);
/// Returns true if LHS and RHS have no common bits set.
bool haveNoCommonBitsSet(Value *LHS, Value *RHS, const DataLayout &DL,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
/// ComputeSignBit - Determine whether the sign bit is known to be zero or
/// one. Convenience wrapper around computeKnownBits.
void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
/// isKnownToBeAPowerOfTwo - Return true if the given value is known to have
/// exactly one bit set when defined. For vectors return true if every
/// element is known to be a power of two when defined. Supports values with
/// integer or pointer type and vectors of integers. If 'OrZero' is set then
/// returns true if the given value is either a power of two or zero.
bool isKnownToBeAPowerOfTwo(Value *V, const DataLayout &DL,
bool OrZero = false, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
/// isKnownNonZero - Return true if the given value is known to be non-zero
/// when defined. For vectors return true if every element is known to be
/// non-zero when defined. Supports values with integer or pointer type and
/// vectors of integers.
bool isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
/// this predicate to simplify operations downstream. Mask is known to be
/// zero for bits that V cannot have.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case
/// where V is a vector, the mask, known zero, and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
bool MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
unsigned Depth = 0, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
/// ComputeNumSignBits - Return the number of times the sign bit of the
/// register is replicated into the other bits. We know that at least 1 bit
/// is always equal to the sign bit (itself), but other cases can give us
/// information. For example, immediately after an "ashr X, 2", we know that
/// the top 3 bits are all equal to each other, so we return 3.
///
/// 'Op' must have a scalar integer type.
///
unsigned ComputeNumSignBits(Value *Op, const DataLayout &DL,
unsigned Depth = 0, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
/// ComputeMultiple - This function computes the integer multiple of Base that
/// equals V. If successful, it returns true and returns the multiple in
/// Multiple. If unsuccessful, it returns false. Also, if V can be
/// simplified to an integer, then the simplified V is returned in Val. Look
/// through sext only if LookThroughSExt=true.
bool ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
bool LookThroughSExt = false,
unsigned Depth = 0);
/// CannotBeNegativeZero - Return true if we can prove that the specified FP
/// value is never equal to -0.0.
///
bool CannotBeNegativeZero(const Value *V, unsigned Depth = 0);
/// CannotBeOrderedLessThanZero - Return true if we can prove that the
/// specified FP value is either a NaN or never less than 0.0.
///
bool CannotBeOrderedLessThanZero(const Value *V, unsigned Depth = 0);
/// isBytewiseValue - If the specified value can be set by repeating the same
/// byte in memory, return the i8 value that it is represented with. This is
/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
/// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
/// byte store (e.g. i16 0x1234), return null.
Value *isBytewiseValue(Value *V);
/// FindInsertedValue - Given an aggregate and a sequence of indices, see if
/// the scalar value indexed is already around as a register, for example if
/// it were inserted directly into the aggregate.
///
/// If InsertBefore is not null, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
Value *FindInsertedValue(Value *V,
ArrayRef<unsigned> idx_range,
Instruction *InsertBefore = nullptr);
/// GetPointerBaseWithConstantOffset - Analyze the specified pointer to see if
/// it can be expressed as a base pointer plus a constant offset. Return the
/// base and offset to the caller.
Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
const DataLayout &DL);
static inline const Value *
GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
const DataLayout &DL) {
return GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset,
DL);
}
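// Example (illustrative): for Ptr defined as a GEP of index 3 into an i32
// array based at %base, this returns %base and sets Offset to 12.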
/// getConstantStringInfo - This function computes the length of a
/// null-terminated C string pointed to by V. If successful, it returns true
/// and returns the string in Str. If unsuccessful, it returns false. This
/// does not include the trailing nul character by default. If TrimAtNul is
/// set to false, then this returns any trailing nul characters as well as any
/// other characters that come after it.
bool getConstantStringInfo(const Value *V, StringRef &Str,
uint64_t Offset = 0, bool TrimAtNul = true);
/// GetStringLength - If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'. If we can't, return 0.
uint64_t GetStringLength(Value *V);
/// GetUnderlyingObject - This method strips off any GEP address adjustments
/// and pointer casts from the specified value, returning the original object
/// being addressed. Note that the returned value has pointer type if the
/// specified value does. If the MaxLookup value is non-zero, it limits the
/// number of instructions to be stripped off.
Value *GetUnderlyingObject(Value *V, const DataLayout &DL,
unsigned MaxLookup = 6);
static inline const Value *GetUnderlyingObject(const Value *V,
const DataLayout &DL,
unsigned MaxLookup = 6) {
return GetUnderlyingObject(const_cast<Value *>(V), DL, MaxLookup);
}
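// Example (illustrative): for V defined as a bitcast of a GEP into a global
// @G, this strips both adjustments and returns @G.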
/// \brief This method is similar to GetUnderlyingObject except that it can
/// look through phi and select instructions and return multiple objects.
///
/// If LoopInfo is passed, loop phis are further analyzed. If a pointer
/// accesses different objects in each iteration, we don't look through the
/// phi node. E.g. consider this loop nest:
///
/// int **A;
/// for (i)
/// for (j) {
/// A[i][j] = A[i-1][j] * B[j]
/// }
///
/// This is transformed by Load-PRE to stash away A[i] for the next iteration
/// of the outer loop:
///
/// Curr = A[0]; // Prev_0
/// for (i: 1..N) {
/// Prev = Curr; // Prev = PHI (Prev_0, Curr)
/// Curr = A[i];
/// for (j: 0..N) {
/// Curr[j] = Prev[j] * B[j]
/// }
/// }
///
/// Since A[i] and A[i-1] are independent pointers, getUnderlyingObjects
/// should not assume that Curr and Prev share the same underlying object thus
/// it shouldn't look through the phi above.
void GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
const DataLayout &DL, LoopInfo *LI = nullptr,
unsigned MaxLookup = 6);
/// onlyUsedByLifetimeMarkers - Return true if the only users of this pointer
/// are lifetime markers.
bool onlyUsedByLifetimeMarkers(const Value *V);
/// isDereferenceablePointer - Return true if this is always a dereferenceable
/// pointer. If the context instruction is specified perform context-sensitive
/// analysis and return true if the pointer is dereferenceable at the
/// specified instruction.
bool isDereferenceablePointer(const Value *V, const DataLayout &DL,
const Instruction *CtxI = nullptr,
const DominatorTree *DT = nullptr,
const TargetLibraryInfo *TLI = nullptr);
/// isSafeToSpeculativelyExecute - Return true if the instruction does not
/// have any effects besides calculating the result and does not have
/// undefined behavior.
///
/// This method never returns true for an instruction that returns true for
/// mayHaveSideEffects; however, this method also does some other checks in
/// addition. It checks for undefined behavior, like dividing by zero or
/// loading from an invalid pointer (but not for undefined results, like a
/// shift with a shift amount larger than the width of the result). It checks
/// for malloc and alloca because speculatively executing them might cause a
/// memory leak. It also returns false for instructions related to control
/// flow, specifically terminators and PHI nodes.
///
/// If the CtxI is specified this method performs context-sensitive analysis
/// and returns true if it is safe to execute the instruction immediately
/// before the CtxI.
///
/// If the CtxI is NOT specified this method only looks at the instruction
/// itself and its operands, so if this method returns true, it is safe to
/// move the instruction as long as the correct dominance relationships for
/// the operands and users hold.
///
/// This method can return true for instructions that read memory;
/// for such instructions, moving them may change the resulting value.
bool isSafeToSpeculativelyExecute(const Value *V,
const Instruction *CtxI = nullptr,
const DominatorTree *DT = nullptr,
const TargetLibraryInfo *TLI = nullptr);
/// isKnownNonNull - Return true if this pointer couldn't possibly be null by
/// its definition. This returns true for allocas, non-extern-weak globals
/// and byval arguments.
bool isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI = nullptr);
/// isKnownNonNullAt - Return true if this pointer couldn't possibly be null.
/// If the context instruction is specified perform context-sensitive analysis
/// and return true if the pointer couldn't possibly be null at the specified
/// instruction.
bool isKnownNonNullAt(const Value *V,
const Instruction *CtxI = nullptr,
const DominatorTree *DT = nullptr,
const TargetLibraryInfo *TLI = nullptr);
/// Return true if it is valid to use the assumptions provided by an
/// assume intrinsic, I, at the point in the control-flow identified by the
/// context instruction, CxtI.
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI,
const DominatorTree *DT = nullptr);
enum class OverflowResult { AlwaysOverflows, MayOverflow, NeverOverflows };
OverflowResult computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
const DataLayout &DL,
AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT);
OverflowResult computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
const DataLayout &DL,
AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT);
/// \brief Specific patterns of select instructions we can match.
enum SelectPatternFlavor {
SPF_UNKNOWN = 0,
SPF_SMIN, // Signed minimum
SPF_UMIN, // Unsigned minimum
SPF_SMAX, // Signed maximum
SPF_UMAX, // Unsigned maximum
SPF_ABS, // Absolute value
SPF_NABS // Negated absolute value
};
/// Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind
/// and providing the out parameter results if we successfully match.
///
/// If CastOp is not nullptr, also match MIN/MAX idioms where the type does
/// not match that of the original select. If this is the case, the cast
/// operation (one of Trunc,SExt,Zext) that must be done to transform the
/// type of LHS and RHS into the type of V is returned in CastOp.
///
/// For example:
/// %1 = icmp slt i32 %a, i32 4
/// %2 = sext i32 %a to i64
/// %3 = select i1 %1, i64 %2, i64 4
///
/// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt
///
SelectPatternFlavor matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
Instruction::CastOps *CastOp = nullptr);
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/TargetLibraryInfo.def | //===-- TargetLibraryInfo.def - Library information -------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#if !(defined(TLI_DEFINE_ENUM) || defined(TLI_DEFINE_STRING))
#error "Must define TLI_DEFINE_ENUM or TLI_DEFINE_STRING for TLI .def."
#elif defined(TLI_DEFINE_ENUM) && defined(TLI_DEFINE_STRING)
#error "Can only define one of TLI_DEFINE_ENUM or TLI_DEFINE_STRING at a time."
#else
// One of TLI_DEFINE_ENUM/STRING are defined.
#if defined(TLI_DEFINE_ENUM)
#define TLI_DEFINE_ENUM_INTERNAL(enum_variant) enum_variant,
#define TLI_DEFINE_STRING_INTERNAL(string_repr)
#else
#define TLI_DEFINE_ENUM_INTERNAL(enum_variant)
#define TLI_DEFINE_STRING_INTERNAL(string_repr) string_repr,
#endif
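// Illustrative use of this X-macro file (a sketch, not a directive): a
// consumer can expand it into a name table by defining TLI_DEFINE_STRING
// before inclusion, e.g.
//   #define TLI_DEFINE_STRING
//   static const char *const StandardNames[LibFunc::NumLibFuncs] = {
//   #include "llvm/Analysis/TargetLibraryInfo.def"
//   };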
/// int _IO_getc(_IO_FILE * __fp);
TLI_DEFINE_ENUM_INTERNAL(under_IO_getc)
TLI_DEFINE_STRING_INTERNAL("_IO_getc")
/// int _IO_putc(int __c, _IO_FILE * __fp);
TLI_DEFINE_ENUM_INTERNAL(under_IO_putc)
TLI_DEFINE_STRING_INTERNAL("_IO_putc")
/// void operator delete[](void*);
TLI_DEFINE_ENUM_INTERNAL(ZdaPv)
TLI_DEFINE_STRING_INTERNAL("_ZdaPv")
/// void operator delete[](void*, nothrow);
TLI_DEFINE_ENUM_INTERNAL(ZdaPvRKSt9nothrow_t)
TLI_DEFINE_STRING_INTERNAL("_ZdaPvRKSt9nothrow_t")
/// void operator delete[](void*, unsigned int);
TLI_DEFINE_ENUM_INTERNAL(ZdaPvj)
TLI_DEFINE_STRING_INTERNAL("_ZdaPvj")
/// void operator delete[](void*, unsigned long);
TLI_DEFINE_ENUM_INTERNAL(ZdaPvm)
TLI_DEFINE_STRING_INTERNAL("_ZdaPvm")
/// void operator delete(void*);
TLI_DEFINE_ENUM_INTERNAL(ZdlPv)
TLI_DEFINE_STRING_INTERNAL("_ZdlPv")
/// void operator delete(void*, nothrow);
TLI_DEFINE_ENUM_INTERNAL(ZdlPvRKSt9nothrow_t)
TLI_DEFINE_STRING_INTERNAL("_ZdlPvRKSt9nothrow_t")
/// void operator delete(void*, unsigned int);
TLI_DEFINE_ENUM_INTERNAL(ZdlPvj)
TLI_DEFINE_STRING_INTERNAL("_ZdlPvj")
/// void operator delete(void*, unsigned long);
TLI_DEFINE_ENUM_INTERNAL(ZdlPvm)
TLI_DEFINE_STRING_INTERNAL("_ZdlPvm")
/// void *new[](unsigned int);
TLI_DEFINE_ENUM_INTERNAL(Znaj)
TLI_DEFINE_STRING_INTERNAL("_Znaj")
/// void *new[](unsigned int, nothrow);
TLI_DEFINE_ENUM_INTERNAL(ZnajRKSt9nothrow_t)
TLI_DEFINE_STRING_INTERNAL("_ZnajRKSt9nothrow_t")
/// void *new[](unsigned long);
TLI_DEFINE_ENUM_INTERNAL(Znam)
TLI_DEFINE_STRING_INTERNAL("_Znam")
/// void *new[](unsigned long, nothrow);
TLI_DEFINE_ENUM_INTERNAL(ZnamRKSt9nothrow_t)
TLI_DEFINE_STRING_INTERNAL("_ZnamRKSt9nothrow_t")
/// void *new(unsigned int);
TLI_DEFINE_ENUM_INTERNAL(Znwj)
TLI_DEFINE_STRING_INTERNAL("_Znwj")
/// void *new(unsigned int, nothrow);
TLI_DEFINE_ENUM_INTERNAL(ZnwjRKSt9nothrow_t)
TLI_DEFINE_STRING_INTERNAL("_ZnwjRKSt9nothrow_t")
/// void *new(unsigned long);
TLI_DEFINE_ENUM_INTERNAL(Znwm)
TLI_DEFINE_STRING_INTERNAL("_Znwm")
/// void *new(unsigned long, nothrow);
TLI_DEFINE_ENUM_INTERNAL(ZnwmRKSt9nothrow_t)
TLI_DEFINE_STRING_INTERNAL("_ZnwmRKSt9nothrow_t")
/// double __cospi(double x);
TLI_DEFINE_ENUM_INTERNAL(cospi)
TLI_DEFINE_STRING_INTERNAL("__cospi")
/// float __cospif(float x);
TLI_DEFINE_ENUM_INTERNAL(cospif)
TLI_DEFINE_STRING_INTERNAL("__cospif")
/// int __cxa_atexit(void (*f)(void *), void *p, void *d);
TLI_DEFINE_ENUM_INTERNAL(cxa_atexit)
TLI_DEFINE_STRING_INTERNAL("__cxa_atexit")
/// void __cxa_guard_abort(guard_t *guard);
/// guard_t is int64_t in Itanium ABI or int32_t on ARM eabi.
TLI_DEFINE_ENUM_INTERNAL(cxa_guard_abort)
TLI_DEFINE_STRING_INTERNAL("__cxa_guard_abort")
/// int __cxa_guard_acquire(guard_t *guard);
TLI_DEFINE_ENUM_INTERNAL(cxa_guard_acquire)
TLI_DEFINE_STRING_INTERNAL("__cxa_guard_acquire")
/// void __cxa_guard_release(guard_t *guard);
TLI_DEFINE_ENUM_INTERNAL(cxa_guard_release)
TLI_DEFINE_STRING_INTERNAL("__cxa_guard_release")
/// int __isoc99_scanf (const char *format, ...)
TLI_DEFINE_ENUM_INTERNAL(dunder_isoc99_scanf)
TLI_DEFINE_STRING_INTERNAL("__isoc99_scanf")
/// int __isoc99_sscanf(const char *s, const char *format, ...)
TLI_DEFINE_ENUM_INTERNAL(dunder_isoc99_sscanf)
TLI_DEFINE_STRING_INTERNAL("__isoc99_sscanf")
/// void *__memcpy_chk(void *s1, const void *s2, size_t n, size_t s1size);
TLI_DEFINE_ENUM_INTERNAL(memcpy_chk)
TLI_DEFINE_STRING_INTERNAL("__memcpy_chk")
/// void *__memmove_chk(void *s1, const void *s2, size_t n, size_t s1size);
TLI_DEFINE_ENUM_INTERNAL(memmove_chk)
TLI_DEFINE_STRING_INTERNAL("__memmove_chk")
/// void *__memset_chk(void *s, char v, size_t n, size_t s1size);
TLI_DEFINE_ENUM_INTERNAL(memset_chk)
TLI_DEFINE_STRING_INTERNAL("__memset_chk")
/// double __sincospi_stret(double x);
TLI_DEFINE_ENUM_INTERNAL(sincospi_stret)
TLI_DEFINE_STRING_INTERNAL("__sincospi_stret")
/// float __sincospif_stret(float x);
TLI_DEFINE_ENUM_INTERNAL(sincospif_stret)
TLI_DEFINE_STRING_INTERNAL("__sincospif_stret")
/// double __sinpi(double x);
TLI_DEFINE_ENUM_INTERNAL(sinpi)
TLI_DEFINE_STRING_INTERNAL("__sinpi")
/// float __sinpif(float x);
TLI_DEFINE_ENUM_INTERNAL(sinpif)
TLI_DEFINE_STRING_INTERNAL("__sinpif")
/// double __sqrt_finite(double x);
TLI_DEFINE_ENUM_INTERNAL(sqrt_finite)
TLI_DEFINE_STRING_INTERNAL("__sqrt_finite")
/// float __sqrt_finite(float x);
TLI_DEFINE_ENUM_INTERNAL(sqrtf_finite)
TLI_DEFINE_STRING_INTERNAL("__sqrtf_finite")
/// long double __sqrt_finite(long double x);
TLI_DEFINE_ENUM_INTERNAL(sqrtl_finite)
TLI_DEFINE_STRING_INTERNAL("__sqrtl_finite")
/// char *__stpcpy_chk(char *s1, const char *s2, size_t s1size);
TLI_DEFINE_ENUM_INTERNAL(stpcpy_chk)
TLI_DEFINE_STRING_INTERNAL("__stpcpy_chk")
/// char *__stpncpy_chk(char *s1, const char *s2, size_t n, size_t s1size);
TLI_DEFINE_ENUM_INTERNAL(stpncpy_chk)
TLI_DEFINE_STRING_INTERNAL("__stpncpy_chk")
/// char *__strcpy_chk(char *s1, const char *s2, size_t s1size);
TLI_DEFINE_ENUM_INTERNAL(strcpy_chk)
TLI_DEFINE_STRING_INTERNAL("__strcpy_chk")
/// char * __strdup(const char *s);
TLI_DEFINE_ENUM_INTERNAL(dunder_strdup)
TLI_DEFINE_STRING_INTERNAL("__strdup")
/// char *__strncpy_chk(char *s1, const char *s2, size_t n, size_t s1size);
TLI_DEFINE_ENUM_INTERNAL(strncpy_chk)
TLI_DEFINE_STRING_INTERNAL("__strncpy_chk")
/// char *__strndup(const char *s, size_t n);
TLI_DEFINE_ENUM_INTERNAL(dunder_strndup)
TLI_DEFINE_STRING_INTERNAL("__strndup")
/// char * __strtok_r(char *s, const char *delim, char **save_ptr);
TLI_DEFINE_ENUM_INTERNAL(dunder_strtok_r)
TLI_DEFINE_STRING_INTERNAL("__strtok_r")
/// int abs(int j);
TLI_DEFINE_ENUM_INTERNAL(abs)
TLI_DEFINE_STRING_INTERNAL("abs")
/// int access(const char *path, int amode);
TLI_DEFINE_ENUM_INTERNAL(access)
TLI_DEFINE_STRING_INTERNAL("access")
/// double acos(double x);
TLI_DEFINE_ENUM_INTERNAL(acos)
TLI_DEFINE_STRING_INTERNAL("acos")
/// float acosf(float x);
TLI_DEFINE_ENUM_INTERNAL(acosf)
TLI_DEFINE_STRING_INTERNAL("acosf")
/// double acosh(double x);
TLI_DEFINE_ENUM_INTERNAL(acosh)
TLI_DEFINE_STRING_INTERNAL("acosh")
/// float acoshf(float x);
TLI_DEFINE_ENUM_INTERNAL(acoshf)
TLI_DEFINE_STRING_INTERNAL("acoshf")
/// long double acoshl(long double x);
TLI_DEFINE_ENUM_INTERNAL(acoshl)
TLI_DEFINE_STRING_INTERNAL("acoshl")
/// long double acosl(long double x);
TLI_DEFINE_ENUM_INTERNAL(acosl)
TLI_DEFINE_STRING_INTERNAL("acosl")
/// double asin(double x);
TLI_DEFINE_ENUM_INTERNAL(asin)
TLI_DEFINE_STRING_INTERNAL("asin")
/// float asinf(float x);
TLI_DEFINE_ENUM_INTERNAL(asinf)
TLI_DEFINE_STRING_INTERNAL("asinf")
/// double asinh(double x);
TLI_DEFINE_ENUM_INTERNAL(asinh)
TLI_DEFINE_STRING_INTERNAL("asinh")
/// float asinhf(float x);
TLI_DEFINE_ENUM_INTERNAL(asinhf)
TLI_DEFINE_STRING_INTERNAL("asinhf")
/// long double asinhl(long double x);
TLI_DEFINE_ENUM_INTERNAL(asinhl)
TLI_DEFINE_STRING_INTERNAL("asinhl")
/// long double asinl(long double x);
TLI_DEFINE_ENUM_INTERNAL(asinl)
TLI_DEFINE_STRING_INTERNAL("asinl")
/// double atan(double x);
TLI_DEFINE_ENUM_INTERNAL(atan)
TLI_DEFINE_STRING_INTERNAL("atan")
/// double atan2(double y, double x);
TLI_DEFINE_ENUM_INTERNAL(atan2)
TLI_DEFINE_STRING_INTERNAL("atan2")
/// float atan2f(float y, float x);
TLI_DEFINE_ENUM_INTERNAL(atan2f)
TLI_DEFINE_STRING_INTERNAL("atan2f")
/// long double atan2l(long double y, long double x);
TLI_DEFINE_ENUM_INTERNAL(atan2l)
TLI_DEFINE_STRING_INTERNAL("atan2l")
/// float atanf(float x);
TLI_DEFINE_ENUM_INTERNAL(atanf)
TLI_DEFINE_STRING_INTERNAL("atanf")
/// double atanh(double x);
TLI_DEFINE_ENUM_INTERNAL(atanh)
TLI_DEFINE_STRING_INTERNAL("atanh")
/// float atanhf(float x);
TLI_DEFINE_ENUM_INTERNAL(atanhf)
TLI_DEFINE_STRING_INTERNAL("atanhf")
/// long double atanhl(long double x);
TLI_DEFINE_ENUM_INTERNAL(atanhl)
TLI_DEFINE_STRING_INTERNAL("atanhl")
/// long double atanl(long double x);
TLI_DEFINE_ENUM_INTERNAL(atanl)
TLI_DEFINE_STRING_INTERNAL("atanl")
/// double atof(const char *str);
TLI_DEFINE_ENUM_INTERNAL(atof)
TLI_DEFINE_STRING_INTERNAL("atof")
/// int atoi(const char *str);
TLI_DEFINE_ENUM_INTERNAL(atoi)
TLI_DEFINE_STRING_INTERNAL("atoi")
/// long atol(const char *str);
TLI_DEFINE_ENUM_INTERNAL(atol)
TLI_DEFINE_STRING_INTERNAL("atol")
/// long long atoll(const char *nptr);
TLI_DEFINE_ENUM_INTERNAL(atoll)
TLI_DEFINE_STRING_INTERNAL("atoll")
/// int bcmp(const void *s1, const void *s2, size_t n);
TLI_DEFINE_ENUM_INTERNAL(bcmp)
TLI_DEFINE_STRING_INTERNAL("bcmp")
/// void bcopy(const void *s1, void *s2, size_t n);
TLI_DEFINE_ENUM_INTERNAL(bcopy)
TLI_DEFINE_STRING_INTERNAL("bcopy")
/// void bzero(void *s, size_t n);
TLI_DEFINE_ENUM_INTERNAL(bzero)
TLI_DEFINE_STRING_INTERNAL("bzero")
/// void *calloc(size_t count, size_t size);
TLI_DEFINE_ENUM_INTERNAL(calloc)
TLI_DEFINE_STRING_INTERNAL("calloc")
/// double cbrt(double x);
TLI_DEFINE_ENUM_INTERNAL(cbrt)
TLI_DEFINE_STRING_INTERNAL("cbrt")
/// float cbrtf(float x);
TLI_DEFINE_ENUM_INTERNAL(cbrtf)
TLI_DEFINE_STRING_INTERNAL("cbrtf")
/// long double cbrtl(long double x);
TLI_DEFINE_ENUM_INTERNAL(cbrtl)
TLI_DEFINE_STRING_INTERNAL("cbrtl")
/// double ceil(double x);
TLI_DEFINE_ENUM_INTERNAL(ceil)
TLI_DEFINE_STRING_INTERNAL("ceil")
/// float ceilf(float x);
TLI_DEFINE_ENUM_INTERNAL(ceilf)
TLI_DEFINE_STRING_INTERNAL("ceilf")
/// long double ceill(long double x);
TLI_DEFINE_ENUM_INTERNAL(ceill)
TLI_DEFINE_STRING_INTERNAL("ceill")
/// int chmod(const char *path, mode_t mode);
TLI_DEFINE_ENUM_INTERNAL(chmod)
TLI_DEFINE_STRING_INTERNAL("chmod")
/// int chown(const char *path, uid_t owner, gid_t group);
TLI_DEFINE_ENUM_INTERNAL(chown)
TLI_DEFINE_STRING_INTERNAL("chown")
/// void clearerr(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(clearerr)
TLI_DEFINE_STRING_INTERNAL("clearerr")
/// int closedir(DIR *dirp);
TLI_DEFINE_ENUM_INTERNAL(closedir)
TLI_DEFINE_STRING_INTERNAL("closedir")
/// double copysign(double x, double y);
TLI_DEFINE_ENUM_INTERNAL(copysign)
TLI_DEFINE_STRING_INTERNAL("copysign")
/// float copysignf(float x, float y);
TLI_DEFINE_ENUM_INTERNAL(copysignf)
TLI_DEFINE_STRING_INTERNAL("copysignf")
/// long double copysignl(long double x, long double y);
TLI_DEFINE_ENUM_INTERNAL(copysignl)
TLI_DEFINE_STRING_INTERNAL("copysignl")
/// double cos(double x);
TLI_DEFINE_ENUM_INTERNAL(cos)
TLI_DEFINE_STRING_INTERNAL("cos")
/// float cosf(float x);
TLI_DEFINE_ENUM_INTERNAL(cosf)
TLI_DEFINE_STRING_INTERNAL("cosf")
/// double cosh(double x);
TLI_DEFINE_ENUM_INTERNAL(cosh)
TLI_DEFINE_STRING_INTERNAL("cosh")
/// float coshf(float x);
TLI_DEFINE_ENUM_INTERNAL(coshf)
TLI_DEFINE_STRING_INTERNAL("coshf")
/// long double coshl(long double x);
TLI_DEFINE_ENUM_INTERNAL(coshl)
TLI_DEFINE_STRING_INTERNAL("coshl")
/// long double cosl(long double x);
TLI_DEFINE_ENUM_INTERNAL(cosl)
TLI_DEFINE_STRING_INTERNAL("cosl")
/// char *ctermid(char *s);
TLI_DEFINE_ENUM_INTERNAL(ctermid)
TLI_DEFINE_STRING_INTERNAL("ctermid")
/// double exp(double x);
TLI_DEFINE_ENUM_INTERNAL(exp)
TLI_DEFINE_STRING_INTERNAL("exp")
/// double exp10(double x);
TLI_DEFINE_ENUM_INTERNAL(exp10)
TLI_DEFINE_STRING_INTERNAL("exp10")
/// float exp10f(float x);
TLI_DEFINE_ENUM_INTERNAL(exp10f)
TLI_DEFINE_STRING_INTERNAL("exp10f")
/// long double exp10l(long double x);
TLI_DEFINE_ENUM_INTERNAL(exp10l)
TLI_DEFINE_STRING_INTERNAL("exp10l")
/// double exp2(double x);
TLI_DEFINE_ENUM_INTERNAL(exp2)
TLI_DEFINE_STRING_INTERNAL("exp2")
/// float exp2f(float x);
TLI_DEFINE_ENUM_INTERNAL(exp2f)
TLI_DEFINE_STRING_INTERNAL("exp2f")
/// long double exp2l(long double x);
TLI_DEFINE_ENUM_INTERNAL(exp2l)
TLI_DEFINE_STRING_INTERNAL("exp2l")
/// float expf(float x);
TLI_DEFINE_ENUM_INTERNAL(expf)
TLI_DEFINE_STRING_INTERNAL("expf")
/// long double expl(long double x);
TLI_DEFINE_ENUM_INTERNAL(expl)
TLI_DEFINE_STRING_INTERNAL("expl")
/// double expm1(double x);
TLI_DEFINE_ENUM_INTERNAL(expm1)
TLI_DEFINE_STRING_INTERNAL("expm1")
/// float expm1f(float x);
TLI_DEFINE_ENUM_INTERNAL(expm1f)
TLI_DEFINE_STRING_INTERNAL("expm1f")
/// long double expm1l(long double x);
TLI_DEFINE_ENUM_INTERNAL(expm1l)
TLI_DEFINE_STRING_INTERNAL("expm1l")
/// double fabs(double x);
TLI_DEFINE_ENUM_INTERNAL(fabs)
TLI_DEFINE_STRING_INTERNAL("fabs")
/// float fabsf(float x);
TLI_DEFINE_ENUM_INTERNAL(fabsf)
TLI_DEFINE_STRING_INTERNAL("fabsf")
/// long double fabsl(long double x);
TLI_DEFINE_ENUM_INTERNAL(fabsl)
TLI_DEFINE_STRING_INTERNAL("fabsl")
/// int fclose(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fclose)
TLI_DEFINE_STRING_INTERNAL("fclose")
/// FILE *fdopen(int fildes, const char *mode);
TLI_DEFINE_ENUM_INTERNAL(fdopen)
TLI_DEFINE_STRING_INTERNAL("fdopen")
/// int feof(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(feof)
TLI_DEFINE_STRING_INTERNAL("feof")
/// int ferror(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(ferror)
TLI_DEFINE_STRING_INTERNAL("ferror")
/// int fflush(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fflush)
TLI_DEFINE_STRING_INTERNAL("fflush")
/// int ffs(int i);
TLI_DEFINE_ENUM_INTERNAL(ffs)
TLI_DEFINE_STRING_INTERNAL("ffs")
/// int ffsl(long int i);
TLI_DEFINE_ENUM_INTERNAL(ffsl)
TLI_DEFINE_STRING_INTERNAL("ffsl")
/// int ffsll(long long int i);
TLI_DEFINE_ENUM_INTERNAL(ffsll)
TLI_DEFINE_STRING_INTERNAL("ffsll")
/// int fgetc(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fgetc)
TLI_DEFINE_STRING_INTERNAL("fgetc")
/// int fgetpos(FILE *stream, fpos_t *pos);
TLI_DEFINE_ENUM_INTERNAL(fgetpos)
TLI_DEFINE_STRING_INTERNAL("fgetpos")
/// char *fgets(char *s, int n, FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fgets)
TLI_DEFINE_STRING_INTERNAL("fgets")
/// int fileno(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fileno)
TLI_DEFINE_STRING_INTERNAL("fileno")
/// int fiprintf(FILE *stream, const char *format, ...);
TLI_DEFINE_ENUM_INTERNAL(fiprintf)
TLI_DEFINE_STRING_INTERNAL("fiprintf")
/// void flockfile(FILE *file);
TLI_DEFINE_ENUM_INTERNAL(flockfile)
TLI_DEFINE_STRING_INTERNAL("flockfile")
/// double floor(double x);
TLI_DEFINE_ENUM_INTERNAL(floor)
TLI_DEFINE_STRING_INTERNAL("floor")
/// float floorf(float x);
TLI_DEFINE_ENUM_INTERNAL(floorf)
TLI_DEFINE_STRING_INTERNAL("floorf")
/// long double floorl(long double x);
TLI_DEFINE_ENUM_INTERNAL(floorl)
TLI_DEFINE_STRING_INTERNAL("floorl")
/// double fmax(double x, double y);
TLI_DEFINE_ENUM_INTERNAL(fmax)
TLI_DEFINE_STRING_INTERNAL("fmax")
/// float fmaxf(float x, float y);
TLI_DEFINE_ENUM_INTERNAL(fmaxf)
TLI_DEFINE_STRING_INTERNAL("fmaxf")
/// long double fmaxl(long double x, long double y);
TLI_DEFINE_ENUM_INTERNAL(fmaxl)
TLI_DEFINE_STRING_INTERNAL("fmaxl")
/// double fmin(double x, double y);
TLI_DEFINE_ENUM_INTERNAL(fmin)
TLI_DEFINE_STRING_INTERNAL("fmin")
/// float fminf(float x, float y);
TLI_DEFINE_ENUM_INTERNAL(fminf)
TLI_DEFINE_STRING_INTERNAL("fminf")
/// long double fminl(long double x, long double y);
TLI_DEFINE_ENUM_INTERNAL(fminl)
TLI_DEFINE_STRING_INTERNAL("fminl")
/// double fmod(double x, double y);
TLI_DEFINE_ENUM_INTERNAL(fmod)
TLI_DEFINE_STRING_INTERNAL("fmod")
/// float fmodf(float x, float y);
TLI_DEFINE_ENUM_INTERNAL(fmodf)
TLI_DEFINE_STRING_INTERNAL("fmodf")
/// long double fmodl(long double x, long double y);
TLI_DEFINE_ENUM_INTERNAL(fmodl)
TLI_DEFINE_STRING_INTERNAL("fmodl")
/// FILE *fopen(const char *filename, const char *mode);
TLI_DEFINE_ENUM_INTERNAL(fopen)
TLI_DEFINE_STRING_INTERNAL("fopen")
#if 0 // HLSL Change Starts - Exclude potentially duplicate 64bit versions
/// FILE *fopen64(const char *filename, const char *opentype)
TLI_DEFINE_ENUM_INTERNAL(fopen64)
TLI_DEFINE_STRING_INTERNAL("fopen64")
#endif // HLSL Change Ends
/// int fprintf(FILE *stream, const char *format, ...);
TLI_DEFINE_ENUM_INTERNAL(fprintf)
TLI_DEFINE_STRING_INTERNAL("fprintf")
/// int fputc(int c, FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fputc)
TLI_DEFINE_STRING_INTERNAL("fputc")
/// int fputs(const char *s, FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fputs)
TLI_DEFINE_STRING_INTERNAL("fputs")
/// size_t fread(void *ptr, size_t size, size_t nitems, FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fread)
TLI_DEFINE_STRING_INTERNAL("fread")
/// void free(void *ptr);
TLI_DEFINE_ENUM_INTERNAL(free)
TLI_DEFINE_STRING_INTERNAL("free")
/// double frexp(double num, int *exp);
TLI_DEFINE_ENUM_INTERNAL(frexp)
TLI_DEFINE_STRING_INTERNAL("frexp")
/// float frexpf(float num, int *exp);
TLI_DEFINE_ENUM_INTERNAL(frexpf)
TLI_DEFINE_STRING_INTERNAL("frexpf")
/// long double frexpl(long double num, int *exp);
TLI_DEFINE_ENUM_INTERNAL(frexpl)
TLI_DEFINE_STRING_INTERNAL("frexpl")
/// int fscanf(FILE *stream, const char *format, ... );
TLI_DEFINE_ENUM_INTERNAL(fscanf)
TLI_DEFINE_STRING_INTERNAL("fscanf")
/// int fseek(FILE *stream, long offset, int whence);
TLI_DEFINE_ENUM_INTERNAL(fseek)
TLI_DEFINE_STRING_INTERNAL("fseek")
/// int fseeko(FILE *stream, off_t offset, int whence);
TLI_DEFINE_ENUM_INTERNAL(fseeko)
TLI_DEFINE_STRING_INTERNAL("fseeko")
#if 0 // HLSL Change Starts - Exclude potentially duplicate 64bit versions
/// int fseeko64(FILE *stream, off64_t offset, int whence)
TLI_DEFINE_ENUM_INTERNAL(fseeko64)
TLI_DEFINE_STRING_INTERNAL("fseeko64")
#endif // HLSL Change Ends
/// int fsetpos(FILE *stream, const fpos_t *pos);
TLI_DEFINE_ENUM_INTERNAL(fsetpos)
TLI_DEFINE_STRING_INTERNAL("fsetpos")
/// int fstat(int fildes, struct stat *buf);
TLI_DEFINE_ENUM_INTERNAL(fstat)
TLI_DEFINE_STRING_INTERNAL("fstat")
/// int fstat64(int filedes, struct stat64 *buf)
TLI_DEFINE_ENUM_INTERNAL(fstat64)
TLI_DEFINE_STRING_INTERNAL("fstat64")
/// int fstatvfs(int fildes, struct statvfs *buf);
TLI_DEFINE_ENUM_INTERNAL(fstatvfs)
TLI_DEFINE_STRING_INTERNAL("fstatvfs")
/// int fstatvfs64(int fildes, struct statvfs64 *buf);
TLI_DEFINE_ENUM_INTERNAL(fstatvfs64)
TLI_DEFINE_STRING_INTERNAL("fstatvfs64")
/// long ftell(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(ftell)
TLI_DEFINE_STRING_INTERNAL("ftell")
/// off_t ftello(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(ftello)
TLI_DEFINE_STRING_INTERNAL("ftello")
#if 0 // HLSL Change Starts - Exclude potentially duplicate 64bit versions
/// off64_t ftello64(FILE *stream)
TLI_DEFINE_ENUM_INTERNAL(ftello64)
TLI_DEFINE_STRING_INTERNAL("ftello64")
#endif // HLSL Change Ends
/// int ftrylockfile(FILE *file);
TLI_DEFINE_ENUM_INTERNAL(ftrylockfile)
TLI_DEFINE_STRING_INTERNAL("ftrylockfile")
/// void funlockfile(FILE *file);
TLI_DEFINE_ENUM_INTERNAL(funlockfile)
TLI_DEFINE_STRING_INTERNAL("funlockfile")
/// size_t fwrite(const void *ptr, size_t size, size_t nitems, FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fwrite)
TLI_DEFINE_STRING_INTERNAL("fwrite")
/// int getc(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(getc)
TLI_DEFINE_STRING_INTERNAL("getc")
/// int getc_unlocked(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(getc_unlocked)
TLI_DEFINE_STRING_INTERNAL("getc_unlocked")
/// int getchar(void);
TLI_DEFINE_ENUM_INTERNAL(getchar)
TLI_DEFINE_STRING_INTERNAL("getchar")
/// char *getenv(const char *name);
TLI_DEFINE_ENUM_INTERNAL(getenv)
TLI_DEFINE_STRING_INTERNAL("getenv")
/// int getitimer(int which, struct itimerval *value);
TLI_DEFINE_ENUM_INTERNAL(getitimer)
TLI_DEFINE_STRING_INTERNAL("getitimer")
/// int getlogin_r(char *name, size_t namesize);
TLI_DEFINE_ENUM_INTERNAL(getlogin_r)
TLI_DEFINE_STRING_INTERNAL("getlogin_r")
/// struct passwd *getpwnam(const char *name);
TLI_DEFINE_ENUM_INTERNAL(getpwnam)
TLI_DEFINE_STRING_INTERNAL("getpwnam")
/// char *gets(char *s);
TLI_DEFINE_ENUM_INTERNAL(gets)
TLI_DEFINE_STRING_INTERNAL("gets")
/// int gettimeofday(struct timeval *tp, void *tzp);
TLI_DEFINE_ENUM_INTERNAL(gettimeofday)
TLI_DEFINE_STRING_INTERNAL("gettimeofday")
/// uint32_t htonl(uint32_t hostlong);
TLI_DEFINE_ENUM_INTERNAL(htonl)
TLI_DEFINE_STRING_INTERNAL("htonl")
/// uint16_t htons(uint16_t hostshort);
TLI_DEFINE_ENUM_INTERNAL(htons)
TLI_DEFINE_STRING_INTERNAL("htons")
/// int iprintf(const char *format, ...);
TLI_DEFINE_ENUM_INTERNAL(iprintf)
TLI_DEFINE_STRING_INTERNAL("iprintf")
/// int isascii(int c);
TLI_DEFINE_ENUM_INTERNAL(isascii)
TLI_DEFINE_STRING_INTERNAL("isascii")
/// int isdigit(int c);
TLI_DEFINE_ENUM_INTERNAL(isdigit)
TLI_DEFINE_STRING_INTERNAL("isdigit")
/// long int labs(long int j);
TLI_DEFINE_ENUM_INTERNAL(labs)
TLI_DEFINE_STRING_INTERNAL("labs")
/// int lchown(const char *path, uid_t owner, gid_t group);
TLI_DEFINE_ENUM_INTERNAL(lchown)
TLI_DEFINE_STRING_INTERNAL("lchown")
/// double ldexp(double x, int n);
TLI_DEFINE_ENUM_INTERNAL(ldexp)
TLI_DEFINE_STRING_INTERNAL("ldexp")
/// float ldexpf(float x, int n);
TLI_DEFINE_ENUM_INTERNAL(ldexpf)
TLI_DEFINE_STRING_INTERNAL("ldexpf")
/// long double ldexpl(long double x, int n);
TLI_DEFINE_ENUM_INTERNAL(ldexpl)
TLI_DEFINE_STRING_INTERNAL("ldexpl")
/// long long int llabs(long long int j);
TLI_DEFINE_ENUM_INTERNAL(llabs)
TLI_DEFINE_STRING_INTERNAL("llabs")
/// double log(double x);
TLI_DEFINE_ENUM_INTERNAL(log)
TLI_DEFINE_STRING_INTERNAL("log")
/// double log10(double x);
TLI_DEFINE_ENUM_INTERNAL(log10)
TLI_DEFINE_STRING_INTERNAL("log10")
/// float log10f(float x);
TLI_DEFINE_ENUM_INTERNAL(log10f)
TLI_DEFINE_STRING_INTERNAL("log10f")
/// long double log10l(long double x);
TLI_DEFINE_ENUM_INTERNAL(log10l)
TLI_DEFINE_STRING_INTERNAL("log10l")
/// double log1p(double x);
TLI_DEFINE_ENUM_INTERNAL(log1p)
TLI_DEFINE_STRING_INTERNAL("log1p")
/// float log1pf(float x);
TLI_DEFINE_ENUM_INTERNAL(log1pf)
TLI_DEFINE_STRING_INTERNAL("log1pf")
/// long double log1pl(long double x);
TLI_DEFINE_ENUM_INTERNAL(log1pl)
TLI_DEFINE_STRING_INTERNAL("log1pl")
/// double log2(double x);
TLI_DEFINE_ENUM_INTERNAL(log2)
TLI_DEFINE_STRING_INTERNAL("log2")
/// float log2f(float x);
TLI_DEFINE_ENUM_INTERNAL(log2f)
TLI_DEFINE_STRING_INTERNAL("log2f")
/// long double log2l(long double x);
TLI_DEFINE_ENUM_INTERNAL(log2l)
TLI_DEFINE_STRING_INTERNAL("log2l")
/// double logb(double x);
TLI_DEFINE_ENUM_INTERNAL(logb)
TLI_DEFINE_STRING_INTERNAL("logb")
/// float logbf(float x);
TLI_DEFINE_ENUM_INTERNAL(logbf)
TLI_DEFINE_STRING_INTERNAL("logbf")
/// long double logbl(long double x);
TLI_DEFINE_ENUM_INTERNAL(logbl)
TLI_DEFINE_STRING_INTERNAL("logbl")
/// float logf(float x);
TLI_DEFINE_ENUM_INTERNAL(logf)
TLI_DEFINE_STRING_INTERNAL("logf")
/// long double logl(long double x);
TLI_DEFINE_ENUM_INTERNAL(logl)
TLI_DEFINE_STRING_INTERNAL("logl")
/// int lstat(const char *path, struct stat *buf);
TLI_DEFINE_ENUM_INTERNAL(lstat)
TLI_DEFINE_STRING_INTERNAL("lstat")
/// int lstat64(const char *path, struct stat64 *buf);
TLI_DEFINE_ENUM_INTERNAL(lstat64)
TLI_DEFINE_STRING_INTERNAL("lstat64")
/// void *malloc(size_t size);
TLI_DEFINE_ENUM_INTERNAL(malloc)
TLI_DEFINE_STRING_INTERNAL("malloc")
/// void *memalign(size_t boundary, size_t size);
TLI_DEFINE_ENUM_INTERNAL(memalign)
TLI_DEFINE_STRING_INTERNAL("memalign")
/// void *memccpy(void *s1, const void *s2, int c, size_t n);
TLI_DEFINE_ENUM_INTERNAL(memccpy)
TLI_DEFINE_STRING_INTERNAL("memccpy")
/// void *memchr(const void *s, int c, size_t n);
TLI_DEFINE_ENUM_INTERNAL(memchr)
TLI_DEFINE_STRING_INTERNAL("memchr")
/// int memcmp(const void *s1, const void *s2, size_t n);
TLI_DEFINE_ENUM_INTERNAL(memcmp)
TLI_DEFINE_STRING_INTERNAL("memcmp")
/// void *memcpy(void *s1, const void *s2, size_t n);
TLI_DEFINE_ENUM_INTERNAL(memcpy)
TLI_DEFINE_STRING_INTERNAL("memcpy")
/// void *memmove(void *s1, const void *s2, size_t n);
TLI_DEFINE_ENUM_INTERNAL(memmove)
TLI_DEFINE_STRING_INTERNAL("memmove")
/// void *memrchr(const void *s, int c, size_t n);
TLI_DEFINE_ENUM_INTERNAL(memrchr)
TLI_DEFINE_STRING_INTERNAL("memrchr")
/// void *memset(void *b, int c, size_t len);
TLI_DEFINE_ENUM_INTERNAL(memset)
TLI_DEFINE_STRING_INTERNAL("memset")
/// void memset_pattern16(void *b, const void *pattern16, size_t len);
TLI_DEFINE_ENUM_INTERNAL(memset_pattern16)
TLI_DEFINE_STRING_INTERNAL("memset_pattern16")
/// int mkdir(const char *path, mode_t mode);
TLI_DEFINE_ENUM_INTERNAL(mkdir)
TLI_DEFINE_STRING_INTERNAL("mkdir")
/// time_t mktime(struct tm *timeptr);
TLI_DEFINE_ENUM_INTERNAL(mktime)
TLI_DEFINE_STRING_INTERNAL("mktime")
/// double modf(double x, double *iptr);
TLI_DEFINE_ENUM_INTERNAL(modf)
TLI_DEFINE_STRING_INTERNAL("modf")
/// float modff(float, float *iptr);
TLI_DEFINE_ENUM_INTERNAL(modff)
TLI_DEFINE_STRING_INTERNAL("modff")
/// long double modfl(long double value, long double *iptr);
TLI_DEFINE_ENUM_INTERNAL(modfl)
TLI_DEFINE_STRING_INTERNAL("modfl")
/// double nearbyint(double x);
TLI_DEFINE_ENUM_INTERNAL(nearbyint)
TLI_DEFINE_STRING_INTERNAL("nearbyint")
/// float nearbyintf(float x);
TLI_DEFINE_ENUM_INTERNAL(nearbyintf)
TLI_DEFINE_STRING_INTERNAL("nearbyintf")
/// long double nearbyintl(long double x);
TLI_DEFINE_ENUM_INTERNAL(nearbyintl)
TLI_DEFINE_STRING_INTERNAL("nearbyintl")
/// uint32_t ntohl(uint32_t netlong);
TLI_DEFINE_ENUM_INTERNAL(ntohl)
TLI_DEFINE_STRING_INTERNAL("ntohl")
/// uint16_t ntohs(uint16_t netshort);
TLI_DEFINE_ENUM_INTERNAL(ntohs)
TLI_DEFINE_STRING_INTERNAL("ntohs")
/// int open(const char *path, int oflag, ... );
TLI_DEFINE_ENUM_INTERNAL(open)
TLI_DEFINE_STRING_INTERNAL("open")
/// int open64(const char *filename, int flags[, mode_t mode])
TLI_DEFINE_ENUM_INTERNAL(open64)
TLI_DEFINE_STRING_INTERNAL("open64")
/// DIR *opendir(const char *dirname);
TLI_DEFINE_ENUM_INTERNAL(opendir)
TLI_DEFINE_STRING_INTERNAL("opendir")
/// int pclose(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(pclose)
TLI_DEFINE_STRING_INTERNAL("pclose")
/// void perror(const char *s);
TLI_DEFINE_ENUM_INTERNAL(perror)
TLI_DEFINE_STRING_INTERNAL("perror")
/// FILE *popen(const char *command, const char *mode);
TLI_DEFINE_ENUM_INTERNAL(popen)
TLI_DEFINE_STRING_INTERNAL("popen")
/// int posix_memalign(void **memptr, size_t alignment, size_t size);
TLI_DEFINE_ENUM_INTERNAL(posix_memalign)
TLI_DEFINE_STRING_INTERNAL("posix_memalign")
/// double pow(double x, double y);
TLI_DEFINE_ENUM_INTERNAL(pow)
TLI_DEFINE_STRING_INTERNAL("pow")
/// float powf(float x, float y);
TLI_DEFINE_ENUM_INTERNAL(powf)
TLI_DEFINE_STRING_INTERNAL("powf")
/// long double powl(long double x, long double y);
TLI_DEFINE_ENUM_INTERNAL(powl)
TLI_DEFINE_STRING_INTERNAL("powl")
/// ssize_t pread(int fildes, void *buf, size_t nbyte, off_t offset);
TLI_DEFINE_ENUM_INTERNAL(pread)
TLI_DEFINE_STRING_INTERNAL("pread")
/// int printf(const char *format, ...);
TLI_DEFINE_ENUM_INTERNAL(printf)
TLI_DEFINE_STRING_INTERNAL("printf")
/// int putc(int c, FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(putc)
TLI_DEFINE_STRING_INTERNAL("putc")
/// int putchar(int c);
TLI_DEFINE_ENUM_INTERNAL(putchar)
TLI_DEFINE_STRING_INTERNAL("putchar")
/// int puts(const char *s);
TLI_DEFINE_ENUM_INTERNAL(puts)
TLI_DEFINE_STRING_INTERNAL("puts")
/// ssize_t pwrite(int fildes, const void *buf, size_t nbyte, off_t offset);
TLI_DEFINE_ENUM_INTERNAL(pwrite)
TLI_DEFINE_STRING_INTERNAL("pwrite")
/// void qsort(void *base, size_t nel, size_t width,
/// int (*compar)(const void *, const void *));
TLI_DEFINE_ENUM_INTERNAL(qsort)
TLI_DEFINE_STRING_INTERNAL("qsort")
/// ssize_t read(int fildes, void *buf, size_t nbyte);
TLI_DEFINE_ENUM_INTERNAL(read)
TLI_DEFINE_STRING_INTERNAL("read")
/// ssize_t readlink(const char *path, char *buf, size_t bufsize);
TLI_DEFINE_ENUM_INTERNAL(readlink)
TLI_DEFINE_STRING_INTERNAL("readlink")
/// void *realloc(void *ptr, size_t size);
TLI_DEFINE_ENUM_INTERNAL(realloc)
TLI_DEFINE_STRING_INTERNAL("realloc")
/// void *reallocf(void *ptr, size_t size);
TLI_DEFINE_ENUM_INTERNAL(reallocf)
TLI_DEFINE_STRING_INTERNAL("reallocf")
/// char *realpath(const char *file_name, char *resolved_name);
TLI_DEFINE_ENUM_INTERNAL(realpath)
TLI_DEFINE_STRING_INTERNAL("realpath")
/// int remove(const char *path);
TLI_DEFINE_ENUM_INTERNAL(remove)
TLI_DEFINE_STRING_INTERNAL("remove")
/// int rename(const char *old, const char *new);
TLI_DEFINE_ENUM_INTERNAL(rename)
TLI_DEFINE_STRING_INTERNAL("rename")
/// void rewind(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(rewind)
TLI_DEFINE_STRING_INTERNAL("rewind")
/// double rint(double x);
TLI_DEFINE_ENUM_INTERNAL(rint)
TLI_DEFINE_STRING_INTERNAL("rint")
/// float rintf(float x);
TLI_DEFINE_ENUM_INTERNAL(rintf)
TLI_DEFINE_STRING_INTERNAL("rintf")
/// long double rintl(long double x);
TLI_DEFINE_ENUM_INTERNAL(rintl)
TLI_DEFINE_STRING_INTERNAL("rintl")
/// int rmdir(const char *path);
TLI_DEFINE_ENUM_INTERNAL(rmdir)
TLI_DEFINE_STRING_INTERNAL("rmdir")
/// double round(double x);
TLI_DEFINE_ENUM_INTERNAL(round)
TLI_DEFINE_STRING_INTERNAL("round")
/// float roundf(float x);
TLI_DEFINE_ENUM_INTERNAL(roundf)
TLI_DEFINE_STRING_INTERNAL("roundf")
/// long double roundl(long double x);
TLI_DEFINE_ENUM_INTERNAL(roundl)
TLI_DEFINE_STRING_INTERNAL("roundl")
/// int scanf(const char *restrict format, ... );
TLI_DEFINE_ENUM_INTERNAL(scanf)
TLI_DEFINE_STRING_INTERNAL("scanf")
/// void setbuf(FILE *stream, char *buf);
TLI_DEFINE_ENUM_INTERNAL(setbuf)
TLI_DEFINE_STRING_INTERNAL("setbuf")
/// int setitimer(int which, const struct itimerval *value,
/// struct itimerval *ovalue);
TLI_DEFINE_ENUM_INTERNAL(setitimer)
TLI_DEFINE_STRING_INTERNAL("setitimer")
/// int setvbuf(FILE *stream, char *buf, int type, size_t size);
TLI_DEFINE_ENUM_INTERNAL(setvbuf)
TLI_DEFINE_STRING_INTERNAL("setvbuf")
/// double sin(double x);
TLI_DEFINE_ENUM_INTERNAL(sin)
TLI_DEFINE_STRING_INTERNAL("sin")
/// float sinf(float x);
TLI_DEFINE_ENUM_INTERNAL(sinf)
TLI_DEFINE_STRING_INTERNAL("sinf")
/// double sinh(double x);
TLI_DEFINE_ENUM_INTERNAL(sinh)
TLI_DEFINE_STRING_INTERNAL("sinh")
/// float sinhf(float x);
TLI_DEFINE_ENUM_INTERNAL(sinhf)
TLI_DEFINE_STRING_INTERNAL("sinhf")
/// long double sinhl(long double x);
TLI_DEFINE_ENUM_INTERNAL(sinhl)
TLI_DEFINE_STRING_INTERNAL("sinhl")
/// long double sinl(long double x);
TLI_DEFINE_ENUM_INTERNAL(sinl)
TLI_DEFINE_STRING_INTERNAL("sinl")
/// int siprintf(char *str, const char *format, ...);
TLI_DEFINE_ENUM_INTERNAL(siprintf)
TLI_DEFINE_STRING_INTERNAL("siprintf")
/// int snprintf(char *s, size_t n, const char *format, ...);
TLI_DEFINE_ENUM_INTERNAL(snprintf)
TLI_DEFINE_STRING_INTERNAL("snprintf")
/// int sprintf(char *str, const char *format, ...);
TLI_DEFINE_ENUM_INTERNAL(sprintf)
TLI_DEFINE_STRING_INTERNAL("sprintf")
/// double sqrt(double x);
TLI_DEFINE_ENUM_INTERNAL(sqrt)
TLI_DEFINE_STRING_INTERNAL("sqrt")
/// float sqrtf(float x);
TLI_DEFINE_ENUM_INTERNAL(sqrtf)
TLI_DEFINE_STRING_INTERNAL("sqrtf")
/// long double sqrtl(long double x);
TLI_DEFINE_ENUM_INTERNAL(sqrtl)
TLI_DEFINE_STRING_INTERNAL("sqrtl")
/// int sscanf(const char *s, const char *format, ... );
TLI_DEFINE_ENUM_INTERNAL(sscanf)
TLI_DEFINE_STRING_INTERNAL("sscanf")
/// int stat(const char *path, struct stat *buf);
TLI_DEFINE_ENUM_INTERNAL(stat)
TLI_DEFINE_STRING_INTERNAL("stat")
/// int stat64(const char *path, struct stat64 *buf);
TLI_DEFINE_ENUM_INTERNAL(stat64)
TLI_DEFINE_STRING_INTERNAL("stat64")
/// int statvfs(const char *path, struct statvfs *buf);
TLI_DEFINE_ENUM_INTERNAL(statvfs)
TLI_DEFINE_STRING_INTERNAL("statvfs")
/// int statvfs64(const char *path, struct statvfs64 *buf)
TLI_DEFINE_ENUM_INTERNAL(statvfs64)
TLI_DEFINE_STRING_INTERNAL("statvfs64")
/// char *stpcpy(char *s1, const char *s2);
TLI_DEFINE_ENUM_INTERNAL(stpcpy)
TLI_DEFINE_STRING_INTERNAL("stpcpy")
/// char *stpncpy(char *s1, const char *s2, size_t n);
TLI_DEFINE_ENUM_INTERNAL(stpncpy)
TLI_DEFINE_STRING_INTERNAL("stpncpy")
/// int strcasecmp(const char *s1, const char *s2);
TLI_DEFINE_ENUM_INTERNAL(strcasecmp)
TLI_DEFINE_STRING_INTERNAL("strcasecmp")
/// char *strcat(char *s1, const char *s2);
TLI_DEFINE_ENUM_INTERNAL(strcat)
TLI_DEFINE_STRING_INTERNAL("strcat")
/// char *strchr(const char *s, int c);
TLI_DEFINE_ENUM_INTERNAL(strchr)
TLI_DEFINE_STRING_INTERNAL("strchr")
/// int strcmp(const char *s1, const char *s2);
TLI_DEFINE_ENUM_INTERNAL(strcmp)
TLI_DEFINE_STRING_INTERNAL("strcmp")
/// int strcoll(const char *s1, const char *s2);
TLI_DEFINE_ENUM_INTERNAL(strcoll)
TLI_DEFINE_STRING_INTERNAL("strcoll")
/// char *strcpy(char *s1, const char *s2);
TLI_DEFINE_ENUM_INTERNAL(strcpy)
TLI_DEFINE_STRING_INTERNAL("strcpy")
/// size_t strcspn(const char *s1, const char *s2);
TLI_DEFINE_ENUM_INTERNAL(strcspn)
TLI_DEFINE_STRING_INTERNAL("strcspn")
/// char *strdup(const char *s1);
TLI_DEFINE_ENUM_INTERNAL(strdup)
TLI_DEFINE_STRING_INTERNAL("strdup")
/// size_t strlen(const char *s);
TLI_DEFINE_ENUM_INTERNAL(strlen)
TLI_DEFINE_STRING_INTERNAL("strlen")
/// int strncasecmp(const char *s1, const char *s2, size_t n);
TLI_DEFINE_ENUM_INTERNAL(strncasecmp)
TLI_DEFINE_STRING_INTERNAL("strncasecmp")
/// char *strncat(char *s1, const char *s2, size_t n);
TLI_DEFINE_ENUM_INTERNAL(strncat)
TLI_DEFINE_STRING_INTERNAL("strncat")
/// int strncmp(const char *s1, const char *s2, size_t n);
TLI_DEFINE_ENUM_INTERNAL(strncmp)
TLI_DEFINE_STRING_INTERNAL("strncmp")
/// char *strncpy(char *s1, const char *s2, size_t n);
TLI_DEFINE_ENUM_INTERNAL(strncpy)
TLI_DEFINE_STRING_INTERNAL("strncpy")
/// char *strndup(const char *s1, size_t n);
TLI_DEFINE_ENUM_INTERNAL(strndup)
TLI_DEFINE_STRING_INTERNAL("strndup")
/// size_t strnlen(const char *s, size_t maxlen);
TLI_DEFINE_ENUM_INTERNAL(strnlen)
TLI_DEFINE_STRING_INTERNAL("strnlen")
/// char *strpbrk(const char *s1, const char *s2);
TLI_DEFINE_ENUM_INTERNAL(strpbrk)
TLI_DEFINE_STRING_INTERNAL("strpbrk")
/// char *strrchr(const char *s, int c);
TLI_DEFINE_ENUM_INTERNAL(strrchr)
TLI_DEFINE_STRING_INTERNAL("strrchr")
/// size_t strspn(const char *s1, const char *s2);
TLI_DEFINE_ENUM_INTERNAL(strspn)
TLI_DEFINE_STRING_INTERNAL("strspn")
/// char *strstr(const char *s1, const char *s2);
TLI_DEFINE_ENUM_INTERNAL(strstr)
TLI_DEFINE_STRING_INTERNAL("strstr")
/// double strtod(const char *nptr, char **endptr);
TLI_DEFINE_ENUM_INTERNAL(strtod)
TLI_DEFINE_STRING_INTERNAL("strtod")
/// float strtof(const char *nptr, char **endptr);
TLI_DEFINE_ENUM_INTERNAL(strtof)
TLI_DEFINE_STRING_INTERNAL("strtof")
/// char *strtok(char *s1, const char *s2);
TLI_DEFINE_ENUM_INTERNAL(strtok)
TLI_DEFINE_STRING_INTERNAL("strtok")
/// char *strtok_r(char *s, const char *sep, char **lasts);
TLI_DEFINE_ENUM_INTERNAL(strtok_r)
TLI_DEFINE_STRING_INTERNAL("strtok_r")
/// long int strtol(const char *nptr, char **endptr, int base);
TLI_DEFINE_ENUM_INTERNAL(strtol)
TLI_DEFINE_STRING_INTERNAL("strtol")
/// long double strtold(const char *nptr, char **endptr);
TLI_DEFINE_ENUM_INTERNAL(strtold)
TLI_DEFINE_STRING_INTERNAL("strtold")
/// long long int strtoll(const char *nptr, char **endptr, int base);
TLI_DEFINE_ENUM_INTERNAL(strtoll)
TLI_DEFINE_STRING_INTERNAL("strtoll")
/// unsigned long int strtoul(const char *nptr, char **endptr, int base);
TLI_DEFINE_ENUM_INTERNAL(strtoul)
TLI_DEFINE_STRING_INTERNAL("strtoul")
/// unsigned long long int strtoull(const char *nptr, char **endptr, int base);
TLI_DEFINE_ENUM_INTERNAL(strtoull)
TLI_DEFINE_STRING_INTERNAL("strtoull")
/// size_t strxfrm(char *s1, const char *s2, size_t n);
TLI_DEFINE_ENUM_INTERNAL(strxfrm)
TLI_DEFINE_STRING_INTERNAL("strxfrm")
/// int system(const char *command);
TLI_DEFINE_ENUM_INTERNAL(system)
TLI_DEFINE_STRING_INTERNAL("system")
/// double tan(double x);
TLI_DEFINE_ENUM_INTERNAL(tan)
TLI_DEFINE_STRING_INTERNAL("tan")
/// float tanf(float x);
TLI_DEFINE_ENUM_INTERNAL(tanf)
TLI_DEFINE_STRING_INTERNAL("tanf")
/// double tanh(double x);
TLI_DEFINE_ENUM_INTERNAL(tanh)
TLI_DEFINE_STRING_INTERNAL("tanh")
/// float tanhf(float x);
TLI_DEFINE_ENUM_INTERNAL(tanhf)
TLI_DEFINE_STRING_INTERNAL("tanhf")
/// long double tanhl(long double x);
TLI_DEFINE_ENUM_INTERNAL(tanhl)
TLI_DEFINE_STRING_INTERNAL("tanhl")
/// long double tanl(long double x);
TLI_DEFINE_ENUM_INTERNAL(tanl)
TLI_DEFINE_STRING_INTERNAL("tanl")
/// clock_t times(struct tms *buffer);
TLI_DEFINE_ENUM_INTERNAL(times)
TLI_DEFINE_STRING_INTERNAL("times")
/// FILE *tmpfile(void);
TLI_DEFINE_ENUM_INTERNAL(tmpfile)
TLI_DEFINE_STRING_INTERNAL("tmpfile")
#if 0 // HLSL Change Starts - Exclude potentially duplicate 64bit versions
/// FILE *tmpfile64(void)
TLI_DEFINE_ENUM_INTERNAL(tmpfile64)
TLI_DEFINE_STRING_INTERNAL("tmpfile64")
#endif // HLSL Change Ends
/// int toascii(int c);
TLI_DEFINE_ENUM_INTERNAL(toascii)
TLI_DEFINE_STRING_INTERNAL("toascii")
/// double trunc(double x);
TLI_DEFINE_ENUM_INTERNAL(trunc)
TLI_DEFINE_STRING_INTERNAL("trunc")
/// float truncf(float x);
TLI_DEFINE_ENUM_INTERNAL(truncf)
TLI_DEFINE_STRING_INTERNAL("truncf")
/// long double truncl(long double x);
TLI_DEFINE_ENUM_INTERNAL(truncl)
TLI_DEFINE_STRING_INTERNAL("truncl")
/// int uname(struct utsname *name);
TLI_DEFINE_ENUM_INTERNAL(uname)
TLI_DEFINE_STRING_INTERNAL("uname")
/// int ungetc(int c, FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(ungetc)
TLI_DEFINE_STRING_INTERNAL("ungetc")
/// int unlink(const char *path);
TLI_DEFINE_ENUM_INTERNAL(unlink)
TLI_DEFINE_STRING_INTERNAL("unlink")
/// int unsetenv(const char *name);
TLI_DEFINE_ENUM_INTERNAL(unsetenv)
TLI_DEFINE_STRING_INTERNAL("unsetenv")
/// int utime(const char *path, const struct utimbuf *times);
TLI_DEFINE_ENUM_INTERNAL(utime)
TLI_DEFINE_STRING_INTERNAL("utime")
/// int utimes(const char *path, const struct timeval times[2]);
TLI_DEFINE_ENUM_INTERNAL(utimes)
TLI_DEFINE_STRING_INTERNAL("utimes")
/// void *valloc(size_t size);
TLI_DEFINE_ENUM_INTERNAL(valloc)
TLI_DEFINE_STRING_INTERNAL("valloc")
/// int vfprintf(FILE *stream, const char *format, va_list ap);
TLI_DEFINE_ENUM_INTERNAL(vfprintf)
TLI_DEFINE_STRING_INTERNAL("vfprintf")
/// int vfscanf(FILE *stream, const char *format, va_list arg);
TLI_DEFINE_ENUM_INTERNAL(vfscanf)
TLI_DEFINE_STRING_INTERNAL("vfscanf")
/// int vprintf(const char *restrict format, va_list ap);
TLI_DEFINE_ENUM_INTERNAL(vprintf)
TLI_DEFINE_STRING_INTERNAL("vprintf")
/// int vscanf(const char *format, va_list arg);
TLI_DEFINE_ENUM_INTERNAL(vscanf)
TLI_DEFINE_STRING_INTERNAL("vscanf")
/// int vsnprintf(char *s, size_t n, const char *format, va_list ap);
TLI_DEFINE_ENUM_INTERNAL(vsnprintf)
TLI_DEFINE_STRING_INTERNAL("vsnprintf")
/// int vsprintf(char *s, const char *format, va_list ap);
TLI_DEFINE_ENUM_INTERNAL(vsprintf)
TLI_DEFINE_STRING_INTERNAL("vsprintf")
/// int vsscanf(const char *s, const char *format, va_list arg);
TLI_DEFINE_ENUM_INTERNAL(vsscanf)
TLI_DEFINE_STRING_INTERNAL("vsscanf")
/// ssize_t write(int fildes, const void *buf, size_t nbyte);
TLI_DEFINE_ENUM_INTERNAL(write)
TLI_DEFINE_STRING_INTERNAL("write")
#undef TLI_DEFINE_ENUM_INTERNAL
#undef TLI_DEFINE_STRING_INTERNAL
#endif // One of TLI_DEFINE_ENUM/STRING are defined.
#undef TLI_DEFINE_ENUM
#undef TLI_DEFINE_STRING
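// Example (editor's sketch, not part of the original file): clients normally
// define exactly one of TLI_DEFINE_ENUM or TLI_DEFINE_STRING before including
// this file, building a parallel enum and name table, roughly:
//
//   enum Func {
//   #define TLI_DEFINE_ENUM
//   #include "llvm/Analysis/TargetLibraryInfo.def"
//     NumLibFuncs
//   };
//
//   static const char *const StandardNames[NumLibFuncs] = {
//   #define TLI_DEFINE_STRING
//   #include "llvm/Analysis/TargetLibraryInfo.def"
//   };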
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/BlockFrequencyInfo.h | //===- BlockFrequencyInfo.h - Block Frequency Analysis ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Loops should be simplified before this analysis.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_BLOCKFREQUENCYINFO_H
#define LLVM_ANALYSIS_BLOCKFREQUENCYINFO_H
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include <climits>
namespace llvm {
class BranchProbabilityInfo;
template <class BlockT> class BlockFrequencyInfoImpl;
/// BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to
/// estimate IR basic block frequencies.
class BlockFrequencyInfo : public FunctionPass {
typedef BlockFrequencyInfoImpl<BasicBlock> ImplType;
std::unique_ptr<ImplType> BFI;
public:
static char ID;
BlockFrequencyInfo();
~BlockFrequencyInfo() override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnFunction(Function &F) override;
void releaseMemory() override;
void print(raw_ostream &O, const Module *M) const override;
const Function *getFunction() const;
void view() const;
/// getBlockFreq - Return the block frequency, or 0 if the information is not
/// available. Note that the initial (entry) frequency is equal to ENTRY_FREQ,
/// so the returned value is only meaningful in comparison with other block
/// frequencies, not on its own. We do this to avoid using floating point.
BlockFrequency getBlockFreq(const BasicBlock *BB) const;
// Print the block frequency Freq to OS using the current function's entry
// frequency to convert Freq into a relative decimal form.
raw_ostream &printBlockFreq(raw_ostream &OS, const BlockFrequency Freq) const;
// Convenience method that attempts to look up the frequency associated with
// BB and print it to OS.
raw_ostream &printBlockFreq(raw_ostream &OS, const BasicBlock *BB) const;
uint64_t getEntryFreq() const;
};
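// Example (editor's sketch, not from the original header; assumes the legacy
// pass manager's getAnalysis<> and BlockFrequency::getFrequency()): block
// frequencies are only meaningful relative to the entry frequency, so compare
// them rather than treating the raw value as absolute.
//
//   BlockFrequencyInfo &BFI = getAnalysis<BlockFrequencyInfo>();
//   uint64_t EntryFreq = BFI.getEntryFreq();
//   uint64_t Freq = BFI.getBlockFreq(&BB).getFrequency();
//   bool HotterThanEntry = Freq > EntryFreq;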
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/SparsePropagation.h | //===- SparsePropagation.h - Sparse Conditional Property Propagation ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an abstract sparse conditional propagation algorithm,
// modeled after SCCP, but with a customizable lattice function.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_SPARSEPROPAGATION_H
#define LLVM_ANALYSIS_SPARSEPROPAGATION_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <set>
#include <vector>
namespace llvm {
class Value;
class Constant;
class Argument;
class Instruction;
class PHINode;
class TerminatorInst;
class BasicBlock;
class Function;
class SparseSolver;
class raw_ostream;
template<typename T> class SmallVectorImpl;
/// AbstractLatticeFunction - This class is implemented by the dataflow instance
/// to specify what the lattice values are and how they handle merges etc.
/// This gives the client the power to compute lattice values from instructions,
/// constants, etc. The only requirement is that lattice values must all fit
/// into a void*. If a void* is not large enough to hold a value directly, the
/// implementation should instead store a pointer into a uniquing set or
/// similar side table.
///
class AbstractLatticeFunction {
public:
typedef void *LatticeVal;
private:
LatticeVal UndefVal, OverdefinedVal, UntrackedVal;
public:
AbstractLatticeFunction(LatticeVal undefVal, LatticeVal overdefinedVal,
LatticeVal untrackedVal) {
UndefVal = undefVal;
OverdefinedVal = overdefinedVal;
UntrackedVal = untrackedVal;
}
virtual ~AbstractLatticeFunction();
LatticeVal getUndefVal() const { return UndefVal; }
LatticeVal getOverdefinedVal() const { return OverdefinedVal; }
LatticeVal getUntrackedVal() const { return UntrackedVal; }
/// IsUntrackedValue - If the specified Value is something that is obviously
/// uninteresting to the analysis (and would always return UntrackedVal),
/// this function can return true to avoid pointless work.
virtual bool IsUntrackedValue(Value *V) {
return false;
}
/// ComputeConstant - Given a constant value, compute and return a lattice
/// value corresponding to the specified constant.
virtual LatticeVal ComputeConstant(Constant *C) {
return getOverdefinedVal(); // always safe
}
/// IsSpecialCasedPHI - Given a PHI node, determine whether it is one that
/// we want to handle through ComputeInstructionState.
virtual bool IsSpecialCasedPHI(PHINode *PN) {
return false;
}
/// GetConstant - If the specified lattice value is representable as an LLVM
/// constant value, return it. Otherwise return null. The returned value
/// must be in the same LLVM type as Val.
virtual Constant *GetConstant(LatticeVal LV, Value *Val, SparseSolver &SS) {
return nullptr;
}
/// ComputeArgument - Given a formal argument value, compute and return a
/// lattice value corresponding to the specified argument.
virtual LatticeVal ComputeArgument(Argument *I) {
return getOverdefinedVal(); // always safe
}
/// MergeValues - Compute and return the merge of the two specified lattice
/// values. Merging should only move one direction down the lattice to
/// guarantee convergence (toward overdefined).
virtual LatticeVal MergeValues(LatticeVal X, LatticeVal Y) {
return getOverdefinedVal(); // always safe, never useful.
}
/// ComputeInstructionState - Given an instruction and a vector of its operand
/// values, compute the result value of the instruction.
virtual LatticeVal ComputeInstructionState(Instruction &I, SparseSolver &SS) {
return getOverdefinedVal(); // always safe, never useful.
}
/// PrintValue - Render the specified lattice value to the specified stream.
virtual void PrintValue(LatticeVal V, raw_ostream &OS);
};
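// Example (editor's sketch, not part of the original header): a minimal
// lattice can use the addresses of three statics as its sentinel LatticeVals
// and override only the hooks it needs; the defaults fall back to overdefined.
//
//   static char Undef, Overdef, Untracked;
//   struct MyLattice : AbstractLatticeFunction {
//     MyLattice() : AbstractLatticeFunction(&Undef, &Overdef, &Untracked) {}
//     LatticeVal ComputeConstant(Constant *C) override { return &Overdef; }
//   };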
/// SparseSolver - This class is a general purpose solver for Sparse Conditional
/// Propagation with a programmable lattice function.
///
class SparseSolver {
typedef AbstractLatticeFunction::LatticeVal LatticeVal;
/// LatticeFunc - This is the object that knows the lattice and how to do
/// compute transfer functions.
AbstractLatticeFunction *LatticeFunc;
DenseMap<Value*, LatticeVal> ValueState; // The state each value is in.
SmallPtrSet<BasicBlock*, 16> BBExecutable; // The bbs that are executable.
std::vector<Instruction*> InstWorkList; // Worklist of insts to process.
std::vector<BasicBlock*> BBWorkList; // The BasicBlock work list
/// KnownFeasibleEdges - Entries in this set are edges which have already had
/// PHI nodes retriggered.
typedef std::pair<BasicBlock*,BasicBlock*> Edge;
std::set<Edge> KnownFeasibleEdges;
SparseSolver(const SparseSolver&) = delete;
void operator=(const SparseSolver&) = delete;
public:
explicit SparseSolver(AbstractLatticeFunction *Lattice)
: LatticeFunc(Lattice) {}
~SparseSolver() {
delete LatticeFunc;
}
/// Solve - Solve for constants and executable blocks.
///
void Solve(Function &F);
void Print(Function &F, raw_ostream &OS) const;
/// getLatticeState - Return the LatticeVal object that corresponds to the
/// value. If a value is not in the map, it is returned as untracked,
/// unlike the getOrInitValueState method.
LatticeVal getLatticeState(Value *V) const {
DenseMap<Value*, LatticeVal>::const_iterator I = ValueState.find(V);
return I != ValueState.end() ? I->second : LatticeFunc->getUntrackedVal();
}
/// getOrInitValueState - Return the LatticeVal object that corresponds to the
/// value, initializing the value's state if it hasn't been entered into the
/// map yet. This function is necessary because not all values should start
/// out in the underdefined state... Arguments should be overdefined, and
/// constants should be marked as constants.
///
LatticeVal getOrInitValueState(Value *V);
/// isEdgeFeasible - Return true if the control flow edge from the 'From'
/// basic block to the 'To' basic block is currently feasible. If
/// AggressiveUndef is true, then this treats values with unknown lattice
/// values as undefined. This is generally only useful when solving the
/// lattice, not when querying it.
bool isEdgeFeasible(BasicBlock *From, BasicBlock *To,
bool AggressiveUndef = false);
/// isBlockExecutable - Return true if there are any known feasible
/// edges into the basic block. This is generally only useful when
/// querying the lattice.
bool isBlockExecutable(BasicBlock *BB) const {
return BBExecutable.count(BB);
}
private:
/// UpdateState - When the state for some instruction is potentially updated,
/// this function notices and adds I to the worklist if needed.
void UpdateState(Instruction &Inst, LatticeVal V);
/// MarkBlockExecutable - This method can be used by clients to mark all of
/// the blocks that are known to be intrinsically live in the processed unit.
void MarkBlockExecutable(BasicBlock *BB);
/// markEdgeExecutable - Mark a basic block as executable, adding it to the BB
/// work list if it is not already executable.
void markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest);
/// getFeasibleSuccessors - Return a vector of booleans to indicate which
/// successors are reachable from a given terminator instruction.
void getFeasibleSuccessors(TerminatorInst &TI, SmallVectorImpl<bool> &Succs,
bool AggressiveUndef);
void visitInst(Instruction &I);
void visitPHINode(PHINode &I);
void visitTerminatorInst(TerminatorInst &TI);
};
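// Example (editor's sketch): driving the solver over a function. Note that
// the solver takes ownership of the lattice object and deletes it in its own
// destructor. MyLattice here refers to the sketch above.
//
//   SparseSolver Solver(new MyLattice());
//   Solver.Solve(F);
//   for (BasicBlock &BB : F)
//     if (!Solver.isBlockExecutable(&BB))
//       ; // block proven unreachable under the lattice's assumptions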
} // end namespace llvm
#endif // LLVM_ANALYSIS_SPARSEPROPAGATION_H
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/LazyValueInfo.h | //===- LazyValueInfo.h - Value constraint analysis --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interface for lazy computation of value constraint
// information.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_LAZYVALUEINFO_H
#define LLVM_ANALYSIS_LAZYVALUEINFO_H
#include "llvm/Pass.h"
namespace llvm {
class AssumptionCache;
class Constant;
class DataLayout;
class DominatorTree;
class Instruction;
class TargetLibraryInfo;
class Value;
/// This pass computes, caches, and vends lazy value constraint information.
class LazyValueInfo : public FunctionPass {
AssumptionCache *AC;
class TargetLibraryInfo *TLI;
DominatorTree *DT;
void *PImpl;
LazyValueInfo(const LazyValueInfo&) = delete;
void operator=(const LazyValueInfo&) = delete;
public:
static char ID;
LazyValueInfo() : FunctionPass(ID), PImpl(nullptr) {
initializeLazyValueInfoPass(*PassRegistry::getPassRegistry());
}
~LazyValueInfo() override { assert(!PImpl && "releaseMemory not called"); }
/// This is used to return true/false/dunno results.
enum Tristate {
Unknown = -1, False = 0, True = 1
};
// Public query interface.
/// Determine whether the specified value comparison with a constant is known
/// to be true or false on the specified CFG edge.
/// Pred is a CmpInst predicate.
Tristate getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
BasicBlock *FromBB, BasicBlock *ToBB,
Instruction *CxtI = nullptr);
/// Determine whether the specified value comparison with a constant is known
/// to be true or false at the specified instruction
/// (from an assume intrinsic). Pred is a CmpInst predicate.
Tristate getPredicateAt(unsigned Pred, Value *V, Constant *C,
Instruction *CxtI);
/// Determine whether the specified value is known to be a
/// constant at the end of the specified block. Return null if not.
Constant *getConstant(Value *V, BasicBlock *BB, Instruction *CxtI = nullptr);
/// Determine whether the specified value is known to be a
/// constant on the specified edge. Return null if not.
Constant *getConstantOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
Instruction *CxtI = nullptr);
/// Inform the analysis cache that we have threaded an edge from
/// PredBB to OldSucc to be from PredBB to NewSucc instead.
void threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc, BasicBlock *NewSucc);
/// Inform the analysis cache that we have erased a block.
void eraseBlock(BasicBlock *BB);
// Implementation boilerplate.
void getAnalysisUsage(AnalysisUsage &AU) const override;
void releaseMemory() override;
bool runOnFunction(Function &F) override;
};
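// Example (editor's sketch, not from the original header): querying whether
// V == C holds along a CFG edge. CmpInst::ICMP_EQ supplies the predicate
// value; V, C, FromBB, and ToBB are placeholders, and getAnalysis<> assumes
// the legacy pass manager.
//
//   LazyValueInfo &LVI = getAnalysis<LazyValueInfo>();
//   LazyValueInfo::Tristate R =
//       LVI.getPredicateOnEdge(CmpInst::ICMP_EQ, V, C, FromBB, ToBB);
//   if (R == LazyValueInfo::True)
//     ; // the comparison is known to be true on that edge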
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/IteratedDominanceFrontier.h | //===- IteratedDominanceFrontier.h - Calculate IDF --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \brief Compute iterated dominance frontiers using a linear time algorithm.
///
/// The algorithm used here is based on:
///
/// Sreedhar and Gao. A linear time algorithm for placing phi-nodes.
/// In Proceedings of the 22nd ACM SIGPLAN-SIGACT Symposium on Principles of
/// Programming Languages
/// POPL '95. ACM, New York, NY, 62-73.
///
/// It has been modified to not explicitly use the DJ graph data structure and
/// to directly compute pruned SSA using per-variable liveness information.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_IDF_H
#define LLVM_ANALYSIS_IDF_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
namespace llvm {
class BasicBlock;
template <class T> class DomTreeNodeBase;
typedef DomTreeNodeBase<BasicBlock> DomTreeNode;
class DominatorTree;
/// \brief Determine the iterated dominance frontier, given a set of defining
/// blocks, and optionally, a set of live-in blocks.
///
/// In turn, the results can be used to place phi nodes.
///
/// This algorithm is a linear time computation of Iterated Dominance Frontiers,
/// pruned using the live-in set.
/// By default, liveness is not used to prune the IDF computation.
class IDFCalculator {
public:
IDFCalculator(DominatorTree &DT) : DT(DT), useLiveIn(false) {}
/// \brief Give the IDF calculator the set of blocks in which the value is
/// defined. This is equivalent to the set of starting blocks it should be
/// calculating the IDF for (though later gets pruned based on liveness).
///
/// Note: This set *must* live for the entire lifetime of the IDF calculator.
void setDefiningBlocks(const SmallPtrSetImpl<BasicBlock *> &Blocks) {
DefBlocks = &Blocks;
}
/// \brief Give the IDF calculator the set of blocks in which the value is
/// live on entry to the block. This is used to prune the IDF calculation to
/// not include blocks where any phi insertion would be dead.
///
/// Note: This set *must* live for the entire lifetime of the IDF calculator.
void setLiveInBlocks(const SmallPtrSetImpl<BasicBlock *> &Blocks) {
LiveInBlocks = &Blocks;
useLiveIn = true;
}
/// \brief Reset the live-in block set to be empty, and tell the IDF
/// calculator to not use liveness anymore.
void resetLiveInBlocks() {
LiveInBlocks = nullptr;
useLiveIn = false;
}
/// \brief Calculate iterated dominance frontiers
///
/// This uses the linear-time phi algorithm based on DJ-graphs mentioned in
/// the file-level comment. It performs DF->IDF pruning using the live-in
/// set, to avoid computing the IDF for blocks where an inserted PHI node
/// would be dead.
void calculate(SmallVectorImpl<BasicBlock *> &IDFBlocks);
private:
DominatorTree &DT;
bool useLiveIn;
DenseMap<DomTreeNode *, unsigned> DomLevels;
const SmallPtrSetImpl<BasicBlock *> *LiveInBlocks;
const SmallPtrSetImpl<BasicBlock *> *DefBlocks;
SmallVector<BasicBlock *, 32> PHIBlocks;
};
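// Example (editor's sketch): computing PHI placement for one variable, where
// DefBlocks is assumed to hold the blocks that define (store to) it.
//
//   IDFCalculator IDF(DT);
//   IDF.setDefiningBlocks(DefBlocks);        // SmallPtrSet<BasicBlock *, N>
//   SmallVector<BasicBlock *, 32> PHIBlocks;
//   IDF.calculate(PHIBlocks);                // blocks that need a PHI node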
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/InlineCost.h | //===- InlineCost.h - Cost analysis for inliner -----------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements heuristics for inlining decisions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_INLINECOST_H
#define LLVM_ANALYSIS_INLINECOST_H
#include "llvm/Analysis/CallGraphSCCPass.h"
#include <cassert>
#include <climits>
namespace llvm {
class AssumptionCacheTracker;
class CallSite;
class DataLayout;
class Function;
class TargetTransformInfoWrapperPass;
namespace InlineConstants {
// Various magic constants used to adjust heuristics.
const int InstrCost = 5;
const int IndirectCallThreshold = 100;
const int CallPenalty = 25;
const int LastCallToStaticBonus = -15000;
const int ColdccPenalty = 2000;
const int NoreturnPenalty = 10000;
/// Do not inline functions which allocate this many bytes on the stack
/// when the caller is recursive.
const unsigned TotalAllocaSizeRecursiveCaller = 1024;
}
/// \brief Represents the cost of inlining a function.
///
/// This supports special values for functions which should "always" or
/// "never" be inlined. Otherwise, the cost represents a unitless amount;
/// smaller values increase the likelihood of the function being inlined.
///
/// Objects of this type also provide the adjusted threshold for inlining
/// based on the information available for a particular callsite. They can be
/// directly tested to determine if inlining should occur given the cost and
/// threshold for this cost metric.
class InlineCost {
enum SentinelValues {
AlwaysInlineCost = INT_MIN,
NeverInlineCost = INT_MAX
};
/// \brief The estimated cost of inlining this callsite.
const int Cost;
/// \brief The adjusted threshold against which this cost was computed.
const int Threshold;
// Trivial constructor, interesting logic in the factory functions below.
InlineCost(int Cost, int Threshold) : Cost(Cost), Threshold(Threshold) {}
public:
static InlineCost get(int Cost, int Threshold) {
assert(Cost > AlwaysInlineCost && "Cost crosses sentinel value");
assert(Cost < NeverInlineCost && "Cost crosses sentinel value");
return InlineCost(Cost, Threshold);
}
static InlineCost getAlways() {
return InlineCost(AlwaysInlineCost, 0);
}
static InlineCost getNever() {
return InlineCost(NeverInlineCost, 0);
}
/// \brief Test whether the inline cost is low enough for inlining.
explicit operator bool() const {
return Cost < Threshold;
}
bool isAlways() const { return Cost == AlwaysInlineCost; }
bool isNever() const { return Cost == NeverInlineCost; }
bool isVariable() const { return !isAlways() && !isNever(); }
/// \brief Get the inline cost estimate.
/// It is an error to call this on an "always" or "never" InlineCost.
int getCost() const {
assert(isVariable() && "Invalid access of InlineCost");
return Cost;
}
/// \brief Get the cost delta from the threshold for inlining.
/// Only valid if the cost is of the variable kind. Returns a negative
/// value if the cost is too high to inline.
int getCostDelta() const { return Threshold - getCost(); }
};
/// \brief Cost analyzer used by inliner.
class InlineCostAnalysis : public CallGraphSCCPass {
TargetTransformInfoWrapperPass *TTIWP;
AssumptionCacheTracker *ACT;
public:
static char ID;
InlineCostAnalysis();
~InlineCostAnalysis() override;
// Pass interface implementation.
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnSCC(CallGraphSCC &SCC) override;
/// \brief Get an InlineCost object representing the cost of inlining this
/// callsite.
///
/// Note that threshold is passed into this function. Only costs below the
/// threshold are computed with any accuracy. The threshold can be used to
/// bound the computation necessary to determine whether the cost is
/// sufficiently low to warrant inlining.
///
/// Also note that calling this function *dynamically* computes the cost of
/// inlining the callsite. It is an expensive, heavyweight call.
InlineCost getInlineCost(CallSite CS, int Threshold);
/// \brief Get an InlineCost with the callee explicitly specified.
/// This allows you to calculate the cost of inlining a function via a
/// pointer. This behaves exactly as the version with no explicit callee
/// parameter in all other respects.
//
// Note: This is used by out-of-tree passes, please do not remove without
// adding a replacement API.
InlineCost getInlineCost(CallSite CS, Function *Callee, int Threshold);
/// \brief Minimal filter to detect invalid constructs for inlining.
bool isInlineViable(Function &Callee);
};
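// Example (editor's sketch): interpreting an InlineCost. The explicit bool
// conversion already compares the cost against the adjusted threshold; ICA
// and Threshold are placeholders for the analysis instance and chosen bound.
//
//   InlineCost IC = ICA.getInlineCost(CS, Threshold);
//   if (IC.isNever())
//     ; // never inline
//   else if (IC)
//     ; // cost is below the threshold; inlining looks profitable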
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/LoopInfo.h | //===- llvm/Analysis/LoopInfo.h - Natural Loop Calculator -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the LoopInfo class that is used to identify natural loops
// and determine the loop depth of various nodes of the CFG. A natural loop
// has exactly one entry-point, which is called the header. Note that natural
// loops may actually be several loops that share the same header node.
//
// This analysis calculates the nesting structure of loops in a function. For
// each natural loop identified, this analysis identifies natural loops
// contained entirely within the loop and the basic blocks that make up the loop.
//
// It can calculate on the fly various bits of information, for example:
//
// * whether there is a preheader for the loop
// * the number of back edges to the header
// * whether or not a particular block branches out of the loop
// * the successor blocks of the loop
// * the loop depth
// * etc...
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_LOOPINFO_H
#define LLVM_ANALYSIS_LOOPINFO_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Pass.h"
#include <algorithm>
namespace llvm {
// FIXME: Replace this brittle forward declaration with the include of the new
// PassManager.h when doing so doesn't break the PassManagerBuilder.
template <typename IRUnitT> class AnalysisManager;
class PreservedAnalyses;
class DominatorTree;
class LoopInfo;
class Loop;
class MDNode;
class PHINode;
class raw_ostream;
template<class N> class DominatorTreeBase;
template<class N, class M> class LoopInfoBase;
template<class N, class M> class LoopBase;
//===----------------------------------------------------------------------===//
/// LoopBase class - Instances of this class are used to represent loops that
/// are detected in the flow graph
///
template<class BlockT, class LoopT>
class LoopBase {
LoopT *ParentLoop;
// SubLoops - Loops contained entirely within this one.
std::vector<LoopT *> SubLoops;
// Blocks - The list of blocks in this loop. First entry is the header node.
std::vector<BlockT*> Blocks;
SmallPtrSet<const BlockT*, 8> DenseBlockSet;
LoopBase(const LoopBase<BlockT, LoopT> &) = delete;
const LoopBase<BlockT, LoopT>&
operator=(const LoopBase<BlockT, LoopT> &) = delete;
public:
/// Loop ctor - This creates an empty loop.
LoopBase() : ParentLoop(nullptr) {}
~LoopBase() {
for (size_t i = 0, e = SubLoops.size(); i != e; ++i)
delete SubLoops[i];
}
/// getLoopDepth - Return the nesting level of this loop. An outer-most
/// loop has depth 1, for consistency with loop depth values used for basic
/// blocks, where depth 0 is used for blocks not inside any loops.
unsigned getLoopDepth() const {
unsigned D = 1;
for (const LoopT *CurLoop = ParentLoop; CurLoop;
CurLoop = CurLoop->ParentLoop)
++D;
return D;
}
BlockT *getHeader() const { return Blocks.front(); }
LoopT *getParentLoop() const { return ParentLoop; }
/// setParentLoop is a raw interface for bypassing addChildLoop.
void setParentLoop(LoopT *L) { ParentLoop = L; }
/// contains - Return true if the specified loop is contained within
/// this loop.
///
bool contains(const LoopT *L) const {
if (L == this) return true;
if (!L) return false;
#pragma warning(push)
#pragma warning(suppress: 6011) // HLSL Change - suppress incorrect warning about null dereference
return contains(L->getParentLoop());
#pragma warning(pop)
}
/// contains - Return true if the specified basic block is in this loop.
///
bool contains(const BlockT *BB) const {
return DenseBlockSet.count(BB);
}
/// contains - Return true if the specified instruction is in this loop.
///
template<class InstT>
bool contains(const InstT *Inst) const {
return contains(Inst->getParent());
}
/// iterator/begin/end - Return the loops contained entirely within this loop.
///
const std::vector<LoopT *> &getSubLoops() const { return SubLoops; }
std::vector<LoopT *> &getSubLoopsVector() { return SubLoops; }
typedef typename std::vector<LoopT *>::const_iterator iterator;
typedef typename std::vector<LoopT *>::const_reverse_iterator
reverse_iterator;
iterator begin() const { return SubLoops.begin(); }
iterator end() const { return SubLoops.end(); }
reverse_iterator rbegin() const { return SubLoops.rbegin(); }
reverse_iterator rend() const { return SubLoops.rend(); }
bool empty() const { return SubLoops.empty(); }
/// getBlocks - Get a list of the basic blocks which make up this loop.
///
const std::vector<BlockT*> &getBlocks() const { return Blocks; }
typedef typename std::vector<BlockT*>::const_iterator block_iterator;
block_iterator block_begin() const { return Blocks.begin(); }
block_iterator block_end() const { return Blocks.end(); }
/// getNumBlocks - Get the number of blocks in this loop in constant time.
unsigned getNumBlocks() const {
return Blocks.size();
}
/// isLoopExiting - True if the terminator of the block can branch to another
/// block that is outside of the current loop.
///
bool isLoopExiting(const BlockT *BB) const {
typedef GraphTraits<const BlockT*> BlockTraits;
for (typename BlockTraits::ChildIteratorType SI =
BlockTraits::child_begin(BB),
SE = BlockTraits::child_end(BB); SI != SE; ++SI) {
if (!contains(*SI))
return true;
}
return false;
}
/// getNumBackEdges - Calculate the number of back edges to the loop header
///
unsigned getNumBackEdges() const {
unsigned NumBackEdges = 0;
BlockT *H = getHeader();
typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
for (typename InvBlockTraits::ChildIteratorType I =
InvBlockTraits::child_begin(H),
E = InvBlockTraits::child_end(H); I != E; ++I)
if (contains(*I))
++NumBackEdges;
return NumBackEdges;
}
//===--------------------------------------------------------------------===//
// APIs for simple analysis of the loop.
//
// Note that all of these methods can fail on general loops (ie, there may not
// be a preheader, etc). For best success, the loop simplification and
// induction variable canonicalization pass should be used to normalize loops
// for easy analysis. These methods assume canonical loops.
/// getExitingBlocks - Return all blocks inside the loop that have successors
/// outside of the loop. These are the blocks _inside of the current loop_
/// which branch out. The returned list is always unique.
///
void getExitingBlocks(SmallVectorImpl<BlockT *> &ExitingBlocks) const;
/// getExitingBlock - If getExitingBlocks would return exactly one block,
/// return that block. Otherwise return null.
BlockT *getExitingBlock() const;
/// getExitBlocks - Return all of the successor blocks of this loop. These
/// are the blocks _outside of the current loop_ which are branched to.
///
void getExitBlocks(SmallVectorImpl<BlockT*> &ExitBlocks) const;
/// getExitBlock - If getExitBlocks would return exactly one block,
/// return that block. Otherwise return null.
BlockT *getExitBlock() const;
/// Edge type.
typedef std::pair<const BlockT*, const BlockT*> Edge;
/// getExitEdges - Return all pairs of (_inside_block_,_outside_block_).
void getExitEdges(SmallVectorImpl<Edge> &ExitEdges) const;
/// getLoopPreheader - If there is a preheader for this loop, return it. A
/// loop has a preheader if there is only one edge to the header of the loop
/// from outside of the loop. If this is the case, the block branching to the
/// header of the loop is the preheader node.
///
/// This method returns null if there is no preheader for the loop.
///
BlockT *getLoopPreheader() const;
/// getLoopPredecessor - If the given loop's header has exactly one unique
/// predecessor outside the loop, return it. Otherwise return null.
/// This is less strict than the loop "preheader" concept, which requires
/// the predecessor to have exactly one successor.
///
BlockT *getLoopPredecessor() const;
/// getLoopLatch - If there is a single latch block for this loop, return it.
/// A latch block is a block that contains a branch back to the header.
BlockT *getLoopLatch() const;
/// getLoopLatches - Return all loop latch blocks of this loop. A latch block
/// is a block that contains a branch back to the header.
void getLoopLatches(SmallVectorImpl<BlockT *> &LoopLatches) const {
BlockT *H = getHeader();
typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
for (typename InvBlockTraits::ChildIteratorType I =
InvBlockTraits::child_begin(H),
E = InvBlockTraits::child_end(H); I != E; ++I)
if (contains(*I))
LoopLatches.push_back(*I);
}
//===--------------------------------------------------------------------===//
// APIs for updating loop information after changing the CFG
//
/// addBasicBlockToLoop - This method is used by other analyses to update loop
/// information. NewBB is set to be a new member of the current loop.
/// Because of this, it is added as a member of all parent loops, and is added
/// to the specified LoopInfo object as being in the current basic block. It
/// is not valid to replace the loop header with this method.
///
void addBasicBlockToLoop(BlockT *NewBB, LoopInfoBase<BlockT, LoopT> &LI);
/// replaceChildLoopWith - This is used when splitting loops up. It replaces
/// the OldChild entry in our children list with NewChild, and updates the
/// parent pointer of OldChild to be null and the NewChild to be this loop.
/// This updates the loop depth of the new child.
void replaceChildLoopWith(LoopT *OldChild, LoopT *NewChild);
/// addChildLoop - Add the specified loop to be a child of this loop. This
/// updates the loop depth of the new child.
///
void addChildLoop(LoopT *NewChild) {
assert(!NewChild->ParentLoop && "NewChild already has a parent!");
NewChild->ParentLoop = static_cast<LoopT *>(this);
SubLoops.push_back(NewChild);
}
/// removeChildLoop - This removes the specified child from being a subloop of
/// this loop. The loop is not deleted, as it will presumably be inserted
/// into another loop.
LoopT *removeChildLoop(iterator I) {
assert(I != SubLoops.end() && "Cannot remove end iterator!");
LoopT *Child = *I;
assert(Child->ParentLoop == this && "Child is not a child of this loop!");
SubLoops.erase(SubLoops.begin()+(I-begin()));
Child->ParentLoop = nullptr;
return Child;
}
/// addBlockEntry - This adds a basic block directly to the basic block list.
/// This should only be used by transformations that create new loops. Other
/// transformations should use addBasicBlockToLoop.
void addBlockEntry(BlockT *BB) {
Blocks.push_back(BB);
DenseBlockSet.insert(BB);
}
/// reverseBlock - Interface to reverse Blocks[from, end of loop] in this loop.
void reverseBlock(unsigned from) {
std::reverse(Blocks.begin() + from, Blocks.end());
}
/// reserveBlocks - Interface to do reserve() for Blocks.
void reserveBlocks(unsigned size) {
Blocks.reserve(size);
}
/// moveToHeader - This method is used to move BB (which must be part of this
/// loop) to be the loop header of the loop (the block that dominates all
/// others).
void moveToHeader(BlockT *BB) {
if (Blocks[0] == BB) return;
for (unsigned i = 0; ; ++i) {
assert(i != Blocks.size() && "Loop does not contain BB!");
if (Blocks[i] == BB) {
Blocks[i] = Blocks[0];
Blocks[0] = BB;
return;
}
}
}
/// removeBlockFromLoop - This removes the specified basic block from the
/// current loop, updating the Blocks as appropriate. This does not update
/// the mapping in the LoopInfo class.
void removeBlockFromLoop(BlockT *BB) {
auto I = std::find(Blocks.begin(), Blocks.end(), BB);
assert(I != Blocks.end() && "BB is not in this list!");
Blocks.erase(I);
DenseBlockSet.erase(BB);
}
/// verifyLoop - Verify loop structure
void verifyLoop() const;
/// verifyLoopNest - Verify loop structure of this loop and all nested loops.
void verifyLoopNest(DenseSet<const LoopT*> *Loops) const;
void print(raw_ostream &OS, unsigned Depth = 0) const;
protected:
friend class LoopInfoBase<BlockT, LoopT>;
explicit LoopBase(BlockT *BB) : ParentLoop(nullptr) {
Blocks.push_back(BB);
DenseBlockSet.insert(BB);
}
};
template<class BlockT, class LoopT>
raw_ostream& operator<<(raw_ostream &OS, const LoopBase<BlockT, LoopT> &Loop) {
Loop.print(OS);
return OS;
}
// Implementation in LoopInfoImpl.h
extern template class LoopBase<BasicBlock, Loop>;
class Loop : public LoopBase<BasicBlock, Loop> {
public:
Loop() {}
/// isLoopInvariant - Return true if the specified value is loop invariant
///
bool isLoopInvariant(const Value *V) const;
/// hasLoopInvariantOperands - Return true if all the operands of the
/// specified instruction are loop invariant.
bool hasLoopInvariantOperands(const Instruction *I) const;
/// makeLoopInvariant - If the given value is an instruction inside of the
/// loop and it can be hoisted, do so to make it trivially loop-invariant.
/// Return true if the value after any hoisting is loop invariant. This
/// function can be used as a slightly more aggressive replacement for
/// isLoopInvariant.
///
/// If InsertPt is specified, it is the point to hoist instructions to.
/// If null, the terminator of the loop preheader is used.
///
bool makeLoopInvariant(Value *V, bool &Changed,
Instruction *InsertPt = nullptr) const;
/// makeLoopInvariant - If the given instruction is inside of the
/// loop and it can be hoisted, do so to make it trivially loop-invariant.
/// Return true if the instruction after any hoisting is loop invariant. This
/// function can be used as a slightly more aggressive replacement for
/// isLoopInvariant.
///
/// If InsertPt is specified, it is the point to hoist instructions to.
/// If null, the terminator of the loop preheader is used.
///
bool makeLoopInvariant(Instruction *I, bool &Changed,
Instruction *InsertPt = nullptr) const;
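// Example (illustrative sketch; assumes an existing Loop *L and an
// Instruction *I from the same function):
//   bool Changed = false;
//   if (L->makeLoopInvariant(I, Changed)) {
//     // I is now (or already was) loop invariant; Changed reports whether
//     // any hoisting actually took place.
//   }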
/// getCanonicalInductionVariable - Check to see if the loop has a canonical
/// induction variable: an integer recurrence that starts at 0 and increments
/// by one each time through the loop. If so, return the phi node that
/// corresponds to it.
///
/// The IndVarSimplify pass transforms loops to have a canonical induction
/// variable.
///
PHINode *getCanonicalInductionVariable() const;
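// Example (illustrative sketch; assumes an existing Loop *L):
//   if (PHINode *IndVar = L->getCanonicalInductionVariable()) {
//     // IndVar starts at 0 and increments by one on every iteration.
//   }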
/// isLCSSAForm - Return true if the Loop is in LCSSA form
bool isLCSSAForm(DominatorTree &DT) const;
/// isLoopSimplifyForm - Return true if the Loop is in the form that
/// the LoopSimplify form transforms loops to, which is sometimes called
/// normal form.
bool isLoopSimplifyForm() const;
/// isSafeToClone - Return true if the loop body is safe to clone in practice.
bool isSafeToClone() const;
/// Returns true if the loop is annotated parallel.
///
/// A parallel loop can be assumed to not contain any dependencies between
/// iterations by the compiler. That is, any loop-carried dependency checking
/// can be skipped completely when parallelizing the loop on the target
/// machine. Thus, if the parallel loop information originates from the
/// programmer, e.g. via the OpenMP parallel for pragma, it is the
/// programmer's responsibility to ensure there are no loop-carried
/// dependencies. The final execution order of the instructions across
/// iterations is not guaranteed, thus, the end result might or might not
/// implement actual concurrent execution of instructions across multiple
/// iterations.
bool isAnnotatedParallel() const;
/// Return the llvm.loop loop id metadata node for this loop if it is present.
///
/// If this loop contains the same llvm.loop metadata on each branch to the
/// header then the node is returned. If any latch instruction does not
/// contain llvm.loop metadata, or if multiple latches contain different
/// nodes, then null is returned.
MDNode *getLoopID() const;
/// Set the llvm.loop loop id metadata for this loop.
///
/// The LoopID metadata node will be added to each terminator instruction in
/// the loop that branches to the loop header.
///
/// The LoopID metadata node should have one or more operands and the first
/// operand should be the node itself.
void setLoopID(MDNode *LoopID) const;
/// hasDedicatedExits - Return true if no exit block for the loop
/// has a predecessor that is outside the loop.
bool hasDedicatedExits() const;
/// getUniqueExitBlocks - Return all unique successor blocks of this loop.
/// These are the blocks _outside of the current loop_ which are branched to.
/// This assumes that loop exits are in canonical form.
///
void getUniqueExitBlocks(SmallVectorImpl<BasicBlock *> &ExitBlocks) const;
/// getUniqueExitBlock - If getUniqueExitBlocks would return exactly one
/// block, return that block. Otherwise return null.
BasicBlock *getUniqueExitBlock() const;
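// Example (illustrative sketch; assumes an existing Loop *L whose exits are
// in canonical form):
//   SmallVector<BasicBlock *, 8> ExitBlocks;
//   L->getUniqueExitBlocks(ExitBlocks);
//   if (BasicBlock *UniqueExit = L->getUniqueExitBlock()) {
//     // Exactly one block outside the loop is branched to.
//   }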
void dump() const;
/// \brief Return the debug location of the start of this loop.
/// This looks for a BB terminating instruction with a known debug
/// location by looking at the preheader and header blocks. If it
/// cannot find a terminating instruction with location information,
/// it returns an unknown location.
DebugLoc getStartLoc() const {
BasicBlock *HeadBB;
// Try the pre-header first.
if ((HeadBB = getLoopPreheader()) != nullptr)
if (DebugLoc DL = HeadBB->getTerminator()->getDebugLoc())
return DL;
// If we have no pre-header or there are no instructions with debug
// info in it, try the header.
HeadBB = getHeader();
if (HeadBB)
return HeadBB->getTerminator()->getDebugLoc();
return DebugLoc();
}
private:
friend class LoopInfoBase<BasicBlock, Loop>;
explicit Loop(BasicBlock *BB) : LoopBase<BasicBlock, Loop>(BB) {}
};
// //
///////////////////////////////////////////////////////////////////////////////
/// LoopInfo - This class builds and contains all of the top level loop
/// structures in the specified function.
///
template<class BlockT, class LoopT>
class LoopInfoBase {
// BBMap - Mapping of basic blocks to the innermost loop they occur in
DenseMap<const BlockT *, LoopT *> BBMap;
std::vector<LoopT *> TopLevelLoops;
friend class LoopBase<BlockT, LoopT>;
friend class LoopInfo;
void operator=(const LoopInfoBase &) = delete;
LoopInfoBase(const LoopInfoBase &) = delete;
public:
LoopInfoBase() { }
~LoopInfoBase() { releaseMemory(); }
LoopInfoBase(LoopInfoBase &&Arg)
: BBMap(std::move(Arg.BBMap)),
TopLevelLoops(std::move(Arg.TopLevelLoops)) {
// We have to clear the argument's top-level loops as we've taken ownership.
Arg.TopLevelLoops.clear();
}
LoopInfoBase &operator=(LoopInfoBase &&RHS) {
BBMap = std::move(RHS.BBMap);
for (auto *L : TopLevelLoops)
delete L;
TopLevelLoops = std::move(RHS.TopLevelLoops);
RHS.TopLevelLoops.clear();
return *this;
}
void releaseMemory() {
BBMap.clear();
for (auto *L : TopLevelLoops)
delete L;
TopLevelLoops.clear();
}
/// iterator/begin/end - The interface to the top-level loops in the current
/// function.
///
typedef typename std::vector<LoopT *>::const_iterator iterator;
typedef typename std::vector<LoopT *>::const_reverse_iterator
reverse_iterator;
iterator begin() const { return TopLevelLoops.begin(); }
iterator end() const { return TopLevelLoops.end(); }
reverse_iterator rbegin() const { return TopLevelLoops.rbegin(); }
reverse_iterator rend() const { return TopLevelLoops.rend(); }
bool empty() const { return TopLevelLoops.empty(); }
/// getLoopFor - Return the innermost loop that BB lives in. If a basic
/// block is in no loop (for example the entry node), null is returned.
///
LoopT *getLoopFor(const BlockT *BB) const { return BBMap.lookup(BB); }
/// operator[] - same as getLoopFor...
///
const LoopT *operator[](const BlockT *BB) const {
return getLoopFor(BB);
}
/// getLoopDepth - Return the loop nesting level of the specified block. A
/// depth of 0 means the block is not inside any loop.
///
unsigned getLoopDepth(const BlockT *BB) const {
const LoopT *L = getLoopFor(BB);
return L ? L->getLoopDepth() : 0;
}
/// isLoopHeader - True if the block is a loop header node.
bool isLoopHeader(const BlockT *BB) const {
const LoopT *L = getLoopFor(BB);
return L && L->getHeader() == BB;
}
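// Example (illustrative sketch, shown for the BasicBlock/Loop instantiation,
// i.e. the concrete LoopInfo class defined below; assumes a populated
// LoopInfo LI and a BasicBlock *BB from the analyzed function):
//   if (Loop *L = LI.getLoopFor(BB)) {
//     unsigned Depth = LI.getLoopDepth(BB); // >= 1 inside a loop
//     bool IsHeader = LI.isLoopHeader(BB);  // BB == L->getHeader()
//   }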
/// removeLoop - This removes the specified top-level loop from this loop info
/// object. The loop is not deleted, as it will presumably be inserted into
/// another loop.
LoopT *removeLoop(iterator I) {
assert(I != end() && "Cannot remove end iterator!");
LoopT *L = *I;
assert(!L->getParentLoop() && "Not a top-level loop!");
TopLevelLoops.erase(TopLevelLoops.begin() + (I-begin()));
return L;
}
/// changeLoopFor - Change the top-level loop that contains BB to the
/// specified loop. This should be used by transformations that restructure
/// the loop hierarchy tree.
void changeLoopFor(BlockT *BB, LoopT *L) {
if (!L) {
BBMap.erase(BB);
return;
}
BBMap[BB] = L;
}
/// changeTopLevelLoop - Replace the specified loop in the top-level loops
/// list with the indicated loop.
void changeTopLevelLoop(LoopT *OldLoop,
LoopT *NewLoop) {
auto I = std::find(TopLevelLoops.begin(), TopLevelLoops.end(), OldLoop);
assert(I != TopLevelLoops.end() && "Old loop not at top level!");
*I = NewLoop;
assert(!NewLoop->ParentLoop && !OldLoop->ParentLoop &&
"Loops already embedded into a subloop!");
}
/// addTopLevelLoop - This adds the specified loop to the collection of
/// top-level loops.
void addTopLevelLoop(LoopT *New) {
assert(!New->getParentLoop() && "Loop already in subloop!");
TopLevelLoops.push_back(New);
}
/// removeBlock - This method completely removes BB from all data structures,
/// including all of the Loop objects it is nested in and our mapping from
/// BasicBlocks to loops.
void removeBlock(BlockT *BB) {
auto I = BBMap.find(BB);
if (I != BBMap.end()) {
for (LoopT *L = I->second; L; L = L->getParentLoop())
L->removeBlockFromLoop(BB);
BBMap.erase(I);
}
}
// Internals
static bool isNotAlreadyContainedIn(const LoopT *SubLoop,
const LoopT *ParentLoop) {
if (!SubLoop) return true;
if (SubLoop == ParentLoop) return false;
return isNotAlreadyContainedIn(SubLoop->getParentLoop(), ParentLoop);
}
/// Create the loop forest using a stable algorithm.
void Analyze(DominatorTreeBase<BlockT> &DomTree);
// Debugging
void print(raw_ostream &OS) const;
void verify() const;
};
// Implementation in LoopInfoImpl.h
extern template class LoopInfoBase<BasicBlock, Loop>;
class LoopInfo : public LoopInfoBase<BasicBlock, Loop> {
typedef LoopInfoBase<BasicBlock, Loop> BaseT;
friend class LoopBase<BasicBlock, Loop>;
void operator=(const LoopInfo &) = delete;
LoopInfo(const LoopInfo &) = delete;
public:
LoopInfo() {}
LoopInfo(LoopInfo &&Arg) : BaseT(std::move(static_cast<BaseT &>(Arg))) {}
LoopInfo &operator=(LoopInfo &&RHS) {
BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
return *this;
}
// Most of the public interface is provided via LoopInfoBase.
/// updateUnloop - Update LoopInfo after removing the last backedge from a
/// loop--now the "unloop". This updates the loop forest and parent loops for
/// each block so that Unloop is no longer referenced, but the caller must
/// actually delete the Unloop object.
void updateUnloop(Loop *Unloop);
/// replacementPreservesLCSSAForm - Returns true if replacing From with To
/// everywhere is guaranteed to preserve LCSSA form.
bool replacementPreservesLCSSAForm(Instruction *From, Value *To) {
// Preserving LCSSA form is only problematic if the replacing value is an
// instruction.
Instruction *I = dyn_cast<Instruction>(To);
if (!I) return true;
// If both instructions are defined in the same basic block then replacement
// cannot break LCSSA form.
if (I->getParent() == From->getParent())
return true;
// If the instruction is not defined in a loop then it can safely replace
// anything.
Loop *ToLoop = getLoopFor(I->getParent());
if (!ToLoop) return true;
// If the replacing instruction is defined in the same loop as the original
// instruction, or in a loop that contains it as an inner loop, then using
// it as a replacement will not break LCSSA form.
return ToLoop->contains(getLoopFor(From->getParent()));
}
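// Example (illustrative sketch; assumes a populated LoopInfo LI, an
// Instruction *From to be replaced, and a replacement Value *To):
//   if (LI.replacementPreservesLCSSAForm(From, To))
//     From->replaceAllUsesWith(To);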
};
// Allow clients to walk the list of nested loops...
template <> struct GraphTraits<const Loop*> {
typedef const Loop NodeType;
typedef LoopInfo::iterator ChildIteratorType;
static NodeType *getEntryNode(const Loop *L) { return L; }
static inline ChildIteratorType child_begin(NodeType *N) {
return N->begin();
}
static inline ChildIteratorType child_end(NodeType *N) {
return N->end();
}
};
template <> struct GraphTraits<Loop*> {
typedef Loop NodeType;
typedef LoopInfo::iterator ChildIteratorType;
static NodeType *getEntryNode(Loop *L) { return L; }
static inline ChildIteratorType child_begin(NodeType *N) {
return N->begin();
}
static inline ChildIteratorType child_end(NodeType *N) {
return N->end();
}
};
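// Example (illustrative sketch): with these GraphTraits specializations,
// generic graph iterators such as depth_first (assuming
// llvm/ADT/DepthFirstIterator.h is included) can walk a loop nest directly.
// Assumes an existing Loop *OuterL:
//   for (Loop *L : depth_first(OuterL)) {
//     // Visits OuterL and every loop nested inside it.
//   }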
/// \brief Analysis pass that exposes the \c LoopInfo for a function.
class LoopAnalysis {
static char PassID;
public:
typedef LoopInfo Result;
/// \brief Opaque, unique identifier for this analysis pass.
static void *ID() { return (void *)&PassID; }
/// \brief Provide a name for the analysis for debugging and logging.
static StringRef name() { return "LoopAnalysis"; }
LoopInfo run(Function &F, AnalysisManager<Function> *AM);
};
/// \brief Printer pass for the \c LoopAnalysis results.
class LoopPrinterPass {
raw_ostream &OS;
public:
explicit LoopPrinterPass(raw_ostream &OS) : OS(OS) {}
PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM);
static StringRef name() { return "LoopPrinterPass"; }
};
/// \brief The legacy pass manager's analysis pass to compute loop information.
class LoopInfoWrapperPass : public FunctionPass {
LoopInfo LI;
public:
static char ID; // Pass identification, replacement for typeid
LoopInfoWrapperPass() : FunctionPass(ID) {
initializeLoopInfoWrapperPassPass(*PassRegistry::getPassRegistry());
}
LoopInfo &getLoopInfo() { return LI; }
const LoopInfo &getLoopInfo() const { return LI; }
/// \brief Calculate the natural loop information for a given function.
bool runOnFunction(Function &F) override;
void verifyAnalysis() const override;
void releaseMemory() override { LI.releaseMemory(); }
void print(raw_ostream &O, const Module *M = nullptr) const override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
};
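// Example (illustrative sketch of how a legacy pass might use this wrapper;
// MyPass is a hypothetical FunctionPass):
//   void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<LoopInfoWrapperPass>();
//     AU.setPreservesAll();
//   }
//   bool MyPass::runOnFunction(Function &F) {
//     LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
//     return false; // analysis only, IR not modified
//   }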
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/RegionPass.h | //===- RegionPass.h - RegionPass class --------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RegionPass class. All region based analysis,
// optimization and transformation passes are derived from RegionPass.
// This class is implemented following some of the ideas of the LoopPass.h class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_REGIONPASS_H
#define LLVM_ANALYSIS_REGIONPASS_H
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManagers.h"
#include "llvm/Pass.h"
#include <deque>
namespace llvm {
class RGPassManager;
class Function;
// //
///////////////////////////////////////////////////////////////////////////////
/// @brief A pass that runs on each Region in a function.
///
/// RegionPass is managed by RGPassManager.
class RegionPass : public Pass {
public:
explicit RegionPass(char &pid) : Pass(PT_Region, pid) {}
//===--------------------------------------------------------------------===//
/// @name To be implemented by every RegionPass
///
//@{
/// @brief Run the pass on a specific Region
///
/// Accessing regions not contained in the current region is not allowed.
///
/// @param R The region this pass is run on.
/// @param RGM The RegionPassManager that manages this Pass.
///
/// @return True if the pass modifies this Region.
virtual bool runOnRegion(Region *R, RGPassManager &RGM) = 0;
/// @brief Get a pass to print the LLVM IR in the region.
///
/// @param O The output stream to print the Region.
/// @param Banner The banner to separate different printed passes.
///
/// @return The pass to print the LLVM IR in the region.
Pass *createPrinterPass(raw_ostream &O,
const std::string &Banner) const override;
using llvm::Pass::doInitialization;
using llvm::Pass::doFinalization;
virtual bool doInitialization(Region *R, RGPassManager &RGM) { return false; }
virtual bool doFinalization() { return false; }
//@}
//===--------------------------------------------------------------------===//
/// @name PassManager API
///
//@{
void preparePassManager(PMStack &PMS) override;
void assignPassManager(PMStack &PMS,
PassManagerType PMT = PMT_RegionPassManager) override;
PassManagerType getPotentialPassManagerType() const override {
return PMT_RegionPassManager;
}
//@}
};
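// Example (illustrative sketch of a minimal RegionPass subclass; the class
// name is hypothetical and the usual pass registration boilerplate is
// omitted):
//   struct CountBlocksRegionPass : public RegionPass {
//     static char ID;
//     CountBlocksRegionPass() : RegionPass(ID) {}
//     bool runOnRegion(Region *R, RGPassManager &RGM) override {
//       unsigned NumBlocks = 0;
//       for (BasicBlock *BB : R->blocks()) {
//         (void)BB;
//         ++NumBlocks;
//       }
//       return false; // the region was not modified
//     }
//   };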
/// @brief The pass manager to schedule RegionPasses.
class RGPassManager : public FunctionPass, public PMDataManager {
std::deque<Region*> RQ;
bool skipThisRegion;
bool redoThisRegion;
RegionInfo *RI;
Region *CurrentRegion;
public:
static char ID;
explicit RGPassManager();
/// @brief Execute all of the passes scheduled for execution.
///
/// @return True if any of the passes modifies the function.
bool runOnFunction(Function &F) override;
/// Pass Manager itself does not invalidate any analysis info.
/// RGPassManager needs RegionInfo.
void getAnalysisUsage(AnalysisUsage &Info) const override;
StringRef getPassName() const override {
return "Region Pass Manager";
}
PMDataManager *getAsPMDataManager() override { return this; }
Pass *getAsPass() override { return this; }
/// @brief Print passes managed by this manager.
void dumpPassStructure(unsigned Offset) override;
/// @brief Get passes contained by this manager.
Pass *getContainedPass(unsigned N) {
assert(N < PassVector.size() && "Pass number out of range!");
Pass *FP = static_cast<Pass *>(PassVector[N]);
return FP;
}
PassManagerType getPassManagerType() const override {
return PMT_RegionPassManager;
}
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/RegionInfoImpl.h | //===- RegionInfoImpl.h - SESE region detection analysis --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// Detects single entry single exit regions in the control flow graph.
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_REGIONINFOIMPL_H
#define LLVM_ANALYSIS_REGIONINFOIMPL_H
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Analysis/DominanceFrontier.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/RegionIterator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <iterator>
#include <set>
namespace llvm {
#define DEBUG_TYPE "region"
//===----------------------------------------------------------------------===//
/// RegionBase Implementation
template <class Tr>
RegionBase<Tr>::RegionBase(BlockT *Entry, BlockT *Exit,
typename Tr::RegionInfoT *RInfo, DomTreeT *dt,
RegionT *Parent)
: RegionNodeBase<Tr>(Parent, Entry, 1), RI(RInfo), DT(dt), exit(Exit) {}
template <class Tr>
RegionBase<Tr>::~RegionBase() {
// Free the cached nodes.
for (typename BBNodeMapT::iterator it = BBNodeMap.begin(),
ie = BBNodeMap.end();
it != ie; ++it)
delete it->second;
// Only clean the cache for this Region. Caches of child Regions will be
// cleaned when the child Regions are deleted.
BBNodeMap.clear();
}
template <class Tr>
void RegionBase<Tr>::replaceEntry(BlockT *BB) {
this->entry.setPointer(BB);
}
template <class Tr>
void RegionBase<Tr>::replaceExit(BlockT *BB) {
assert(exit && "No exit to replace!");
exit = BB;
}
template <class Tr>
void RegionBase<Tr>::replaceEntryRecursive(BlockT *NewEntry) {
std::vector<RegionT *> RegionQueue;
BlockT *OldEntry = getEntry();
RegionQueue.push_back(static_cast<RegionT *>(this));
while (!RegionQueue.empty()) {
RegionT *R = RegionQueue.back();
RegionQueue.pop_back();
R->replaceEntry(NewEntry);
for (typename RegionT::const_iterator RI = R->begin(), RE = R->end();
RI != RE; ++RI) {
if ((*RI)->getEntry() == OldEntry)
RegionQueue.push_back(RI->get());
}
}
}
template <class Tr>
void RegionBase<Tr>::replaceExitRecursive(BlockT *NewExit) {
std::vector<RegionT *> RegionQueue;
BlockT *OldExit = getExit();
RegionQueue.push_back(static_cast<RegionT *>(this));
while (!RegionQueue.empty()) {
RegionT *R = RegionQueue.back();
RegionQueue.pop_back();
R->replaceExit(NewExit);
for (typename RegionT::const_iterator RI = R->begin(), RE = R->end();
RI != RE; ++RI) {
if ((*RI)->getExit() == OldExit)
RegionQueue.push_back(RI->get());
}
}
}
template <class Tr>
bool RegionBase<Tr>::contains(const BlockT *B) const {
BlockT *BB = const_cast<BlockT *>(B);
if (!DT->getNode(BB))
return false;
BlockT *entry = getEntry(), *exit = getExit();
// Toplevel region.
if (!exit)
return true;
return (DT->dominates(entry, BB) &&
!(DT->dominates(exit, BB) && DT->dominates(entry, exit)));
}
template <class Tr>
bool RegionBase<Tr>::contains(const LoopT *L) const {
// BBs that are not part of any loop are elements of the Loop
// described by the NULL pointer. This loop is not part of any region,
// except if the region describes the whole function.
if (!L)
return getExit() == nullptr;
if (!contains(L->getHeader()))
return false;
SmallVector<BlockT *, 8> ExitingBlocks;
L->getExitingBlocks(ExitingBlocks);
for (BlockT *BB : ExitingBlocks) {
if (!contains(BB))
return false;
}
return true;
}
template <class Tr>
typename Tr::LoopT *RegionBase<Tr>::outermostLoopInRegion(LoopT *L) const {
if (!contains(L))
return nullptr;
while (L && contains(L->getParentLoop())) {
L = L->getParentLoop();
}
return L;
}
template <class Tr>
typename Tr::LoopT *RegionBase<Tr>::outermostLoopInRegion(LoopInfoT *LI,
BlockT *BB) const {
assert(LI && BB && "LI and BB cannot be null!");
LoopT *L = LI->getLoopFor(BB);
return outermostLoopInRegion(L);
}
template <class Tr>
typename RegionBase<Tr>::BlockT *RegionBase<Tr>::getEnteringBlock() const {
BlockT *entry = getEntry();
BlockT *Pred;
BlockT *enteringBlock = nullptr;
for (PredIterTy PI = InvBlockTraits::child_begin(entry),
PE = InvBlockTraits::child_end(entry);
PI != PE; ++PI) {
Pred = *PI;
if (DT->getNode(Pred) && !contains(Pred)) {
if (enteringBlock)
return nullptr;
enteringBlock = Pred;
}
}
return enteringBlock;
}
template <class Tr>
typename RegionBase<Tr>::BlockT *RegionBase<Tr>::getExitingBlock() const {
BlockT *exit = getExit();
BlockT *Pred;
BlockT *exitingBlock = nullptr;
if (!exit)
return nullptr;
for (PredIterTy PI = InvBlockTraits::child_begin(exit),
PE = InvBlockTraits::child_end(exit);
PI != PE; ++PI) {
Pred = *PI;
if (contains(Pred)) {
if (exitingBlock)
return nullptr;
exitingBlock = Pred;
}
}
return exitingBlock;
}
template <class Tr>
bool RegionBase<Tr>::isSimple() const {
return !isTopLevelRegion() && getEnteringBlock() && getExitingBlock();
}
template <class Tr>
std::string RegionBase<Tr>::getNameStr() const {
std::string exitName;
std::string entryName;
if (getEntry()->getName().empty()) {
raw_string_ostream OS(entryName);
getEntry()->printAsOperand(OS, false);
} else
entryName = getEntry()->getName();
if (getExit()) {
if (getExit()->getName().empty()) {
raw_string_ostream OS(exitName);
getExit()->printAsOperand(OS, false);
} else
exitName = getExit()->getName();
} else
exitName = "<Function Return>";
return entryName + " => " + exitName;
}
template <class Tr>
void RegionBase<Tr>::verifyBBInRegion(BlockT *BB) const {
if (!contains(BB))
llvm_unreachable("Broken region found!");
BlockT *entry = getEntry(), *exit = getExit();
for (SuccIterTy SI = BlockTraits::child_begin(BB),
SE = BlockTraits::child_end(BB);
SI != SE; ++SI) {
if (!contains(*SI) && exit != *SI)
llvm_unreachable("Broken region found!");
}
if (entry != BB) {
for (PredIterTy SI = InvBlockTraits::child_begin(BB),
SE = InvBlockTraits::child_end(BB);
SI != SE; ++SI) {
if (!contains(*SI))
llvm_unreachable("Broken region found!");
}
}
}
template <class Tr>
void RegionBase<Tr>::verifyWalk(BlockT *BB, std::set<BlockT *> *visited) const {
BlockT *exit = getExit();
visited->insert(BB);
verifyBBInRegion(BB);
for (SuccIterTy SI = BlockTraits::child_begin(BB),
SE = BlockTraits::child_end(BB);
SI != SE; ++SI) {
if (*SI != exit && visited->find(*SI) == visited->end())
verifyWalk(*SI, visited);
}
}
template <class Tr>
void RegionBase<Tr>::verifyRegion() const {
// Only do verification when the user wants to; otherwise this expensive check
// will be invoked by PMDataManager::verifyPreservedAnalysis whenever
// a regionpass (marked PreservedAll) finishes.
if (!RegionInfoBase<Tr>::VerifyRegionInfo)
return;
std::set<BlockT *> visited;
verifyWalk(getEntry(), &visited);
}
template <class Tr>
void RegionBase<Tr>::verifyRegionNest() const {
for (typename RegionT::const_iterator RI = begin(), RE = end(); RI != RE;
++RI)
(*RI)->verifyRegionNest();
verifyRegion();
}
template <class Tr>
typename RegionBase<Tr>::element_iterator RegionBase<Tr>::element_begin() {
return GraphTraits<RegionT *>::nodes_begin(static_cast<RegionT *>(this));
}
template <class Tr>
typename RegionBase<Tr>::element_iterator RegionBase<Tr>::element_end() {
return GraphTraits<RegionT *>::nodes_end(static_cast<RegionT *>(this));
}
template <class Tr>
typename RegionBase<Tr>::const_element_iterator
RegionBase<Tr>::element_begin() const {
return GraphTraits<const RegionT *>::nodes_begin(
static_cast<const RegionT *>(this));
}
template <class Tr>
typename RegionBase<Tr>::const_element_iterator
RegionBase<Tr>::element_end() const {
return GraphTraits<const RegionT *>::nodes_end(
static_cast<const RegionT *>(this));
}
template <class Tr>
typename Tr::RegionT *RegionBase<Tr>::getSubRegionNode(BlockT *BB) const {
typedef typename Tr::RegionT RegionT;
RegionT *R = RI->getRegionFor(BB);
if (!R || R == this)
return nullptr;
// If we pass the BB out of this region, that means our code is broken.
assert(contains(R) && "BB not in current region!");
while (contains(R->getParent()) && R->getParent() != this)
R = R->getParent();
if (R->getEntry() != BB)
return nullptr;
return R;
}
template <class Tr>
typename Tr::RegionNodeT *RegionBase<Tr>::getBBNode(BlockT *BB) const {
assert(contains(BB) && "Can get BB node out of this region!");
typename BBNodeMapT::const_iterator at = BBNodeMap.find(BB);
if (at != BBNodeMap.end())
return at->second;
auto Deconst = const_cast<RegionBase<Tr> *>(this);
RegionNodeT *NewNode = new RegionNodeT(static_cast<RegionT *>(Deconst), BB);
BBNodeMap.insert(std::make_pair(BB, NewNode));
return NewNode;
}
template <class Tr>
typename Tr::RegionNodeT *RegionBase<Tr>::getNode(BlockT *BB) const {
assert(contains(BB) && "Can get BB node out of this region!");
if (RegionT *Child = getSubRegionNode(BB))
return Child->getNode();
return getBBNode(BB);
}
template <class Tr>
void RegionBase<Tr>::transferChildrenTo(RegionT *To) {
for (iterator I = begin(), E = end(); I != E; ++I) {
(*I)->parent = To;
To->children.push_back(std::move(*I));
}
children.clear();
}
template <class Tr>
void RegionBase<Tr>::addSubRegion(RegionT *SubRegion, bool moveChildren) {
assert(!SubRegion->parent && "SubRegion already has a parent!");
assert(std::find_if(begin(), end(), [&](const std::unique_ptr<RegionT> &R) {
return R.get() == SubRegion;
}) == children.end() &&
"Subregion already exists!");
SubRegion->parent = static_cast<RegionT *>(this);
children.push_back(std::unique_ptr<RegionT>(SubRegion));
if (!moveChildren)
return;
assert(SubRegion->children.empty() &&
"SubRegions that contain children are not supported");
for (element_iterator I = element_begin(), E = element_end(); I != E; ++I) {
if (!(*I)->isSubRegion()) {
BlockT *BB = (*I)->template getNodeAs<BlockT>();
if (SubRegion->contains(BB))
RI->setRegionFor(BB, SubRegion);
}
}
std::vector<std::unique_ptr<RegionT>> Keep;
for (iterator I = begin(), E = end(); I != E; ++I) {
if (SubRegion->contains(I->get()) && I->get() != SubRegion) {
(*I)->parent = SubRegion;
SubRegion->children.push_back(std::move(*I));
} else
Keep.push_back(std::move(*I));
}
children.clear();
children.insert(
children.begin(),
std::move_iterator<typename RegionSet::iterator>(Keep.begin()),
std::move_iterator<typename RegionSet::iterator>(Keep.end()));
}
template <class Tr>
typename Tr::RegionT *RegionBase<Tr>::removeSubRegion(RegionT *Child) {
assert(Child->parent == this && "Child is not a child of this region!");
Child->parent = nullptr;
typename RegionSet::iterator I = std::find_if(
children.begin(), children.end(),
[&](const std::unique_ptr<RegionT> &R) { return R.get() == Child; });
assert(I != children.end() && "Region does not exist. Unable to remove.");
children.erase(children.begin() + (I - begin()));
return Child;
}
template <class Tr>
unsigned RegionBase<Tr>::getDepth() const {
unsigned Depth = 0;
for (RegionT *R = getParent(); R != nullptr; R = R->getParent())
++Depth;
return Depth;
}
template <class Tr>
typename Tr::RegionT *RegionBase<Tr>::getExpandedRegion() const {
unsigned NumSuccessors = Tr::getNumSuccessors(exit);
if (NumSuccessors == 0)
return nullptr;
for (PredIterTy PI = InvBlockTraits::child_begin(getExit()),
PE = InvBlockTraits::child_end(getExit());
PI != PE; ++PI) {
if (!DT->dominates(getEntry(), *PI))
return nullptr;
}
RegionT *R = RI->getRegionFor(exit);
if (R->getEntry() != exit) {
if (Tr::getNumSuccessors(exit) == 1)
return new RegionT(getEntry(), *BlockTraits::child_begin(exit), RI, DT);
return nullptr;
}
while (R->getParent() && R->getParent()->getEntry() == exit)
R = R->getParent();
if (!DT->dominates(getEntry(), R->getExit())) {
for (PredIterTy PI = InvBlockTraits::child_begin(getExit()),
PE = InvBlockTraits::child_end(getExit());
PI != PE; ++PI) {
if (!DT->dominates(R->getExit(), *PI))
return nullptr;
}
}
return new RegionT(getEntry(), R->getExit(), RI, DT);
}
template <class Tr>
void RegionBase<Tr>::print(raw_ostream &OS, bool print_tree, unsigned level,
PrintStyle Style) const {
if (print_tree)
OS.indent(level * 2) << '[' << level << "] " << getNameStr();
else
OS.indent(level * 2) << getNameStr();
OS << '\n';
if (Style != PrintNone) {
OS.indent(level * 2) << "{\n";
OS.indent(level * 2 + 2);
if (Style == PrintBB) {
for (const auto *BB : blocks())
OS << BB->getName() << ", "; // TODO: remove the last ","
} else if (Style == PrintRN) {
for (const_element_iterator I = element_begin(), E = element_end();
I != E; ++I) {
OS << **I << ", "; // TODO: remove the last ","
}
}
OS << '\n';
}
if (print_tree) {
for (const_iterator RI = begin(), RE = end(); RI != RE; ++RI)
(*RI)->print(OS, print_tree, level + 1, Style);
}
if (Style != PrintNone)
OS.indent(level * 2) << "} \n";
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
template <class Tr>
void RegionBase<Tr>::dump() const {
print(dbgs(), true, getDepth(), RegionInfoBase<Tr>::printStyle);
}
#endif
template <class Tr>
void RegionBase<Tr>::clearNodeCache() {
// Free the cached nodes.
for (typename BBNodeMapT::iterator I = BBNodeMap.begin(),
IE = BBNodeMap.end();
I != IE; ++I)
delete I->second;
BBNodeMap.clear();
for (typename RegionT::iterator RI = begin(), RE = end(); RI != RE; ++RI)
(*RI)->clearNodeCache();
}
// //
///////////////////////////////////////////////////////////////////////////////
// RegionInfoBase implementation
//
template <class Tr>
RegionInfoBase<Tr>::RegionInfoBase()
: TopLevelRegion(nullptr) {}
template <class Tr>
RegionInfoBase<Tr>::~RegionInfoBase() {
releaseMemory();
}
template <class Tr>
bool RegionInfoBase<Tr>::isCommonDomFrontier(BlockT *BB, BlockT *entry,
BlockT *exit) const {
for (PredIterTy PI = InvBlockTraits::child_begin(BB),
PE = InvBlockTraits::child_end(BB);
PI != PE; ++PI) {
BlockT *P = *PI;
if (DT->dominates(entry, P) && !DT->dominates(exit, P))
return false;
}
return true;
}
template <class Tr>
bool RegionInfoBase<Tr>::isRegion(BlockT *entry, BlockT *exit) const {
assert(entry && exit && "entry and exit must not be null!");
typedef typename DomFrontierT::DomSetType DST;
DST *entrySuccs = &DF->find(entry)->second;
// Exit is the header of a loop that contains the entry. In this case,
// the dominance frontier must only contain the exit.
if (!DT->dominates(entry, exit)) {
for (typename DST::iterator SI = entrySuccs->begin(),
SE = entrySuccs->end();
SI != SE; ++SI) {
if (*SI != exit && *SI != entry)
return false;
}
return true;
}
DST *exitSuccs = &DF->find(exit)->second;
// Do not allow edges leaving the region.
for (typename DST::iterator SI = entrySuccs->begin(), SE = entrySuccs->end();
SI != SE; ++SI) {
if (*SI == exit || *SI == entry)
continue;
if (exitSuccs->find(*SI) == exitSuccs->end())
return false;
if (!isCommonDomFrontier(*SI, entry, exit))
return false;
}
// Do not allow edges pointing into the region.
for (typename DST::iterator SI = exitSuccs->begin(), SE = exitSuccs->end();
SI != SE; ++SI) {
if (DT->properlyDominates(entry, *SI) && *SI != exit)
return false;
}
return true;
}
template <class Tr>
void RegionInfoBase<Tr>::insertShortCut(BlockT *entry, BlockT *exit,
BBtoBBMap *ShortCut) const {
assert(entry && exit && "entry and exit must not be null!");
typename BBtoBBMap::iterator e = ShortCut->find(exit);
if (e == ShortCut->end())
// No further region at exit available.
(*ShortCut)[entry] = exit;
else {
// We found a region e that starts at exit. Therefore (entry, e->second)
// is also a region that is larger than (entry, exit). Insert the
// larger one.
BlockT *BB = e->second;
(*ShortCut)[entry] = BB;
}
}
template <class Tr>
typename Tr::DomTreeNodeT *
RegionInfoBase<Tr>::getNextPostDom(DomTreeNodeT *N, BBtoBBMap *ShortCut) const {
typename BBtoBBMap::iterator e = ShortCut->find(N->getBlock());
if (e == ShortCut->end())
return N->getIDom();
return PDT->getNode(e->second)->getIDom();
}
template <class Tr>
bool RegionInfoBase<Tr>::isTrivialRegion(BlockT *entry, BlockT *exit) const {
assert(entry && exit && "entry and exit must not be null!");
unsigned num_successors =
BlockTraits::child_end(entry) - BlockTraits::child_begin(entry);
if (num_successors <= 1 && exit == *(BlockTraits::child_begin(entry)))
return true;
return false;
}
template <class Tr>
typename Tr::RegionT *RegionInfoBase<Tr>::createRegion(BlockT *entry,
BlockT *exit) {
assert(entry && exit && "entry and exit must not be null!");
if (isTrivialRegion(entry, exit))
return nullptr;
RegionT *region =
new RegionT(entry, exit, static_cast<RegionInfoT *>(this), DT);
BBtoRegion.insert(std::make_pair(entry, region));
#ifdef XDEBUG
region->verifyRegion();
#else
DEBUG(region->verifyRegion());
#endif
updateStatistics(region);
return region;
}
template <class Tr>
void RegionInfoBase<Tr>::findRegionsWithEntry(BlockT *entry,
BBtoBBMap *ShortCut) {
assert(entry);
DomTreeNodeT *N = PDT->getNode(entry);
if (!N)
return;
RegionT *lastRegion = nullptr;
BlockT *lastExit = entry;
// As only a BasicBlock that postdominates entry can finish a region, walk the
// post dominance tree upwards.
while ((N = getNextPostDom(N, ShortCut))) {
BlockT *exit = N->getBlock();
if (!exit)
break;
if (isRegion(entry, exit)) {
RegionT *newRegion = createRegion(entry, exit);
if (lastRegion)
newRegion->addSubRegion(lastRegion);
lastRegion = newRegion;
lastExit = exit;
}
// This can never be a region, so stop the search.
if (!DT->dominates(entry, exit))
break;
}
// Tried to create regions from entry to lastExit. Next time take a
// shortcut from entry to lastExit.
if (lastExit != entry)
insertShortCut(entry, lastExit, ShortCut);
}
template <class Tr>
void RegionInfoBase<Tr>::scanForRegions(FuncT &F, BBtoBBMap *ShortCut) {
typedef typename std::add_pointer<FuncT>::type FuncPtrT;
BlockT *entry = GraphTraits<FuncPtrT>::getEntryNode(&F);
DomTreeNodeT *N = DT->getNode(entry);
// Iterate over the dominance tree in post order to start with the small
// regions from the bottom of the dominance tree. If the small regions are
// detected first, detection of bigger regions is faster, as we can jump
// over the small regions.
for (auto DomNode : post_order(N))
findRegionsWithEntry(DomNode->getBlock(), ShortCut);
}
template <class Tr>
typename Tr::RegionT *RegionInfoBase<Tr>::getTopMostParent(RegionT *region) {
while (region->getParent())
region = region->getParent();
return region;
}
template <class Tr>
void RegionInfoBase<Tr>::buildRegionsTree(DomTreeNodeT *N, RegionT *region) {
BlockT *BB = N->getBlock();
// Passed region exit
while (BB == region->getExit())
region = region->getParent();
typename BBtoRegionMap::iterator it = BBtoRegion.find(BB);
// This basic block is a start block of a region. It is already in the
// BBtoRegion relation. Only the child basic blocks have to be updated.
if (it != BBtoRegion.end()) {
RegionT *newRegion = it->second;
region->addSubRegion(getTopMostParent(newRegion));
region = newRegion;
} else {
BBtoRegion[BB] = region;
}
for (typename DomTreeNodeT::iterator CI = N->begin(), CE = N->end(); CI != CE;
++CI) {
buildRegionsTree(*CI, region);
}
}
#ifdef XDEBUG
template <class Tr>
bool RegionInfoBase<Tr>::VerifyRegionInfo = true;
#else
template <class Tr>
bool RegionInfoBase<Tr>::VerifyRegionInfo = false;
#endif
template <class Tr>
typename Tr::RegionT::PrintStyle RegionInfoBase<Tr>::printStyle =
RegionBase<Tr>::PrintNone;
template <class Tr>
void RegionInfoBase<Tr>::print(raw_ostream &OS) const {
OS << "Region tree:\n";
TopLevelRegion->print(OS, true, 0, printStyle);
OS << "End region tree\n";
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
template <class Tr>
void RegionInfoBase<Tr>::dump() const { print(dbgs()); }
#endif
template <class Tr>
void RegionInfoBase<Tr>::releaseMemory() {
BBtoRegion.clear();
if (TopLevelRegion)
delete TopLevelRegion;
TopLevelRegion = nullptr;
}
template <class Tr>
void RegionInfoBase<Tr>::verifyAnalysis() const {
TopLevelRegion->verifyRegionNest();
}
// Region pass manager support.
template <class Tr>
typename Tr::RegionT *RegionInfoBase<Tr>::getRegionFor(BlockT *BB) const {
typename BBtoRegionMap::const_iterator I = BBtoRegion.find(BB);
return I != BBtoRegion.end() ? I->second : nullptr;
}
template <class Tr>
void RegionInfoBase<Tr>::setRegionFor(BlockT *BB, RegionT *R) {
BBtoRegion[BB] = R;
}
template <class Tr>
typename Tr::RegionT *RegionInfoBase<Tr>::operator[](BlockT *BB) const {
return getRegionFor(BB);
}
template <class Tr>
typename RegionInfoBase<Tr>::BlockT *
RegionInfoBase<Tr>::getMaxRegionExit(BlockT *BB) const {
BlockT *Exit = nullptr;
while (true) {
// Get largest region that starts at BB.
RegionT *R = getRegionFor(BB);
while (R && R->getParent() && R->getParent()->getEntry() == BB)
R = R->getParent();
// Get the single exit of BB.
if (R && R->getEntry() == BB)
Exit = R->getExit();
else if (++BlockTraits::child_begin(BB) == BlockTraits::child_end(BB))
Exit = *BlockTraits::child_begin(BB);
else // No single exit exists.
return Exit;
// Get largest region that starts at Exit.
RegionT *ExitR = getRegionFor(Exit);
while (ExitR && ExitR->getParent() &&
ExitR->getParent()->getEntry() == Exit)
ExitR = ExitR->getParent();
for (PredIterTy PI = InvBlockTraits::child_begin(Exit),
PE = InvBlockTraits::child_end(Exit);
PI != PE; ++PI) {
if (!R->contains(*PI) && !ExitR->contains(*PI))
break;
}
// This stops infinite cycles.
if (DT->dominates(Exit, BB))
break;
BB = Exit;
}
return Exit;
}
template <class Tr>
typename Tr::RegionT *RegionInfoBase<Tr>::getCommonRegion(RegionT *A,
RegionT *B) const {
assert(A && B && "One of the Regions is NULL");
if (A->contains(B))
return A;
while (!B->contains(A))
B = B->getParent();
return B;
}
template <class Tr>
typename Tr::RegionT *
RegionInfoBase<Tr>::getCommonRegion(SmallVectorImpl<RegionT *> &Regions) const {
RegionT *ret = Regions.back();
Regions.pop_back();
for (RegionT *R : Regions)
ret = getCommonRegion(ret, R);
return ret;
}
template <class Tr>
typename Tr::RegionT *
RegionInfoBase<Tr>::getCommonRegion(SmallVectorImpl<BlockT *> &BBs) const {
RegionT *ret = getRegionFor(BBs.back());
BBs.pop_back();
for (BlockT *BB : BBs)
ret = getCommonRegion(ret, getRegionFor(BB));
return ret;
}
template <class Tr>
void RegionInfoBase<Tr>::splitBlock(BlockT *NewBB, BlockT *OldBB) {
RegionT *R = getRegionFor(OldBB);
setRegionFor(NewBB, R);
while (R->getEntry() == OldBB && !R->isTopLevelRegion()) {
R->replaceEntry(NewBB);
R = R->getParent();
}
setRegionFor(OldBB, R);
}
template <class Tr>
void RegionInfoBase<Tr>::calculate(FuncT &F) {
typedef typename std::add_pointer<FuncT>::type FuncPtrT;
// ShortCut maps, for every BB, the exit of the largest region starting at
// BB. These regions can be treated as single BBs. This improves performance
// on linear CFGs.
BBtoBBMap ShortCut;
scanForRegions(F, &ShortCut);
BlockT *BB = GraphTraits<FuncPtrT>::getEntryNode(&F);
buildRegionsTree(DT->getNode(BB), TopLevelRegion);
}
#undef DEBUG_TYPE
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/CGSCCPassManager.h | //===- CGSCCPassManager.h - Call graph pass management ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This header provides classes for managing passes over SCCs of the call
/// graph. These passes form an important component of LLVM's interprocedural
/// optimizations. Because they operate on the SCCs of the call graph, and they
/// traverse the graph in post order, they can effectively do pair-wise
/// interprocedural optimizations for all call edges in the program. At each
/// call site edge, the callee has already been optimized as much as is
/// possible. This in turn allows very accurate analysis of it for IPO.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_CGSCCPASSMANAGER_H
#define LLVM_ANALYSIS_CGSCCPASSMANAGER_H
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/IR/PassManager.h"
namespace llvm {
/// \brief The CGSCC pass manager.
///
/// See the documentation for the PassManager template for details. It runs
/// a sequence of SCC passes over each SCC that the manager is run over. This
/// typedef serves as a convenient way to refer to this construct.
typedef PassManager<LazyCallGraph::SCC> CGSCCPassManager;
/// \brief The CGSCC analysis manager.
///
/// See the documentation for the AnalysisManager template for detailed
/// documentation. This typedef serves as a convenient way to refer to this
/// construct in the adaptors and proxies used to integrate this into the larger
/// pass manager infrastructure.
typedef AnalysisManager<LazyCallGraph::SCC> CGSCCAnalysisManager;
/// \brief A module analysis which acts as a proxy for a CGSCC analysis
/// manager.
///
/// This primarily proxies invalidation information from the module analysis
/// manager and module pass manager to a CGSCC analysis manager. You should
/// never use a CGSCC analysis manager from within (transitively) a module
/// pass manager unless your parent module pass has received a proxy result
/// object for it.
class CGSCCAnalysisManagerModuleProxy {
public:
class Result {
public:
explicit Result(CGSCCAnalysisManager &CGAM) : CGAM(&CGAM) {}
// We have to explicitly define all the special member functions because
// MSVC refuses to generate them.
Result(const Result &Arg) : CGAM(Arg.CGAM) {}
Result(Result &&Arg) : CGAM(std::move(Arg.CGAM)) {}
Result &operator=(Result RHS) {
std::swap(CGAM, RHS.CGAM);
return *this;
}
~Result();
/// \brief Accessor for the \c CGSCCAnalysisManager.
CGSCCAnalysisManager &getManager() { return *CGAM; }
/// \brief Handler for invalidation of the module.
///
/// If this analysis itself is preserved, then we assume that the call
/// graph of the module hasn't changed and thus we don't need to invalidate
/// *all* cached data associated with a \c SCC* in the \c
/// CGSCCAnalysisManager.
///
/// Regardless of whether this analysis is marked as preserved, all of the
/// analyses in the \c CGSCCAnalysisManager are potentially invalidated
/// based on the set of preserved analyses.
bool invalidate(Module &M, const PreservedAnalyses &PA);
private:
CGSCCAnalysisManager *CGAM;
};
static void *ID() { return (void *)&PassID; }
static StringRef name() { return "CGSCCAnalysisManagerModuleProxy"; }
explicit CGSCCAnalysisManagerModuleProxy(CGSCCAnalysisManager &CGAM)
: CGAM(&CGAM) {}
// We have to explicitly define all the special member functions because MSVC
// refuses to generate them.
CGSCCAnalysisManagerModuleProxy(const CGSCCAnalysisManagerModuleProxy &Arg)
: CGAM(Arg.CGAM) {}
CGSCCAnalysisManagerModuleProxy(CGSCCAnalysisManagerModuleProxy &&Arg)
: CGAM(std::move(Arg.CGAM)) {}
CGSCCAnalysisManagerModuleProxy &
operator=(CGSCCAnalysisManagerModuleProxy RHS) {
std::swap(CGAM, RHS.CGAM);
return *this;
}
/// \brief Run the analysis pass and create our proxy result object.
///
/// This doesn't do any interesting work, it is primarily used to insert our
/// proxy result object into the module analysis cache so that we can proxy
/// invalidation to the CGSCC analysis manager.
///
/// In debug builds, it will also assert that the analysis manager is empty
/// as no queries should arrive at the CGSCC analysis manager prior to
/// this analysis being requested.
Result run(Module &M);
private:
static char PassID;
CGSCCAnalysisManager *CGAM;
};
/// \brief A CGSCC analysis which acts as a proxy for a module analysis
/// manager.
///
/// This primarily provides an accessor to a parent module analysis manager to
/// CGSCC passes. Only the const interface of the module analysis manager is
/// provided to indicate that once inside of a CGSCC analysis pass you
/// cannot request a module analysis to actually run. Instead, the user must
/// rely on the \c getCachedResult API.
///
/// This proxy *doesn't* manage the invalidation in any way. That is handled by
/// the recursive return path of each layer of the pass manager and the
/// returned PreservedAnalysis set.
class ModuleAnalysisManagerCGSCCProxy {
public:
/// \brief Result proxy object for \c ModuleAnalysisManagerCGSCCProxy.
class Result {
public:
explicit Result(const ModuleAnalysisManager &MAM) : MAM(&MAM) {}
// We have to explicitly define all the special member functions because
// MSVC refuses to generate them.
Result(const Result &Arg) : MAM(Arg.MAM) {}
Result(Result &&Arg) : MAM(std::move(Arg.MAM)) {}
Result &operator=(Result RHS) {
std::swap(MAM, RHS.MAM);
return *this;
}
const ModuleAnalysisManager &getManager() const { return *MAM; }
/// \brief Handle invalidation by ignoring it, this pass is immutable.
bool invalidate(LazyCallGraph::SCC &) { return false; }
private:
const ModuleAnalysisManager *MAM;
};
static void *ID() { return (void *)&PassID; }
static StringRef name() { return "ModuleAnalysisManagerCGSCCProxy"; }
ModuleAnalysisManagerCGSCCProxy(const ModuleAnalysisManager &MAM)
: MAM(&MAM) {}
// We have to explicitly define all the special member functions because MSVC
// refuses to generate them.
ModuleAnalysisManagerCGSCCProxy(const ModuleAnalysisManagerCGSCCProxy &Arg)
: MAM(Arg.MAM) {}
ModuleAnalysisManagerCGSCCProxy(ModuleAnalysisManagerCGSCCProxy &&Arg)
: MAM(std::move(Arg.MAM)) {}
ModuleAnalysisManagerCGSCCProxy &
operator=(ModuleAnalysisManagerCGSCCProxy RHS) {
std::swap(MAM, RHS.MAM);
return *this;
}
/// \brief Run the analysis pass and create our proxy result object.
/// Nothing to see here, it just forwards the \c MAM reference into the
/// result.
Result run(LazyCallGraph::SCC &) { return Result(*MAM); }
private:
static char PassID;
const ModuleAnalysisManager *MAM;
};
/// \brief The core module pass which does a post-order walk of the SCCs and
/// runs a CGSCC pass over each one.
///
/// Designed to allow composition of a CGSCCPass(Manager) and
/// a ModulePassManager. Note that this pass must be run with a module analysis
/// manager as it uses the LazyCallGraph analysis. It will also run the
/// \c CGSCCAnalysisManagerModuleProxy analysis prior to running the CGSCC
/// pass over the module to enable a \c FunctionAnalysisManager to be used
/// within this run safely.
template <typename CGSCCPassT> class ModuleToPostOrderCGSCCPassAdaptor {
public:
explicit ModuleToPostOrderCGSCCPassAdaptor(CGSCCPassT Pass)
: Pass(std::move(Pass)) {}
// We have to explicitly define all the special member functions because MSVC
// refuses to generate them.
ModuleToPostOrderCGSCCPassAdaptor(
const ModuleToPostOrderCGSCCPassAdaptor &Arg)
: Pass(Arg.Pass) {}
ModuleToPostOrderCGSCCPassAdaptor(ModuleToPostOrderCGSCCPassAdaptor &&Arg)
: Pass(std::move(Arg.Pass)) {}
friend void swap(ModuleToPostOrderCGSCCPassAdaptor &LHS,
ModuleToPostOrderCGSCCPassAdaptor &RHS) {
using std::swap;
swap(LHS.Pass, RHS.Pass);
}
ModuleToPostOrderCGSCCPassAdaptor &
operator=(ModuleToPostOrderCGSCCPassAdaptor RHS) {
swap(*this, RHS);
return *this;
}
/// \brief Runs the CGSCC pass across every SCC in the module.
PreservedAnalyses run(Module &M, ModuleAnalysisManager *AM) {
assert(AM && "We need analyses to compute the call graph!");
// Setup the CGSCC analysis manager from its proxy.
CGSCCAnalysisManager &CGAM =
AM->getResult<CGSCCAnalysisManagerModuleProxy>(M).getManager();
// Get the call graph for this module.
LazyCallGraph &CG = AM->getResult<LazyCallGraphAnalysis>(M);
PreservedAnalyses PA = PreservedAnalyses::all();
for (LazyCallGraph::SCC &C : CG.postorder_sccs()) {
PreservedAnalyses PassPA = Pass.run(C, &CGAM);
// We know that the CGSCC pass couldn't have invalidated any other
// SCC's analyses (that's the contract of a CGSCC pass), so
// directly handle the CGSCC analysis manager's invalidation here. We
// also update the preserved set of analyses to reflect that invalidated
// analyses are now safe to preserve.
// FIXME: This isn't quite correct. We need to handle the case where the
// pass updated the CG, particularly some child of the current SCC, and
// invalidate its analyses.
PassPA = CGAM.invalidate(C, std::move(PassPA));
// Then intersect the preserved set so that invalidation of module
// analyses will eventually occur when the module pass completes.
PA.intersect(std::move(PassPA));
}
// By definition we preserve the proxy. This precludes *any* invalidation
// of CGSCC analyses by the proxy, but that's OK because we've taken
// care to invalidate analyses in the CGSCC analysis manager
// incrementally above.
PA.preserve<CGSCCAnalysisManagerModuleProxy>();
return PA;
}
static StringRef name() { return "ModuleToPostOrderCGSCCPassAdaptor"; }
private:
CGSCCPassT Pass;
};
/// \brief A function to deduce a CGSCC pass type and wrap it in the
/// templated adaptor.
template <typename CGSCCPassT>
ModuleToPostOrderCGSCCPassAdaptor<CGSCCPassT>
createModuleToPostOrderCGSCCPassAdaptor(CGSCCPassT Pass) {
return ModuleToPostOrderCGSCCPassAdaptor<CGSCCPassT>(std::move(Pass));
}
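// Example (illustrative sketch; MySCCPass is a hypothetical CGSCC pass with a
// run(LazyCallGraph::SCC &, CGSCCAnalysisManager *) method, and the module
// analysis manager is assumed to have the LazyCallGraphAnalysis and the
// CGSCCAnalysisManagerModuleProxy registered):
//   ModulePassManager MPM;
//   MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(MySCCPass()));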
/// \brief A CGSCC analysis which acts as a proxy for a function analysis
/// manager.
///
/// This primarily proxies invalidation information from the CGSCC analysis
/// manager and CGSCC pass manager to a function analysis manager. You should
/// never use a function analysis manager from within (transitively) a CGSCC
/// pass manager unless your parent CGSCC pass has received a proxy result
/// object for it.
class FunctionAnalysisManagerCGSCCProxy {
public:
class Result {
public:
explicit Result(FunctionAnalysisManager &FAM) : FAM(&FAM) {}
// We have to explicitly define all the special member functions because
// MSVC refuses to generate them.
Result(const Result &Arg) : FAM(Arg.FAM) {}
Result(Result &&Arg) : FAM(std::move(Arg.FAM)) {}
Result &operator=(Result RHS) {
std::swap(FAM, RHS.FAM);
return *this;
}
~Result();
/// \brief Accessor for the \c FunctionAnalysisManager.
FunctionAnalysisManager &getManager() { return *FAM; }
/// \brief Handler for invalidation of the SCC.
///
/// If this analysis itself is preserved, then we assume that the set of \c
/// Function objects in the \c SCC hasn't changed and thus we don't need
/// to invalidate *all* cached data associated with a \c Function* in the \c
/// FunctionAnalysisManager.
///
/// Regardless of whether this analysis is marked as preserved, all of the
/// analyses in the \c FunctionAnalysisManager are potentially invalidated
/// based on the set of preserved analyses.
bool invalidate(LazyCallGraph::SCC &C, const PreservedAnalyses &PA);
private:
FunctionAnalysisManager *FAM;
};
static void *ID() { return (void *)&PassID; }
static StringRef name() { return "FunctionAnalysisManagerCGSCCProxy"; }
explicit FunctionAnalysisManagerCGSCCProxy(FunctionAnalysisManager &FAM)
: FAM(&FAM) {}
// We have to explicitly define all the special member functions because MSVC
// refuses to generate them.
FunctionAnalysisManagerCGSCCProxy(
const FunctionAnalysisManagerCGSCCProxy &Arg)
: FAM(Arg.FAM) {}
FunctionAnalysisManagerCGSCCProxy(FunctionAnalysisManagerCGSCCProxy &&Arg)
: FAM(std::move(Arg.FAM)) {}
FunctionAnalysisManagerCGSCCProxy &
operator=(FunctionAnalysisManagerCGSCCProxy RHS) {
std::swap(FAM, RHS.FAM);
return *this;
}
/// \brief Run the analysis pass and create our proxy result object.
///
/// This doesn't do any interesting work, it is primarily used to insert our
/// proxy result object into the CGSCC analysis cache so that we can proxy
/// invalidation to the function analysis manager.
///
/// In debug builds, it will also assert that the analysis manager is empty
/// as no queries should arrive at the function analysis manager prior to
/// this analysis being requested.
Result run(LazyCallGraph::SCC &C);
private:
static char PassID;
FunctionAnalysisManager *FAM;
};
/// \brief A function analysis which acts as a proxy for a CGSCC analysis
/// manager.
///
/// This primarily provides an accessor to a parent CGSCC analysis manager to
/// function passes. Only the const interface of the CGSCC analysis manager is
/// provided to indicate that once inside of a function analysis pass you
/// cannot request a CGSCC analysis to actually run. Instead, the user must
/// rely on the \c getCachedResult API.
///
/// This proxy *doesn't* manage the invalidation in any way. That is handled by
/// the recursive return path of each layer of the pass manager and the
/// returned PreservedAnalysis set.
class CGSCCAnalysisManagerFunctionProxy {
public:
/// \brief Result proxy object for \c CGSCCAnalysisManagerFunctionProxy.
class Result {
public:
explicit Result(const CGSCCAnalysisManager &CGAM) : CGAM(&CGAM) {}
// We have to explicitly define all the special member functions because
// MSVC refuses to generate them.
Result(const Result &Arg) : CGAM(Arg.CGAM) {}
Result(Result &&Arg) : CGAM(std::move(Arg.CGAM)) {}
Result &operator=(Result RHS) {
std::swap(CGAM, RHS.CGAM);
return *this;
}
const CGSCCAnalysisManager &getManager() const { return *CGAM; }
/// \brief Handle invalidation by ignoring it, this pass is immutable.
bool invalidate(Function &) { return false; }
private:
const CGSCCAnalysisManager *CGAM;
};
static void *ID() { return (void *)&PassID; }
static StringRef name() { return "CGSCCAnalysisManagerFunctionProxy"; }
CGSCCAnalysisManagerFunctionProxy(const CGSCCAnalysisManager &CGAM)
: CGAM(&CGAM) {}
// We have to explicitly define all the special member functions because MSVC
// refuses to generate them.
CGSCCAnalysisManagerFunctionProxy(
const CGSCCAnalysisManagerFunctionProxy &Arg)
: CGAM(Arg.CGAM) {}
CGSCCAnalysisManagerFunctionProxy(CGSCCAnalysisManagerFunctionProxy &&Arg)
: CGAM(std::move(Arg.CGAM)) {}
CGSCCAnalysisManagerFunctionProxy &
operator=(CGSCCAnalysisManagerFunctionProxy RHS) {
std::swap(CGAM, RHS.CGAM);
return *this;
}
/// \brief Run the analysis pass and create our proxy result object.
/// Nothing to see here, it just forwards the \c CGAM reference into the
/// result.
Result run(Function &) { return Result(*CGAM); }
private:
static char PassID;
const CGSCCAnalysisManager *CGAM;
};
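/// Example (editorial sketch, not part of the original header): a function
/// pass reaching a cached CGSCC analysis result through this proxy. The
/// analysis type \c MyCGSCCAnalysis, the SCC \c C, the function \c F, and the
/// FunctionAnalysisManager pointer \c AM are assumed names used only for
/// illustration.
/// \code
///   const CGSCCAnalysisManager &CGAM =
///       AM->getResult<CGSCCAnalysisManagerFunctionProxy>(F).getManager();
///   if (const auto *R = CGAM.getCachedResult<MyCGSCCAnalysis>(C)) {
///     // Use *R; only cached results are available from inside a
///     // function pass.
///   }
/// \endcode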
/// \brief Adaptor that maps from a SCC to its functions.
///
/// Designed to allow composition of a FunctionPass(Manager) and
/// a CGSCCPassManager. Note that if this pass is constructed with a pointer
/// to a \c CGSCCAnalysisManager it will run the
/// \c FunctionAnalysisManagerCGSCCProxy analysis prior to running the function
/// pass over the SCC to enable a \c FunctionAnalysisManager to be used
/// within this run safely.
template <typename FunctionPassT> class CGSCCToFunctionPassAdaptor {
public:
explicit CGSCCToFunctionPassAdaptor(FunctionPassT Pass)
: Pass(std::move(Pass)) {}
// We have to explicitly define all the special member functions because MSVC
// refuses to generate them.
CGSCCToFunctionPassAdaptor(const CGSCCToFunctionPassAdaptor &Arg)
: Pass(Arg.Pass) {}
CGSCCToFunctionPassAdaptor(CGSCCToFunctionPassAdaptor &&Arg)
: Pass(std::move(Arg.Pass)) {}
friend void swap(CGSCCToFunctionPassAdaptor &LHS,
CGSCCToFunctionPassAdaptor &RHS) {
using std::swap;
swap(LHS.Pass, RHS.Pass);
}
CGSCCToFunctionPassAdaptor &operator=(CGSCCToFunctionPassAdaptor RHS) {
swap(*this, RHS);
return *this;
}
/// \brief Runs the function pass across every function in the SCC.
PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager *AM) {
FunctionAnalysisManager *FAM = nullptr;
if (AM)
// Setup the function analysis manager from its proxy.
FAM = &AM->getResult<FunctionAnalysisManagerCGSCCProxy>(C).getManager();
PreservedAnalyses PA = PreservedAnalyses::all();
for (LazyCallGraph::Node *N : C) {
PreservedAnalyses PassPA = Pass.run(N->getFunction(), FAM);
// We know that the function pass couldn't have invalidated any other
// function's analyses (that's the contract of a function pass), so
// directly handle the function analysis manager's invalidation here.
// Also, update the preserved analyses to reflect that once invalidated
// these can again be preserved.
if (FAM)
PassPA = FAM->invalidate(N->getFunction(), std::move(PassPA));
// Then intersect the preserved set so that invalidation of module
// analyses will eventually occur when the module pass completes.
PA.intersect(std::move(PassPA));
}
// By definition we preserve the proxy. This precludes *any* invalidation
// of function analyses by the proxy, but that's OK because we've taken
// care to invalidate analyses in the function analysis manager
// incrementally above.
// FIXME: We need to update the call graph here to account for any deleted
// edges!
PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
return PA;
}
static StringRef name() { return "CGSCCToFunctionPassAdaptor"; }
private:
FunctionPassT Pass;
};
/// \brief A function to deduce a function pass type and wrap it in the
/// templated adaptor.
template <typename FunctionPassT>
CGSCCToFunctionPassAdaptor<FunctionPassT>
createCGSCCToFunctionPassAdaptor(FunctionPassT Pass) {
return CGSCCToFunctionPassAdaptor<FunctionPassT>(std::move(Pass));
}
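/// Example (editorial sketch, not part of the original header): wrapping a
/// hypothetical function pass type \c MyFunctionPass so it can be scheduled
/// inside a CGSCC pass manager.
/// \code
///   CGSCCPassManager CGPM;
///   CGPM.addPass(createCGSCCToFunctionPassAdaptor(MyFunctionPass()));
/// \endcode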
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/IntervalIterator.h | //===- IntervalIterator.h - Interval Iterator Declaration -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an iterator that enumerates the intervals in a control flow
// graph of some sort. This iterator is parametric, allowing iteration over the
// following types of graphs:
//
// 1. A Function* object, composed of BasicBlock nodes.
// 2. An IntervalPartition& object, composed of Interval nodes.
//
// This iterator is defined to walk the control flow graph, returning intervals
// in depth first order. These intervals are completely filled in except for
// the predecessor fields (the successor information is filled in however).
//
// By default, the intervals created by this iterator are deleted after they
// are no longer of any use to the iterator. This behavior can be changed by
// passing a false value into the intervals_begin() function. This causes the
// IOwnMem member to be set, and the intervals to not be deleted.
//
// It is only safe to use this if all of the intervals are deleted by the caller
// and all of the intervals are processed. However, the user of the iterator is
// not allowed to modify or delete the intervals until after the iterator has
// been used completely. The IntervalPartition class uses this functionality.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_INTERVALITERATOR_H
#define LLVM_ANALYSIS_INTERVALITERATOR_H
#include "llvm/Analysis/IntervalPartition.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Function.h"
#include <algorithm>
#include <set>
#include <vector>
namespace llvm {
// getNodeHeader - Given a source graph node and the source graph, return the
// BasicBlock that is the header node. This is the opposite of
// getSourceGraphNode.
//
inline BasicBlock *getNodeHeader(BasicBlock *BB) { return BB; }
inline BasicBlock *getNodeHeader(Interval *I) { return I->getHeaderNode(); }
// getSourceGraphNode - Given a BasicBlock and the source graph, return the
// source graph node that corresponds to the BasicBlock. This is the opposite
// of getNodeHeader.
//
inline BasicBlock *getSourceGraphNode(Function *, BasicBlock *BB) {
return BB;
}
inline Interval *getSourceGraphNode(IntervalPartition *IP, BasicBlock *BB) {
return IP->getBlockInterval(BB);
}
// addNodeToInterval - This method exists to assist the generic ProcessNode
// with the task of adding a node to the new interval, depending on the
// type of the source node. In the case of a CFG source graph (BasicBlock
// case), the BasicBlock itself is added to the interval.
//
inline void addNodeToInterval(Interval *Int, BasicBlock *BB) {
Int->Nodes.push_back(BB);
}
// addNodeToInterval - This method exists to assist the generic ProcessNode
// with the task of adding a node to the new interval, depending on the
// type of the source node. In the case of a CFG source graph (BasicBlock
// case), the BasicBlock itself is added to the interval. In the case of
// an IntervalPartition source graph (Interval case), all of the member
// BasicBlocks are added to the interval.
//
inline void addNodeToInterval(Interval *Int, Interval *I) {
// Add all of the nodes in I as new nodes in Int.
Int->Nodes.insert(Int->Nodes.end(), I->Nodes.begin(), I->Nodes.end());
}
template<class NodeTy, class OrigContainer_t, class GT = GraphTraits<NodeTy*>,
class IGT = GraphTraits<Inverse<NodeTy*> > >
class IntervalIterator {
std::vector<std::pair<Interval*, typename Interval::succ_iterator> > IntStack;
std::set<BasicBlock*> Visited;
OrigContainer_t *OrigContainer;
bool IOwnMem; // If True, delete intervals when done with them
// See file header for conditions of use
public:
typedef std::forward_iterator_tag iterator_category;
IntervalIterator() {} // End iterator, empty stack
IntervalIterator(Function *M, bool OwnMemory) : IOwnMem(OwnMemory) {
OrigContainer = M;
if (!ProcessInterval(&M->front())) {
llvm_unreachable("ProcessInterval should never fail for first interval!");
}
}
IntervalIterator(IntervalIterator &&x)
: IntStack(std::move(x.IntStack)), Visited(std::move(x.Visited)),
OrigContainer(x.OrigContainer), IOwnMem(x.IOwnMem) {
x.IOwnMem = false;
}
IntervalIterator(IntervalPartition &IP, bool OwnMemory) : IOwnMem(OwnMemory) {
OrigContainer = &IP;
if (!ProcessInterval(IP.getRootInterval())) {
llvm_unreachable("ProcessInterval should never fail for first interval!");
}
}
~IntervalIterator() {
if (IOwnMem)
while (!IntStack.empty()) {
delete operator*();
IntStack.pop_back();
}
}
bool operator==(const IntervalIterator &x) const {
return IntStack == x.IntStack;
}
bool operator!=(const IntervalIterator &x) const { return !(*this == x); }
const Interval *operator*() const { return IntStack.back().first; }
Interval *operator*() { return IntStack.back().first; }
const Interval *operator->() const { return operator*(); }
Interval *operator->() { return operator*(); }
IntervalIterator &operator++() { // Preincrement
assert(!IntStack.empty() && "Attempting to use interval iterator at end!");
do {
// All of the intervals on the stack have been visited. Try visiting
// their successors now.
Interval::succ_iterator &SuccIt = IntStack.back().second,
EndIt = succ_end(IntStack.back().first);
while (SuccIt != EndIt) { // Loop over all interval succs
bool Done = ProcessInterval(getSourceGraphNode(OrigContainer, *SuccIt));
++SuccIt; // Increment iterator
if (Done) return *this; // Found a new interval! Use it!
}
// Free interval memory... if necessary
if (IOwnMem) delete IntStack.back().first;
// We ran out of successors for this interval... pop off the stack
IntStack.pop_back();
} while (!IntStack.empty());
return *this;
}
IntervalIterator operator++(int) { // Postincrement
IntervalIterator tmp = *this;
++*this;
return tmp;
}
private:
// ProcessInterval - This method is used during the construction of the
// interval graph. It walks through the source graph, recursively creating
// an interval per invocation until the entire graph is covered. This uses
// the ProcessNode method to add all of the nodes to the interval.
//
// This method is templated because it may operate on two different source
// graphs: a basic block graph, or a preexisting interval graph.
//
bool ProcessInterval(NodeTy *Node) {
BasicBlock *Header = getNodeHeader(Node);
if (!Visited.insert(Header).second)
return false;
Interval *Int = new Interval(Header);
// Check all of our successors to see if they are in the interval...
for (typename GT::ChildIteratorType I = GT::child_begin(Node),
E = GT::child_end(Node); I != E; ++I)
ProcessNode(Int, getSourceGraphNode(OrigContainer, *I));
IntStack.push_back(std::make_pair(Int, succ_begin(Int)));
return true;
}
// ProcessNode - This method is called by ProcessInterval to add nodes to the
// interval being constructed, and it is also called recursively as it walks
// the source graph. A node is added to the current interval only if all of
// its predecessors are already in the graph. This also takes care of keeping
// the successor set of an interval up to date.
//
// This method is templated because it may operate on two different source
// graphs: a basic block graph, or a preexisting interval graph.
//
void ProcessNode(Interval *Int, NodeTy *Node) {
assert(Int && "Null interval == bad!");
assert(Node && "Null Node == bad!");
BasicBlock *NodeHeader = getNodeHeader(Node);
if (Visited.count(NodeHeader)) { // Node already been visited?
if (Int->contains(NodeHeader)) { // Already in this interval...
return;
} else { // In other interval, add as successor
if (!Int->isSuccessor(NodeHeader)) // Add only if not already in set
Int->Successors.push_back(NodeHeader);
}
} else { // Otherwise, not in interval yet
for (typename IGT::ChildIteratorType I = IGT::child_begin(Node),
E = IGT::child_end(Node); I != E; ++I) {
if (!Int->contains(*I)) { // If pred not in interval, we can't be
if (!Int->isSuccessor(NodeHeader)) // Add only if not already in set
Int->Successors.push_back(NodeHeader);
return; // See you later
}
}
// If we get here, then all of the predecessors of BB are in the interval
// already. In this case, we must add BB to the interval!
addNodeToInterval(Int, Node);
Visited.insert(NodeHeader); // The node has now been visited!
if (Int->isSuccessor(NodeHeader)) {
// If we were in the successor list from before... remove from succ list
Int->Successors.erase(std::remove(Int->Successors.begin(),
Int->Successors.end(), NodeHeader),
Int->Successors.end());
}
// Now that we have discovered that Node is in the interval, perhaps some
// of its successors are as well?
for (typename GT::ChildIteratorType It = GT::child_begin(Node),
End = GT::child_end(Node); It != End; ++It)
ProcessNode(Int, getSourceGraphNode(OrigContainer, *It));
}
}
};
typedef IntervalIterator<BasicBlock, Function> function_interval_iterator;
typedef IntervalIterator<Interval, IntervalPartition>
interval_part_interval_iterator;
inline function_interval_iterator intervals_begin(Function *F,
bool DeleteInts = true) {
return function_interval_iterator(F, DeleteInts);
}
inline function_interval_iterator intervals_end(Function *) {
return function_interval_iterator();
}
inline interval_part_interval_iterator
intervals_begin(IntervalPartition &IP, bool DeleteIntervals = true) {
return interval_part_interval_iterator(IP, DeleteIntervals);
}
inline interval_part_interval_iterator intervals_end(IntervalPartition &IP) {
return interval_part_interval_iterator();
}
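// Example (editorial sketch, not part of the original header): visiting the
// intervals of a function F in depth-first order, letting the iterator own
// and delete each interval once it has been printed.
//
//   for (function_interval_iterator I = intervals_begin(F),
//                                   E = intervals_end(F);
//        I != E; ++I)
//     I->print(errs());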
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/Interval.h | //===- llvm/Analysis/Interval.h - Interval Class Declaration ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the Interval class, which
// represents a set of CFG nodes and is a portion of an interval partition.
//
// Intervals have some interesting and useful properties, including the
// following:
// 1. The header node of an interval dominates all of the elements of the
// interval
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_INTERVAL_H
#define LLVM_ANALYSIS_INTERVAL_H
#include "llvm/ADT/GraphTraits.h"
#include <vector>
namespace llvm {
class BasicBlock;
class raw_ostream;
//===----------------------------------------------------------------------===//
//
/// Interval Class - An Interval is a set of nodes defined such that every node
/// in the interval has all of its predecessors in the interval (except for the
/// header)
///
class Interval {
/// HeaderNode - The header BasicBlock, which dominates all BasicBlocks in this
/// interval. Also, any loops in this interval must go through the HeaderNode.
///
BasicBlock *HeaderNode;
public:
typedef std::vector<BasicBlock*>::iterator succ_iterator;
typedef std::vector<BasicBlock*>::iterator pred_iterator;
typedef std::vector<BasicBlock*>::iterator node_iterator;
inline Interval(BasicBlock *Header) : HeaderNode(Header) {
Nodes.push_back(Header);
}
inline BasicBlock *getHeaderNode() const { return HeaderNode; }
/// Nodes - The basic blocks in this interval.
///
std::vector<BasicBlock*> Nodes;
/// Successors - List of BasicBlocks that are reachable directly from nodes in
/// this interval, but are not in the interval themselves.
/// These nodes necessarily must be header nodes for other intervals.
///
std::vector<BasicBlock*> Successors;
/// Predecessors - List of BasicBlocks that have this Interval's header block
/// as one of their successors.
///
std::vector<BasicBlock*> Predecessors;
/// contains - Find out if a basic block is in this interval
inline bool contains(BasicBlock *BB) const {
for (unsigned i = 0; i < Nodes.size(); ++i)
if (Nodes[i] == BB) return true;
return false;
// I don't want the dependency on <algorithm>
//return find(Nodes.begin(), Nodes.end(), BB) != Nodes.end();
}
/// isSuccessor - find out if a basic block is a successor of this Interval
inline bool isSuccessor(BasicBlock *BB) const {
for (unsigned i = 0; i < Successors.size(); ++i)
if (Successors[i] == BB) return true;
return false;
// I don't want the dependency on <algorithm>
//return find(Successors.begin(), Successors.end(), BB) != Successors.end();
}
/// Equality operator. It is only valid to compare two intervals from the
/// same partition, because of this, all we have to check is the header node
/// for equality.
///
inline bool operator==(const Interval &I) const {
return HeaderNode == I.HeaderNode;
}
/// isLoop - Find out if there is a back edge in this interval...
bool isLoop() const;
/// print - Show contents in human readable format...
void print(raw_ostream &O) const;
};
/// succ_begin/succ_end - define methods so that Intervals may be used
/// just like BasicBlocks can with the succ_* functions, and *::succ_iterator.
///
inline Interval::succ_iterator succ_begin(Interval *I) {
return I->Successors.begin();
}
inline Interval::succ_iterator succ_end(Interval *I) {
return I->Successors.end();
}
/// pred_begin/pred_end - define methods so that Intervals may be used
/// just like BasicBlocks can with the pred_* functions, and *::pred_iterator.
///
inline Interval::pred_iterator pred_begin(Interval *I) {
return I->Predecessors.begin();
}
inline Interval::pred_iterator pred_end(Interval *I) {
return I->Predecessors.end();
}
template <> struct GraphTraits<Interval*> {
typedef Interval NodeType;
typedef Interval::succ_iterator ChildIteratorType;
static NodeType *getEntryNode(Interval *I) { return I; }
/// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
static inline ChildIteratorType child_begin(NodeType *N) {
return succ_begin(N);
}
static inline ChildIteratorType child_end(NodeType *N) {
return succ_end(N);
}
};
template <> struct GraphTraits<Inverse<Interval*> > {
typedef Interval NodeType;
typedef Interval::pred_iterator ChildIteratorType;
static NodeType *getEntryNode(Inverse<Interval *> G) { return G.Graph; }
static inline ChildIteratorType child_begin(NodeType *N) {
return pred_begin(N);
}
static inline ChildIteratorType child_end(NodeType *N) {
return pred_end(N);
}
};
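/// Example (editorial sketch, not part of the original header): walking an
/// interval's successor headers generically through the GraphTraits
/// specialization above, as a graph algorithm template would.
/// \code
///   for (GraphTraits<Interval *>::ChildIteratorType
///            CI = GraphTraits<Interval *>::child_begin(I),
///            CE = GraphTraits<Interval *>::child_end(I);
///        CI != CE; ++CI) {
///     BasicBlock *SuccHeader = *CI; // header block of a successor interval
///   }
/// \endcode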
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/DxilSimplify.h | //===-- DxilSimplify.h - Simplify Dxil operations ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
// Copyright (C) Microsoft Corporation. All rights reserved.
//===----------------------------------------------------------------------===//
//
// This file declares routines to simplify dxil intrinsics when some operands
// are constants.
//
// We hook into the llvm::SimplifyInstruction so the function
// interfaces are dictated by what llvm provides.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_HLSLDXILSIMPLIFY_H
#define LLVM_ANALYSIS_HLSLDXILSIMPLIFY_H
#include "llvm/ADT/ArrayRef.h"
namespace llvm {
class Function;
class Instruction;
class Value;
} // namespace llvm
namespace hlsl {
/// \brief Given a function and set of arguments, see if we can fold the
/// result as a dxil operation.
///
/// If this call could not be simplified returns null.
llvm::Value *SimplifyDxilCall(llvm::Function *F,
llvm::ArrayRef<llvm::Value *> Args,
llvm::Instruction *I, bool MayInsert);
/// CanSimplify
/// Returns true for a dxil operation function that can be simplified.
bool CanSimplify(const llvm::Function *F);
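/// Example (editorial sketch, not part of the original header): guarding a
/// fold attempt on a call instruction \c CI whose callee is \c F, where
/// \c Args holds the call's operands; all three names are assumed to be
/// supplied by the caller.
/// \code
///   if (hlsl::CanSimplify(F))
///     if (llvm::Value *V =
///             hlsl::SimplifyDxilCall(F, Args, CI, /*MayInsert=*/true))
///       CI->replaceAllUsesWith(V);
/// \endcode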
} // namespace hlsl
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/DependenceAnalysis.h | //===-- llvm/Analysis/DependenceAnalysis.h -------------------- -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// DependenceAnalysis is an LLVM pass that analyses dependences between memory
// accesses. Currently, it is an implementation of the approach described in
//
// Practical Dependence Testing
// Goff, Kennedy, Tseng
// PLDI 1991
//
// There's a single entry point that analyzes the dependence between a pair
// of memory references in a function, returning either NULL, for no dependence,
// or a more-or-less detailed description of the dependence between them.
//
// This pass exists to support the DependenceGraph pass. There are two separate
// passes because there's a useful separation of concerns. A dependence exists
// if two conditions are met:
//
// 1) Two instructions reference the same memory location, and
// 2) There is a flow of control leading from one instruction to the other.
//
// DependenceAnalysis attacks the first condition; DependenceGraph will attack
// the second (it's not yet ready).
//
// Please note that this is work in progress and the interface is subject to
// change.
//
// Plausible changes:
// Return a set of more precise dependences instead of just one dependence
// summarizing all.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_DEPENDENCEANALYSIS_H
#define LLVM_ANALYSIS_DEPENDENCEANALYSIS_H
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Pass.h"
namespace llvm {
class AliasAnalysis;
class Loop;
class LoopInfo;
class ScalarEvolution;
class SCEV;
class SCEVConstant;
class raw_ostream;
/// Dependence - This class represents a dependence between two memory
/// references in a function. It contains minimal information and
/// is used in the very common situation where the compiler is unable to
/// determine anything beyond the existence of a dependence; that is, it
/// represents a confused dependence (see also FullDependence). In most
/// cases (for output, flow, and anti dependences), the dependence implies
/// an ordering, where the source must precede the destination; in contrast,
/// input dependences are unordered.
///
/// When a dependence graph is built, each Dependence will be a member of
/// the set of predecessor edges for its destination instruction and a set
/// of successor edges for its source instruction. These sets are represented
/// as singly-linked lists, with the "next" fields stored in the dependence
/// itself.
class Dependence {
public:
Dependence(Instruction *Source,
Instruction *Destination) :
Src(Source),
Dst(Destination),
NextPredecessor(nullptr),
NextSuccessor(nullptr) {}
virtual ~Dependence() {}
/// Dependence::DVEntry - Each level in the distance/direction vector
/// has a direction (or perhaps a union of several directions), and
/// perhaps a distance.
struct DVEntry {
enum { NONE = 0,
LT = 1,
EQ = 2,
LE = 3,
GT = 4,
NE = 5,
GE = 6,
ALL = 7 };
unsigned char Direction : 3; // Init to ALL, then refine.
bool Scalar : 1; // Init to true.
bool PeelFirst : 1; // Peeling the first iteration will break dependence.
bool PeelLast : 1; // Peeling the last iteration will break the dependence.
bool Splitable : 1; // Splitting the loop will break dependence.
const SCEV *Distance; // NULL implies no distance available.
DVEntry() : Direction(ALL), Scalar(true), PeelFirst(false),
PeelLast(false), Splitable(false), Distance(nullptr) { }
};
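/// Example (editorial sketch, not part of the original comments): the
/// direction encodings above compose by bitwise OR (LE == LT | EQ), so a
/// client testing whether a '<' component is possible at a given level can
/// mask the value returned by \c getDirection. \c Dep and \c Level are
/// assumed to come from the caller.
/// \code
///   if (Dep->getDirection(Level) & Dependence::DVEntry::LT) {
///     // A forward-carried ('<') component may exist at this level.
///   }
/// \endcode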
/// getSrc - Returns the source instruction for this dependence.
///
Instruction *getSrc() const { return Src; }
/// getDst - Returns the destination instruction for this dependence.
///
Instruction *getDst() const { return Dst; }
/// isInput - Returns true if this is an input dependence.
///
bool isInput() const;
/// isOutput - Returns true if this is an output dependence.
///
bool isOutput() const;
/// isFlow - Returns true if this is a flow (aka true) dependence.
///
bool isFlow() const;
/// isAnti - Returns true if this is an anti dependence.
///
bool isAnti() const;
/// isOrdered - Returns true if dependence is Output, Flow, or Anti
///
bool isOrdered() const { return isOutput() || isFlow() || isAnti(); }
/// isUnordered - Returns true if dependence is Input
///
bool isUnordered() const { return isInput(); }
/// isLoopIndependent - Returns true if this is a loop-independent
/// dependence.
virtual bool isLoopIndependent() const { return true; }
/// isConfused - Returns true if this dependence is confused
/// (the compiler understands nothing and makes worst-case
/// assumptions).
virtual bool isConfused() const { return true; }
/// isConsistent - Returns true if this dependence is consistent
/// (occurs every time the source and destination are executed).
virtual bool isConsistent() const { return false; }
/// getLevels - Returns the number of common loops surrounding the
/// source and destination of the dependence.
virtual unsigned getLevels() const { return 0; }
/// getDirection - Returns the direction associated with a particular
/// level.
virtual unsigned getDirection(unsigned Level) const { return DVEntry::ALL; }
/// getDistance - Returns the distance (or NULL) associated with a
/// particular level.
virtual const SCEV *getDistance(unsigned Level) const { return nullptr; }
/// isPeelFirst - Returns true if peeling the first iteration from
/// this loop will break this dependence.
virtual bool isPeelFirst(unsigned Level) const { return false; }
/// isPeelLast - Returns true if peeling the last iteration from
/// this loop will break this dependence.
virtual bool isPeelLast(unsigned Level) const { return false; }
/// isSplitable - Returns true if splitting this loop will break
/// the dependence.
virtual bool isSplitable(unsigned Level) const { return false; }
/// isScalar - Returns true if a particular level is scalar; that is,
/// if no subscript in the source or destination mention the induction
/// variable associated with the loop at this level.
virtual bool isScalar(unsigned Level) const;
/// getNextPredecessor - Returns the value of the NextPredecessor
/// field.
const Dependence *getNextPredecessor() const {
return NextPredecessor;
}
/// getNextSuccessor - Returns the value of the NextSuccessor
/// field.
const Dependence *getNextSuccessor() const {
return NextSuccessor;
}
/// setNextPredecessor - Sets the value of the NextPredecessor
/// field.
void setNextPredecessor(const Dependence *pred) {
NextPredecessor = pred;
}
/// setNextSuccessor - Sets the value of the NextSuccessor
/// field.
void setNextSuccessor(const Dependence *succ) {
NextSuccessor = succ;
}
/// dump - For debugging purposes, dumps a dependence to OS.
///
void dump(raw_ostream &OS) const;
private:
Instruction *Src, *Dst;
const Dependence *NextPredecessor, *NextSuccessor;
friend class DependenceAnalysis;
};
/// FullDependence - This class represents a dependence between two memory
/// references in a function. It contains detailed information about the
/// dependence (direction vectors, etc.) and is used when the compiler is
/// able to accurately analyze the interaction of the references; that is,
/// it is not a confused dependence (see Dependence). In most cases
/// (for output, flow, and anti dependences), the dependence implies an
/// ordering, where the source must precede the destination; in contrast,
/// input dependences are unordered.
class FullDependence : public Dependence {
public:
FullDependence(Instruction *Src, Instruction *Dst, bool LoopIndependent,
unsigned Levels);
~FullDependence() override { delete[] DV; }
/// isLoopIndependent - Returns true if this is a loop-independent
/// dependence.
bool isLoopIndependent() const override { return LoopIndependent; }
/// isConfused - Returns true if this dependence is confused
/// (the compiler understands nothing and makes worst-case
/// assumptions).
bool isConfused() const override { return false; }
/// isConsistent - Returns true if this dependence is consistent
/// (occurs every time the source and destination are executed).
bool isConsistent() const override { return Consistent; }
/// getLevels - Returns the number of common loops surrounding the
/// source and destination of the dependence.
unsigned getLevels() const override { return Levels; }
/// getDirection - Returns the direction associated with a particular
/// level.
unsigned getDirection(unsigned Level) const override;
/// getDistance - Returns the distance (or NULL) associated with a
/// particular level.
const SCEV *getDistance(unsigned Level) const override;
/// isPeelFirst - Returns true if peeling the first iteration from
/// this loop will break this dependence.
bool isPeelFirst(unsigned Level) const override;
/// isPeelLast - Returns true if peeling the last iteration from
/// this loop will break this dependence.
bool isPeelLast(unsigned Level) const override;
/// isSplitable - Returns true if splitting the loop will break
/// the dependence.
bool isSplitable(unsigned Level) const override;
/// isScalar - Returns true if a particular level is scalar; that is,
/// if no subscript in the source or destination mentions the induction
/// variable associated with the loop at this level.
bool isScalar(unsigned Level) const override;
private:
unsigned short Levels;
bool LoopIndependent;
bool Consistent; // Init to true, then refine.
DVEntry *DV;
friend class DependenceAnalysis;
};
/// DependenceAnalysis - This class is the main dependence-analysis driver.
///
class DependenceAnalysis : public FunctionPass {
void operator=(const DependenceAnalysis &) = delete;
DependenceAnalysis(const DependenceAnalysis &) = delete;
public:
/// depends - Tests for a dependence between the Src and Dst instructions.
/// Returns NULL if no dependence; otherwise, returns a Dependence (or a
/// FullDependence) with as much information as can be gleaned.
/// The flag PossiblyLoopIndependent should be set by the caller
/// if it appears that control flow can reach from Src to Dst
/// without traversing a loop back edge.
std::unique_ptr<Dependence> depends(Instruction *Src,
Instruction *Dst,
bool PossiblyLoopIndependent);
/// getSplitIteration - Given a dependence that's splittable at some
/// particular level, return the iteration that should be used to split
/// the loop.
///
/// Generally, the dependence analyzer will be used to build
/// a dependence graph for a function (basically a map from instructions
/// to dependences). Looking for cycles in the graph shows us loops
/// that cannot be trivially vectorized/parallelized.
///
/// We can try to improve the situation by examining all the dependences
/// that make up the cycle, looking for ones we can break.
/// Sometimes, peeling the first or last iteration of a loop will break
/// dependences, and there are flags for those possibilities.
/// Sometimes, splitting a loop at some other iteration will do the trick,
/// and we've got a flag for that case. Rather than waste the space to
/// record the exact iteration (since we rarely know), we provide
/// a method that calculates the iteration. It's a drag that it must work
/// from scratch, but wonderful in that it's possible.
///
/// Here's an example:
///
/// for (i = 0; i < 10; i++)
/// A[i] = ...
/// ... = A[11 - i]
///
/// There's a loop-carried flow dependence from the store to the load,
/// found by the weak-crossing SIV test. The dependence will have a flag,
/// indicating that the dependence can be broken by splitting the loop.
/// Calling getSplitIteration will return 5.
/// Splitting the loop breaks the dependence, like so:
///
/// for (i = 0; i <= 5; i++)
/// A[i] = ...
/// ... = A[11 - i]
/// for (i = 6; i < 10; i++)
/// A[i] = ...
/// ... = A[11 - i]
///
/// breaks the dependence and allows us to vectorize/parallelize
/// both loops.
const SCEV *getSplitIteration(const Dependence &Dep, unsigned Level);
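/// Example (editorial sketch, not part of the original header): a typical
/// client query, where \c DA is this analysis, \c Src and \c Dst are memory
/// instructions from the same function, and \c Level is an in-range loop
/// level; all of these are assumed names.
/// \code
///   if (auto D = DA.depends(Src, Dst, /*PossiblyLoopIndependent=*/true)) {
///     if (D->isSplitable(Level)) {
///       const SCEV *SplitIter = DA.getSplitIteration(*D, Level);
///       // Split the loop at SplitIter to break the dependence.
///     }
///   }
/// \endcode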
private:
AliasAnalysis *AA;
ScalarEvolution *SE;
LoopInfo *LI;
Function *F;
/// Subscript - This private struct represents a pair of subscripts from
/// a pair of potentially multi-dimensional array references. We use a
/// vector of them to guide subscript partitioning.
struct Subscript {
const SCEV *Src;
const SCEV *Dst;
enum ClassificationKind { ZIV, SIV, RDIV, MIV, NonLinear } Classification;
SmallBitVector Loops;
SmallBitVector GroupLoops;
SmallBitVector Group;
};
struct CoefficientInfo {
const SCEV *Coeff;
const SCEV *PosPart;
const SCEV *NegPart;
const SCEV *Iterations;
};
struct BoundInfo {
const SCEV *Iterations;
const SCEV *Upper[8];
const SCEV *Lower[8];
unsigned char Direction;
unsigned char DirSet;
};
/// Constraint - This private class represents a constraint, as defined
/// in the paper
///
/// Practical Dependence Testing
/// Goff, Kennedy, Tseng
/// PLDI 1991
///
/// There are 5 kinds of constraint, in a hierarchy.
/// 1) Any - indicates no constraint, any dependence is possible.
/// 2) Line - A line ax + by = c, where a, b, and c are parameters,
/// representing the dependence equation.
/// 3) Distance - The value d of the dependence distance;
/// 4) Point - A point <x, y> representing the dependence from
/// iteration x to iteration y.
/// 5) Empty - No dependence is possible.
class Constraint {
private:
enum ConstraintKind { Empty, Point, Distance, Line, Any } Kind;
ScalarEvolution *SE;
const SCEV *A;
const SCEV *B;
const SCEV *C;
const Loop *AssociatedLoop;
public:
/// isEmpty - Return true if the constraint is of kind Empty.
bool isEmpty() const { return Kind == Empty; }
/// isPoint - Return true if the constraint is of kind Point.
bool isPoint() const { return Kind == Point; }
/// isDistance - Return true if the constraint is of kind Distance.
bool isDistance() const { return Kind == Distance; }
/// isLine - Return true if the constraint is of kind Line.
/// Since Distance's can also be represented as Lines, we also return
/// true if the constraint is of kind Distance.
bool isLine() const { return Kind == Line || Kind == Distance; }
/// isAny - Return true if the constraint is of kind Any;
bool isAny() const { return Kind == Any; }
/// getX - If constraint is a point <X, Y>, returns X.
/// Otherwise assert.
const SCEV *getX() const;
/// getY - If constraint is a point <X, Y>, returns Y.
/// Otherwise assert.
const SCEV *getY() const;
/// getA - If constraint is a line AX + BY = C, returns A.
/// Otherwise assert.
const SCEV *getA() const;
/// getB - If constraint is a line AX + BY = C, returns B.
/// Otherwise assert.
const SCEV *getB() const;
/// getC - If constraint is a line AX + BY = C, returns C.
/// Otherwise assert.
const SCEV *getC() const;
/// getD - If constraint is a distance, returns D.
/// Otherwise assert.
const SCEV *getD() const;
/// getAssociatedLoop - Returns the loop associated with this constraint.
const Loop *getAssociatedLoop() const;
/// setPoint - Change a constraint to Point.
void setPoint(const SCEV *X, const SCEV *Y, const Loop *CurrentLoop);
/// setLine - Change a constraint to Line.
void setLine(const SCEV *A, const SCEV *B,
const SCEV *C, const Loop *CurrentLoop);
/// setDistance - Change a constraint to Distance.
void setDistance(const SCEV *D, const Loop *CurrentLoop);
/// setEmpty - Change a constraint to Empty.
void setEmpty();
/// setAny - Change a constraint to Any.
void setAny(ScalarEvolution *SE);
/// dump - For debugging purposes. Dumps the constraint
/// out to OS.
void dump(raw_ostream &OS) const;
};
/// establishNestingLevels - Examines the loop nesting of the Src and Dst
/// instructions and establishes their shared loops. Sets the variables
/// CommonLevels, SrcLevels, and MaxLevels.
/// The source and destination instructions needn't be contained in the same
/// loop. The routine establishNestingLevels finds the level of most deeply
/// nested loop that contains them both, CommonLevels. An instruction that's
/// not contained in a loop is at level = 0. MaxLevels is equal to the level
/// of the source plus the level of the destination, minus CommonLevels.
/// This lets us allocate vectors MaxLevels in length, with room for every
/// distinct loop referenced in both the source and destination subscripts.
/// The variable SrcLevels is the nesting depth of the source instruction.
/// It's used to help calculate distinct loops referenced by the destination.
/// Here's the map from loops to levels:
/// 0 - unused
/// 1 - outermost common loop
/// ... - other common loops
/// CommonLevels - innermost common loop
/// ... - loops containing Src but not Dst
/// SrcLevels - innermost loop containing Src but not Dst
/// ... - loops containing Dst but not Src
/// MaxLevels - innermost loop containing Dst but not Src
/// Consider the following code fragment:
/// for (a = ...) {
/// for (b = ...) {
/// for (c = ...) {
/// for (d = ...) {
/// A[] = ...;
/// }
/// }
/// for (e = ...) {
/// for (f = ...) {
/// for (g = ...) {
/// ... = A[];
/// }
/// }
/// }
/// }
/// }
/// If we're looking at the possibility of a dependence between the store
/// to A (the Src) and the load from A (the Dst), we'll note that they
/// have 2 loops in common, so CommonLevels will equal 2 and the direction
/// vector for Result will have 2 entries. SrcLevels = 4 and MaxLevels = 7.
/// A map from loop names to level indices would look like
/// a - 1
/// b - 2 = CommonLevels
/// c - 3
/// d - 4 = SrcLevels
/// e - 5
/// f - 6
/// g - 7 = MaxLevels
void establishNestingLevels(const Instruction *Src,
const Instruction *Dst);
unsigned CommonLevels, SrcLevels, MaxLevels;
/// mapSrcLoop - Given one of the loops containing the source, return
/// its level index in our numbering scheme.
unsigned mapSrcLoop(const Loop *SrcLoop) const;
/// mapDstLoop - Given one of the loops containing the destination,
/// return its level index in our numbering scheme.
unsigned mapDstLoop(const Loop *DstLoop) const;
/// isLoopInvariant - Returns true if Expression is loop invariant
/// in LoopNest.
bool isLoopInvariant(const SCEV *Expression, const Loop *LoopNest) const;
/// Makes sure all subscript pairs share the same integer type by
/// sign-extending as necessary.
/// Sign-extending a subscript is safe because getelementptr assumes the
/// array subscripts are signed.
void unifySubscriptType(ArrayRef<Subscript *> Pairs);
/// removeMatchingExtensions - Examines a subscript pair.
/// If the source and destination are identically sign (or zero)
/// extended, it strips off the extension in an effort to
/// simplify the actual analysis.
void removeMatchingExtensions(Subscript *Pair);
/// collectCommonLoops - Finds the set of loops from the LoopNest that
/// have a level <= CommonLevels and are referred to by the SCEV Expression.
void collectCommonLoops(const SCEV *Expression,
const Loop *LoopNest,
SmallBitVector &Loops) const;
/// checkSrcSubscript - Examines the SCEV Src, returning true iff it's
/// linear. Collect the set of loops mentioned by Src.
bool checkSrcSubscript(const SCEV *Src,
const Loop *LoopNest,
SmallBitVector &Loops);
/// checkDstSubscript - Examines the SCEV Dst, returning true iff it's
/// linear. Collect the set of loops mentioned by Dst.
bool checkDstSubscript(const SCEV *Dst,
const Loop *LoopNest,
SmallBitVector &Loops);
/// isKnownPredicate - Compare X and Y using the predicate Pred.
/// Basically a wrapper for SCEV::isKnownPredicate,
/// but tries harder, especially in the presence of sign and zero
/// extensions and symbolics.
bool isKnownPredicate(ICmpInst::Predicate Pred,
const SCEV *X,
const SCEV *Y) const;
/// collectUpperBound - All subscripts are the same type (on my machine,
/// an i64). The loop bound may be a smaller type. collectUpperBound
/// finds the bound, if available, and zero extends it to the type T.
/// (I zero extend since the bound should always be >= 0.)
/// If no upper bound is available, return NULL.
const SCEV *collectUpperBound(const Loop *l, Type *T) const;
/// collectConstantUpperBound - Calls collectUpperBound(), then
/// attempts to cast it to SCEVConstant. If the cast fails,
/// returns NULL.
const SCEVConstant *collectConstantUpperBound(const Loop *l, Type *T) const;
/// classifyPair - Examines the subscript pair (the Src and Dst SCEVs)
/// and classifies it as either ZIV, SIV, RDIV, MIV, or Nonlinear.
/// Collects the associated loops in a set.
Subscript::ClassificationKind classifyPair(const SCEV *Src,
const Loop *SrcLoopNest,
const SCEV *Dst,
const Loop *DstLoopNest,
SmallBitVector &Loops);
/// testZIV - Tests the ZIV subscript pair (Src and Dst) for dependence.
/// Returns true if any possible dependence is disproved.
/// If there might be a dependence, returns false.
/// If the dependence isn't proven to exist,
/// marks the Result as inconsistent.
bool testZIV(const SCEV *Src,
const SCEV *Dst,
FullDependence &Result) const;
/// testSIV - Tests the SIV subscript pair (Src and Dst) for dependence.
/// Things of the form [c1 + a1*i] and [c2 + a2*j], where
/// i and j are induction variables, c1 and c2 are loop invariant,
/// and a1 and a2 are constant.
/// Returns true if any possible dependence is disproved.
/// If there might be a dependence, returns false.
/// Sets appropriate direction vector entry and, when possible,
/// the distance vector entry.
/// If the dependence isn't proven to exist,
/// marks the Result as inconsistent.
bool testSIV(const SCEV *Src,
const SCEV *Dst,
unsigned &Level,
FullDependence &Result,
Constraint &NewConstraint,
const SCEV *&SplitIter) const;
/// testRDIV - Tests the RDIV subscript pair (Src and Dst) for dependence.
/// Things of the form [c1 + a1*i] and [c2 + a2*j]
/// where i and j are induction variables, c1 and c2 are loop invariant,
/// and a1 and a2 are constant.
/// With minor algebra, this test can also be used for things like
/// [c1 + a1*i + a2*j][c2].
/// Returns true if any possible dependence is disproved.
/// If there might be a dependence, returns false.
/// Marks the Result as inconsistent.
bool testRDIV(const SCEV *Src,
const SCEV *Dst,
FullDependence &Result) const;
/// testMIV - Tests the MIV subscript pair (Src and Dst) for dependence.
/// Returns true if dependence disproved.
/// Can sometimes refine direction vectors.
bool testMIV(const SCEV *Src,
const SCEV *Dst,
const SmallBitVector &Loops,
FullDependence &Result) const;
/// strongSIVtest - Tests the strong SIV subscript pair (Src and Dst)
/// for dependence.
/// Things of the form [c1 + a*i] and [c2 + a*i],
/// where i is an induction variable, c1 and c2 are loop invariant,
/// and a is a constant
/// Returns true if any possible dependence is disproved.
/// If there might be a dependence, returns false.
/// Sets appropriate direction and distance.
bool strongSIVtest(const SCEV *Coeff,
const SCEV *SrcConst,
const SCEV *DstConst,
const Loop *CurrentLoop,
unsigned Level,
FullDependence &Result,
Constraint &NewConstraint) const;
/// weakCrossingSIVtest - Tests the weak-crossing SIV subscript pair
/// (Src and Dst) for dependence.
/// Things of the form [c1 + a*i] and [c2 - a*i],
/// where i is an induction variable, c1 and c2 are loop invariant,
/// and a is a constant.
/// Returns true if any possible dependence is disproved.
/// If there might be a dependence, returns false.
/// Sets appropriate direction entry.
/// Set consistent to false.
/// Marks the dependence as splitable.
bool weakCrossingSIVtest(const SCEV *SrcCoeff,
const SCEV *SrcConst,
const SCEV *DstConst,
const Loop *CurrentLoop,
unsigned Level,
FullDependence &Result,
Constraint &NewConstraint,
const SCEV *&SplitIter) const;
/// ExactSIVtest - Tests the SIV subscript pair
/// (Src and Dst) for dependence.
/// Things of the form [c1 + a1*i] and [c2 + a2*i],
/// where i is an induction variable, c1 and c2 are loop invariant,
/// and a1 and a2 are constant.
/// Returns true if any possible dependence is disproved.
/// If there might be a dependence, returns false.
/// Sets appropriate direction entry.
/// Set consistent to false.
bool exactSIVtest(const SCEV *SrcCoeff,
const SCEV *DstCoeff,
const SCEV *SrcConst,
const SCEV *DstConst,
const Loop *CurrentLoop,
unsigned Level,
FullDependence &Result,
Constraint &NewConstraint) const;
/// weakZeroSrcSIVtest - Tests the weak-zero SIV subscript pair
/// (Src and Dst) for dependence.
/// Things of the form [c1] and [c2 + a*i],
/// where i is an induction variable, c1 and c2 are loop invariant,
/// and a is a constant. See also weakZeroDstSIVtest.
/// Returns true if any possible dependence is disproved.
/// If there might be a dependence, returns false.
/// Sets appropriate direction entry.
/// Set consistent to false.
/// If loop peeling will break the dependence, mark appropriately.
bool weakZeroSrcSIVtest(const SCEV *DstCoeff,
const SCEV *SrcConst,
const SCEV *DstConst,
const Loop *CurrentLoop,
unsigned Level,
FullDependence &Result,
Constraint &NewConstraint) const;
/// weakZeroDstSIVtest - Tests the weak-zero SIV subscript pair
/// (Src and Dst) for dependence.
/// Things of the form [c1 + a*i] and [c2],
/// where i is an induction variable, c1 and c2 are loop invariant,
/// and a is a constant. See also weakZeroSrcSIVtest.
/// Returns true if any possible dependence is disproved.
/// If there might be a dependence, returns false.
/// Sets appropriate direction entry.
/// Set consistent to false.
/// If loop peeling will break the dependence, mark appropriately.
bool weakZeroDstSIVtest(const SCEV *SrcCoeff,
const SCEV *SrcConst,
const SCEV *DstConst,
const Loop *CurrentLoop,
unsigned Level,
FullDependence &Result,
Constraint &NewConstraint) const;
/// exactRDIVtest - Tests the RDIV subscript pair for dependence.
/// Things of the form [c1 + a*i] and [c2 + b*j],
/// where i and j are induction variables, c1 and c2 are loop invariant,
/// and a and b are constants.
/// Returns true if any possible dependence is disproved.
/// Marks the result as inconsistent.
/// Works in some cases that symbolicRDIVtest doesn't,
/// and vice versa.
bool exactRDIVtest(const SCEV *SrcCoeff,
const SCEV *DstCoeff,
const SCEV *SrcConst,
const SCEV *DstConst,
const Loop *SrcLoop,
const Loop *DstLoop,
FullDependence &Result) const;
/// symbolicRDIVtest - Tests the RDIV subscript pair for dependence.
/// Things of the form [c1 + a*i] and [c2 + b*j],
/// where i and j are induction variables, c1 and c2 are loop invariant,
/// and a and b are constants.
/// Returns true if any possible dependence is disproved.
/// Marks the result as inconsistent.
/// Works in some cases that exactRDIVtest doesn't,
/// and vice versa. Can also be used as a backup for
/// ordinary SIV tests.
bool symbolicRDIVtest(const SCEV *SrcCoeff,
const SCEV *DstCoeff,
const SCEV *SrcConst,
const SCEV *DstConst,
const Loop *SrcLoop,
const Loop *DstLoop) const;
/// gcdMIVtest - Tests an MIV subscript pair for dependence.
/// Returns true if any possible dependence is disproved.
/// Marks the result as inconsistent.
/// Can sometimes disprove the equal direction for 1 or more loops.
/// Can handle some symbolics that even the SIV tests don't get,
/// so we use it as a backup for everything.
bool gcdMIVtest(const SCEV *Src,
const SCEV *Dst,
FullDependence &Result) const;
/// banerjeeMIVtest - Tests an MIV subscript pair for dependence.
/// Returns true if any possible dependence is disproved.
/// Marks the result as inconsistent.
/// Computes directions.
bool banerjeeMIVtest(const SCEV *Src,
const SCEV *Dst,
const SmallBitVector &Loops,
FullDependence &Result) const;
/// collectCoefficientInfo - Walks through the subscript,
/// collecting each coefficient, the associated loop bounds,
/// and recording its positive and negative parts for later use.
CoefficientInfo *collectCoeffInfo(const SCEV *Subscript,
bool SrcFlag,
const SCEV *&Constant) const;
/// getPositivePart - X^+ = max(X, 0).
///
const SCEV *getPositivePart(const SCEV *X) const;
/// getNegativePart - X^- = min(X, 0).
///
const SCEV *getNegativePart(const SCEV *X) const;
/// getLowerBound - Looks through all the bounds info and
/// computes the lower bound given the current direction settings
/// at each level.
const SCEV *getLowerBound(BoundInfo *Bound) const;
/// getUpperBound - Looks through all the bounds info and
/// computes the upper bound given the current direction settings
/// at each level.
const SCEV *getUpperBound(BoundInfo *Bound) const;
/// exploreDirections - Hierarchically expands the direction vector
/// search space, combining the directions of discovered dependences
/// in the DirSet field of Bound. Returns the number of distinct
/// dependences discovered. If the dependence is disproved,
/// it will return 0.
unsigned exploreDirections(unsigned Level,
CoefficientInfo *A,
CoefficientInfo *B,
BoundInfo *Bound,
const SmallBitVector &Loops,
unsigned &DepthExpanded,
const SCEV *Delta) const;
/// testBounds - Returns true iff the current bounds are plausible.
///
bool testBounds(unsigned char DirKind,
unsigned Level,
BoundInfo *Bound,
const SCEV *Delta) const;
/// findBoundsALL - Computes the upper and lower bounds for level K
/// using the * direction. Records them in Bound.
void findBoundsALL(CoefficientInfo *A,
CoefficientInfo *B,
BoundInfo *Bound,
unsigned K) const;
/// findBoundsLT - Computes the upper and lower bounds for level K
/// using the < direction. Records them in Bound.
void findBoundsLT(CoefficientInfo *A,
CoefficientInfo *B,
BoundInfo *Bound,
unsigned K) const;
/// findBoundsGT - Computes the upper and lower bounds for level K
/// using the > direction. Records them in Bound.
void findBoundsGT(CoefficientInfo *A,
CoefficientInfo *B,
BoundInfo *Bound,
unsigned K) const;
/// findBoundsEQ - Computes the upper and lower bounds for level K
/// using the = direction. Records them in Bound.
void findBoundsEQ(CoefficientInfo *A,
CoefficientInfo *B,
BoundInfo *Bound,
unsigned K) const;
/// intersectConstraints - Updates X with the intersection
/// of the Constraints X and Y. Returns true if X has changed.
bool intersectConstraints(Constraint *X,
const Constraint *Y);
/// propagate - Review the constraints, looking for opportunities
/// to simplify a subscript pair (Src and Dst).
/// Return true if some simplification occurs.
/// If the simplification isn't exact (that is, if it is conservative
/// in terms of dependence), set consistent to false.
bool propagate(const SCEV *&Src,
const SCEV *&Dst,
SmallBitVector &Loops,
SmallVectorImpl<Constraint> &Constraints,
bool &Consistent);
/// propagateDistance - Attempt to propagate a distance
/// constraint into a subscript pair (Src and Dst).
/// Return true if some simplification occurs.
/// If the simplification isn't exact (that is, if it is conservative
/// in terms of dependence), set consistent to false.
bool propagateDistance(const SCEV *&Src,
const SCEV *&Dst,
Constraint &CurConstraint,
bool &Consistent);
/// propagatePoint - Attempt to propagate a point
/// constraint into a subscript pair (Src and Dst).
/// Return true if some simplification occurs.
bool propagatePoint(const SCEV *&Src,
const SCEV *&Dst,
Constraint &CurConstraint);
/// propagateLine - Attempt to propagate a line
/// constraint into a subscript pair (Src and Dst).
/// Return true if some simplification occurs.
/// If the simplification isn't exact (that is, if it is conservative
/// in terms of dependence), set consistent to false.
bool propagateLine(const SCEV *&Src,
const SCEV *&Dst,
Constraint &CurConstraint,
bool &Consistent);
/// findCoefficient - Given a linear SCEV,
/// return the coefficient corresponding to specified loop.
/// If there isn't one, return the SCEV constant 0.
/// For example, given a*i + b*j + c*k, returning the coefficient
/// corresponding to the j loop would yield b.
const SCEV *findCoefficient(const SCEV *Expr,
const Loop *TargetLoop) const;
/// zeroCoefficient - Given a linear SCEV,
/// return the SCEV given by zeroing out the coefficient
/// corresponding to the specified loop.
/// For example, given a*i + b*j + c*k, zeroing the coefficient
/// corresponding to the j loop would yield a*i + c*k.
const SCEV *zeroCoefficient(const SCEV *Expr,
const Loop *TargetLoop) const;
/// addToCoefficient - Given a linear SCEV Expr,
/// return the SCEV given by adding some Value to the
/// coefficient corresponding to the specified TargetLoop.
/// For example, given a*i + b*j + c*k, adding 1 to the coefficient
/// corresponding to the j loop would yield a*i + (b+1)*j + c*k.
const SCEV *addToCoefficient(const SCEV *Expr,
const Loop *TargetLoop,
const SCEV *Value) const;
/// updateDirection - Update direction vector entry
/// based on the current constraint.
void updateDirection(Dependence::DVEntry &Level,
const Constraint &CurConstraint) const;
bool tryDelinearize(const SCEV *SrcSCEV, const SCEV *DstSCEV,
SmallVectorImpl<Subscript> &Pair,
const SCEV *ElementSize);
public:
static char ID; // Class identification, replacement for typeinfo
DependenceAnalysis() : FunctionPass(ID) {
initializeDependenceAnalysisPass(*PassRegistry::getPassRegistry());
}
bool runOnFunction(Function &F) override;
void releaseMemory() override;
void getAnalysisUsage(AnalysisUsage &) const override;
void print(raw_ostream &, const Module * = nullptr) const override;
}; // class DependenceAnalysis
/// createDependenceAnalysisPass - This creates an instance of the
/// DependenceAnalysis pass.
FunctionPass *createDependenceAnalysisPass();
} // namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/Trace.h | //===- llvm/Analysis/Trace.h - Represent one trace of LLVM code -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This class represents a single trace of LLVM basic blocks. A trace is a
// single entry, multiple exit, region of code that is often hot. Trace-based
// optimizations treat traces almost like they are a large, strange, basic
// block: because the trace path is assumed to be hot, optimizations for the
// fall-through path are made at the expense of the non-fall-through paths.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_TRACE_H
#define LLVM_ANALYSIS_TRACE_H
#include <cassert>
#include <vector>
namespace llvm {
class BasicBlock;
class Function;
class Module;
class raw_ostream;
class Trace {
typedef std::vector<BasicBlock *> BasicBlockListType;
BasicBlockListType BasicBlocks;
public:
/// Trace ctor - Make a new trace from a vector of basic blocks,
/// residing in the function which is the parent of the first
/// basic block in the vector.
///
Trace(const std::vector<BasicBlock *> &vBB) : BasicBlocks (vBB) {}
/// getEntryBasicBlock - Return the entry basic block (first block)
/// of the trace.
///
BasicBlock *getEntryBasicBlock () const { return BasicBlocks[0]; }
/// operator[]/getBlock - Return basic block N in the trace.
///
BasicBlock *operator[](unsigned i) const { return BasicBlocks[i]; }
BasicBlock *getBlock(unsigned i) const { return BasicBlocks[i]; }
/// getFunction - Return this trace's parent function.
///
Function *getFunction () const;
/// getModule - Return the Module that contains this trace's parent
/// function.
///
Module *getModule () const;
/// getBlockIndex - Return the index of the specified basic block in the
/// trace, or -1 if it is not in the trace.
int getBlockIndex(const BasicBlock *X) const {
for (unsigned i = 0, e = BasicBlocks.size(); i != e; ++i)
if (BasicBlocks[i] == X)
return i;
return -1;
}
/// contains - Returns true if this trace contains the given basic
/// block.
///
bool contains(const BasicBlock *X) const {
return getBlockIndex(X) != -1;
}
/// Returns true if B1 occurs before B2 in the trace, or if it is the same
/// block as B2. Both blocks must be in the trace.
///
bool dominates(const BasicBlock *B1, const BasicBlock *B2) const {
int B1Idx = getBlockIndex(B1), B2Idx = getBlockIndex(B2);
assert(B1Idx != -1 && B2Idx != -1 && "Block is not in the trace!");
return B1Idx <= B2Idx;
}
// BasicBlock iterators...
typedef BasicBlockListType::iterator iterator;
typedef BasicBlockListType::const_iterator const_iterator;
typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
typedef std::reverse_iterator<iterator> reverse_iterator;
iterator begin() { return BasicBlocks.begin(); }
const_iterator begin() const { return BasicBlocks.begin(); }
iterator end () { return BasicBlocks.end(); }
const_iterator end () const { return BasicBlocks.end(); }
reverse_iterator rbegin() { return BasicBlocks.rbegin(); }
const_reverse_iterator rbegin() const { return BasicBlocks.rbegin(); }
reverse_iterator rend () { return BasicBlocks.rend(); }
const_reverse_iterator rend () const { return BasicBlocks.rend(); }
unsigned size() const { return BasicBlocks.size(); }
bool empty() const { return BasicBlocks.empty(); }
iterator erase(iterator q) { return BasicBlocks.erase (q); }
iterator erase(iterator q1, iterator q2) { return BasicBlocks.erase (q1, q2); }
/// print - Write trace to output stream.
///
void print(raw_ostream &O) const;
/// dump - Debugger convenience method; writes trace to standard error
/// output stream.
///
void dump() const;
};
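/// Example (editorial sketch, not part of the original header): building a
/// trace from an assumed hot path of blocks \c Entry, \c Then, and \c Exit,
/// then querying membership and ordering.
/// \code
///   std::vector<BasicBlock *> Path = {Entry, Then, Exit};
///   Trace T(Path);
///   bool OnPath = T.contains(Then);          // true
///   bool Ordered = T.dominates(Entry, Exit); // true: Entry precedes Exit
/// \endcode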
} // end namespace llvm
#endif // LLVM_ANALYSIS_TRACE_H
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/VectorUtils.h | //===- llvm/Transforms/Utils/VectorUtils.h - Vector utilities -*- C++ -*-=====//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines some vectorizer utilities.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_UTILS_VECTORUTILS_H
#define LLVM_TRANSFORMS_UTILS_VECTORUTILS_H
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
namespace llvm {
class GetElementPtrInst;
class Loop;
class ScalarEvolution;
class Type;
class Value;
/// \brief Identify if the intrinsic is trivially vectorizable.
/// This method returns true if the intrinsic's argument types are all
/// scalars for the scalar form of the intrinsic and all vectors for
/// the vector form of the intrinsic.
bool isTriviallyVectorizable(Intrinsic::ID ID);
/// \brief Identifies if the intrinsic has a scalar operand. It checks for
/// the ctlz, cttz, and powi special intrinsics whose argument is scalar.
bool hasVectorInstrinsicScalarOpd(Intrinsic::ID ID, unsigned ScalarOpdIdx);
/// \brief Identify if the call has a unary float signature.
/// Returns the input intrinsic ID if the call has a single argument, the
/// argument type and the call's type are both floating point, and the call
/// only reads memory; otherwise returns not_intrinsic.
Intrinsic::ID checkUnaryFloatSignature(const CallInst &I,
Intrinsic::ID ValidIntrinsicID);
/// \brief Identify if the call has a binary float signature.
/// Returns the input intrinsic ID if the call has two arguments, the
/// argument types and the call's type are all floating point, and the call
/// only reads memory; otherwise returns not_intrinsic.
Intrinsic::ID checkBinaryFloatSignature(const CallInst &I,
Intrinsic::ID ValidIntrinsicID);
/// \brief Returns the intrinsic ID for a call.
/// For the input call instruction it finds the matching intrinsic and returns
/// its intrinsic ID; if no mapping is found it returns not_intrinsic.
Intrinsic::ID getIntrinsicIDForCall(CallInst *CI, const TargetLibraryInfo *TLI);
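// Illustrative sketch (not part of this interface): map a call to an intrinsic
// ID and check whether it can be widened. CI and TLI are assumed to be a
// CallInst * and TargetLibraryInfo * the caller already has at hand.
//
//   Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
//   if (ID != Intrinsic::not_intrinsic && isTriviallyVectorizable(ID)) {
//     // The call maps to an intrinsic with a matching vector form.
//   }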
/// \brief Find the operand of the GEP that should be checked for consecutive
/// stores. This ignores trailing indices that have no effect on the final
/// pointer.
unsigned getGEPInductionOperand(const GetElementPtrInst *Gep);
/// \brief If the argument is a GEP, then returns the operand identified by
/// getGEPInductionOperand. However, if there is some other non-loop-invariant
/// operand, it returns that instead.
Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp);
/// \brief If a value has only one user that is a CastInst, return it.
Value *getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty);
/// \brief Get the stride of a pointer access in a loop. Looks for symbolic
/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
Value *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp);
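// Illustrative sketch: look for a symbolic stride in a pointer access inside a
// loop. Ptr, SE, and Lp are assumed to be supplied by the caller.
//
//   if (Value *Stride = getStrideFromPointer(Ptr, SE, Lp)) {
//     // The access looks like a[i * Stride]; specializing on Stride == 1 may
//     // enable vectorization of the loop.
//   }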
/// \brief Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then extracted
/// from the vector.
Value *findScalarElement(Value *V, unsigned EltNo);
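// Illustrative sketch: reuse element EltNo of a vector value if it is already
// available as a scalar. V is assumed to be a vector-typed llvm::Value *.
//
//   if (Value *Scalar = findScalarElement(V, EltNo)) {
//     // Use Scalar directly instead of emitting an extractelement from V.
//   }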
} // llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/RegionInfo.h | //===- RegionInfo.h - SESE region analysis ----------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Calculate a program structure tree built out of single entry single exit
// regions.
// The basic ideas are taken from "The Program Structure Tree - Richard Johnson,
// David Pearson, Keshav Pingali - 1994", however enriched with ideas from "The
// Refined Process Structure Tree - Jussi Vanhatalo, Hagen Voelzer, Jana
// Koehler - 2009".
// The algorithm to calculate these data structures however is completely
// different, as it takes advantage of existing information already available
// in (Post)dominance tree and dominance frontier passes. This leads to a simpler
// and in practice hopefully better performing algorithm. The runtime of the
// algorithms described in the papers above are both linear in graph size,
// O(V+E), whereas this algorithm is not, as the dominance frontier information
// itself is not, but in practice runtime seems to be in the order of magnitude
// of dominance tree calculation.
//
// WARNING: LLVM is generally very concerned about compile time such that
// the use of additional analysis passes in the default
// optimization sequence is avoided as much as possible.
// Specifically, if you do not need the RegionInfo, but dominance
// information could be sufficient please base your work only on
// the dominator tree. Most passes maintain it, such that using
// it has often near zero cost. In contrast RegionInfo is by
// default not available, is not maintained by existing
// transformations and there is no intention to do so.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_REGIONINFO_H
#define LLVM_ANALYSIS_REGIONINFO_H
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Dominators.h"
#include <map>
#include <memory>
#include <set>
namespace llvm {
// RegionTraits - Class to be specialized for different users of RegionInfo
// (i.e. BasicBlocks or MachineBasicBlocks). This is only to avoid needing to
// pass around an unreasonable number of template parameters.
template <class FuncT_>
struct RegionTraits {
// FuncT
// BlockT
// RegionT
// RegionNodeT
// RegionInfoT
typedef typename FuncT_::UnknownRegionTypeError BrokenT;
};
class DominatorTree;
class DominanceFrontier;
class Loop;
class LoopInfo;
struct PostDominatorTree;
class raw_ostream;
class Region;
template <class RegionTr>
class RegionBase;
class RegionNode;
class RegionInfo;
template <class RegionTr>
class RegionInfoBase;
template <>
struct RegionTraits<Function> {
typedef Function FuncT;
typedef BasicBlock BlockT;
typedef Region RegionT;
typedef RegionNode RegionNodeT;
typedef RegionInfo RegionInfoT;
typedef DominatorTree DomTreeT;
typedef DomTreeNode DomTreeNodeT;
typedef DominanceFrontier DomFrontierT;
typedef PostDominatorTree PostDomTreeT;
typedef Instruction InstT;
typedef Loop LoopT;
typedef LoopInfo LoopInfoT;
static unsigned getNumSuccessors(BasicBlock *BB) {
return BB->getTerminator()->getNumSuccessors();
}
};
/// @brief Marker class to iterate over the elements of a Region in flat mode.
///
/// Pass this class as the graph type to iterate in Flat mode; omit it to
/// iterate normally. During a Flat mode iteration all Regions are entered
/// and the iteration returns every BasicBlock. If Flat mode is not selected,
/// just one RegionNode containing each subregion is returned.
template <class GraphType>
class FlatIt {};
/// @brief A RegionNode represents a subregion or a BasicBlock that is part of a
/// Region.
template <class Tr>
class RegionNodeBase {
friend class RegionBase<Tr>;
public:
typedef typename Tr::BlockT BlockT;
typedef typename Tr::RegionT RegionT;
private:
RegionNodeBase(const RegionNodeBase &) = delete;
const RegionNodeBase &operator=(const RegionNodeBase &) = delete;
/// This is the entry basic block that starts this region node. If this is a
/// BasicBlock RegionNode, then entry is just the basic block, that this
/// RegionNode represents. Otherwise it is the entry of this (Sub)RegionNode.
///
/// In the BBtoRegionNode map of the parent of this node, BB will always map
/// to this node no matter which kind of node this one is.
///
/// The node can hold either a Region or a BasicBlock.
/// Use one bit to save, if this RegionNode is a subregion or BasicBlock
/// RegionNode.
PointerIntPair<BlockT *, 1, bool> entry;
/// @brief The parent Region of this RegionNode.
/// @see getParent()
RegionT *parent;
protected:
/// @brief Create a RegionNode.
///
/// @param Parent The parent of this RegionNode.
/// @param Entry The entry BasicBlock of the RegionNode. If this
/// RegionNode represents a BasicBlock, this is the
/// BasicBlock itself. If it represents a subregion, this
/// is the entry BasicBlock of the subregion.
/// @param isSubRegion If this RegionNode represents a SubRegion.
inline RegionNodeBase(RegionT *Parent, BlockT *Entry,
bool isSubRegion = false)
: entry(Entry, isSubRegion), parent(Parent) {}
public:
/// @brief Get the parent Region of this RegionNode.
///
/// The parent Region is the Region this RegionNode belongs to. If for
/// example a BasicBlock is element of two Regions, there exist two
/// RegionNodes for this BasicBlock. Each with the getParent() function
/// pointing to the Region this RegionNode belongs to.
///
/// @return Get the parent Region of this RegionNode.
inline RegionT *getParent() const { return parent; }
/// @brief Get the entry BasicBlock of this RegionNode.
///
/// If this RegionNode represents a BasicBlock this is just the BasicBlock
/// itself, otherwise we return the entry BasicBlock of the Subregion
///
/// @return The entry BasicBlock of this RegionNode.
inline BlockT *getEntry() const { return entry.getPointer(); }
/// @brief Get the content of this RegionNode.
///
/// This can be either a BasicBlock or a subregion. Before calling getNodeAs()
/// check the type of the content with the isSubRegion() function call.
///
/// @return The content of this RegionNode.
template <class T> inline T *getNodeAs() const;
/// @brief Is this RegionNode a subregion?
///
/// @return True if it contains a subregion. False if it contains a
/// BasicBlock.
inline bool isSubRegion() const { return entry.getInt(); }
};
//===----------------------------------------------------------------------===//
/// @brief A single entry single exit Region.
///
/// A Region is a connected subgraph of a control flow graph that has exactly
/// two connections to the remaining graph. It can be used to analyze or
/// optimize parts of the control flow graph.
///
/// A <em> simple Region </em> is connected to the remaining graph by just two
/// edges. One edge entering the Region and another one leaving the Region.
///
/// An <em> extended Region </em> (or just Region) is a subgraph that can be
/// transformed into a simple Region. The transformation is done by adding
/// BasicBlocks that merge several entry or exit edges so that after the merge
/// just one entry and one exit edge exists.
///
/// The \e Entry of a Region is the first BasicBlock that is passed after
/// entering the Region. It is an element of the Region. The entry BasicBlock
/// dominates all BasicBlocks in the Region.
///
/// The \e Exit of a Region is the first BasicBlock that is passed after
/// leaving the Region. It is not an element of the Region. The exit BasicBlock
/// postdominates all BasicBlocks in the Region.
///
/// A <em> canonical Region </em> cannot be constructed by combining smaller
/// Regions.
///
/// Region A is the \e parent of Region B, if B is completely contained in A.
///
/// Two canonical Regions either do not intersect at all or one is
/// the parent of the other.
///
/// The <em> Program Structure Tree</em> is a graph (V, E) where V is the set of
/// Regions in the control flow graph and E is the \e parent relation of these
/// Regions.
///
/// Example:
///
/// \verbatim
/// A simple control flow graph, that contains two regions.
///
/// 1
/// / |
/// 2 |
/// / \ 3
/// 4 5 |
/// | | |
/// 6 7 8
/// \ | /
/// \ |/ Region A: 1 -> 9 {1,2,3,4,5,6,7,8}
/// 9 Region B: 2 -> 9 {2,4,5,6,7}
/// \endverbatim
///
/// You can obtain more examples by either calling
///
/// <tt> "opt -regions -analyze anyprogram.ll" </tt>
/// or
/// <tt> "opt -view-regions-only anyprogram.ll" </tt>
///
/// on any LLVM file you are interested in.
///
/// The first call returns a textual representation of the program structure
/// tree, the second one creates a graphical representation using graphviz.
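///
/// A minimal query sketch (illustrative; R is assumed to be a Region *
/// obtained from RegionInfo, and visit() a placeholder for the caller's own
/// per-block work):
/// \code
///   if (!R->isTopLevelRegion()) {
///     BasicBlock *Entry = R->getEntry(); // dominates every block in R
///     BasicBlock *Exit = R->getExit();   // first block reached after leaving R
///     for (BasicBlock *BB : R->blocks()) // flat iteration, including subregions
///       visit(BB);
///   }
/// \endcode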
template <class Tr>
class RegionBase : public RegionNodeBase<Tr> {
typedef typename Tr::FuncT FuncT;
typedef typename Tr::BlockT BlockT;
typedef typename Tr::RegionInfoT RegionInfoT;
typedef typename Tr::RegionT RegionT;
typedef typename Tr::RegionNodeT RegionNodeT;
typedef typename Tr::DomTreeT DomTreeT;
typedef typename Tr::LoopT LoopT;
typedef typename Tr::LoopInfoT LoopInfoT;
typedef typename Tr::InstT InstT;
typedef GraphTraits<BlockT *> BlockTraits;
typedef GraphTraits<Inverse<BlockT *>> InvBlockTraits;
typedef typename BlockTraits::ChildIteratorType SuccIterTy;
typedef typename InvBlockTraits::ChildIteratorType PredIterTy;
friend class RegionInfoBase<Tr>;
RegionBase(const RegionBase &) = delete;
const RegionBase &operator=(const RegionBase &) = delete;
// Information necessary to manage this Region.
RegionInfoT *RI;
DomTreeT *DT;
// The exit BasicBlock of this region.
// (The entry BasicBlock is part of RegionNode)
BlockT *exit;
typedef std::vector<std::unique_ptr<RegionT>> RegionSet;
// The subregions of this region.
RegionSet children;
typedef std::map<BlockT *, RegionNodeT *> BBNodeMapT;
// Save the BasicBlock RegionNodes that are element of this Region.
mutable BBNodeMapT BBNodeMap;
/// verifyBBInRegion - Check if a BB is in this Region. This check also works
/// if the region is incorrectly built. (EXPENSIVE!)
void verifyBBInRegion(BlockT *BB) const;
/// verifyWalk - Walk over all the BBs of the region starting from BB and
/// verify that all reachable basic blocks are elements of the region.
/// (EXPENSIVE!)
void verifyWalk(BlockT *BB, std::set<BlockT *> *visitedBB) const;
/// verifyRegionNest - Verify if the region and its children are valid
/// regions (EXPENSIVE!)
void verifyRegionNest() const;
public:
/// @brief Create a new region.
///
/// @param Entry The entry basic block of the region.
/// @param Exit The exit basic block of the region.
/// @param RI The region info object that is managing this region.
/// @param DT The dominator tree of the current function.
/// @param Parent The surrounding region or NULL if this is a top level
/// region.
RegionBase(BlockT *Entry, BlockT *Exit, RegionInfoT *RI, DomTreeT *DT,
RegionT *Parent = nullptr);
/// Delete the Region and all its subregions.
~RegionBase();
/// @brief Get the entry BasicBlock of the Region.
/// @return The entry BasicBlock of the region.
BlockT *getEntry() const {
return RegionNodeBase<Tr>::getEntry();
}
/// @brief Replace the entry basic block of the region with the new basic
/// block.
///
/// @param BB The new entry basic block of the region.
void replaceEntry(BlockT *BB);
/// @brief Replace the exit basic block of the region with the new basic
/// block.
///
/// @param BB The new exit basic block of the region.
void replaceExit(BlockT *BB);
/// @brief Recursively replace the entry basic block of the region.
///
/// This function replaces the entry basic block with a new basic block. It
/// also updates all child regions that have the same entry basic block as
/// this region.
///
/// @param NewEntry The new entry basic block.
void replaceEntryRecursive(BlockT *NewEntry);
/// @brief Recursively replace the exit basic block of the region.
///
/// This function replaces the exit basic block with a new basic block. It
/// also updates all child regions that have the same exit basic block as
/// this region.
///
/// @param NewExit The new exit basic block.
void replaceExitRecursive(BlockT *NewExit);
/// @brief Get the exit BasicBlock of the Region.
/// @return The exit BasicBlock of the Region, NULL if this is the TopLevel
/// Region.
BlockT *getExit() const { return exit; }
/// @brief Get the parent of the Region.
/// @return The parent of the Region or NULL if this is a top level
/// Region.
RegionT *getParent() const {
return RegionNodeBase<Tr>::getParent();
}
/// @brief Get the RegionNode representing the current Region.
/// @return The RegionNode representing the current Region.
RegionNodeT *getNode() const {
return const_cast<RegionNodeT *>(
reinterpret_cast<const RegionNodeT *>(this));
}
/// @brief Get the nesting level of this Region.
///
/// A toplevel Region has depth 0.
///
/// @return The depth of the region.
unsigned getDepth() const;
/// @brief Check if a Region is the TopLevel region.
///
/// The toplevel region represents the whole function.
bool isTopLevelRegion() const { return exit == nullptr; }
/// @brief Return a new (non-canonical) region, that is obtained by joining
/// this region with its predecessors.
///
/// @return A region also starting at getEntry(), but reaching to the next
/// basic block that forms with getEntry() a (non-canonical) region.
/// NULL if such a basic block does not exist.
RegionT *getExpandedRegion() const;
/// @brief Return the first block of this region's single entry edge,
/// if existing.
///
/// @return The BasicBlock starting this region's single entry edge,
/// else NULL.
BlockT *getEnteringBlock() const;
/// @brief Return the first block of this region's single exit edge,
/// if existing.
///
/// @return The BasicBlock starting this region's single exit edge,
/// else NULL.
BlockT *getExitingBlock() const;
/// @brief Is this a simple region?
///
/// A region is simple if it has exactly one exit and one entry edge.
///
/// @return True if the Region is simple.
bool isSimple() const;
/// @brief Returns the name of the Region.
/// @return The Name of the Region.
std::string getNameStr() const;
/// @brief Return the RegionInfo object, that belongs to this Region.
RegionInfoT *getRegionInfo() const { return RI; }
/// PrintStyle - Print the region in different ways.
enum PrintStyle { PrintNone, PrintBB, PrintRN };
/// @brief Print the region.
///
/// @param OS The output stream the Region is printed to.
/// @param printTree Print also the tree of subregions.
/// @param level The indentation level used for printing.
void print(raw_ostream &OS, bool printTree = true, unsigned level = 0,
PrintStyle Style = PrintNone) const;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// @brief Print the region to stderr.
void dump() const;
#endif
/// @brief Check if the region contains a BasicBlock.
///
/// @param BB The BasicBlock that might be contained in this Region.
/// @return True if the block is contained in the region otherwise false.
bool contains(const BlockT *BB) const;
/// @brief Check if the region contains another region.
///
/// @param SubRegion The region that might be contained in this Region.
/// @return True if SubRegion is contained in the region otherwise false.
bool contains(const RegionT *SubRegion) const {
// Toplevel Region.
if (!getExit())
return true;
return contains(SubRegion->getEntry()) &&
(contains(SubRegion->getExit()) ||
SubRegion->getExit() == getExit());
}
/// @brief Check if the region contains an Instruction.
///
/// @param Inst The Instruction that might be contained in this region.
/// @return True if the Instruction is contained in the region otherwise
/// false.
bool contains(const InstT *Inst) const { return contains(Inst->getParent()); }
/// @brief Check if the region contains a loop.
///
/// @param L The loop that might be contained in this region.
/// @return True if the loop is contained in the region otherwise false.
/// In case a NULL pointer is passed to this function the result
/// is false, except for the region that describes the whole function.
/// In that case true is returned.
bool contains(const LoopT *L) const;
/// @brief Get the outermost loop in the region that contains a loop.
///
/// Find for a Loop L the outermost loop OuterL that is a parent loop of L
/// and is itself contained in the region.
///
/// @param L The loop the lookup is started.
/// @return The outermost loop in the region, NULL if such a loop does not
/// exist or if the region describes the whole function.
LoopT *outermostLoopInRegion(LoopT *L) const;
/// @brief Get the outermost loop in the region that contains a basic block.
///
/// Find for a basic block BB the outermost loop L that contains BB and is
/// itself contained in the region.
///
/// @param LI A pointer to a LoopInfo analysis.
/// @param BB The basic block surrounded by the loop.
/// @return The outermost loop in the region, NULL if such a loop does not
/// exist or if the region describes the whole function.
LoopT *outermostLoopInRegion(LoopInfoT *LI, BlockT *BB) const;
/// @brief Get the subregion that starts at a BasicBlock
///
/// @param BB The BasicBlock the subregion should start.
/// @return The Subregion if available, otherwise NULL.
RegionT *getSubRegionNode(BlockT *BB) const;
/// @brief Get the RegionNode for a BasicBlock
///
/// @param BB The BasicBlock at which the RegionNode should start.
/// @return If available, the RegionNode that represents the subregion
/// starting at BB. If no subregion starts at BB, the RegionNode
/// representing BB.
RegionNodeT *getNode(BlockT *BB) const;
/// @brief Get the BasicBlock RegionNode for a BasicBlock
///
/// @param BB The BasicBlock for which the RegionNode is requested.
/// @return The RegionNode representing the BB.
RegionNodeT *getBBNode(BlockT *BB) const;
/// @brief Add a new subregion to this Region.
///
/// @param SubRegion The new subregion that will be added.
/// @param moveChildren Move the children of this region, that are also
/// contained in SubRegion into SubRegion.
void addSubRegion(RegionT *SubRegion, bool moveChildren = false);
/// @brief Remove a subregion from this Region.
///
/// The subregion is not deleted, as it will probably be inserted into another
/// region.
/// @param SubRegion The SubRegion that will be removed.
RegionT *removeSubRegion(RegionT *SubRegion);
/// @brief Move all direct child nodes of this Region to another Region.
///
/// @param To The Region the child nodes will be transferred to.
void transferChildrenTo(RegionT *To);
/// @brief Verify if the region is a correct region.
///
/// Check if this is a correctly built Region. This is an expensive check, as
/// the complete CFG of the Region will be walked.
void verifyRegion() const;
/// @brief Clear the cache for BB RegionNodes.
///
/// After calling this function the BasicBlock RegionNodes will be stored at
/// different memory locations. RegionNodes obtained before this function is
/// called are therefore not comparable to RegionNodes obtained afterwards.
void clearNodeCache();
/// @name Subregion Iterators
///
/// These iterators iterate over all subregions of this Region.
//@{
typedef typename RegionSet::iterator iterator;
typedef typename RegionSet::const_iterator const_iterator;
iterator begin() { return children.begin(); }
iterator end() { return children.end(); }
const_iterator begin() const { return children.begin(); }
const_iterator end() const { return children.end(); }
//@}
/// @name BasicBlock Iterators
///
/// These iterators iterate over all BasicBlocks that are contained in this
/// Region. The iterator also iterates over BasicBlocks that are elements of
/// a subregion of this Region. It is therefore called a flat iterator.
//@{
template <bool IsConst>
class block_iterator_wrapper
: public df_iterator<
typename std::conditional<IsConst, const BlockT, BlockT>::type *> {
typedef df_iterator<
typename std::conditional<IsConst, const BlockT, BlockT>::type *> super;
public:
typedef block_iterator_wrapper<IsConst> Self;
typedef typename super::pointer pointer;
// Construct the begin iterator.
block_iterator_wrapper(pointer Entry, pointer Exit)
: super(df_begin(Entry)) {
// Mark the exit of the region as visited, so that the children of the
// exit and the exit itself, i.e. the block outside the region will never
// be visited.
super::Visited.insert(Exit);
}
// Construct the end iterator.
block_iterator_wrapper() : super(df_end<pointer>((BlockT *)nullptr)) {}
/*implicit*/ block_iterator_wrapper(super I) : super(I) {}
// FIXME: Even a const_iterator returns a non-const BasicBlock pointer.
// This was introduced for backwards compatibility, but should
// be removed as soon as all users are fixed.
BlockT *operator*() const {
return const_cast<BlockT *>(super::operator*());
}
};
typedef block_iterator_wrapper<false> block_iterator;
typedef block_iterator_wrapper<true> const_block_iterator;
block_iterator block_begin() { return block_iterator(getEntry(), getExit()); }
block_iterator block_end() { return block_iterator(); }
const_block_iterator block_begin() const {
return const_block_iterator(getEntry(), getExit());
}
const_block_iterator block_end() const { return const_block_iterator(); }
typedef iterator_range<block_iterator> block_range;
typedef iterator_range<const_block_iterator> const_block_range;
/// @brief Returns a range view of the basic blocks in the region.
inline block_range blocks() {
return block_range(block_begin(), block_end());
}
/// @brief Returns a range view of the basic blocks in the region.
///
/// This is the 'const' version of the range view.
inline const_block_range blocks() const {
return const_block_range(block_begin(), block_end());
}
//@}
/// @name Element Iterators
///
/// These iterators iterate over all BasicBlock and subregion RegionNodes that
/// are direct children of this Region. It does not iterate over any
/// RegionNodes that are also element of a subregion of this Region.
//@{
typedef df_iterator<RegionNodeT *, SmallPtrSet<RegionNodeT *, 8>, false,
GraphTraits<RegionNodeT *>> element_iterator;
typedef df_iterator<const RegionNodeT *, SmallPtrSet<const RegionNodeT *, 8>,
false,
GraphTraits<const RegionNodeT *>> const_element_iterator;
element_iterator element_begin();
element_iterator element_end();
const_element_iterator element_begin() const;
const_element_iterator element_end() const;
//@}
};
/// Print a RegionNode.
template <class Tr>
inline raw_ostream &operator<<(raw_ostream &OS, const RegionNodeBase<Tr> &Node);
//===----------------------------------------------------------------------===//
/// @brief Analysis that detects all canonical Regions.
///
/// The RegionInfo pass detects all canonical regions in a function. The Regions
/// are connected using the parent relation. This builds a Program Structure
/// Tree.
template <class Tr>
class RegionInfoBase {
typedef typename Tr::BlockT BlockT;
typedef typename Tr::FuncT FuncT;
typedef typename Tr::RegionT RegionT;
typedef typename Tr::RegionInfoT RegionInfoT;
typedef typename Tr::DomTreeT DomTreeT;
typedef typename Tr::DomTreeNodeT DomTreeNodeT;
typedef typename Tr::PostDomTreeT PostDomTreeT;
typedef typename Tr::DomFrontierT DomFrontierT;
typedef GraphTraits<BlockT *> BlockTraits;
typedef GraphTraits<Inverse<BlockT *>> InvBlockTraits;
typedef typename BlockTraits::ChildIteratorType SuccIterTy;
typedef typename InvBlockTraits::ChildIteratorType PredIterTy;
friend class RegionInfo;
friend class MachineRegionInfo;
typedef DenseMap<BlockT *, BlockT *> BBtoBBMap;
typedef DenseMap<BlockT *, RegionT *> BBtoRegionMap;
typedef SmallPtrSet<RegionT *, 4> RegionSet;
RegionInfoBase();
virtual ~RegionInfoBase();
RegionInfoBase(const RegionInfoBase &) = delete;
const RegionInfoBase &operator=(const RegionInfoBase &) = delete;
DomTreeT *DT;
PostDomTreeT *PDT;
DomFrontierT *DF;
/// The top level region.
RegionT *TopLevelRegion;
private:
/// Map every BB to the smallest region, that contains BB.
BBtoRegionMap BBtoRegion;
// isCommonDomFrontier - Returns true if BB is in the dominance frontier of
// entry, because it was inherited from exit. In the other case there is an
// edge going from entry to BB without passing exit.
bool isCommonDomFrontier(BlockT *BB, BlockT *entry, BlockT *exit) const;
// isRegion - Check if entry and exit surround a valid region, based on
// dominance tree and dominance frontier.
bool isRegion(BlockT *entry, BlockT *exit) const;
// insertShortCut - Saves a shortcut pointing from entry to exit.
// This function may extend this shortcut if possible.
void insertShortCut(BlockT *entry, BlockT *exit, BBtoBBMap *ShortCut) const;
// getNextPostDom - Returns the next BB that postdominates N, while skipping
// all post dominators that cannot finish a canonical region.
DomTreeNodeT *getNextPostDom(DomTreeNodeT *N, BBtoBBMap *ShortCut) const;
// isTrivialRegion - A region is trivial, if it contains only one BB.
bool isTrivialRegion(BlockT *entry, BlockT *exit) const;
// createRegion - Creates a single entry single exit region.
RegionT *createRegion(BlockT *entry, BlockT *exit);
// findRegionsWithEntry - Detect all regions starting with bb 'entry'.
void findRegionsWithEntry(BlockT *entry, BBtoBBMap *ShortCut);
// scanForRegions - Detects regions in F.
void scanForRegions(FuncT &F, BBtoBBMap *ShortCut);
// getTopMostParent - Get the top most parent with the same entry block.
RegionT *getTopMostParent(RegionT *region);
// buildRegionsTree - Build the region hierarchy after all regions are detected.
void buildRegionsTree(DomTreeNodeT *N, RegionT *region);
// updateStatistics - Update statistic about created regions.
virtual void updateStatistics(RegionT *R) = 0;
// calculate - detect all regions in function and build the region tree.
void calculate(FuncT &F);
public:
static bool VerifyRegionInfo;
static typename RegionT::PrintStyle printStyle;
void print(raw_ostream &OS) const;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void dump() const;
#endif
void releaseMemory();
/// @brief Get the smallest region that contains a BasicBlock.
///
/// @param BB The basic block.
/// @return The smallest region, that contains BB or NULL, if there is no
/// region containing BB.
RegionT *getRegionFor(BlockT *BB) const;
/// @brief Set the smallest region that surrounds a basic block.
///
/// @param BB The basic block surrounded by a region.
/// @param R The smallest region that surrounds BB.
void setRegionFor(BlockT *BB, RegionT *R);
/// @brief A shortcut for getRegionFor().
///
/// @param BB The basic block.
/// @return The smallest region, that contains BB or NULL, if there is no
/// region containing BB.
RegionT *operator[](BlockT *BB) const;
/// @brief Return the exit of the maximal refined region, that starts at a
/// BasicBlock.
///
/// @param BB The BasicBlock the refined region starts.
BlockT *getMaxRegionExit(BlockT *BB) const;
/// @brief Find the smallest region that contains two regions.
///
/// @param A The first region.
/// @param B The second region.
/// @return The smallest region containing A and B.
RegionT *getCommonRegion(RegionT *A, RegionT *B) const;
/// @brief Find the smallest region that contains two basic blocks.
///
/// @param A The first basic block.
/// @param B The second basic block.
/// @return The smallest region that contains A and B.
RegionT *getCommonRegion(BlockT *A, BlockT *B) const {
return getCommonRegion(getRegionFor(A), getRegionFor(B));
}
/// @brief Find the smallest region that contains a set of regions.
///
/// @param Regions A vector of regions.
/// @return The smallest region that contains all regions in Regions.
RegionT *getCommonRegion(SmallVectorImpl<RegionT *> &Regions) const;
/// @brief Find the smallest region that contains a set of basic blocks.
///
/// @param BBs A vector of basic blocks.
/// @return The smallest region that contains all basic blocks in BBs.
RegionT *getCommonRegion(SmallVectorImpl<BlockT *> &BBs) const;
RegionT *getTopLevelRegion() const { return TopLevelRegion; }
/// @brief Update RegionInfo after a basic block was split.
///
/// @param NewBB The basic block that was created before OldBB.
/// @param OldBB The old basic block.
void splitBlock(BlockT *NewBB, BlockT *OldBB);
/// @brief Clear the Node Cache for all Regions.
///
/// @see Region::clearNodeCache()
void clearNodeCache() {
if (TopLevelRegion)
TopLevelRegion->clearNodeCache();
}
void verifyAnalysis() const;
};
class Region;
class RegionNode : public RegionNodeBase<RegionTraits<Function>> {
public:
inline RegionNode(Region *Parent, BasicBlock *Entry, bool isSubRegion = false)
: RegionNodeBase<RegionTraits<Function>>(Parent, Entry, isSubRegion) {}
bool operator==(const Region &RN) const {
return this == reinterpret_cast<const RegionNode *>(&RN);
}
};
class Region : public RegionBase<RegionTraits<Function>> {
public:
Region(BasicBlock *Entry, BasicBlock *Exit, RegionInfo *RI, DominatorTree *DT,
Region *Parent = nullptr);
~Region();
bool operator==(const RegionNode &RN) const {
return &RN == reinterpret_cast<const RegionNode *>(this);
}
};
class RegionInfo : public RegionInfoBase<RegionTraits<Function>> {
public:
explicit RegionInfo();
~RegionInfo() override;
// updateStatistics - Update statistic about created regions.
void updateStatistics(Region *R) final;
void recalculate(Function &F, DominatorTree *DT, PostDominatorTree *PDT,
DominanceFrontier *DF);
};
class RegionInfoPass : public FunctionPass {
RegionInfo RI;
public:
static char ID;
explicit RegionInfoPass();
~RegionInfoPass() override;
RegionInfo &getRegionInfo() { return RI; }
const RegionInfo &getRegionInfo() const { return RI; }
/// @name FunctionPass interface
//@{
bool runOnFunction(Function &F) override;
void releaseMemory() override;
void verifyAnalysis() const override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
void print(raw_ostream &OS, const Module *) const override;
void dump() const;
//@}
};
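// Illustrative sketch of how a FunctionPass typically obtains the analysis
// (MyPass and its member functions are assumptions made for the example):
//
//   void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<RegionInfoPass>();
//     AU.setPreservesAll();
//   }
//   bool MyPass::runOnFunction(Function &F) {
//     RegionInfo &RI = getAnalysis<RegionInfoPass>().getRegionInfo();
//     Region *R = RI.getRegionFor(&F.getEntryBlock());
//     (void)R; // smallest region containing the entry block
//     return false;
//   }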
template <>
template <>
inline BasicBlock *
RegionNodeBase<RegionTraits<Function>>::getNodeAs<BasicBlock>() const {
assert(!isSubRegion() && "This is not a BasicBlock RegionNode!");
return getEntry();
}
template <>
template <>
inline Region *
RegionNodeBase<RegionTraits<Function>>::getNodeAs<Region>() const {
assert(isSubRegion() && "This is not a subregion RegionNode!");
auto Unconst = const_cast<RegionNodeBase<RegionTraits<Function>> *>(this);
return reinterpret_cast<Region *>(Unconst);
}
template <class Tr>
inline raw_ostream &operator<<(raw_ostream &OS,
const RegionNodeBase<Tr> &Node) {
typedef typename Tr::BlockT BlockT;
typedef typename Tr::RegionT RegionT;
if (Node.isSubRegion())
return OS << Node.template getNodeAs<RegionT>()->getNameStr();
else
return OS << Node.template getNodeAs<BlockT>()->getName();
}
extern template class RegionBase<RegionTraits<Function>>;
extern template class RegionNodeBase<RegionTraits<Function>>;
extern template class RegionInfoBase<RegionTraits<Function>>;
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/IntervalPartition.h | //===- IntervalPartition.h - Interval partition Calculation -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the IntervalPartition class, which
// calculates and represents the interval partition of a function, or a
// preexisting interval partition.
//
// In this way, the interval partition may be used to reduce a flow graph down
// to its degenerate single node interval partition (unless it is irreducible).
//
// TODO: The IntervalPartition class should take a bool parameter that tells
// whether it should add the "tails" of an interval to an interval itself or if
// they should be represented as distinct intervals.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_INTERVALPARTITION_H
#define LLVM_ANALYSIS_INTERVALPARTITION_H
#include "llvm/Analysis/Interval.h"
#include "llvm/Pass.h"
#include <map>
namespace llvm {
//===----------------------------------------------------------------------===//
//
// IntervalPartition - This class builds and holds an "interval partition" for
// a function. This partition divides the control flow graph into a set of
// maximal intervals, as defined with the properties above. Intuitively, an
// interval is a (possibly nonexistent) loop with a "tail" of non-looping
// nodes following it.
//
class IntervalPartition : public FunctionPass {
typedef std::map<BasicBlock*, Interval*> IntervalMapTy;
IntervalMapTy IntervalMap;
typedef std::vector<Interval*> IntervalListTy;
Interval *RootInterval;
std::vector<Interval*> Intervals;
public:
static char ID; // Pass identification, replacement for typeid
IntervalPartition() : FunctionPass(ID), RootInterval(nullptr) {
initializeIntervalPartitionPass(*PassRegistry::getPassRegistry());
}
// run - Calculate the interval partition for this function
bool runOnFunction(Function &F) override;
// IntervalPartition ctor - Build a reduced interval partition from an
// existing interval graph. This takes an additional boolean parameter to
// distinguish it from a copy constructor. Always pass in false for now.
//
IntervalPartition(IntervalPartition &I, bool);
// print - Show contents in human readable format...
void print(raw_ostream &O, const Module* = nullptr) const override;
// getRootInterval() - Return the root interval that contains the starting
// block of the function.
inline Interval *getRootInterval() { return RootInterval; }
// isDegeneratePartition() - Returns true if the interval partition contains
// a single interval, and thus cannot be simplified anymore.
bool isDegeneratePartition() { return Intervals.size() == 1; }
// TODO: isIrreducible - look for triangle graph.
// getBlockInterval - Return the interval that a basic block exists in.
inline Interval *getBlockInterval(BasicBlock *BB) {
IntervalMapTy::iterator I = IntervalMap.find(BB);
return I != IntervalMap.end() ? I->second : nullptr;
}
// getAnalysisUsage - Implement the Pass API
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
// Interface to Intervals vector...
const std::vector<Interval*> &getIntervals() const { return Intervals; }
// releaseMemory - Reset state back to before function was analyzed
void releaseMemory() override;
private:
// addIntervalToPartition - Add an interval to the internal list of intervals,
// and then add mappings from all of the basic blocks in the interval to the
// interval itself (in the IntervalMap).
//
void addIntervalToPartition(Interval *I);
// updatePredecessors - Interval generation only sets the successor fields of
// the interval data structures. After interval generation is complete,
// run through all of the intervals and propagate successor info as
// predecessor info.
//
void updatePredecessors(Interval *Int);
};
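// Illustrative sketch of querying the analysis from another pass. MyPass, its
// getAnalysisUsage/runOnFunction, and BB are assumptions made for the example:
//
//   AU.addRequired<IntervalPartition>();             // in getAnalysisUsage
//   IntervalPartition &IP = getAnalysis<IntervalPartition>();
//   if (!IP.isDegeneratePartition())
//     if (Interval *I = IP.getBlockInterval(BB))
//       (void)I; // BB belongs to interval I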
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/CFG.h | //===-- Analysis/CFG.h - BasicBlock Analyses --------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This family of functions performs analyses on basic blocks, and instructions
// contained within basic blocks.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_CFG_H
#define LLVM_ANALYSIS_CFG_H
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
namespace llvm {
class BasicBlock;
class DominatorTree;
class Function;
class Instruction;
class LoopInfo;
class TerminatorInst;
/// Analyze the specified function to find all of the loop backedges in the
/// function and return them. This is a relatively cheap (compared to
/// computing dominators and loop info) analysis.
///
/// The output is added to Result, as pairs of <from,to> edge info.
void FindFunctionBackedges(
const Function &F,
SmallVectorImpl<std::pair<const BasicBlock *, const BasicBlock *> > &
Result);
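// Illustrative sketch (F is assumed to be a Function the caller already has):
//
//   SmallVector<std::pair<const BasicBlock *, const BasicBlock *>, 8> Backedges;
//   FindFunctionBackedges(F, Backedges);
//   for (const auto &Edge : Backedges) {
//     // Edge.first -> Edge.second is a loop backedge in F.
//   }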
/// Search for the specified successor of basic block BB and return its position
/// in the terminator instruction's list of successors. It is an error to call
/// this with a block that is not a successor.
unsigned GetSuccessorNumber(BasicBlock *BB, BasicBlock *Succ);
/// Return true if the specified edge is a critical edge. Critical edges are
/// edges from a block with multiple successors to a block with multiple
/// predecessors.
///
bool isCriticalEdge(const TerminatorInst *TI, unsigned SuccNum,
bool AllowIdenticalEdges = false);
/// \brief Determine whether instruction 'To' is reachable from 'From',
/// returning true if uncertain.
///
/// Determine whether there is a path from From to To within a single function.
/// Returns false only if we can prove that once 'From' has been executed then
/// 'To' can not be executed. Conservatively returns true.
///
/// This function is linear with respect to the number of blocks in the CFG,
/// walking down successors from From to reach To, with a fixed threshold.
/// Using DT or LI allows us to answer more quickly. LI reduces the cost of
/// an entire loop of any number of blocks to be the same as the cost of a
/// single block. DT reduces the cost by allowing the search to terminate when
/// we find a block that dominates the block containing 'To'. DT is most useful
/// on branchy code but not loops, and LI is most useful on code with loops but
/// does not help on branchy code outside loops.
bool isPotentiallyReachable(const Instruction *From, const Instruction *To,
const DominatorTree *DT = nullptr,
const LoopInfo *LI = nullptr);
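// Illustrative sketch: conservatively prove that 'To' never executes after
// 'From'. DT and LI may be null; passing them can make the query cheaper.
//
//   if (!isPotentiallyReachable(From, To, DT, LI)) {
//     // There is provably no path from 'From' to 'To'.
//   }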
/// \brief Determine whether block 'To' is reachable from 'From', returning
/// true if uncertain.
///
/// Determine whether there is a path from From to To within a single function.
/// Returns false only if we can prove that once 'From' has been reached then
/// 'To' can not be executed. Conservatively returns true.
bool isPotentiallyReachable(const BasicBlock *From, const BasicBlock *To,
const DominatorTree *DT = nullptr,
const LoopInfo *LI = nullptr);
/// \brief Determine whether there is at least one path from a block in
/// 'Worklist' to 'StopBB', returning true if uncertain.
///
/// Determine whether there is a path from at least one block in Worklist to
/// StopBB within a single function. Returns false only if we can prove that
/// once any block in 'Worklist' has been reached then 'StopBB' can not be
/// executed. Conservatively returns true.
bool isPotentiallyReachableFromMany(SmallVectorImpl<BasicBlock *> &Worklist,
BasicBlock *StopBB,
const DominatorTree *DT = nullptr,
const LoopInfo *LI = nullptr);
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/ScalarEvolutionExpressions.h | //===- llvm/Analysis/ScalarEvolutionExpressions.h - SCEV Exprs --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the classes used to represent and build scalar expressions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_SCALAREVOLUTIONEXPRESSIONS_H
#define LLVM_ANALYSIS_SCALAREVOLUTIONEXPRESSIONS_H
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Support/ErrorHandling.h"
namespace llvm {
class ConstantInt;
class ConstantRange;
class DominatorTree;
enum SCEVTypes {
// These should be ordered in terms of increasing complexity to make the
// folders simpler.
scConstant, scTruncate, scZeroExtend, scSignExtend, scAddExpr, scMulExpr,
scUDivExpr, scAddRecExpr, scUMaxExpr, scSMaxExpr,
scUnknown, scCouldNotCompute
};
//===--------------------------------------------------------------------===//
/// SCEVConstant - This class represents a constant integer value.
///
class SCEVConstant : public SCEV {
friend class ScalarEvolution;
ConstantInt *V;
SCEVConstant(const FoldingSetNodeIDRef ID, ConstantInt *v) :
SCEV(ID, scConstant), V(v) {}
public:
ConstantInt *getValue() const { return V; }
Type *getType() const { return V->getType(); }
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scConstant;
}
};
//===--------------------------------------------------------------------===//
/// SCEVCastExpr - This is the base class for unary cast operator classes.
///
class SCEVCastExpr : public SCEV {
protected:
const SCEV *Op;
Type *Ty;
SCEVCastExpr(const FoldingSetNodeIDRef ID,
unsigned SCEVTy, const SCEV *op, Type *ty);
public:
const SCEV *getOperand() const { return Op; }
Type *getType() const { return Ty; }
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scTruncate ||
S->getSCEVType() == scZeroExtend ||
S->getSCEVType() == scSignExtend;
}
};
//===--------------------------------------------------------------------===//
/// SCEVTruncateExpr - This class represents a truncation of an integer value
/// to a smaller integer value.
///
class SCEVTruncateExpr : public SCEVCastExpr {
friend class ScalarEvolution;
SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, Type *ty);
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scTruncate;
}
};
//===--------------------------------------------------------------------===//
/// SCEVZeroExtendExpr - This class represents a zero extension of a small
/// integer value to a larger integer value.
///
class SCEVZeroExtendExpr : public SCEVCastExpr {
friend class ScalarEvolution;
SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, Type *ty);
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scZeroExtend;
}
};
//===--------------------------------------------------------------------===//
/// SCEVSignExtendExpr - This class represents a sign extension of a small
/// integer value to a larger integer value.
///
class SCEVSignExtendExpr : public SCEVCastExpr {
friend class ScalarEvolution;
SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, Type *ty);
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scSignExtend;
}
};
//===--------------------------------------------------------------------===//
/// SCEVNAryExpr - This node is a base class providing common
/// functionality for n'ary operators.
///
class SCEVNAryExpr : public SCEV {
protected:
// Since SCEVs are immutable, ScalarEvolution allocates operand
// arrays with its SCEVAllocator, so this class just needs a simple
// pointer rather than a more elaborate vector-like data structure.
// This also avoids the need for a non-trivial destructor.
const SCEV *const *Operands;
size_t NumOperands;
SCEVNAryExpr(const FoldingSetNodeIDRef ID,
enum SCEVTypes T, const SCEV *const *O, size_t N)
: SCEV(ID, T), Operands(O), NumOperands(N) {}
public:
size_t getNumOperands() const { return NumOperands; }
const SCEV *getOperand(unsigned i) const {
assert(i < NumOperands && "Operand index out of range!");
return Operands[i];
}
typedef const SCEV *const *op_iterator;
typedef iterator_range<op_iterator> op_range;
op_iterator op_begin() const { return Operands; }
op_iterator op_end() const { return Operands + NumOperands; }
op_range operands() const {
return make_range(op_begin(), op_end());
}
Type *getType() const { return getOperand(0)->getType(); }
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask = NoWrapMask) const {
return (NoWrapFlags)(SubclassData & Mask);
}
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scAddExpr ||
S->getSCEVType() == scMulExpr ||
S->getSCEVType() == scSMaxExpr ||
S->getSCEVType() == scUMaxExpr ||
S->getSCEVType() == scAddRecExpr;
}
};
//===--------------------------------------------------------------------===//
/// SCEVCommutativeExpr - This node is the base class for n'ary commutative
/// operators.
///
class SCEVCommutativeExpr : public SCEVNAryExpr {
protected:
SCEVCommutativeExpr(const FoldingSetNodeIDRef ID,
enum SCEVTypes T, const SCEV *const *O, size_t N)
: SCEVNAryExpr(ID, T, O, N) {}
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scAddExpr ||
S->getSCEVType() == scMulExpr ||
S->getSCEVType() == scSMaxExpr ||
S->getSCEVType() == scUMaxExpr;
}
/// Set flags for a non-recurrence without clearing previously set flags.
void setNoWrapFlags(NoWrapFlags Flags) {
SubclassData |= Flags;
}
};
//===--------------------------------------------------------------------===//
/// SCEVAddExpr - This node represents an addition of some number of SCEVs.
///
class SCEVAddExpr : public SCEVCommutativeExpr {
friend class ScalarEvolution;
SCEVAddExpr(const FoldingSetNodeIDRef ID,
const SCEV *const *O, size_t N)
: SCEVCommutativeExpr(ID, scAddExpr, O, N) {
}
public:
Type *getType() const {
// Use the type of the last operand, which is likely to be a pointer
// type, if there is one. This doesn't usually matter, but it can help
// reduce casts when the expressions are expanded.
return getOperand(getNumOperands() - 1)->getType();
}
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scAddExpr;
}
};
//===--------------------------------------------------------------------===//
/// SCEVMulExpr - This node represents multiplication of some number of SCEVs.
///
class SCEVMulExpr : public SCEVCommutativeExpr {
friend class ScalarEvolution;
SCEVMulExpr(const FoldingSetNodeIDRef ID,
const SCEV *const *O, size_t N)
: SCEVCommutativeExpr(ID, scMulExpr, O, N) {
}
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scMulExpr;
}
};
//===--------------------------------------------------------------------===//
/// SCEVUDivExpr - This class represents a binary unsigned division operation.
///
class SCEVUDivExpr : public SCEV {
friend class ScalarEvolution;
const SCEV *LHS;
const SCEV *RHS;
SCEVUDivExpr(const FoldingSetNodeIDRef ID, const SCEV *lhs, const SCEV *rhs)
: SCEV(ID, scUDivExpr), LHS(lhs), RHS(rhs) {}
public:
const SCEV *getLHS() const { return LHS; }
const SCEV *getRHS() const { return RHS; }
Type *getType() const {
// In most cases the types of LHS and RHS will be the same, but in some
// crazy cases one or the other may be a pointer. ScalarEvolution doesn't
// depend on the type for correctness, but handling types carefully can
// avoid extra casts in the SCEVExpander. The LHS is more likely to be
// a pointer type than the RHS, so use the RHS' type here.
return getRHS()->getType();
}
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scUDivExpr;
}
};
//===--------------------------------------------------------------------===//
/// SCEVAddRecExpr - This node represents a polynomial recurrence on the trip
/// count of the specified loop. This is the primary focus of the
/// ScalarEvolution framework; all the other SCEV subclasses are mostly just
/// supporting infrastructure to allow SCEVAddRecExpr expressions to be
/// created and analyzed.
///
/// All operands of an AddRec are required to be loop invariant.
///
class SCEVAddRecExpr : public SCEVNAryExpr {
friend class ScalarEvolution;
const Loop *L;
SCEVAddRecExpr(const FoldingSetNodeIDRef ID,
const SCEV *const *O, size_t N, const Loop *l)
: SCEVNAryExpr(ID, scAddRecExpr, O, N), L(l) {}
public:
const SCEV *getStart() const { return Operands[0]; }
const Loop *getLoop() const { return L; }
/// getStepRecurrence - This method constructs and returns the recurrence
/// indicating how much this expression steps by. If this is a polynomial
/// of degree N, it returns a chrec of degree N-1.
/// We cannot determine whether the step recurrence has self-wraparound.
const SCEV *getStepRecurrence(ScalarEvolution &SE) const {
if (isAffine()) return getOperand(1);
return SE.getAddRecExpr(SmallVector<const SCEV *, 3>(op_begin()+1,
op_end()),
getLoop(), FlagAnyWrap);
}
/// isAffine - Return true if this represents an expression
/// A + B*x where A and B are loop invariant values.
bool isAffine() const {
// We know that the start value is invariant. This expression is thus
// affine iff the step is also invariant.
return getNumOperands() == 2;
}
/// isQuadratic - Return true if this represents an expression
/// A + B*x + C*x^2 where A, B and C are loop invariant values.
/// This corresponds to an addrec of the form {L,+,M,+,N}
bool isQuadratic() const {
return getNumOperands() == 3;
}
/// Set flags for a recurrence without clearing any previously set flags.
/// For AddRec, either NUW or NSW implies NW. Keep track of this fact here
/// to make it easier to propagate flags.
void setNoWrapFlags(NoWrapFlags Flags) {
if (Flags & (FlagNUW | FlagNSW))
Flags = ScalarEvolution::setFlags(Flags, FlagNW);
SubclassData |= Flags;
}
/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number.
const SCEV *evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const;
/// getNumIterationsInRange - Return the number of iterations of this loop
/// that produce values in the specified constant range. Another way of
/// looking at this is that it returns the first iteration number where the
/// value is not in the condition, thus computing the exit count. If the
/// iteration count can't be computed, an instance of SCEVCouldNotCompute is
/// returned.
const SCEV *getNumIterationsInRange(ConstantRange Range,
ScalarEvolution &SE) const;
/// getPostIncExpr - Return an expression representing the value of
/// this expression one iteration of the loop ahead.
const SCEVAddRecExpr *getPostIncExpr(ScalarEvolution &SE) const {
return cast<SCEVAddRecExpr>(SE.getAddExpr(this, getStepRecurrence(SE)));
}
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scAddRecExpr;
}
};
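// Illustrative sketch: inspect the start and step of an affine add recurrence.
// SE is assumed to be a ScalarEvolution reference and V a loop-varying Value *.
//
//   if (const auto *AR = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(V)))
//     if (AR->isAffine()) {
//       const SCEV *Start = AR->getStart();
//       const SCEV *Step = AR->getStepRecurrence(SE);
//       (void)Start; (void)Step; // AR is {Start,+,Step}<AR->getLoop()>
//     }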
//===--------------------------------------------------------------------===//
/// SCEVSMaxExpr - This class represents a signed maximum selection.
///
class SCEVSMaxExpr : public SCEVCommutativeExpr {
friend class ScalarEvolution;
SCEVSMaxExpr(const FoldingSetNodeIDRef ID,
const SCEV *const *O, size_t N)
: SCEVCommutativeExpr(ID, scSMaxExpr, O, N) {
// Max never overflows.
setNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW));
}
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scSMaxExpr;
}
};
//===--------------------------------------------------------------------===//
/// SCEVUMaxExpr - This class represents an unsigned maximum selection.
///
class SCEVUMaxExpr : public SCEVCommutativeExpr {
friend class ScalarEvolution;
SCEVUMaxExpr(const FoldingSetNodeIDRef ID,
const SCEV *const *O, size_t N)
: SCEVCommutativeExpr(ID, scUMaxExpr, O, N) {
// Max never overflows.
setNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW));
}
public:
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scUMaxExpr;
}
};
//===--------------------------------------------------------------------===//
/// SCEVUnknown - This means that we are dealing with an entirely unknown SCEV
/// value, and only represent it as its LLVM Value. This is the "bottom"
/// value for the analysis.
///
class SCEVUnknown : public SCEV, private CallbackVH {
friend class ScalarEvolution;
// Implement CallbackVH.
void deleted() override;
void allUsesReplacedWith(Value *New) override;
/// SE - The parent ScalarEvolution value. This is used to update
/// the parent's maps when the value associated with a SCEVUnknown
/// is deleted or RAUW'd.
ScalarEvolution *SE;
/// Next - The next pointer in the linked list of all
/// SCEVUnknown instances owned by a ScalarEvolution.
SCEVUnknown *Next;
SCEVUnknown(const FoldingSetNodeIDRef ID, Value *V,
ScalarEvolution *se, SCEVUnknown *next) :
SCEV(ID, scUnknown), CallbackVH(V), SE(se), Next(next) {}
public:
Value *getValue() const { return getValPtr(); }
/// isSizeOf, isAlignOf, isOffsetOf - Test whether this is a special
/// constant representing a type size, alignment, or field offset in
/// a target-independent manner, and hasn't happened to have been
/// folded with other operations into something unrecognizable. This
/// is mainly only useful for pretty-printing and other situations
/// where it isn't absolutely required for these to succeed.
bool isSizeOf(Type *&AllocTy) const;
bool isAlignOf(Type *&AllocTy) const;
bool isOffsetOf(Type *&STy, Constant *&FieldNo) const;
Type *getType() const { return getValPtr()->getType(); }
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scUnknown;
}
};
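// Example (illustrative sketch; handleSizeOf is a hypothetical helper): test
// whether an unknown expression is really a target-independent sizeof:
//
//   Type *AllocTy;
//   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S))
//     if (U->isSizeOf(AllocTy))
//       handleSizeOf(AllocTy); // S is equivalent to sizeof(AllocTy)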
/// SCEVVisitor - This class defines a simple visitor class that may be used
/// for various SCEV analysis purposes.
template<typename SC, typename RetVal=void>
struct SCEVVisitor {
RetVal visit(const SCEV *S) {
switch (S->getSCEVType()) {
case scConstant:
return ((SC*)this)->visitConstant((const SCEVConstant*)S);
case scTruncate:
return ((SC*)this)->visitTruncateExpr((const SCEVTruncateExpr*)S);
case scZeroExtend:
return ((SC*)this)->visitZeroExtendExpr((const SCEVZeroExtendExpr*)S);
case scSignExtend:
return ((SC*)this)->visitSignExtendExpr((const SCEVSignExtendExpr*)S);
case scAddExpr:
return ((SC*)this)->visitAddExpr((const SCEVAddExpr*)S);
case scMulExpr:
return ((SC*)this)->visitMulExpr((const SCEVMulExpr*)S);
case scUDivExpr:
return ((SC*)this)->visitUDivExpr((const SCEVUDivExpr*)S);
case scAddRecExpr:
return ((SC*)this)->visitAddRecExpr((const SCEVAddRecExpr*)S);
case scSMaxExpr:
return ((SC*)this)->visitSMaxExpr((const SCEVSMaxExpr*)S);
case scUMaxExpr:
return ((SC*)this)->visitUMaxExpr((const SCEVUMaxExpr*)S);
case scUnknown:
return ((SC*)this)->visitUnknown((const SCEVUnknown*)S);
case scCouldNotCompute:
return ((SC*)this)->visitCouldNotCompute((const SCEVCouldNotCompute*)S);
default:
llvm_unreachable("Unknown SCEV type!");
}
}
RetVal visitCouldNotCompute(const SCEVCouldNotCompute *S) {
llvm_unreachable("Invalid use of SCEVCouldNotCompute!");
}
};
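// Example (illustrative sketch; SCEVNodeCounter is a hypothetical visitor):
// a SCEVVisitor subclass must provide a handler for every expression kind it
// can encounter. This one counts the nodes of an expression tree.
//
//   struct SCEVNodeCounter : public SCEVVisitor<SCEVNodeCounter, unsigned> {
//     unsigned visitNAry(const SCEVNAryExpr *E) {
//       unsigned N = 1;
//       for (unsigned i = 0, e = E->getNumOperands(); i != e; ++i)
//         N += visit(E->getOperand(i));
//       return N;
//     }
//     unsigned visitConstant(const SCEVConstant *) { return 1; }
//     unsigned visitUnknown(const SCEVUnknown *) { return 1; }
//     unsigned visitTruncateExpr(const SCEVTruncateExpr *E) {
//       return 1 + visit(E->getOperand());
//     }
//     unsigned visitZeroExtendExpr(const SCEVZeroExtendExpr *E) {
//       return 1 + visit(E->getOperand());
//     }
//     unsigned visitSignExtendExpr(const SCEVSignExtendExpr *E) {
//       return 1 + visit(E->getOperand());
//     }
//     unsigned visitAddExpr(const SCEVAddExpr *E) { return visitNAry(E); }
//     unsigned visitMulExpr(const SCEVMulExpr *E) { return visitNAry(E); }
//     unsigned visitAddRecExpr(const SCEVAddRecExpr *E) { return visitNAry(E); }
//     unsigned visitSMaxExpr(const SCEVSMaxExpr *E) { return visitNAry(E); }
//     unsigned visitUMaxExpr(const SCEVUMaxExpr *E) { return visitNAry(E); }
//     unsigned visitUDivExpr(const SCEVUDivExpr *E) {
//       return 1 + visit(E->getLHS()) + visit(E->getRHS());
//     }
//   };
//
//   unsigned Size = SCEVNodeCounter().visit(Expr);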
/// Visit all nodes in the expression tree using worklist traversal.
///
/// Visitor implements:
/// // return true to follow this node.
/// bool follow(const SCEV *S);
/// // return true to terminate the search.
/// bool isDone();
template<typename SV>
class SCEVTraversal {
SV &Visitor;
SmallVector<const SCEV *, 8> Worklist;
SmallPtrSet<const SCEV *, 8> Visited;
void push(const SCEV *S) {
if (Visited.insert(S).second && Visitor.follow(S))
Worklist.push_back(S);
}
public:
SCEVTraversal(SV& V): Visitor(V) {}
void visitAll(const SCEV *Root) {
push(Root);
while (!Worklist.empty() && !Visitor.isDone()) {
const SCEV *S = Worklist.pop_back_val();
switch (S->getSCEVType()) {
case scConstant:
case scUnknown:
break;
case scTruncate:
case scZeroExtend:
case scSignExtend:
push(cast<SCEVCastExpr>(S)->getOperand());
break;
case scAddExpr:
case scMulExpr:
case scSMaxExpr:
case scUMaxExpr:
case scAddRecExpr: {
const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
for (SCEVNAryExpr::op_iterator I = NAry->op_begin(),
E = NAry->op_end(); I != E; ++I) {
push(*I);
}
break;
}
case scUDivExpr: {
const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
push(UDiv->getLHS());
push(UDiv->getRHS());
break;
}
case scCouldNotCompute:
llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
default:
llvm_unreachable("Unknown SCEV kind!");
}
}
}
};
/// Use SCEVTraversal to visit all nodes in the given expression tree.
template<typename SV>
void visitAll(const SCEV *Root, SV& Visitor) {
SCEVTraversal<SV> T(Visitor);
T.visitAll(Root);
}
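// Example (illustrative sketch; SCEVHasUnknown is a hypothetical visitor):
// a visitor compatible with visitAll implements follow() and isDone(). This
// one checks whether an expression mentions any SCEVUnknown and stops the
// walk as soon as one is found.
//
//   struct SCEVHasUnknown {
//     bool Found = false;
//     bool follow(const SCEV *S) {
//       if (isa<SCEVUnknown>(S))
//         Found = true;
//       return true;                    // keep descending into operands
//     }
//     bool isDone() { return Found; }   // terminate early once found
//   };
//
//   SCEVHasUnknown Checker;
//   visitAll(Expr, Checker);
//   bool MentionsUnknown = Checker.Found;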
typedef DenseMap<const Value*, Value*> ValueToValueMap;
/// The SCEVParameterRewriter takes a scalar evolution expression and updates
/// the SCEVUnknown components following the Map (Value -> Value).
struct SCEVParameterRewriter
: public SCEVVisitor<SCEVParameterRewriter, const SCEV*> {
public:
static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE,
ValueToValueMap &Map,
bool InterpretConsts = false) {
SCEVParameterRewriter Rewriter(SE, Map, InterpretConsts);
return Rewriter.visit(Scev);
}
SCEVParameterRewriter(ScalarEvolution &S, ValueToValueMap &M, bool C)
: SE(S), Map(M), InterpretConsts(C) {}
const SCEV *visitConstant(const SCEVConstant *Constant) {
return Constant;
}
const SCEV *visitTruncateExpr(const SCEVTruncateExpr *Expr) {
const SCEV *Operand = visit(Expr->getOperand());
return SE.getTruncateExpr(Operand, Expr->getType());
}
const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
const SCEV *Operand = visit(Expr->getOperand());
return SE.getZeroExtendExpr(Operand, Expr->getType());
}
const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
const SCEV *Operand = visit(Expr->getOperand());
return SE.getSignExtendExpr(Operand, Expr->getType());
}
const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
SmallVector<const SCEV *, 2> Operands;
for (int i = 0, e = Expr->getNumOperands(); i < e; ++i)
Operands.push_back(visit(Expr->getOperand(i)));
return SE.getAddExpr(Operands);
}
const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
SmallVector<const SCEV *, 2> Operands;
for (int i = 0, e = Expr->getNumOperands(); i < e; ++i)
Operands.push_back(visit(Expr->getOperand(i)));
return SE.getMulExpr(Operands);
}
const SCEV *visitUDivExpr(const SCEVUDivExpr *Expr) {
return SE.getUDivExpr(visit(Expr->getLHS()), visit(Expr->getRHS()));
}
const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
SmallVector<const SCEV *, 2> Operands;
for (int i = 0, e = Expr->getNumOperands(); i < e; ++i)
Operands.push_back(visit(Expr->getOperand(i)));
return SE.getAddRecExpr(Operands, Expr->getLoop(),
Expr->getNoWrapFlags());
}
const SCEV *visitSMaxExpr(const SCEVSMaxExpr *Expr) {
SmallVector<const SCEV *, 2> Operands;
for (int i = 0, e = Expr->getNumOperands(); i < e; ++i)
Operands.push_back(visit(Expr->getOperand(i)));
return SE.getSMaxExpr(Operands);
}
const SCEV *visitUMaxExpr(const SCEVUMaxExpr *Expr) {
SmallVector<const SCEV *, 2> Operands;
for (int i = 0, e = Expr->getNumOperands(); i < e; ++i)
Operands.push_back(visit(Expr->getOperand(i)));
return SE.getUMaxExpr(Operands);
}
const SCEV *visitUnknown(const SCEVUnknown *Expr) {
Value *V = Expr->getValue();
if (Map.count(V)) {
Value *NV = Map[V];
if (InterpretConsts && isa<ConstantInt>(NV))
return SE.getConstant(cast<ConstantInt>(NV));
return SE.getUnknown(NV);
}
return Expr;
}
const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
return Expr;
}
private:
ScalarEvolution &SE;
ValueToValueMap ⤅
bool InterpretConsts;
};
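// Example (illustrative sketch; OldParam and NewParam are hypothetical
// Value pointers): substitute one parameter for another throughout an
// expression.
//
//   ValueToValueMap Map;
//   Map[OldParam] = NewParam;
//   const SCEV *Rewritten = SCEVParameterRewriter::rewrite(Expr, SE, Map);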
typedef DenseMap<const Loop*, const SCEV*> LoopToScevMapT;
/// The SCEVApplyRewriter takes a scalar evolution expression and applies
/// the Map (Loop -> SCEV) to all AddRecExprs.
struct SCEVApplyRewriter
: public SCEVVisitor<SCEVApplyRewriter, const SCEV*> {
public:
static const SCEV *rewrite(const SCEV *Scev, LoopToScevMapT &Map,
ScalarEvolution &SE) {
SCEVApplyRewriter Rewriter(SE, Map);
return Rewriter.visit(Scev);
}
SCEVApplyRewriter(ScalarEvolution &S, LoopToScevMapT &M)
: SE(S), Map(M) {}
const SCEV *visitConstant(const SCEVConstant *Constant) {
return Constant;
}
const SCEV *visitTruncateExpr(const SCEVTruncateExpr *Expr) {
const SCEV *Operand = visit(Expr->getOperand());
return SE.getTruncateExpr(Operand, Expr->getType());
}
const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
const SCEV *Operand = visit(Expr->getOperand());
return SE.getZeroExtendExpr(Operand, Expr->getType());
}
const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
const SCEV *Operand = visit(Expr->getOperand());
return SE.getSignExtendExpr(Operand, Expr->getType());
}
const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
SmallVector<const SCEV *, 2> Operands;
for (int i = 0, e = Expr->getNumOperands(); i < e; ++i)
Operands.push_back(visit(Expr->getOperand(i)));
return SE.getAddExpr(Operands);
}
const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
SmallVector<const SCEV *, 2> Operands;
for (int i = 0, e = Expr->getNumOperands(); i < e; ++i)
Operands.push_back(visit(Expr->getOperand(i)));
return SE.getMulExpr(Operands);
}
const SCEV *visitUDivExpr(const SCEVUDivExpr *Expr) {
return SE.getUDivExpr(visit(Expr->getLHS()), visit(Expr->getRHS()));
}
const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
SmallVector<const SCEV *, 2> Operands;
for (int i = 0, e = Expr->getNumOperands(); i < e; ++i)
Operands.push_back(visit(Expr->getOperand(i)));
const Loop *L = Expr->getLoop();
const SCEV *Res = SE.getAddRecExpr(Operands, L, Expr->getNoWrapFlags());
if (0 == Map.count(L))
return Res;
const SCEVAddRecExpr *Rec = (const SCEVAddRecExpr *) Res;
return Rec->evaluateAtIteration(Map[L], SE);
}
const SCEV *visitSMaxExpr(const SCEVSMaxExpr *Expr) {
SmallVector<const SCEV *, 2> Operands;
for (int i = 0, e = Expr->getNumOperands(); i < e; ++i)
Operands.push_back(visit(Expr->getOperand(i)));
return SE.getSMaxExpr(Operands);
}
const SCEV *visitUMaxExpr(const SCEVUMaxExpr *Expr) {
SmallVector<const SCEV *, 2> Operands;
for (int i = 0, e = Expr->getNumOperands(); i < e; ++i)
Operands.push_back(visit(Expr->getOperand(i)));
return SE.getUMaxExpr(Operands);
}
const SCEV *visitUnknown(const SCEVUnknown *Expr) {
return Expr;
}
const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
return Expr;
}
private:
ScalarEvolution &SE;
LoopToScevMapT ⤅
};
/// Applies the Map (Loop -> SCEV) to the given Scev.
static inline const SCEV *apply(const SCEV *Scev, LoopToScevMapT &Map,
ScalarEvolution &SE) {
return SCEVApplyRewriter::rewrite(Scev, Map, SE);
}
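// Example (illustrative sketch; TripCount is a hypothetical const SCEV *):
// evaluate every add recurrence over loop L at a fixed iteration, e.g. a
// previously computed trip count.
//
//   LoopToScevMapT Map;
//   Map[L] = TripCount;
//   const SCEV *AtTripCount = apply(Expr, Map, SE);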
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/ScalarEvolutionExpander.h | //===---- llvm/Analysis/ScalarEvolutionExpander.h - SCEV Exprs --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the classes used to generate code from scalar expressions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_SCALAREVOLUTIONEXPANDER_H
#define LLVM_ANALYSIS_SCALAREVOLUTIONEXPANDER_H
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ScalarEvolutionNormalization.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/ValueHandle.h"
#include <set>
namespace llvm {
class TargetTransformInfo;
/// Return true if the given expression is safe to expand in the sense that
/// all materialized values are safe to speculate.
bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE);
/// This class uses information about analyzed scalars to
/// rewrite expressions in canonical form.
///
/// Clients should create an instance of this class when rewriting is needed,
/// and destroy it when finished to allow the release of the associated
/// memory.
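///
/// A typical use (illustrative sketch; \c SE, \c DL, \c Expr and \c InsertPt
/// are assumed to be provided by the caller) expands an expression into IR
/// just before an existing instruction:
/// \code
///   SCEVExpander Expander(SE, DL, "scev.expansion");
///   Value *V = Expander.expandCodeFor(Expr, Expr->getType(), InsertPt);
/// \endcode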
class SCEVExpander : public SCEVVisitor<SCEVExpander, Value*> {
ScalarEvolution &SE;
const DataLayout &DL;
// New instructions receive a name to identify them with the current pass.
const char* IVName;
// InsertedExpressions caches Values for reuse, so must track RAUW.
std::map<std::pair<const SCEV *, Instruction *>, TrackingVH<Value> >
InsertedExpressions;
// InsertedValues only flags inserted instructions so needs no RAUW.
std::set<AssertingVH<Value> > InsertedValues;
std::set<AssertingVH<Value> > InsertedPostIncValues;
/// A memoization of the "relevant" loop for a given SCEV.
DenseMap<const SCEV *, const Loop *> RelevantLoops;
/// \brief Addrecs referring to any of the given loops are expanded
/// in post-inc mode. For example, expanding {1,+,1}<L> in post-inc mode
/// returns the add instruction that adds one to the phi for {0,+,1}<L>,
/// as opposed to a new phi starting at 1. This is only supported in
/// non-canonical mode.
PostIncLoopSet PostIncLoops;
/// \brief When this is non-null, addrecs expanded in the loop it indicates
/// should be inserted with increments at IVIncInsertPos.
const Loop *IVIncInsertLoop;
/// \brief When expanding addrecs in the IVIncInsertLoop loop, insert the IV
/// increment at this position.
Instruction *IVIncInsertPos;
/// \brief Phis that complete an IV chain. These are candidates for reuse
/// when expanding related addrecs.
std::set<AssertingVH<PHINode> > ChainedPhis;
/// \brief When true, expressions are expanded in "canonical" form. In
/// particular, addrecs are expanded as arithmetic based on a canonical
/// induction variable. When false, expressions are expanded in a more
/// literal form.
bool CanonicalMode;
/// \brief When invoked from LSR, the expander is in "strength reduction"
/// mode. The only difference is that phi's are only reused if they are
/// already in "expanded" form.
bool LSRMode;
typedef IRBuilder<true, TargetFolder> BuilderType;
BuilderType Builder;
#ifndef NDEBUG
const char *DebugType;
#endif
friend struct SCEVVisitor<SCEVExpander, Value*>;
public:
/// \brief Construct a SCEVExpander in "canonical" mode.
explicit SCEVExpander(ScalarEvolution &se, const DataLayout &DL,
const char *name)
: SE(se), DL(DL), IVName(name), IVIncInsertLoop(nullptr),
IVIncInsertPos(nullptr), CanonicalMode(true), LSRMode(false),
Builder(se.getContext(), TargetFolder(DL)) {
#ifndef NDEBUG
DebugType = "";
#endif
}
#ifndef NDEBUG
void setDebugType(const char* s) { DebugType = s; }
#endif
/// \brief Erase the contents of the InsertedExpressions map so that users
/// trying to expand the same expression into multiple BasicBlocks or
/// different places within the same BasicBlock can do so.
void clear() {
InsertedExpressions.clear();
InsertedValues.clear();
InsertedPostIncValues.clear();
ChainedPhis.clear();
}
/// \brief Return true for expressions that may incur non-trivial cost to
/// evaluate at runtime.
bool isHighCostExpansion(const SCEV *Expr, Loop *L) {
SmallPtrSet<const SCEV *, 8> Processed;
return isHighCostExpansionHelper(Expr, L, Processed);
}
/// \brief This method returns the canonical induction variable of the
/// specified type for the specified loop (inserting one if there is none).
/// A canonical induction variable starts at zero and steps by one on each
/// iteration.
PHINode *getOrInsertCanonicalInductionVariable(const Loop *L, Type *Ty);
/// \brief Return the induction variable increment's IV operand.
Instruction *getIVIncOperand(Instruction *IncV, Instruction *InsertPos,
bool allowScale);
/// \brief Utility for hoisting an IV increment.
bool hoistIVInc(Instruction *IncV, Instruction *InsertPos);
/// \brief replace congruent phis with their most canonical
/// representative. Return the number of phis eliminated.
unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT,
SmallVectorImpl<WeakTrackingVH> &DeadInsts,
const TargetTransformInfo *TTI = nullptr);
/// \brief Insert code to directly compute the specified SCEV expression
/// into the program. The inserted code is inserted into the specified
/// block.
Value *expandCodeFor(const SCEV *SH, Type *Ty, Instruction *I);
/// \brief Set the current IV increment loop and position.
void setIVIncInsertPos(const Loop *L, Instruction *Pos) {
assert(!CanonicalMode &&
"IV increment positions are not supported in CanonicalMode");
IVIncInsertLoop = L;
IVIncInsertPos = Pos;
}
/// \brief Enable post-inc expansion for addrecs referring to the given
/// loops. Post-inc expansion is only supported in non-canonical mode.
void setPostInc(const PostIncLoopSet &L) {
assert(!CanonicalMode &&
"Post-inc expansion is not supported in CanonicalMode");
PostIncLoops = L;
}
/// \brief Disable all post-inc expansion.
void clearPostInc() {
PostIncLoops.clear();
// When we change the post-inc loop set, cached expansions may no
// longer be valid.
InsertedPostIncValues.clear();
}
/// \brief Disable the behavior of expanding expressions in canonical form
/// rather than in a more literal form. Non-canonical mode is useful for
/// late optimization passes.
void disableCanonicalMode() { CanonicalMode = false; }
void enableLSRMode() { LSRMode = true; }
/// \brief Clear the current insertion point. This is useful if the
/// instruction that had been serving as the insertion point may have been
/// deleted.
void clearInsertPoint() {
Builder.ClearInsertionPoint();
}
/// \brief Return true if the specified instruction was inserted by the code
/// rewriter. If so, the client should not modify the instruction.
bool isInsertedInstruction(Instruction *I) const {
return InsertedValues.count(I) || InsertedPostIncValues.count(I);
}
void setChainedPhi(PHINode *PN) { ChainedPhis.insert(PN); }
private:
LLVMContext &getContext() const { return SE.getContext(); }
/// \brief Recursive helper function for isHighCostExpansion.
bool isHighCostExpansionHelper(const SCEV *S, Loop *L,
SmallPtrSetImpl<const SCEV *> &Processed);
/// \brief Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
Value *InsertBinop(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS);
/// \brief Arrange for there to be a cast of V to Ty at IP, reusing an
/// existing cast if a suitable one exists, moving an existing cast if a
/// suitable one exists but isn't in the right place, or creating a new
/// one.
Value *ReuseOrCreateCast(Value *V, Type *Ty,
Instruction::CastOps Op,
BasicBlock::iterator IP);
/// \brief Insert a cast of V to the specified type, which must be possible
/// with a noop cast, doing what we can to share the casts.
Value *InsertNoopCastOfTo(Value *V, Type *Ty);
/// \brief Expand a SCEVAddExpr with a pointer type into a GEP
/// instead of using ptrtoint+arithmetic+inttoptr.
Value *expandAddToGEP(const SCEV *const *op_begin,
const SCEV *const *op_end,
PointerType *PTy, Type *Ty, Value *V);
Value *expand(const SCEV *S);
/// \brief Insert code to directly compute the specified SCEV expression
/// into the program. The inserted code is inserted into the SCEVExpander's
/// current insertion point. If a type is specified, the result will be
/// expanded to have that type, with a cast if necessary.
Value *expandCodeFor(const SCEV *SH, Type *Ty = nullptr);
/// \brief Determine the most "relevant" loop for the given SCEV.
const Loop *getRelevantLoop(const SCEV *);
Value *visitConstant(const SCEVConstant *S) {
return S->getValue();
}
Value *visitTruncateExpr(const SCEVTruncateExpr *S);
Value *visitZeroExtendExpr(const SCEVZeroExtendExpr *S);
Value *visitSignExtendExpr(const SCEVSignExtendExpr *S);
Value *visitAddExpr(const SCEVAddExpr *S);
Value *visitMulExpr(const SCEVMulExpr *S);
Value *visitUDivExpr(const SCEVUDivExpr *S);
Value *visitAddRecExpr(const SCEVAddRecExpr *S);
Value *visitSMaxExpr(const SCEVSMaxExpr *S);
Value *visitUMaxExpr(const SCEVUMaxExpr *S);
Value *visitUnknown(const SCEVUnknown *S) {
return S->getValue();
}
void rememberInstruction(Value *I);
bool isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV, const Loop *L);
bool isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV, const Loop *L);
Value *expandAddRecExprLiterally(const SCEVAddRecExpr *);
PHINode *getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
const Loop *L,
Type *ExpandTy,
Type *IntTy,
Type *&TruncTy,
bool &InvertStep);
Value *expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
Type *ExpandTy, Type *IntTy, bool useSubtract);
};
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/IVUsers.h | //===- llvm/Analysis/IVUsers.h - Induction Variable Users -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements bookkeeping for "interesting" users of expressions
// computed from induction variables.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_IVUSERS_H
#define LLVM_ANALYSIS_IVUSERS_H
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionNormalization.h"
#include "llvm/IR/ValueHandle.h"
namespace llvm {
class AssumptionCache;
class DominatorTree;
class Instruction;
class Value;
class ScalarEvolution;
class SCEV;
class IVUsers;
class DataLayout;
/// IVStrideUse - Keep track of one use of a strided induction variable.
/// 'User' is the instruction making use of the induction variable
/// expression, and 'OperandValToReplace' is the operand of the User that
/// is the use.
class IVStrideUse : public CallbackVH, public ilist_node<IVStrideUse> {
friend class IVUsers;
public:
IVStrideUse(IVUsers *P, Instruction* U, Value *O)
: CallbackVH(U), Parent(P), OperandValToReplace(O) {
}
/// getUser - Return the user instruction for this use.
Instruction *getUser() const {
return cast<Instruction>(getValPtr());
}
/// setUser - Assign a new user instruction for this use.
void setUser(Instruction *NewUser) {
setValPtr(NewUser);
}
/// getOperandValToReplace - Return the Value of the operand in the user
/// instruction that this IVStrideUse is representing.
Value *getOperandValToReplace() const {
return OperandValToReplace;
}
/// setOperandValToReplace - Assign a new Value as the operand value
/// to replace.
void setOperandValToReplace(Value *Op) {
OperandValToReplace = Op;
}
/// getPostIncLoops - Return the set of loops for which the expression has
/// been adjusted to use post-inc mode.
const PostIncLoopSet &getPostIncLoops() const {
return PostIncLoops;
}
/// transformToPostInc - Transform the expression to post-inc form for the
/// given loop.
void transformToPostInc(const Loop *L);
private:
/// Parent - a pointer to the IVUsers that owns this IVStrideUse.
IVUsers *Parent;
/// OperandValToReplace - The Value of the operand in the user instruction
/// that this IVStrideUse is representing.
WeakTrackingVH OperandValToReplace;
/// PostIncLoops - The set of loops for which Expr has been adjusted to
/// use post-inc mode. This corresponds with SCEVExpander's post-inc concept.
PostIncLoopSet PostIncLoops;
/// Deleted - Implementation of CallbackVH virtual function to
/// receive notification when the User is deleted.
void deleted() override;
};
template<> struct ilist_traits<IVStrideUse>
: public ilist_default_traits<IVStrideUse> {
// createSentinel is used to get hold of a node that marks the end of
// the list...
// The sentinel is relative to this instance, so we use a non-static
// method.
// HLSL Change Starts
// Temporarily disable "downcast of address" UBSAN runtime error
// https://github.com/microsoft/DirectXShaderCompiler/issues/6446
#ifdef __has_feature
#if __has_feature(undefined_behavior_sanitizer)
__attribute__((no_sanitize("undefined")))
#endif // __has_feature(undefined_behavior_sanitizer)
#endif // defined(__has_feature)
// HLSL Change Ends
IVStrideUse *
createSentinel() const {
// since i(p)lists always publicly derive from the corresponding
// traits, placing a data member in this class will augment i(p)list.
// But since the NodeTy is expected to publicly derive from
// ilist_node<NodeTy>, there is a legal viable downcast from it
// to NodeTy. We use this trick to superpose i(p)list with a "ghostly"
// NodeTy, which becomes the sentinel. Dereferencing the sentinel is
// forbidden (save the ilist_node<NodeTy>) so no one will ever notice
// the superposition.
return static_cast<IVStrideUse*>(&Sentinel);
}
static void destroySentinel(IVStrideUse*) {}
IVStrideUse *provideInitialHead() const { return createSentinel(); }
IVStrideUse *ensureHead(IVStrideUse*) const { return createSentinel(); }
static void noteHead(IVStrideUse*, IVStrideUse*) {}
private:
mutable ilist_node<IVStrideUse> Sentinel;
};
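/// Example (illustrative sketch): from a pass that requires IVUsers, iterate
/// the recorded uses of the current loop's induction variables.
/// \code
///   IVUsers &IU = getAnalysis<IVUsers>();
///   for (IVUsers::iterator I = IU.begin(), E = IU.end(); I != E; ++I) {
///     Instruction *UserInst = I->getUser();
///     const SCEV *UseExpr = IU.getExpr(*I);
///     (void)UserInst; (void)UseExpr; // inspect or rewrite the use here
///   }
/// \endcode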
class IVUsers : public LoopPass {
friend class IVStrideUse;
Loop *L;
AssumptionCache *AC;
LoopInfo *LI;
DominatorTree *DT;
ScalarEvolution *SE;
SmallPtrSet<Instruction*, 16> Processed;
/// IVUses - A list of all tracked IV uses of induction variable expressions
/// we are interested in.
ilist<IVStrideUse> IVUses;
// Ephemeral values used by @llvm.assume in this function.
SmallPtrSet<const Value *, 32> EphValues;
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnLoop(Loop *L, LPPassManager &LPM) override;
void releaseMemory() override;
public:
static char ID; // Pass ID, replacement for typeid
IVUsers();
Loop *getLoop() const { return L; }
/// AddUsersIfInteresting - Inspect the specified Instruction. If it is a
/// reducible SCEV, recursively add its users to the IVUsesByStride set and
/// return true. Otherwise, return false.
bool AddUsersIfInteresting(Instruction *I);
IVStrideUse &AddUser(Instruction *User, Value *Operand);
/// getReplacementExpr - Return a SCEV expression which computes the
/// value of the OperandValToReplace of the given IVStrideUse.
const SCEV *getReplacementExpr(const IVStrideUse &IU) const;
/// getExpr - Return the expression for the use.
const SCEV *getExpr(const IVStrideUse &IU) const;
const SCEV *getStride(const IVStrideUse &IU, const Loop *L) const;
typedef ilist<IVStrideUse>::iterator iterator;
typedef ilist<IVStrideUse>::const_iterator const_iterator;
iterator begin() { return IVUses.begin(); }
iterator end() { return IVUses.end(); }
const_iterator begin() const { return IVUses.begin(); }
const_iterator end() const { return IVUses.end(); }
bool empty() const { return IVUses.empty(); }
bool isIVUserOrOperand(Instruction *Inst) const {
return Processed.count(Inst);
}
void print(raw_ostream &OS, const Module* = nullptr) const override;
/// dump - This method is used for debugging.
void dump() const;
protected:
bool AddUsersImpl(Instruction *I, SmallPtrSetImpl<Loop*> &SimpleLoopNests);
};
Pass *createIVUsersPass();
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/LibCallAliasAnalysis.h | //===- LibCallAliasAnalysis.h - Implement AliasAnalysis for libcalls ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the LibCallAliasAnalysis class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_LIBCALLALIASANALYSIS_H
#define LLVM_ANALYSIS_LIBCALLALIASANALYSIS_H
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
namespace llvm {
class LibCallInfo;
struct LibCallFunctionInfo;
/// LibCallAliasAnalysis - Alias analysis driven from LibCallInfo.
struct LibCallAliasAnalysis : public FunctionPass, public AliasAnalysis {
static char ID; // Class identification
LibCallInfo *LCI;
explicit LibCallAliasAnalysis(LibCallInfo *LC = nullptr)
: FunctionPass(ID), LCI(LC) {
initializeLibCallAliasAnalysisPass(*PassRegistry::getPassRegistry());
}
explicit LibCallAliasAnalysis(char &ID, LibCallInfo *LC)
: FunctionPass(ID), LCI(LC) {
initializeLibCallAliasAnalysisPass(*PassRegistry::getPassRegistry());
}
~LibCallAliasAnalysis() override;
ModRefResult getModRefInfo(ImmutableCallSite CS,
const MemoryLocation &Loc) override;
ModRefResult getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2) override {
// TODO: Could compare two direct calls against each other if we cared to.
return AliasAnalysis::getModRefInfo(CS1, CS2);
}
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnFunction(Function &F) override;
/// getAdjustedAnalysisPointer - This method is used when a pass implements
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
/// specified pass info.
void *getAdjustedAnalysisPointer(const void *PI) override {
if (PI == &AliasAnalysis::ID)
return (AliasAnalysis*)this;
return this;
}
private:
ModRefResult AnalyzeLibCallDetails(const LibCallFunctionInfo *FI,
ImmutableCallSite CS,
const MemoryLocation &Loc);
};
} // End of llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/CallPrinter.h | //===-- CallPrinter.h - Call graph printer external interface ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines external functions that can be called to explicitly
// instantiate the call graph printer.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_CALLPRINTER_H
#define LLVM_ANALYSIS_CALLPRINTER_H
namespace llvm {
class ModulePass;
ModulePass *createCallGraphViewerPass();
ModulePass *createCallGraphPrinterPass();
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/PHITransAddr.h | //===- PHITransAddr.h - PHI Translation for Addresses -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the PHITransAddr class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_PHITRANSADDR_H
#define LLVM_ANALYSIS_PHITRANSADDR_H
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instruction.h"
namespace llvm {
class AssumptionCache;
class DominatorTree;
class DataLayout;
class TargetLibraryInfo;
/// PHITransAddr - An address value which tracks and handles phi translation.
/// As we walk "up" the CFG through predecessors, we need to ensure that the
/// address we're tracking is kept up to date. For example, if we're analyzing
/// an address of "&A[i]" and walk through the definition of 'i' which is a PHI
/// node, we *must* phi translate i to get "&A[j]" or else we will analyze an
/// incorrect pointer in the predecessor block.
///
/// This is designed to be a relatively small object that lives on the stack and
/// is copyable.
///
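/// A typical use (illustrative sketch; \c Ptr, \c CurBB, \c PredBB, \c DT,
/// \c DL and \c AC, an AssumptionCache pointer, are assumed to come from the
/// caller) translates an address from a block into one of its predecessors:
/// \code
///   PHITransAddr Address(Ptr, DL, AC);
///   bool Failed = Address.PHITranslateValue(CurBB, PredBB, &DT,
///                                           /*MustDominate=*/false);
///   Value *Translated = Failed ? nullptr : Address.getAddr();
/// \endcode
///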
class PHITransAddr {
/// Addr - The actual address we're analyzing.
Value *Addr;
/// The DataLayout we are playing with.
const DataLayout &DL;
/// TLI - The target library info if known, otherwise null.
const TargetLibraryInfo *TLI;
/// A cache of @llvm.assume calls used by SimplifyInstruction.
AssumptionCache *AC;
/// InstInputs - The inputs for our symbolic address.
SmallVector<Instruction*, 4> InstInputs;
public:
PHITransAddr(Value *addr, const DataLayout &DL, AssumptionCache *AC)
: Addr(addr), DL(DL), TLI(nullptr), AC(AC) {
// If the address is an instruction, the whole thing is considered an input.
if (Instruction *I = dyn_cast<Instruction>(Addr))
InstInputs.push_back(I);
}
Value *getAddr() const { return Addr; }
/// NeedsPHITranslationFromBlock - Return true if moving from the specified
/// BasicBlock to its predecessors requires PHI translation.
bool NeedsPHITranslationFromBlock(BasicBlock *BB) const {
// We do need translation if one of our input instructions is defined in
// this block.
for (unsigned i = 0, e = InstInputs.size(); i != e; ++i)
if (InstInputs[i]->getParent() == BB)
return true;
return false;
}
/// IsPotentiallyPHITranslatable - If this needs PHI translation, return true
/// if we have some hope of doing it. This should be used as a filter to
/// avoid calling PHITranslateValue in hopeless situations.
bool IsPotentiallyPHITranslatable() const;
/// PHITranslateValue - PHI translate the current address up the CFG from
/// CurBB to Pred, updating our state to reflect any needed changes. If
/// 'MustDominate' is true, the translated value must dominate
/// PredBB. This returns true on failure and sets Addr to null.
bool PHITranslateValue(BasicBlock *CurBB, BasicBlock *PredBB,
const DominatorTree *DT, bool MustDominate);
/// PHITranslateWithInsertion - PHI translate this value into the specified
/// predecessor block, inserting a computation of the value if it is
/// unavailable.
///
/// All newly created instructions are added to the NewInsts list. This
/// returns null on failure.
///
Value *PHITranslateWithInsertion(BasicBlock *CurBB, BasicBlock *PredBB,
const DominatorTree &DT,
SmallVectorImpl<Instruction*> &NewInsts);
void dump() const;
/// Verify - Check internal consistency of this data structure. If the
/// structure is valid, it returns true. If invalid, it prints errors and
/// returns false.
bool Verify() const;
private:
Value *PHITranslateSubExpr(Value *V, BasicBlock *CurBB, BasicBlock *PredBB,
const DominatorTree *DT);
/// InsertPHITranslatedSubExpr - Insert a computation of the PHI translated
/// version of 'V' for the edge PredBB->CurBB into the end of the PredBB
/// block. All newly created instructions are added to the NewInsts list.
/// This returns null on failure.
///
Value *InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
BasicBlock *PredBB, const DominatorTree &DT,
SmallVectorImpl<Instruction*> &NewInsts);
/// AddAsInput - If the specified value is an instruction, add it as an input.
Value *AddAsInput(Value *V) {
// If V is an instruction, it is now an input.
if (Instruction *VI = dyn_cast<Instruction>(V))
InstInputs.push_back(VI);
return V;
}
};
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/LoopPass.h | //===- LoopPass.h - LoopPass class ----------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines LoopPass class. All loop optimization
// and transformation passes are derived from LoopPass.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_LOOPPASS_H
#define LLVM_ANALYSIS_LOOPPASS_H
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/LegacyPassManagers.h"
#include "llvm/Pass.h"
#include <deque>
namespace llvm {
class LPPassManager;
class Function;
class PMStack;
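/// Example (illustrative sketch; HypotheticalLoopPass is not a real pass):
/// a minimal loop pass overrides runOnLoop and reports whether it changed IR.
/// \code
///   struct HypotheticalLoopPass : public LoopPass {
///     static char ID;
///     HypotheticalLoopPass() : LoopPass(ID) {}
///     bool runOnLoop(Loop *L, LPPassManager &LPM) override {
///       return false; // analyze or transform L; return true if IR changed
///     }
///   };
/// \endcode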
class LoopPass : public Pass {
public:
explicit LoopPass(char &pid) : Pass(PT_Loop, pid) {}
/// getPrinterPass - Get a pass to print the function corresponding
/// to a Loop.
Pass *createPrinterPass(raw_ostream &O,
const std::string &Banner) const override;
// runOnLoop - This method should be implemented by the subclass to perform
// whatever action is necessary for the specified Loop.
virtual bool runOnLoop(Loop *L, LPPassManager &LPM) = 0;
using llvm::Pass::doInitialization;
using llvm::Pass::doFinalization;
// Initialization and finalization hooks.
virtual bool doInitialization(Loop *L, LPPassManager &LPM) {
return false;
}
// Finalization hook does not supply Loop because at this time
// the loop nest is completely different.
virtual bool doFinalization() { return false; }
// Check if this pass is suitable for the current LPPassManager, if
// available. This pass P is not suitable for a LPPassManager if P
// is not preserving higher level analysis info used by other
// LPPassManager passes. In such case, pop LPPassManager from the
// stack. This will force assignPassManager() to create new
// LPPassManger as expected.
void preparePassManager(PMStack &PMS) override;
/// Assign pass manager to manage this pass
void assignPassManager(PMStack &PMS, PassManagerType PMT) override;
/// Return what kind of Pass Manager can manage this pass.
PassManagerType getPotentialPassManagerType() const override {
return PMT_LoopPassManager;
}
//===--------------------------------------------------------------------===//
/// SimpleAnalysis - Provides simple interface to update analysis info
/// maintained by various passes. Note, if required this interface can
/// be extracted into a separate abstract class but it would require
/// additional use of multiple inheritance in Pass class hierarchy, something
/// we are trying to avoid.
/// Each loop pass can override these simple analysis hooks to update
/// desired analysis information.
/// cloneBasicBlockAnalysis - Clone analysis info associated with basic block.
virtual void cloneBasicBlockAnalysis(BasicBlock *F, BasicBlock *T, Loop *L) {}
/// deleteAnalysisValue - Delete analysis info associated with value V.
virtual void deleteAnalysisValue(Value *V, Loop *L) {}
/// Delete analysis info associated with Loop L.
/// Called to notify a Pass that a loop has been deleted and any
/// associated analysis values can be deleted.
virtual void deleteAnalysisLoop(Loop *L) {}
protected:
/// skipOptnoneFunction - Containing function has Attribute::OptimizeNone
/// and most transformation passes should skip it.
bool skipOptnoneFunction(const Loop *L) const;
};
class LPPassManager : public FunctionPass, public PMDataManager {
public:
static char ID;
explicit LPPassManager();
/// run - Execute all of the passes scheduled for execution. Keep track of
/// whether any of the passes modifies the module, and if so, return true.
bool runOnFunction(Function &F) override;
/// Pass Manager itself does not invalidate any analysis info.
// LPPassManager needs LoopInfo.
void getAnalysisUsage(AnalysisUsage &Info) const override;
StringRef getPassName() const override {
return "Loop Pass Manager";
}
PMDataManager *getAsPMDataManager() override { return this; }
Pass *getAsPass() override { return this; }
/// Print passes managed by this manager
void dumpPassStructure(unsigned Offset) override;
LoopPass *getContainedPass(unsigned N) {
assert(N < PassVector.size() && "Pass number out of range!");
LoopPass *LP = static_cast<LoopPass *>(PassVector[N]);
return LP;
}
PassManagerType getPassManagerType() const override {
return PMT_LoopPassManager;
}
public:
// Delete loop from the loop queue and loop nest (LoopInfo).
void deleteLoopFromQueue(Loop *L);
// Insert loop into the loop queue and add it as a child of the
// given parent.
void insertLoop(Loop *L, Loop *ParentLoop);
// Insert a loop into the loop queue.
void insertLoopIntoQueue(Loop *L);
// Reoptimize this loop. LPPassManager will re-insert this loop into the
// queue. This allows LoopPass to change loop nest for the loop. This
// utility may send LPPassManager into infinite loops so use caution.
void redoLoop(Loop *L);
//===--------------------------------------------------------------------===//
/// SimpleAnalysis - Provides simple interface to update analysis info
/// maintained by various passes. Note, if required this interface can
/// be extracted into a separate abstract class but it would require
/// additional use of multiple inheritance in Pass class hierarchy, something
/// we are trying to avoid.
/// cloneBasicBlockSimpleAnalysis - Invoke cloneBasicBlockAnalysis hook for
/// all passes that implement simple analysis interface.
void cloneBasicBlockSimpleAnalysis(BasicBlock *From, BasicBlock *To, Loop *L);
/// deleteSimpleAnalysisValue - Invoke deleteAnalysisValue hook for all passes
/// that implement simple analysis interface.
void deleteSimpleAnalysisValue(Value *V, Loop *L);
/// Invoke deleteAnalysisLoop hook for all passes that implement simple
/// analysis interface.
void deleteSimpleAnalysisLoop(Loop *L);
private:
std::deque<Loop *> LQ;
bool skipThisLoop;
bool redoThisLoop;
LoopInfo *LI;
Loop *CurrentLoop;
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/LazyCallGraph.h | //===- LazyCallGraph.h - Analysis of a Module's call graph ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Implements a lazy call graph analysis and related passes for the new pass
/// manager.
///
/// NB: This is *not* a traditional call graph! It is a graph which models both
/// the current calls and potential calls. As a consequence there are many
/// edges in this call graph that do not correspond to a 'call' or 'invoke'
/// instruction.
///
/// The primary use case of this graph analysis is to facilitate iterating
/// across the functions of a module in ways that ensure all callees are
/// visited prior to a caller (given any SCC constraints), or vice versa. As
/// such it is particularly well suited to organizing CGSCC optimizations such
/// as inlining, outlining, argument promotion, etc. That is its primary use
/// case and motivates the design. It may not be appropriate for other
/// purposes. The use graph of functions or some other conservative analysis of
/// call instructions may be interesting for optimizations and subsequent
/// analyses which don't work in the context of an overly specified
/// potential-call-edge graph.
///
/// To understand the specific rules and nature of this call graph analysis,
/// see the documentation of the \c LazyCallGraph below.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_LAZYCALLGRAPH_H
#define LLVM_ANALYSIS_LAZYCALLGRAPH_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/Allocator.h"
#include <iterator>
namespace llvm {
class PreservedAnalyses;
class raw_ostream;
/// \brief A lazily constructed view of the call graph of a module.
///
/// With the edges of this graph, the motivating constraint that we are
/// attempting to maintain is that function-local optimization, CGSCC-local
/// optimizations, and optimizations transforming a pair of functions connected
/// by an edge in the graph, do not invalidate a bottom-up traversal of the SCC
/// DAG. That is, no optimizations will delete, remove, or add an edge such
/// that functions already visited in a bottom-up order of the SCC DAG are no
/// longer valid to have visited, or such that functions not yet visited in
/// a bottom-up order of the SCC DAG are not required to have already been
/// visited.
///
/// Within this constraint, the desire is to minimize the merge points of the
/// SCC DAG. The greater the fanout of the SCC DAG and the fewer merge points
/// in the SCC DAG, the more independence there is in optimizing within it.
/// There is a strong desire to enable parallelization of optimizations over
/// the call graph, and both limited fanout and merge points will (artificially
/// in some cases) limit the scaling of such an effort.
///
/// To this end, the graph represents both direct and any potential resolution to
/// an indirect call edge. Another way to think about it is that it represents
/// both the direct call edges and any direct call edges that might be formed
/// through static optimizations. Specifically, it considers taking the address
/// of a function to be an edge in the call graph because this might be
/// forwarded to become a direct call by some subsequent function-local
/// optimization. The result is that the graph closely follows the use-def
/// edges for functions. Walking "up" the graph can be done by looking at all
/// of the uses of a function.
///
/// The roots of the call graph are the external functions and functions
/// escaped into global variables. Those functions can be called from outside
/// of the module or via unknowable means in the IR -- we may not be able to
/// form even a potential call edge from a function body which may dynamically
/// load the function and call it.
///
/// This analysis still requires updates to remain valid after optimizations
/// which could potentially change the set of potential callees. The
/// constraints it operates under only make the traversal order remain valid.
///
/// The entire analysis must be re-computed if full interprocedural
/// optimizations run at any point. For example, globalopt completely
/// invalidates the information in this analysis.
///
/// FIXME: This class is named LazyCallGraph in a lame attempt to distinguish
/// it from the existing CallGraph. At some point, it is expected that this
/// will be the only call graph and it will be renamed accordingly.
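///
/// Example (illustrative sketch): build the graph for a module \c M and walk
/// its SCCs bottom-up; every SCC containing callees is visited before the
/// SCCs containing their callers.
/// \code
///   LazyCallGraph CG(M);
///   for (LazyCallGraph::SCC &C : CG.postorder_sccs())
///     for (LazyCallGraph::Node *N : C)
///       (void)N->getFunction(); // process each function in the SCC
/// \endcode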
class LazyCallGraph {
public:
class Node;
class SCC;
typedef SmallVector<PointerUnion<Function *, Node *>, 4> NodeVectorT;
typedef SmallVectorImpl<PointerUnion<Function *, Node *>> NodeVectorImplT;
/// \brief A lazy iterator used for both the entry nodes and child nodes.
///
/// When this iterator is dereferenced, if not yet available, a function will
/// be scanned for "calls" or uses of functions and its child information
/// will be constructed. All of these results are accumulated and cached in
/// the graph.
class iterator
: public iterator_adaptor_base<iterator, NodeVectorImplT::iterator,
std::forward_iterator_tag, Node> {
friend class LazyCallGraph;
friend class LazyCallGraph::Node;
LazyCallGraph *G;
NodeVectorImplT::iterator E;
// Build the iterator for a specific position in a node list.
iterator(LazyCallGraph &G, NodeVectorImplT::iterator NI,
NodeVectorImplT::iterator E)
: iterator_adaptor_base(NI), G(&G), E(E) {
while (I != E && I->isNull())
++I;
}
public:
iterator() {}
using iterator_adaptor_base::operator++;
iterator &operator++() {
do {
++I;
} while (I != E && I->isNull());
return *this;
}
reference operator*() const {
if (I->is<Node *>())
return *I->get<Node *>();
Function *F = I->get<Function *>();
Node &ChildN = G->get(*F);
*I = &ChildN;
return ChildN;
}
};
/// \brief A node in the call graph.
///
/// This represents a single node. Its primary roles are to cache the list of
/// callees, de-duplicate and provide fast testing of whether a function is
/// a callee, and facilitate iteration of child nodes in the graph.
class Node {
friend class LazyCallGraph;
friend class LazyCallGraph::SCC;
LazyCallGraph *G;
Function &F;
// We provide for the DFS numbering and Tarjan walk lowlink numbers to be
// stored directly within the node.
int DFSNumber;
int LowLink;
mutable NodeVectorT Callees;
DenseMap<Function *, size_t> CalleeIndexMap;
/// \brief Basic constructor implements the scanning of F into Callees and
/// CalleeIndexMap.
Node(LazyCallGraph &G, Function &F);
/// \brief Internal helper to insert a callee.
void insertEdgeInternal(Function &Callee);
/// \brief Internal helper to insert a callee.
void insertEdgeInternal(Node &CalleeN);
/// \brief Internal helper to remove a callee from this node.
void removeEdgeInternal(Function &Callee);
public:
typedef LazyCallGraph::iterator iterator;
Function &getFunction() const {
return F;
}
iterator begin() const {
return iterator(*G, Callees.begin(), Callees.end());
}
iterator end() const { return iterator(*G, Callees.end(), Callees.end()); }
/// Equality is defined as address equality.
bool operator==(const Node &N) const { return this == &N; }
bool operator!=(const Node &N) const { return !operator==(N); }
};
/// \brief An SCC of the call graph.
///
/// This represents a Strongly Connected Component of the call graph as
/// a collection of call graph nodes. While the order of nodes in the SCC is
/// stable, it is not in any particular order.
class SCC {
friend class LazyCallGraph;
friend class LazyCallGraph::Node;
LazyCallGraph *G;
SmallPtrSet<SCC *, 1> ParentSCCs;
SmallVector<Node *, 1> Nodes;
SCC(LazyCallGraph &G) : G(&G) {}
void insert(Node &N);
void
internalDFS(SmallVectorImpl<std::pair<Node *, Node::iterator>> &DFSStack,
SmallVectorImpl<Node *> &PendingSCCStack, Node *N,
SmallVectorImpl<SCC *> &ResultSCCs);
public:
typedef SmallVectorImpl<Node *>::const_iterator iterator;
typedef pointee_iterator<SmallPtrSet<SCC *, 1>::const_iterator> parent_iterator;
iterator begin() const { return Nodes.begin(); }
iterator end() const { return Nodes.end(); }
parent_iterator parent_begin() const { return ParentSCCs.begin(); }
parent_iterator parent_end() const { return ParentSCCs.end(); }
iterator_range<parent_iterator> parents() const {
return iterator_range<parent_iterator>(parent_begin(), parent_end());
}
/// \brief Test if this SCC is a parent of \a C.
bool isParentOf(const SCC &C) const { return C.isChildOf(*this); }
/// \brief Test if this SCC is an ancestor of \a C.
bool isAncestorOf(const SCC &C) const { return C.isDescendantOf(*this); }
/// \brief Test if this SCC is a child of \a C.
bool isChildOf(const SCC &C) const {
return ParentSCCs.count(const_cast<SCC *>(&C));
}
/// \brief Test if this SCC is a descendant of \a C.
bool isDescendantOf(const SCC &C) const;
/// \brief Short name useful for debugging or logging.
///
/// We use the name of the first function in the SCC to name the SCC for
/// the purposes of debugging and logging.
StringRef getName() const { return (*begin())->getFunction().getName(); }
///@{
/// \name Mutation API
///
/// These methods provide the core API for updating the call graph in the
/// presence of a (potentially still in-flight) DFS-found SCCs.
///
/// Note that these methods sometimes have complex runtimes, so be careful
/// how you call them.
/// \brief Insert an edge from one node in this SCC to another in this SCC.
///
/// By the definition of an SCC, this does not change the nature or make-up
/// of any SCCs.
void insertIntraSCCEdge(Node &CallerN, Node &CalleeN);
/// \brief Insert an edge whose tail is in this SCC and head is in some
/// child SCC.
///
/// There must be an existing path from the caller to the callee. This
/// operation is inexpensive and does not change the set of SCCs in the
/// graph.
void insertOutgoingEdge(Node &CallerN, Node &CalleeN);
/// \brief Insert an edge whose tail is in a descendant SCC and head is in
/// this SCC.
///
/// There must be an existing path from the callee to the caller in this
/// case. NB! This has the potential to be a very expensive function. It
/// inherently forms a cycle in the prior SCC DAG and we have to merge SCCs
/// to resolve that cycle. But finding all of the SCCs which participate in
/// the cycle can in the worst case require traversing every SCC in the
/// graph. Every attempt is made to avoid that, but passes must still
/// exercise caution calling this routine repeatedly.
///
/// FIXME: We could possibly optimize this quite a bit for cases where the
/// caller and callee are very nearby in the graph. See comments in the
/// implementation for details, but that use case might impact users.
SmallVector<SCC *, 1> insertIncomingEdge(Node &CallerN, Node &CalleeN);
/// \brief Remove an edge whose source is in this SCC and target is *not*.
///
/// This removes an inter-SCC edge. All inter-SCC edges originating from
/// this SCC have been fully explored by any in-flight DFS SCC formation,
/// so this is always safe to call once you have the source SCC.
///
/// This operation does not change the set of SCCs or the members of the
/// SCCs and so is very inexpensive. It may change the connectivity graph
/// of the SCCs though, so be careful calling this while iterating over
/// them.
void removeInterSCCEdge(Node &CallerN, Node &CalleeN);
/// \brief Remove an edge which is entirely within this SCC.
///
/// Both the \a Caller and the \a Callee must be within this SCC. Removing
/// such an edge may break cycles that form this SCC and thus this
/// operation may change the SCC graph significantly. In particular, this
/// operation will re-form new SCCs based on the remaining connectivity of
/// the graph. The following invariants are guaranteed to hold after
/// calling this method:
///
/// 1) This SCC is still an SCC in the graph.
/// 2) This SCC will be the parent of any new SCCs. Thus, this SCC is
/// preserved as the root of any new SCC directed graph formed.
/// 3) No SCC other than this SCC has its member set changed (this is
/// inherent in the definition of removing such an edge).
/// 4) All of the parent links of the SCC graph will be updated to reflect
/// the new SCC structure.
/// 5) All SCCs formed out of this SCC, excluding this SCC, will be
/// returned in a vector.
/// 6) The order of the SCCs in the vector will be a valid postorder
/// traversal of the new SCCs.
///
/// These invariants are very important to ensure that we can build
/// optimization pipelines on top of the CGSCC pass manager which
/// intelligently update the SCC graph without invalidating other parts of
/// the SCC graph.
///
/// The runtime complexity of this method is, in the worst case, O(V+E)
/// where V is the number of nodes in this SCC and E is the number of edges
/// leaving the nodes in this SCC. Note that E includes both edges within
/// this SCC and edges from this SCC to child SCCs. Some effort has been
/// made to minimize the overhead of common cases such as self-edges and
/// edge removals which result in a spanning tree with no more cycles.
SmallVector<SCC *, 1> removeIntraSCCEdge(Node &CallerN, Node &CalleeN);
///@}
};
/// \brief A post-order depth-first SCC iterator over the call graph.
///
/// This iterator triggers the Tarjan DFS-based formation of the SCC DAG for
/// the call graph, walking it lazily in depth-first post-order. That is, it
/// always visits SCCs for a callee prior to visiting the SCC for a caller
/// (when they are in different SCCs).
class postorder_scc_iterator
: public iterator_facade_base<postorder_scc_iterator,
std::forward_iterator_tag, SCC> {
friend class LazyCallGraph;
friend class LazyCallGraph::Node;
/// \brief Nonce type to select the constructor for the end iterator.
struct IsAtEndT {};
LazyCallGraph *G;
SCC *C;
// Build the begin iterator for a node.
postorder_scc_iterator(LazyCallGraph &G) : G(&G) {
C = G.getNextSCCInPostOrder();
}
// Build the end iterator for a node. This is selected purely by overload.
postorder_scc_iterator(LazyCallGraph &G, IsAtEndT /*Nonce*/)
: G(&G), C(nullptr) {}
public:
bool operator==(const postorder_scc_iterator &Arg) const {
return G == Arg.G && C == Arg.C;
}
reference operator*() const { return *C; }
using iterator_facade_base::operator++;
postorder_scc_iterator &operator++() {
C = G->getNextSCCInPostOrder();
return *this;
}
};
/// \brief Construct a graph for the given module.
///
/// This sets up the graph and computes all of the entry points of the graph.
/// No function definitions are scanned until their nodes in the graph are
/// requested during traversal.
LazyCallGraph(Module &M);
LazyCallGraph(LazyCallGraph &&G);
LazyCallGraph &operator=(LazyCallGraph &&RHS);
iterator begin() {
return iterator(*this, EntryNodes.begin(), EntryNodes.end());
}
iterator end() { return iterator(*this, EntryNodes.end(), EntryNodes.end()); }
postorder_scc_iterator postorder_scc_begin() {
return postorder_scc_iterator(*this);
}
postorder_scc_iterator postorder_scc_end() {
return postorder_scc_iterator(*this, postorder_scc_iterator::IsAtEndT());
}
iterator_range<postorder_scc_iterator> postorder_sccs() {
return iterator_range<postorder_scc_iterator>(postorder_scc_begin(),
postorder_scc_end());
}
/// \brief Lookup a function in the graph which has already been scanned and
/// added.
Node *lookup(const Function &F) const { return NodeMap.lookup(&F); }
/// \brief Lookup a function's SCC in the graph.
///
/// \returns null if the function hasn't been assigned an SCC via the SCC
/// iterator walk.
SCC *lookupSCC(Node &N) const { return SCCMap.lookup(&N); }
/// \brief Get a graph node for a given function, scanning it to populate the
/// graph data as necessary.
Node &get(Function &F) {
Node *&N = NodeMap[&F];
if (N)
return *N;
return insertInto(F, N);
}
///@{
/// \name Pre-SCC Mutation API
///
/// These methods are only valid to call prior to forming any SCCs for this
/// call graph. They can be used to update the core node-graph during
/// a node-based inorder traversal that precedes any SCC-based traversal.
///
/// Once you begin manipulating a call graph's SCCs, you must perform all
/// mutation of the graph via the SCC methods.
/// \brief Update the call graph after inserting a new edge.
void insertEdge(Node &Caller, Function &Callee);
/// \brief Update the call graph after inserting a new edge.
void insertEdge(Function &Caller, Function &Callee) {
return insertEdge(get(Caller), Callee);
}
/// \brief Update the call graph after deleting an edge.
void removeEdge(Node &Caller, Function &Callee);
/// \brief Update the call graph after deleting an edge.
void removeEdge(Function &Caller, Function &Callee) {
return removeEdge(get(Caller), Callee);
}
///@}
private:
/// \brief Allocator that holds all the call graph nodes.
SpecificBumpPtrAllocator<Node> BPA;
/// \brief Maps function->node for fast lookup.
DenseMap<const Function *, Node *> NodeMap;
/// \brief The entry nodes to the graph.
///
/// These nodes are reachable through "external" means. Put another way, they
/// escape at the module scope.
NodeVectorT EntryNodes;
/// \brief Map of the entry nodes in the graph to their indices in
/// \c EntryNodes.
DenseMap<Function *, size_t> EntryIndexMap;
/// \brief Allocator that holds all the call graph SCCs.
SpecificBumpPtrAllocator<SCC> SCCBPA;
/// \brief Maps Function -> SCC for fast lookup.
DenseMap<Node *, SCC *> SCCMap;
/// \brief The leaf SCCs of the graph.
///
/// These are all of the SCCs which have no children.
SmallVector<SCC *, 4> LeafSCCs;
/// \brief Stack of nodes in the DFS walk.
SmallVector<std::pair<Node *, iterator>, 4> DFSStack;
/// \brief Set of entry nodes not-yet-processed into SCCs.
SmallVector<Function *, 4> SCCEntryNodes;
/// \brief Stack of nodes the DFS has walked but not yet put into a SCC.
SmallVector<Node *, 4> PendingSCCStack;
/// \brief Counter for the next DFS number to assign.
int NextDFSNumber;
/// \brief Helper to insert a new function, with an already looked-up entry in
/// the NodeMap.
Node &insertInto(Function &F, Node *&MappedN);
/// \brief Helper to update pointers back to the graph object during moves.
void updateGraphPtrs();
/// \brief Helper to form a new SCC out of the top of a DFSStack-like
/// structure.
SCC *formSCC(Node *RootN, SmallVectorImpl<Node *> &NodeStack);
/// \brief Retrieve the next SCC in the post-order walk of the call graph.
SCC *getNextSCCInPostOrder();
};
// Provide GraphTraits specializations for call graphs.
template <> struct GraphTraits<LazyCallGraph::Node *> {
typedef LazyCallGraph::Node NodeType;
typedef LazyCallGraph::iterator ChildIteratorType;
static NodeType *getEntryNode(NodeType *N) { return N; }
static ChildIteratorType child_begin(NodeType *N) { return N->begin(); }
static ChildIteratorType child_end(NodeType *N) { return N->end(); }
};
template <> struct GraphTraits<LazyCallGraph *> {
typedef LazyCallGraph::Node NodeType;
typedef LazyCallGraph::iterator ChildIteratorType;
static NodeType *getEntryNode(NodeType *N) { return N; }
static ChildIteratorType child_begin(NodeType *N) { return N->begin(); }
static ChildIteratorType child_end(NodeType *N) { return N->end(); }
};
/// \brief An analysis pass which computes the call graph for a module.
class LazyCallGraphAnalysis {
public:
/// \brief Inform generic clients of the result type.
typedef LazyCallGraph Result;
static void *ID() { return (void *)&PassID; }
static StringRef name() { return "Lazy CallGraph Analysis"; }
/// \brief Compute the \c LazyCallGraph for the module \c M.
///
/// This just builds the set of entry points to the call graph. The rest is
/// built lazily as it is walked.
LazyCallGraph run(Module &M) { return LazyCallGraph(M); }
private:
static char PassID;
};
/// \brief A pass which prints the call graph to a \c raw_ostream.
///
/// This is primarily useful for testing the analysis.
class LazyCallGraphPrinterPass {
raw_ostream &OS;
public:
explicit LazyCallGraphPrinterPass(raw_ostream &OS);
PreservedAnalyses run(Module &M, ModuleAnalysisManager *AM);
static StringRef name() { return "LazyCallGraphPrinterPass"; }
};
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/Lint.h | //===-- llvm/Analysis/Lint.h - LLVM IR Lint ---------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines lint interfaces that can be used for some sanity checking
// of input to the system, and for checking that transformations
// haven't done something bad. In contrast to the Verifier, the Lint checker
// checks for undefined behavior or constructions with likely unintended
// behavior.
//
// To see what specifically is checked, look at Lint.cpp
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_LINT_H
#define LLVM_ANALYSIS_LINT_H
namespace llvm {
class FunctionPass;
class Module;
class Function;
/// @brief Create a lint pass.
///
/// Check a module or function.
FunctionPass *createLintPass();
/// @brief Check a module.
///
/// This should only be used for debugging, because it sets up and runs its
/// own pass manager internally.
void lintModule(
const Module &M ///< The module to be checked
);
// lintFunction - Check a function.
void lintFunction(
const Function &F ///< The function to be checked
);
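// Example (sketch): invoking the checks directly from C++; M and F are a
// Module and a Function assumed to be in scope.
//
//   lintModule(M); // debugging aid only; builds its own pass manager
//   lintFunction(F);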
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/CFGPrinter.h | //===-- CFGPrinter.h - CFG printer external interface -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines external functions that can be called to explicitly
// instantiate the CFG printer.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_CFGPRINTER_H
#define LLVM_ANALYSIS_CFGPRINTER_H
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/GraphWriter.h"
namespace llvm {
template<>
struct DOTGraphTraits<const Function*> : public DefaultDOTGraphTraits {
DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
static std::string getGraphName(const Function *F) {
return "CFG for '" + F->getName().str() + "' function";
}
static std::string getSimpleNodeLabel(const BasicBlock *Node,
const Function *) {
if (!Node->getName().empty())
return Node->getName().str();
std::string Str;
raw_string_ostream OS(Str);
Node->printAsOperand(OS, false);
return OS.str();
}
static std::string getCompleteNodeLabel(const BasicBlock *Node,
const Function *) {
enum { MaxColumns = 80 };
std::string Str;
raw_string_ostream OS(Str);
if (Node->getName().empty()) {
Node->printAsOperand(OS, false);
OS << ":";
}
OS << *Node;
std::string OutStr = OS.str();
if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());
// Process string output to make it nicer...
unsigned ColNum = 0;
unsigned LastSpace = 0;
for (unsigned i = 0; i != OutStr.length(); ++i) {
if (OutStr[i] == '\n') { // Left justify
OutStr[i] = '\\';
OutStr.insert(OutStr.begin()+i+1, 'l');
ColNum = 0;
LastSpace = 0;
} else if (OutStr[i] == ';') { // Delete comments!
unsigned Idx = OutStr.find('\n', i+1); // Find end of line
OutStr.erase(OutStr.begin()+i, OutStr.begin()+Idx);
--i;
} else if (ColNum == MaxColumns) { // Wrap lines.
// Wrap very long names even though we can't find a space.
if (!LastSpace)
LastSpace = i;
OutStr.insert(LastSpace, "\\l...");
ColNum = i - LastSpace;
LastSpace = 0;
i += 3; // The loop will advance 'i' again.
}
else
++ColNum;
if (OutStr[i] == ' ')
LastSpace = i;
}
return OutStr;
}
std::string getNodeLabel(const BasicBlock *Node,
const Function *Graph) {
if (isSimple())
return getSimpleNodeLabel(Node, Graph);
else
return getCompleteNodeLabel(Node, Graph);
}
static std::string getEdgeSourceLabel(const BasicBlock *Node,
succ_const_iterator I) {
// Label source of conditional branches with "T" or "F"
if (const BranchInst *BI = dyn_cast<BranchInst>(Node->getTerminator()))
if (BI->isConditional())
return (I == succ_begin(Node)) ? "T" : "F";
// Label source of switch edges with the associated value.
if (const SwitchInst *SI = dyn_cast<SwitchInst>(Node->getTerminator())) {
unsigned SuccNo = I.getSuccessorIndex();
if (SuccNo == 0) return "def";
std::string Str;
raw_string_ostream OS(Str);
SwitchInst::ConstCaseIt Case =
SwitchInst::ConstCaseIt::fromSuccessorIndex(SI, SuccNo);
OS << Case.getCaseValue()->getValue();
return OS.str();
}
return "";
}
};
} // End llvm namespace
namespace llvm {
class PassRegistry; // HLSL Change
class FunctionPass;
FunctionPass *createCFGPrinterPass ();
FunctionPass *createCFGOnlyPrinterPass ();
void initializeCFGPrinterPasses(PassRegistry &Registry); // HLSL Change
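// Example (sketch, assuming the legacy pass manager from
// llvm/IR/LegacyPassManager.h): emitting a dot file per function of a Module M.
//
//   legacy::PassManager PM;
//   PM.add(createCFGPrinterPass());
//   PM.run(M); // typically writes cfg.<function-name>.dot for each function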
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/CaptureTracking.h | //===----- llvm/Analysis/CaptureTracking.h - Pointer capture ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help determine which pointers are captured.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_CAPTURETRACKING_H
#define LLVM_ANALYSIS_CAPTURETRACKING_H
namespace llvm {
class Value;
class Use;
class Instruction;
class DominatorTree;
/// PointerMayBeCaptured - Return true if this pointer value may be captured
/// by the enclosing function (which is required to exist). This routine can
/// be expensive, so consider caching the results. The boolean ReturnCaptures
/// specifies whether returning the value (or part of it) from the function
/// counts as capturing it or not. The boolean StoreCaptures specifies
/// whether storing the value (or part of it) into memory anywhere
/// automatically counts as capturing it or not.
bool PointerMayBeCaptured(const Value *V,
bool ReturnCaptures,
bool StoreCaptures);
/// PointerMayBeCapturedBefore - Return true if this pointer value may be
/// captured by the enclosing function (which is required to exist). If a
/// DominatorTree is provided, only captures which happen before the given
/// instruction are considered. This routine can be expensive, so consider
/// caching the results. The boolean ReturnCaptures specifies whether
/// returning the value (or part of it) from the function counts as capturing
/// it or not. The boolean StoreCaptures specifies whether storing the value
/// (or part of it) into memory anywhere automatically counts as capturing it
/// or not. Captures by the provided instruction are considered if the
/// final parameter is true.
bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures,
bool StoreCaptures, const Instruction *I,
DominatorTree *DT, bool IncludeI = false);
/// This callback is used in conjunction with PointerMayBeCaptured. In
/// addition to the interface here, you'll need to provide your own getters
/// to see whether anything was captured.
struct CaptureTracker {
virtual ~CaptureTracker();
/// tooManyUses - The depth of traversal has breached a limit. There may be
/// capturing instructions that will not be passed into captured().
virtual void tooManyUses() = 0;
/// shouldExplore - This is the use of a value derived from the pointer.
/// To prune the search (i.e., assume that none of its users could possibly
/// capture) return false. To search it, return true.
///
/// U->getUser() is always an Instruction.
virtual bool shouldExplore(const Use *U);
/// captured - Information about the pointer was captured by the user of
/// use U. Return true to stop the traversal or false to continue looking
/// for more capturing instructions.
virtual bool captured(const Use *U) = 0;
};
/// PointerMayBeCaptured - Visit the value and the values derived from it and
/// find values which appear to be capturing the pointer value. This feeds
/// results into and is controlled by the CaptureTracker object.
void PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker);
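// Example (sketch): a minimal tracker that only records whether any capturing
// use was seen. The name SimpleCaptureTracker is hypothetical, and V is a
// Value* assumed to be in scope.
//
//   struct SimpleCaptureTracker : CaptureTracker {
//     bool Captured = false;
//     void tooManyUses() override { Captured = true; }
//     bool captured(const Use *U) override { Captured = true; return true; }
//   };
//   SimpleCaptureTracker CT;
//   PointerMayBeCaptured(V, &CT);
//   // CT.Captured now indicates whether V appears to escape.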
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/MemoryDependenceAnalysis.h | //===- llvm/Analysis/MemoryDependenceAnalysis.h - Memory Deps --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the MemoryDependenceAnalysis analysis pass.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_MEMORYDEPENDENCEANALYSIS_H
#define LLVM_ANALYSIS_MEMORYDEPENDENCEANALYSIS_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
namespace llvm {
class Function;
class FunctionPass;
class Instruction;
class CallSite;
class AliasAnalysis;
class AssumptionCache;
class MemoryDependenceAnalysis;
class PredIteratorCache;
class DominatorTree;
class PHITransAddr;
/// MemDepResult - A memory dependence query can return one of three different
/// answers, described below.
class MemDepResult {
enum DepType {
/// Invalid - Clients of MemDep never see this.
Invalid = 0,
/// Clobber - This is a dependence on the specified instruction which
/// clobbers the desired value. The pointer member of the MemDepResult
/// pair holds the instruction that clobbers the memory. For example,
/// this occurs when we see a may-aliased store to the memory location we
/// care about.
///
/// There are several cases that may be interesting here:
/// 1. Loads are clobbered by may-alias stores.
/// 2. Loads are considered clobbered by partially-aliased loads. The
/// client may choose to analyze deeper into these cases.
Clobber,
/// Def - This is a dependence on the specified instruction which
/// defines/produces the desired memory location. The pointer member of
/// the MemDepResult pair holds the instruction that defines the memory.
/// Cases of interest:
/// 1. This could be a load or store for dependence queries on
/// load/store. The value loaded or stored is the produced value.
/// Note that the pointer operand may be different than that of the
/// queried pointer due to must aliases and phi translation. Note
/// that the def may not be the same type as the query, the pointers
/// may just be must aliases.
/// 2. For loads and stores, this could be an allocation instruction. In
/// this case, the load is loading an undef value or a store is the
/// first store to (that part of) the allocation.
/// 3. Dependence queries on calls return Def only when they are
/// readonly calls or memory use intrinsics with identical callees
/// and no intervening clobbers. No validation is done that the
/// operands to the calls are the same.
Def,
/// Other - This marker indicates that the query has no known dependency
/// in the specified block. More detailed state info is encoded in the
/// upper part of the pair (i.e. the Instruction*)
Other
};
/// If DepType is "Other", the upper part of the pair
/// (i.e. the Instruction* part) is instead used to encode more detailed
/// type information as follows
enum OtherType {
/// NonLocal - This marker indicates that the query has no dependency in
/// the specified block. To find out more, the client should query other
/// predecessor blocks.
NonLocal = 0x4,
/// NonFuncLocal - This marker indicates that the query has no
/// dependency in the specified function.
NonFuncLocal = 0x8,
/// Unknown - This marker indicates that the query dependency
/// is unknown.
Unknown = 0xc
};
typedef PointerIntPair<Instruction*, 2, DepType> PairTy;
PairTy Value;
explicit MemDepResult(PairTy V) : Value(V) {}
public:
MemDepResult() : Value(nullptr, Invalid) {}
/// get methods: These are static ctor methods for creating various
/// MemDepResult kinds.
static MemDepResult getDef(Instruction *Inst) {
assert(Inst && "Def requires inst");
return MemDepResult(PairTy(Inst, Def));
}
static MemDepResult getClobber(Instruction *Inst) {
assert(Inst && "Clobber requires inst");
return MemDepResult(PairTy(Inst, Clobber));
}
static MemDepResult getNonLocal() {
return MemDepResult(
PairTy(reinterpret_cast<Instruction*>(NonLocal), Other));
}
static MemDepResult getNonFuncLocal() {
return MemDepResult(
PairTy(reinterpret_cast<Instruction*>(NonFuncLocal), Other));
}
static MemDepResult getUnknown() {
return MemDepResult(
PairTy(reinterpret_cast<Instruction*>(Unknown), Other));
}
/// isClobber - Return true if this MemDepResult represents a query that is
/// an instruction clobber dependency.
bool isClobber() const { return Value.getInt() == Clobber; }
/// isDef - Return true if this MemDepResult represents a query that is
/// an instruction definition dependency.
bool isDef() const { return Value.getInt() == Def; }
/// isNonLocal - Return true if this MemDepResult represents a query that
/// is transparent to the start of the block, but where a non-local query has
/// not yet been done.
bool isNonLocal() const {
return Value.getInt() == Other
&& Value.getPointer() == reinterpret_cast<Instruction*>(NonLocal);
}
/// isNonFuncLocal - Return true if this MemDepResult represents a query
/// that is transparent to the start of the function.
bool isNonFuncLocal() const {
return Value.getInt() == Other
&& Value.getPointer() == reinterpret_cast<Instruction*>(NonFuncLocal);
}
/// isUnknown - Return true if this MemDepResult represents a query which
/// cannot and/or will not be computed.
bool isUnknown() const {
return Value.getInt() == Other
&& Value.getPointer() == reinterpret_cast<Instruction*>(Unknown);
}
/// getInst() - If this is a normal dependency, return the instruction that
/// is depended on. Otherwise, return null.
Instruction *getInst() const {
if (Value.getInt() == Other) return nullptr;
return Value.getPointer();
}
bool operator==(const MemDepResult &M) const { return Value == M.Value; }
bool operator!=(const MemDepResult &M) const { return Value != M.Value; }
bool operator<(const MemDepResult &M) const { return Value < M.Value; }
bool operator>(const MemDepResult &M) const { return Value > M.Value; }
private:
friend class MemoryDependenceAnalysis;
/// Dirty - Entries with this marker occur in a LocalDeps map or
/// NonLocalDeps map when the instruction they previously referenced was
/// removed from MemDep. In either case, the entry may include an
/// instruction pointer. If so, the pointer is an instruction in the
/// block where scanning can start from, saving some work.
///
/// In a default-constructed MemDepResult object, the type will be Dirty
/// and the instruction pointer will be null.
///
/// isDirty - Return true if this is a MemDepResult in its dirty/invalid
/// state.
bool isDirty() const { return Value.getInt() == Invalid; }
static MemDepResult getDirty(Instruction *Inst) {
return MemDepResult(PairTy(Inst, Invalid));
}
};
/// NonLocalDepEntry - This is an entry in the NonLocalDepInfo cache. For
/// each BasicBlock (the BB entry) it keeps a MemDepResult.
class NonLocalDepEntry {
BasicBlock *BB;
MemDepResult Result;
public:
NonLocalDepEntry(BasicBlock *bb, MemDepResult result)
: BB(bb), Result(result) {}
// This is used for searches.
NonLocalDepEntry(BasicBlock *bb) : BB(bb) {}
// BB is the sort key, it can't be changed.
BasicBlock *getBB() const { return BB; }
void setResult(const MemDepResult &R) { Result = R; }
const MemDepResult &getResult() const { return Result; }
bool operator<(const NonLocalDepEntry &RHS) const {
return BB < RHS.BB;
}
};
/// NonLocalDepResult - This is a result from a NonLocal dependence query.
/// For each BasicBlock (the BB entry) it keeps a MemDepResult and the
/// (potentially phi translated) address that was live in the block.
class NonLocalDepResult {
NonLocalDepEntry Entry;
Value *Address;
public:
NonLocalDepResult(BasicBlock *bb, MemDepResult result, Value *address)
: Entry(bb, result), Address(address) {}
// BB is the sort key, it can't be changed.
BasicBlock *getBB() const { return Entry.getBB(); }
void setResult(const MemDepResult &R, Value *Addr) {
Entry.setResult(R);
Address = Addr;
}
const MemDepResult &getResult() const { return Entry.getResult(); }
/// getAddress - Return the address of this pointer in this block. This can
/// be different than the address queried for the non-local result because
/// of phi translation. This returns null if the address was not available
/// in a block (i.e. because phi translation failed) or if this is a cached
/// result and that address was deleted.
///
/// The address is always null for a non-local 'call' dependence.
Value *getAddress() const { return Address; }
};
/// MemoryDependenceAnalysis - This is an analysis that determines, for a
/// given memory operation, what preceding memory operations it depends on.
/// It builds on alias analysis information, and tries to provide a lazy,
/// caching interface to a common kind of alias information query.
///
/// The dependency information returned is somewhat unusual, but is pragmatic.
/// If queried about a store or call that might modify memory, the analysis
/// will return the instruction[s] that may either load from that memory or
/// store to it. If queried with a load or call that can never modify memory,
/// the analysis will return calls and stores that might modify the pointer,
/// but generally does not return loads unless a) they are volatile, or
/// b) they load from *must-aliased* pointers. Returning a dependence on
/// must-alias'd pointers instead of all pointers interacts well with the
/// internal caching mechanism.
///
class MemoryDependenceAnalysis : public FunctionPass {
// A map from instructions to their dependency.
typedef DenseMap<Instruction*, MemDepResult> LocalDepMapType;
LocalDepMapType LocalDeps;
public:
typedef std::vector<NonLocalDepEntry> NonLocalDepInfo;
private:
/// ValueIsLoadPair - This is a pair<Value*, bool> where the bool is true if
/// the dependence is a read only dependence, false if read/write.
typedef PointerIntPair<const Value*, 1, bool> ValueIsLoadPair;
/// BBSkipFirstBlockPair - This pair is used when caching information for a
/// block. If the pointer is null, the cache value is not a full query that
/// starts at the specified block. If non-null, the bool indicates whether
/// or not the contents of the block were skipped.
typedef PointerIntPair<BasicBlock*, 1, bool> BBSkipFirstBlockPair;
/// NonLocalPointerInfo - This record is the information kept for each
/// (value, is load) pair.
struct NonLocalPointerInfo {
/// Pair - The pair of the block and the skip-first-block flag.
BBSkipFirstBlockPair Pair;
/// NonLocalDeps - The results of the query for each relevant block.
NonLocalDepInfo NonLocalDeps;
/// Size - The maximum size of the dereferences of the
/// pointer. May be UnknownSize if the sizes are unknown.
uint64_t Size;
/// AATags - The AA tags associated with dereferences of the
/// pointer. The members may be null if there are no tags or
/// conflicting tags.
AAMDNodes AATags;
NonLocalPointerInfo() : Size(MemoryLocation::UnknownSize) {}
};
/// CachedNonLocalPointerInfo - This map stores the cached results of doing
/// a pointer lookup at the bottom of a block. The key of this map is the
/// pointer+isload bit, the value is a list of <bb->result> mappings.
typedef DenseMap<ValueIsLoadPair,
NonLocalPointerInfo> CachedNonLocalPointerInfo;
CachedNonLocalPointerInfo NonLocalPointerDeps;
// A map from instructions to their non-local pointer dependencies.
typedef DenseMap<Instruction*,
SmallPtrSet<ValueIsLoadPair, 4> > ReverseNonLocalPtrDepTy;
ReverseNonLocalPtrDepTy ReverseNonLocalPtrDeps;
/// PerInstNLInfo - This is the instruction we keep for each cached access
/// that we have for an instruction. The pointer is an owning pointer and
/// the bool indicates whether we have any dirty bits in the set.
typedef std::pair<NonLocalDepInfo, bool> PerInstNLInfo;
// A map from instructions to their non-local dependencies.
typedef DenseMap<Instruction*, PerInstNLInfo> NonLocalDepMapType;
NonLocalDepMapType NonLocalDeps;
// A reverse mapping from dependencies to the dependees. This is
// used when removing instructions to keep the cache coherent.
typedef DenseMap<Instruction*,
SmallPtrSet<Instruction*, 4> > ReverseDepMapType;
ReverseDepMapType ReverseLocalDeps;
// A reverse mapping from dependencies to the non-local dependees.
ReverseDepMapType ReverseNonLocalDeps;
/// Current AA implementation, just a cache.
AliasAnalysis *AA;
DominatorTree *DT;
AssumptionCache *AC;
PredIteratorCache PredCache;
public:
MemoryDependenceAnalysis();
~MemoryDependenceAnalysis() override;
static char ID;
/// Pass Implementation stuff. This doesn't do any analysis eagerly.
bool runOnFunction(Function &) override;
/// Clean up memory in between runs
void releaseMemory() override;
/// getAnalysisUsage - Does not modify anything. It uses Alias Analysis and
/// the Assumption Cache.
///
void getAnalysisUsage(AnalysisUsage &AU) const override;
/// getDependency - Return the instruction on which a memory operation
/// depends. See the class comment for more details. It is illegal to call
/// this on non-memory instructions.
MemDepResult getDependency(Instruction *QueryInst, unsigned ScanLimit = 0);
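// Example (sketch): querying the local dependence of a memory instruction
// QueryI, with a MemoryDependenceAnalysis &MDA assumed to be in scope.
//
//   MemDepResult Dep = MDA.getDependency(QueryI);
//   if (Dep.isDef() || Dep.isClobber()) {
//     Instruction *DepInst = Dep.getInst(); // the defining/clobbering inst
//   }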
/// getNonLocalCallDependency - Perform a full dependency query for the
/// specified call, returning the set of blocks that the value is
/// potentially live across. The returned set of results will include a
/// "NonLocal" result for all blocks where the value is live across.
///
/// This method assumes the instruction returns a "NonLocal" dependency
/// within its own block.
///
/// This returns a reference to an internal data structure that may be
/// invalidated on the next non-local query or when an instruction is
/// removed. Clients must copy this data if they want it around longer than
/// that.
const NonLocalDepInfo &getNonLocalCallDependency(CallSite QueryCS);
/// getNonLocalPointerDependency - Perform a full dependency query for an
/// access to the QueryInst's specified memory location, returning the set
/// of instructions that either define or clobber the value.
///
/// Warning: For a volatile query instruction, the dependencies will be
/// accurate, and thus usable for reordering, but it is never legal to
/// remove the query instruction.
///
/// This method assumes the pointer has a "NonLocal" dependency within
/// QueryInst's parent basic block.
void getNonLocalPointerDependency(Instruction *QueryInst,
SmallVectorImpl<NonLocalDepResult> &Result);
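// Example (sketch): collecting the non-local dependences of a load QueryI
// whose local dependence was already found to be NonLocal. MDA (a
// MemoryDependenceAnalysis&) and QueryI are assumed to be in scope.
//
//   SmallVector<NonLocalDepResult, 8> Deps;
//   MDA.getNonLocalPointerDependency(QueryI, Deps);
//   for (const NonLocalDepResult &R : Deps)
//     if (R.getResult().isDef()) {
//       // The value is produced in block R.getBB().
//     }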
/// removeInstruction - Remove an instruction from the dependence analysis,
/// updating the dependence of instructions that previously depended on it.
void removeInstruction(Instruction *InstToRemove);
/// invalidateCachedPointerInfo - This method is used to invalidate cached
/// information about the specified pointer, because it may be too
/// conservative in memdep. This is an optional call that can be used when
/// the client detects an equivalence between the pointer and some other
/// value and replaces the other value with ptr. This can make Ptr available
/// in more places than the cached info necessarily covers.
void invalidateCachedPointerInfo(Value *Ptr);
/// invalidateCachedPredecessors - Clear the PredIteratorCache info.
/// This needs to be done when the CFG changes, e.g., due to splitting
/// critical edges.
void invalidateCachedPredecessors();
/// getPointerDependencyFrom - Return the instruction on which a memory
/// location depends. If isLoad is true, this routine ignores may-aliases
/// with read-only operations. If isLoad is false, this routine ignores
/// may-aliases with reads from read-only locations. If possible, pass
/// the query instruction as well; this function may take advantage of
/// the metadata annotated to the query instruction to refine the result.
///
/// Note that this is an uncached query, and thus may be inefficient.
///
MemDepResult getPointerDependencyFrom(const MemoryLocation &Loc,
bool isLoad,
BasicBlock::iterator ScanIt,
BasicBlock *BB,
Instruction *QueryInst = nullptr,
unsigned Limit = 0);
/// getLoadLoadClobberFullWidthSize - This is a little bit of analysis that
/// looks at a memory location for a load (specified by MemLocBase, Offs,
/// and Size) and compares it against a load. If the specified load could
/// be safely widened to a larger integer load that is 1) still efficient,
/// 2) safe for the target, and 3) would provide the specified memory
/// location value, then this function returns the size in bytes of the
/// load width to use. If not, this returns zero.
static unsigned getLoadLoadClobberFullWidthSize(const Value *MemLocBase,
int64_t MemLocOffs,
unsigned MemLocSize,
const LoadInst *LI);
private:
MemDepResult getCallSiteDependencyFrom(CallSite C, bool isReadOnlyCall,
BasicBlock::iterator ScanIt,
BasicBlock *BB);
bool getNonLocalPointerDepFromBB(Instruction *QueryInst,
const PHITransAddr &Pointer,
const MemoryLocation &Loc, bool isLoad,
BasicBlock *BB,
SmallVectorImpl<NonLocalDepResult> &Result,
DenseMap<BasicBlock *, Value *> &Visited,
bool SkipFirstBlock = false);
MemDepResult GetNonLocalInfoForBlock(Instruction *QueryInst,
const MemoryLocation &Loc, bool isLoad,
BasicBlock *BB, NonLocalDepInfo *Cache,
unsigned NumSortedEntries);
void RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P);
/// verifyRemoved - Verify that the specified instruction does not occur
/// in our internal data structures.
void verifyRemoved(Instruction *Inst) const;
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/Loads.h | //===- Loads.h - Local load analysis --------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_LOADS_H
#define LLVM_ANALYSIS_LOADS_H
#include "llvm/IR/BasicBlock.h"
namespace llvm {
class AliasAnalysis;
class DataLayout;
class MDNode;
/// isSafeToLoadUnconditionally - Return true if we know that executing a load
/// from this value cannot trap. If it is not obviously safe to load from the
/// specified pointer, we do a quick local scan of the basic block containing
/// ScanFrom, to determine if the address is already accessed.
bool isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
unsigned Align);
/// FindAvailableLoadedValue - Scan the ScanBB block backwards (starting at
/// the instruction before ScanFrom) checking to see if we have the value at
/// the memory address *Ptr locally available within a small number of
/// instructions. If the value is available, return it.
///
/// If not, return the iterator for the last validated instruction that the
/// value would be live through. If we scanned the entire block and didn't
/// find something that invalidates *Ptr or provides it, ScanFrom would be
/// left at begin() and this returns null. ScanFrom could also be left partway
/// through the block if the scan stopped early (e.g. after MaxInstsToScan
/// instructions).
///
/// MaxInstsToScan specifies the maximum instructions to scan in the block.
/// If it is set to 0, it will scan the whole block. You can also optionally
/// specify an alias analysis implementation, which makes this more precise.
///
/// If AATags is non-null and a load or store is found, the AA tags from the
/// load or store are recorded there. If there are no AA tags or if no access
/// is found, it is left unmodified.
Value *FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
BasicBlock::iterator &ScanFrom,
unsigned MaxInstsToScan = 6,
AliasAnalysis *AA = nullptr,
AAMDNodes *AATags = nullptr);
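// Example (sketch): trying to forward an already-available value to a load
// instruction LI (a LoadInst* assumed to be in scope).
//
//   BasicBlock::iterator ScanFrom = LI;
//   if (Value *V = FindAvailableLoadedValue(LI->getPointerOperand(),
//                                           LI->getParent(), ScanFrom)) {
//     // V holds the value the load would produce; LI could be replaced.
//   }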
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/TargetFolder.h | //====- TargetFolder.h - Constant folding helper ---------------*- C++ -*-====//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the TargetFolder class, a helper for IRBuilder.
// It provides IRBuilder with a set of methods for creating constants with
// target dependent folding, in addition to the same target-independent
// folding that the ConstantFolder class provides. For general constant
// creation and folding, use ConstantExpr and the routines in
// llvm/Analysis/ConstantFolding.h.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_TARGETFOLDER_H
#define LLVM_ANALYSIS_TARGETFOLDER_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
namespace llvm {
class DataLayout;
/// TargetFolder - Create constants with target dependent folding.
class TargetFolder {
const DataLayout &DL;
/// Fold - Fold the constant using target specific information.
Constant *Fold(Constant *C) const {
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
if (Constant *CF = ConstantFoldConstantExpression(CE, DL))
return CF;
return C;
}
public:
explicit TargetFolder(const DataLayout &DL) : DL(DL) {}
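// Example (sketch): pairing TargetFolder with IRBuilder so that constants are
// folded with DataLayout-aware rules. Ctx, DL, BB, LHS and RHS are names
// assumed to be in scope.
//
//   IRBuilder<true, TargetFolder> Builder(Ctx, TargetFolder(DL));
//   Builder.SetInsertPoint(BB);
//   Value *Sum = Builder.CreateAdd(LHS, RHS); // folded if LHS/RHS are constant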
//===--------------------------------------------------------------------===//
// Binary Operators
//===--------------------------------------------------------------------===//
Constant *CreateAdd(Constant *LHS, Constant *RHS,
bool HasNUW = false, bool HasNSW = false) const {
return Fold(ConstantExpr::getAdd(LHS, RHS, HasNUW, HasNSW));
}
Constant *CreateFAdd(Constant *LHS, Constant *RHS) const {
return Fold(ConstantExpr::getFAdd(LHS, RHS));
}
Constant *CreateSub(Constant *LHS, Constant *RHS,
bool HasNUW = false, bool HasNSW = false) const {
return Fold(ConstantExpr::getSub(LHS, RHS, HasNUW, HasNSW));
}
Constant *CreateFSub(Constant *LHS, Constant *RHS) const {
return Fold(ConstantExpr::getFSub(LHS, RHS));
}
Constant *CreateMul(Constant *LHS, Constant *RHS,
bool HasNUW = false, bool HasNSW = false) const {
return Fold(ConstantExpr::getMul(LHS, RHS, HasNUW, HasNSW));
}
Constant *CreateFMul(Constant *LHS, Constant *RHS) const {
return Fold(ConstantExpr::getFMul(LHS, RHS));
}
Constant *CreateUDiv(Constant *LHS, Constant *RHS, bool isExact = false)const{
return Fold(ConstantExpr::getUDiv(LHS, RHS, isExact));
}
Constant *CreateSDiv(Constant *LHS, Constant *RHS, bool isExact = false)const{
return Fold(ConstantExpr::getSDiv(LHS, RHS, isExact));
}
Constant *CreateFDiv(Constant *LHS, Constant *RHS) const {
return Fold(ConstantExpr::getFDiv(LHS, RHS));
}
Constant *CreateURem(Constant *LHS, Constant *RHS) const {
return Fold(ConstantExpr::getURem(LHS, RHS));
}
Constant *CreateSRem(Constant *LHS, Constant *RHS) const {
return Fold(ConstantExpr::getSRem(LHS, RHS));
}
Constant *CreateFRem(Constant *LHS, Constant *RHS) const {
return Fold(ConstantExpr::getFRem(LHS, RHS));
}
Constant *CreateShl(Constant *LHS, Constant *RHS,
bool HasNUW = false, bool HasNSW = false) const {
return Fold(ConstantExpr::getShl(LHS, RHS, HasNUW, HasNSW));
}
Constant *CreateLShr(Constant *LHS, Constant *RHS, bool isExact = false)const{
return Fold(ConstantExpr::getLShr(LHS, RHS, isExact));
}
Constant *CreateAShr(Constant *LHS, Constant *RHS, bool isExact = false)const{
return Fold(ConstantExpr::getAShr(LHS, RHS, isExact));
}
Constant *CreateAnd(Constant *LHS, Constant *RHS) const {
return Fold(ConstantExpr::getAnd(LHS, RHS));
}
Constant *CreateOr(Constant *LHS, Constant *RHS) const {
return Fold(ConstantExpr::getOr(LHS, RHS));
}
Constant *CreateXor(Constant *LHS, Constant *RHS) const {
return Fold(ConstantExpr::getXor(LHS, RHS));
}
Constant *CreateBinOp(Instruction::BinaryOps Opc,
Constant *LHS, Constant *RHS) const {
return Fold(ConstantExpr::get(Opc, LHS, RHS));
}
//===--------------------------------------------------------------------===//
// Unary Operators
//===--------------------------------------------------------------------===//
Constant *CreateNeg(Constant *C,
bool HasNUW = false, bool HasNSW = false) const {
return Fold(ConstantExpr::getNeg(C, HasNUW, HasNSW));
}
Constant *CreateFNeg(Constant *C) const {
return Fold(ConstantExpr::getFNeg(C));
}
Constant *CreateNot(Constant *C) const {
return Fold(ConstantExpr::getNot(C));
}
//===--------------------------------------------------------------------===//
// Memory Instructions
//===--------------------------------------------------------------------===//
Constant *CreateGetElementPtr(Type *Ty, Constant *C,
ArrayRef<Constant *> IdxList) const {
return Fold(ConstantExpr::getGetElementPtr(Ty, C, IdxList));
}
Constant *CreateGetElementPtr(Type *Ty, Constant *C, Constant *Idx) const {
// This form of the function only exists to avoid ambiguous overload
// warnings about whether to convert Idx to ArrayRef<Constant *> or
// ArrayRef<Value *>.
return Fold(ConstantExpr::getGetElementPtr(Ty, C, Idx));
}
Constant *CreateGetElementPtr(Type *Ty, Constant *C,
ArrayRef<Value *> IdxList) const {
return Fold(ConstantExpr::getGetElementPtr(Ty, C, IdxList));
}
Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
ArrayRef<Constant *> IdxList) const {
return Fold(ConstantExpr::getInBoundsGetElementPtr(Ty, C, IdxList));
}
Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
Constant *Idx) const {
// This form of the function only exists to avoid ambiguous overload
// warnings about whether to convert Idx to ArrayRef<Constant *> or
// ArrayRef<Value *>.
return Fold(ConstantExpr::getInBoundsGetElementPtr(Ty, C, Idx));
}
Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
ArrayRef<Value *> IdxList) const {
return Fold(ConstantExpr::getInBoundsGetElementPtr(Ty, C, IdxList));
}
//===--------------------------------------------------------------------===//
// Cast/Conversion Operators
//===--------------------------------------------------------------------===//
Constant *CreateCast(Instruction::CastOps Op, Constant *C,
Type *DestTy) const {
if (C->getType() == DestTy)
return C; // avoid calling Fold
return Fold(ConstantExpr::getCast(Op, C, DestTy));
}
Constant *CreateIntCast(Constant *C, Type *DestTy,
bool isSigned) const {
if (C->getType() == DestTy)
return C; // avoid calling Fold
return Fold(ConstantExpr::getIntegerCast(C, DestTy, isSigned));
}
Constant *CreatePointerCast(Constant *C, Type *DestTy) const {
if (C->getType() == DestTy)
return C; // avoid calling Fold
return Fold(ConstantExpr::getPointerCast(C, DestTy));
}
Constant *CreateFPCast(Constant *C, Type *DestTy) const {
if (C->getType() == DestTy)
return C; // avoid calling Fold
return Fold(ConstantExpr::getFPCast(C, DestTy));
}
Constant *CreateBitCast(Constant *C, Type *DestTy) const {
return CreateCast(Instruction::BitCast, C, DestTy);
}
Constant *CreateIntToPtr(Constant *C, Type *DestTy) const {
return CreateCast(Instruction::IntToPtr, C, DestTy);
}
Constant *CreatePtrToInt(Constant *C, Type *DestTy) const {
return CreateCast(Instruction::PtrToInt, C, DestTy);
}
Constant *CreateZExtOrBitCast(Constant *C, Type *DestTy) const {
if (C->getType() == DestTy)
return C; // avoid calling Fold
return Fold(ConstantExpr::getZExtOrBitCast(C, DestTy));
}
Constant *CreateSExtOrBitCast(Constant *C, Type *DestTy) const {
if (C->getType() == DestTy)
return C; // avoid calling Fold
return Fold(ConstantExpr::getSExtOrBitCast(C, DestTy));
}
Constant *CreateTruncOrBitCast(Constant *C, Type *DestTy) const {
if (C->getType() == DestTy)
return C; // avoid calling Fold
return Fold(ConstantExpr::getTruncOrBitCast(C, DestTy));
}
Constant *CreatePointerBitCastOrAddrSpaceCast(Constant *C,
Type *DestTy) const {
if (C->getType() == DestTy)
return C; // avoid calling Fold
return Fold(ConstantExpr::getPointerBitCastOrAddrSpaceCast(C, DestTy));
}
//===--------------------------------------------------------------------===//
// Compare Instructions
//===--------------------------------------------------------------------===//
Constant *CreateICmp(CmpInst::Predicate P, Constant *LHS,
Constant *RHS) const {
return Fold(ConstantExpr::getCompare(P, LHS, RHS));
}
Constant *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
Constant *RHS) const {
return Fold(ConstantExpr::getCompare(P, LHS, RHS));
}
//===--------------------------------------------------------------------===//
// Other Instructions
//===--------------------------------------------------------------------===//
Constant *CreateSelect(Constant *C, Constant *True, Constant *False) const {
return Fold(ConstantExpr::getSelect(C, True, False));
}
Constant *CreateExtractElement(Constant *Vec, Constant *Idx) const {
return Fold(ConstantExpr::getExtractElement(Vec, Idx));
}
Constant *CreateInsertElement(Constant *Vec, Constant *NewElt,
Constant *Idx) const {
return Fold(ConstantExpr::getInsertElement(Vec, NewElt, Idx));
}
Constant *CreateShuffleVector(Constant *V1, Constant *V2,
Constant *Mask) const {
return Fold(ConstantExpr::getShuffleVector(V1, V2, Mask));
}
Constant *CreateExtractValue(Constant *Agg,
ArrayRef<unsigned> IdxList) const {
return Fold(ConstantExpr::getExtractValue(Agg, IdxList));
}
Constant *CreateInsertValue(Constant *Agg, Constant *Val,
ArrayRef<unsigned> IdxList) const {
return Fold(ConstantExpr::getInsertValue(Agg, Val, IdxList));
}
};
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/CodeMetrics.h | //===- CodeMetrics.h - Code cost measurements -------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements various weight measurements for code, helping
// the Inliner and other passes decide whether to duplicate its contents.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_CODEMETRICS_H
#define LLVM_ANALYSIS_CODEMETRICS_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/CallSite.h"
namespace llvm {
class AssumptionCache;
class BasicBlock;
class Loop;
class Function;
class Instruction;
class DataLayout;
class TargetTransformInfo;
class Value;
/// \brief Check whether a call will lower to something small.
///
/// This test checks whether this callsite will lower to something
/// significantly cheaper than a traditional call, often a single
/// instruction. Note that if isInstructionFree(CS.getInstruction()) returns
/// true, so does this function.
bool callIsSmall(ImmutableCallSite CS);
/// \brief Utility to calculate the size and a few similar metrics for a set
/// of basic blocks.
struct CodeMetrics {
/// \brief True if this function contains a call to setjmp or other functions
/// with attribute "returns twice" without having the attribute itself.
bool exposesReturnsTwice;
/// \brief True if this function calls itself.
bool isRecursive;
/// \brief True if this function cannot be duplicated.
///
/// True if this function contains one or more indirect branches, or it contains
/// one or more 'noduplicate' instructions.
bool notDuplicatable;
/// \brief True if this function calls alloca (in the C sense).
bool usesDynamicAlloca;
/// \brief Number of instructions in the analyzed blocks.
unsigned NumInsts;
/// \brief Number of analyzed blocks.
unsigned NumBlocks;
/// \brief Keeps track of basic block code size estimates.
DenseMap<const BasicBlock *, unsigned> NumBBInsts;
/// \brief Keep track of the number of calls to 'big' functions.
unsigned NumCalls;
/// \brief The number of calls to internal functions with a single caller.
///
/// These are likely targets for future inlining, likely exposed by
/// interleaved devirtualization.
unsigned NumInlineCandidates;
/// \brief How many instructions produce vector values.
///
/// The inliner is more aggressive with inlining vector kernels.
unsigned NumVectorInsts;
/// \brief How many 'ret' instructions the blocks contain.
unsigned NumRets;
CodeMetrics()
: exposesReturnsTwice(false), isRecursive(false), notDuplicatable(false),
usesDynamicAlloca(false), NumInsts(0), NumBlocks(0), NumCalls(0),
NumInlineCandidates(0), NumVectorInsts(0), NumRets(0) {}
/// \brief Add information about a block to the current state.
void analyzeBasicBlock(const BasicBlock *BB, const TargetTransformInfo &TTI,
SmallPtrSetImpl<const Value*> &EphValues);
/// \brief Collect a loop's ephemeral values (those used only by an assume
/// or similar intrinsics in the loop).
static void collectEphemeralValues(const Loop *L, AssumptionCache *AC,
SmallPtrSetImpl<const Value *> &EphValues);
/// \brief Collect a functions's ephemeral values (those used only by an
/// assume or similar intrinsics in the function).
static void collectEphemeralValues(const Function *L, AssumptionCache *AC,
SmallPtrSetImpl<const Value *> &EphValues);
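// Example (sketch): estimating the size of a function F, given a
// TargetTransformInfo &TTI and an AssumptionCache *AC assumed to be in scope.
//
//   SmallPtrSet<const Value *, 32> EphValues;
//   CodeMetrics::collectEphemeralValues(&F, AC, EphValues);
//   CodeMetrics Metrics;
//   for (const BasicBlock &BB : F)
//     Metrics.analyzeBasicBlock(&BB, TTI, EphValues);
//   // Metrics.NumInsts now holds the instruction count estimate.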
};
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/CallGraphSCCPass.h | //===- CallGraphSCCPass.h - Pass that operates BU on call graph -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the CallGraphSCCPass class, which is used for passes which
// are implemented as bottom-up traversals on the call graph. Because there may
// be cycles in the call graph, passes of this type operate on the call-graph in
// SCC order: that is, they process functions bottom-up, except for recursive
// functions, which they process all at once.
//
// These passes are inherently interprocedural, and are required to keep the
// call graph up-to-date if they do anything which could modify it.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_CALLGRAPHSCCPASS_H
#define LLVM_ANALYSIS_CALLGRAPHSCCPASS_H
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Pass.h"
namespace llvm {
class CallGraphNode;
class CallGraph;
class PMStack;
class CallGraphSCC;
class CallGraphSCCPass : public Pass {
public:
explicit CallGraphSCCPass(char &pid) : Pass(PT_CallGraphSCC, pid) {}
/// createPrinterPass - Get a pass that prints the Module
/// corresponding to a CallGraph.
Pass *createPrinterPass(raw_ostream &O,
const std::string &Banner) const override;
using llvm::Pass::doInitialization;
using llvm::Pass::doFinalization;
/// doInitialization - This method is called before the SCCs of the program
/// have been processed, allowing the pass to do initialization as necessary.
virtual bool doInitialization(CallGraph &CG) {
return false;
}
/// runOnSCC - This method should be implemented by the subclass to perform
/// whatever action is necessary for the specified SCC. Note that
/// non-recursive (or only self-recursive) functions will have an SCC size of
/// 1, where recursive portions of the call graph will have SCC size > 1.
///
/// SCC passes that add or delete functions to the SCC are required to update
/// the SCC list, otherwise stale pointers may be dereferenced.
///
virtual bool runOnSCC(CallGraphSCC &SCC) = 0;
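// Example (sketch): skeleton of a pass built on this interface; the name
// MySCCPass is hypothetical.
//
//   struct MySCCPass : CallGraphSCCPass {
//     static char ID;
//     MySCCPass() : CallGraphSCCPass(ID) {}
//     bool runOnSCC(CallGraphSCC &SCC) override {
//       for (CallGraphNode *N : SCC) {
//         // Inspect or transform the function behind each node.
//       }
//       return false; // report whether anything was changed
//     }
//   };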
/// doFinalization - This method is called after the SCCs of the program have
/// been processed, allowing the pass to do final cleanup as necessary.
virtual bool doFinalization(CallGraph &CG) {
return false;
}
/// Assign a pass manager to manage this pass.
void assignPassManager(PMStack &PMS, PassManagerType PMT) override;
/// Return what kind of Pass Manager can manage this pass.
PassManagerType getPotentialPassManagerType() const override {
return PMT_CallGraphPassManager;
}
/// getAnalysisUsage - For this class, we declare that we require and preserve
/// the call graph. If the derived class implements this method, it should
/// always explicitly call the implementation here.
void getAnalysisUsage(AnalysisUsage &Info) const override;
};
/// CallGraphSCC - This is a single SCC that a CallGraphSCCPass is run on.
class CallGraphSCC {
void *Context; // The CGPassManager object that is vending this.
std::vector<CallGraphNode*> Nodes;
public:
CallGraphSCC(void *context) : Context(context) {}
void initialize(CallGraphNode*const*I, CallGraphNode*const*E) {
Nodes.assign(I, E);
}
bool isSingular() const { return Nodes.size() == 1; }
unsigned size() const { return Nodes.size(); }
/// ReplaceNode - This informs the SCC and the pass manager that the specified
/// Old node has been deleted, and New is to be used in its place.
void ReplaceNode(CallGraphNode *Old, CallGraphNode *New);
typedef std::vector<CallGraphNode*>::const_iterator iterator;
iterator begin() const { return Nodes.begin(); }
iterator end() const { return Nodes.end(); }
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/LoopIterator.h | //===--------- LoopIterator.h - Iterate over loop blocks --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This file defines iterators to visit the basic blocks within a loop.
//
// These iterators currently visit blocks within subloops as well.
// Unfortunately we have no efficient way of summarizing loop exits which would
// allow skipping subloops during traversal.
//
// If you want to visit all blocks in a loop and don't need an ordered traversal,
// use Loop::block_begin() instead.
//
// This is intentionally designed to work with ill-formed loops in which the
// backedge has been deleted. The only prerequisite is that all blocks
// contained within the loop according to the most recent LoopInfo analysis are
// reachable from the loop header.
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_LOOPITERATOR_H
#define LLVM_ANALYSIS_LOOPITERATOR_H
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Analysis/LoopInfo.h"
namespace llvm {
class LoopBlocksTraversal;
/// Store the result of a depth first search within basic blocks contained by a
/// single loop.
///
/// TODO: This could be generalized for any CFG region, or the entire CFG.
class LoopBlocksDFS {
public:
/// Postorder list iterators.
typedef std::vector<BasicBlock*>::const_iterator POIterator;
typedef std::vector<BasicBlock*>::const_reverse_iterator RPOIterator;
friend class LoopBlocksTraversal;
private:
Loop *L;
/// Map each block to its postorder number. A block is only mapped after it is
/// preorder visited by DFS. Its postorder number is initially zero and set
/// to nonzero after it is finished by postorder traversal.
DenseMap<BasicBlock*, unsigned> PostNumbers;
std::vector<BasicBlock*> PostBlocks;
public:
LoopBlocksDFS(Loop *Container) :
L(Container), PostNumbers(NextPowerOf2(Container->getNumBlocks())) {
PostBlocks.reserve(Container->getNumBlocks());
}
Loop *getLoop() const { return L; }
/// Traverse the loop blocks and store the DFS result.
void perform(LoopInfo *LI);
/// Return true if postorder numbers are assigned to all loop blocks.
bool isComplete() const { return PostBlocks.size() == L->getNumBlocks(); }
/// Iterate over the cached postorder blocks.
POIterator beginPostorder() const {
assert(isComplete() && "bad loop DFS");
return PostBlocks.begin();
}
POIterator endPostorder() const { return PostBlocks.end(); }
/// Reverse iterate over the cached postorder blocks.
RPOIterator beginRPO() const {
assert(isComplete() && "bad loop DFS");
return PostBlocks.rbegin();
}
RPOIterator endRPO() const { return PostBlocks.rend(); }
/// Return true if this block has been preorder visited.
bool hasPreorder(BasicBlock *BB) const { return PostNumbers.count(BB); }
/// Return true if this block has a postorder number.
bool hasPostorder(BasicBlock *BB) const {
DenseMap<BasicBlock*, unsigned>::const_iterator I = PostNumbers.find(BB);
return I != PostNumbers.end() && I->second;
}
/// Get a block's postorder number.
unsigned getPostorder(BasicBlock *BB) const {
DenseMap<BasicBlock*, unsigned>::const_iterator I = PostNumbers.find(BB);
assert(I != PostNumbers.end() && "block not visited by DFS");
assert(I->second && "block not finished by DFS");
return I->second;
}
/// Get a block's reverse postorder number.
unsigned getRPO(BasicBlock *BB) const {
return 1 + PostBlocks.size() - getPostorder(BB);
}
void clear() {
PostNumbers.clear();
PostBlocks.clear();
}
};
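// Example (sketch): computing and walking a loop's blocks in reverse
// post-order, for a Loop *L and LoopInfo *LI assumed to be in scope.
//
//   LoopBlocksDFS DFS(L);
//   DFS.perform(LI);
//   for (auto I = DFS.beginRPO(), E = DFS.endRPO(); I != E; ++I) {
//     BasicBlock *BB = *I; // blocks arrive in reverse post-order
//   }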
/// Specialize po_iterator_storage to record postorder numbers.
template<> class po_iterator_storage<LoopBlocksTraversal, true> {
LoopBlocksTraversal &LBT;
public:
po_iterator_storage(LoopBlocksTraversal &lbs) : LBT(lbs) {}
// These functions are defined below.
bool insertEdge(BasicBlock *From, BasicBlock *To);
void finishPostorder(BasicBlock *BB);
};
/// Traverse the blocks in a loop using a depth-first search.
class LoopBlocksTraversal {
public:
/// Graph traversal iterator.
typedef po_iterator<BasicBlock*, LoopBlocksTraversal, true> POTIterator;
private:
LoopBlocksDFS &DFS;
LoopInfo *LI;
public:
LoopBlocksTraversal(LoopBlocksDFS &Storage, LoopInfo *LInfo) :
DFS(Storage), LI(LInfo) {}
/// Postorder traversal over the graph. This only needs to be done once.
/// po_iterator "automatically" calls back to visitPreorder and
/// finishPostorder to record the DFS result.
POTIterator begin() {
assert(DFS.PostBlocks.empty() && "Need clear DFS result before traversing");
assert(DFS.L->getNumBlocks() && "po_iterator cannot handle an empty graph");
return po_ext_begin(DFS.L->getHeader(), *this);
}
POTIterator end() {
// po_ext_end interface requires a basic block, but ignores its value.
return po_ext_end(DFS.L->getHeader(), *this);
}
/// Called by po_iterator upon reaching a block via a CFG edge. If this block
/// is contained in the loop and has not been visited, then mark it preorder
/// visited and return true.
///
/// TODO: If anyone is interested, we could record preorder numbers here.
bool visitPreorder(BasicBlock *BB) {
if (!DFS.L->contains(LI->getLoopFor(BB)))
return false;
return DFS.PostNumbers.insert(std::make_pair(BB, 0)).second;
}
/// Called by po_iterator each time it advances, indicating a block's
/// postorder.
void finishPostorder(BasicBlock *BB) {
assert(DFS.PostNumbers.count(BB) && "Loop DFS skipped preorder");
DFS.PostBlocks.push_back(BB);
DFS.PostNumbers[BB] = DFS.PostBlocks.size();
}
};
inline bool po_iterator_storage<LoopBlocksTraversal, true>::
insertEdge(BasicBlock *From, BasicBlock *To) {
return LBT.visitPreorder(To);
}
inline void po_iterator_storage<LoopBlocksTraversal, true>::
finishPostorder(BasicBlock *BB) {
LBT.finishPostorder(BB);
}
} // End namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/PostDominators.h | //=- llvm/Analysis/PostDominators.h - Post Dominator Calculation-*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file exposes interfaces to post dominance information.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_POSTDOMINATORS_H
#define LLVM_ANALYSIS_POSTDOMINATORS_H
#include "llvm/IR/Dominators.h"
namespace llvm {
/// PostDominatorTree Class - Function pass that computes the post-dominator
/// tree for a function, wrapping a DominatorTreeBase configured for
/// post-dominance.
///
struct PostDominatorTree : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
DominatorTreeBase<BasicBlock>* DT;
PostDominatorTree() : FunctionPass(ID) {
initializePostDominatorTreePass(*PassRegistry::getPassRegistry());
DT = new DominatorTreeBase<BasicBlock>(true);
}
~PostDominatorTree() override;
bool runOnFunction(Function &F) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
inline const std::vector<BasicBlock*> &getRoots() const {
return DT->getRoots();
}
inline DomTreeNode *getRootNode() const {
return DT->getRootNode();
}
inline DomTreeNode *operator[](BasicBlock *BB) const {
return DT->getNode(BB);
}
inline DomTreeNode *getNode(BasicBlock *BB) const {
return DT->getNode(BB);
}
inline bool dominates(DomTreeNode* A, DomTreeNode* B) const {
return DT->dominates(A, B);
}
inline bool dominates(const BasicBlock* A, const BasicBlock* B) const {
return DT->dominates(A, B);
}
inline bool properlyDominates(const DomTreeNode* A, DomTreeNode* B) const {
return DT->properlyDominates(A, B);
}
inline bool properlyDominates(BasicBlock* A, BasicBlock* B) const {
return DT->properlyDominates(A, B);
}
inline BasicBlock *findNearestCommonDominator(BasicBlock *A, BasicBlock *B) {
return DT->findNearestCommonDominator(A, B);
}
inline const BasicBlock *findNearestCommonDominator(const BasicBlock *A,
const BasicBlock *B) {
return DT->findNearestCommonDominator(A, B);
}
/// Get all nodes post-dominated by R, including R itself.
void getDescendants(BasicBlock *R,
SmallVectorImpl<BasicBlock *> &Result) const {
DT->getDescendants(R, Result);
}
void releaseMemory() override {
DT->releaseMemory();
}
void print(raw_ostream &OS, const Module*) const override;
};
FunctionPass* createPostDomTree();
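// A minimal illustrative sketch (not part of the original header): a client
// pass requesting post-dominance information through the legacy pass manager.
// The pass name "MyClientPass" is hypothetical.
//
// struct MyClientPass : public FunctionPass {
// static char ID;
// MyClientPass() : FunctionPass(ID) {}
// void getAnalysisUsage(AnalysisUsage &AU) const override {
// AU.addRequired<PostDominatorTree>();
// AU.setPreservesAll();
// }
// bool runOnFunction(Function &F) override {
// PostDominatorTree &PDT = getAnalysis<PostDominatorTree>();
// BasicBlock &Entry = F.getEntryBlock();
// (void)PDT.dominates(&Entry, &Entry); // a block post-dominates itself
// return false;
// }
// };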
template <> struct GraphTraits<PostDominatorTree*>
: public GraphTraits<DomTreeNode*> {
static NodeType *getEntryNode(PostDominatorTree *DT) {
return DT->getRootNode();
}
static nodes_iterator nodes_begin(PostDominatorTree *N) {
if (getEntryNode(N))
return df_begin(getEntryNode(N));
else
return df_end(getEntryNode(N));
}
static nodes_iterator nodes_end(PostDominatorTree *N) {
return df_end(getEntryNode(N));
}
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/BlockFrequencyInfoImpl.h | //==- BlockFrequencyInfoImpl.h - Block Frequency Implementation -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Shared implementation of BlockFrequency for IR and Machine Instructions.
// See the documentation below for BlockFrequencyInfoImpl for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_BLOCKFREQUENCYINFOIMPL_H
#define LLVM_ANALYSIS_BLOCKFREQUENCYINFOIMPL_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ScaledNumber.h"
#include "llvm/Support/raw_ostream.h"
#include <deque>
#include <list>
#include <string>
#include <vector>
#define DEBUG_TYPE "block-freq"
namespace llvm {
class BasicBlock;
class BranchProbabilityInfo;
class Function;
class Loop;
class LoopInfo;
class MachineBasicBlock;
class MachineBranchProbabilityInfo;
class MachineFunction;
class MachineLoop;
class MachineLoopInfo;
namespace bfi_detail {
struct IrreducibleGraph;
// This is part of a workaround for a GCC 4.7 crash on lambdas.
template <class BT> struct BlockEdgesAdder;
/// \brief Mass of a block.
///
/// This class implements a sort of fixed-point fraction always between 0.0 and
/// 1.0. getMass() == UINT64_MAX indicates a value of 1.0.
///
/// Masses can be added and subtracted. Simple saturation arithmetic is used,
/// so arithmetic operations never overflow or underflow.
///
/// Masses can be multiplied. Multiplication treats full mass as 1.0 and uses
/// an inexpensive floating-point algorithm that's off-by-one (almost, but not
/// quite, maximum precision).
///
/// Masses can be scaled by \a BranchProbability at maximum precision.
class BlockMass {
uint64_t Mass;
public:
BlockMass() : Mass(0) {}
explicit BlockMass(uint64_t Mass) : Mass(Mass) {}
static BlockMass getEmpty() { return BlockMass(); }
static BlockMass getFull() { return BlockMass(UINT64_MAX); }
uint64_t getMass() const { return Mass; }
bool isFull() const { return Mass == UINT64_MAX; }
bool isEmpty() const { return !Mass; }
bool operator!() const { return isEmpty(); }
/// \brief Add another mass.
///
/// Adds another mass, saturating at \a isFull() rather than overflowing.
BlockMass &operator+=(const BlockMass &X) {
uint64_t Sum = Mass + X.Mass;
Mass = Sum < Mass ? UINT64_MAX : Sum;
return *this;
}
/// \brief Subtract another mass.
///
/// Subtracts another mass, saturating at \a isEmpty() rather than
/// underflowing.
BlockMass &operator-=(const BlockMass &X) {
uint64_t Diff = Mass - X.Mass;
Mass = Diff > Mass ? 0 : Diff;
return *this;
}
BlockMass &operator*=(const BranchProbability &P) {
Mass = P.scale(Mass);
return *this;
}
bool operator==(const BlockMass &X) const { return Mass == X.Mass; }
bool operator!=(const BlockMass &X) const { return Mass != X.Mass; }
bool operator<=(const BlockMass &X) const { return Mass <= X.Mass; }
bool operator>=(const BlockMass &X) const { return Mass >= X.Mass; }
bool operator<(const BlockMass &X) const { return Mass < X.Mass; }
bool operator>(const BlockMass &X) const { return Mass > X.Mass; }
/// \brief Convert to scaled number.
///
/// Convert to \a ScaledNumber. \a isFull() gives 1.0, while \a isEmpty()
/// gives slightly above 0.0.
ScaledNumber<uint64_t> toScaled() const;
void dump() const;
raw_ostream &print(raw_ostream &OS) const;
};
inline BlockMass operator+(const BlockMass &L, const BlockMass &R) {
return BlockMass(L) += R;
}
inline BlockMass operator-(const BlockMass &L, const BlockMass &R) {
return BlockMass(L) -= R;
}
inline BlockMass operator*(const BlockMass &L, const BranchProbability &R) {
return BlockMass(L) *= R;
}
inline BlockMass operator*(const BranchProbability &L, const BlockMass &R) {
return BlockMass(R) *= L;
}
inline raw_ostream &operator<<(raw_ostream &OS, const BlockMass &X) {
return X.print(OS);
}
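// A minimal illustrative sketch (not part of the original header) of the
// saturating fixed-point arithmetic described above:
//
// BlockMass Half = BlockMass::getFull() * BranchProbability(1, 2);
// BlockMass Sat = BlockMass::getFull() + Half; // saturates at isFull()
// BlockMass Zero = Half - BlockMass::getFull(); // saturates at isEmpty()
// assert(Sat.isFull() && Zero.isEmpty());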
} // end namespace bfi_detail
template <> struct isPodLike<bfi_detail::BlockMass> {
static const bool value = true;
};
/// \brief Base class for BlockFrequencyInfoImpl
///
/// BlockFrequencyInfoImplBase has supporting data structures and some
/// algorithms for BlockFrequencyInfoImpl. Only algorithms that depend on
/// the block type (or that call such algorithms) are skipped here.
///
/// Nevertheless, the majority of the overall algorithm documentation lives with
/// BlockFrequencyInfoImpl. See there for details.
class BlockFrequencyInfoImplBase {
public:
typedef ScaledNumber<uint64_t> Scaled64;
typedef bfi_detail::BlockMass BlockMass;
/// \brief Representative of a block.
///
/// This is a simple wrapper around an index into the reverse-post-order
/// traversal of the blocks.
///
/// Unlike a block pointer, its order has meaning (location in the
/// topological sort) and its class is the same regardless of block type.
struct BlockNode {
typedef uint32_t IndexType;
IndexType Index;
bool operator==(const BlockNode &X) const { return Index == X.Index; }
bool operator!=(const BlockNode &X) const { return Index != X.Index; }
bool operator<=(const BlockNode &X) const { return Index <= X.Index; }
bool operator>=(const BlockNode &X) const { return Index >= X.Index; }
bool operator<(const BlockNode &X) const { return Index < X.Index; }
bool operator>(const BlockNode &X) const { return Index > X.Index; }
BlockNode() : Index(UINT32_MAX) {}
BlockNode(IndexType Index) : Index(Index) {}
bool isValid() const { return Index <= getMaxIndex(); }
static size_t getMaxIndex() { return UINT32_MAX - 1; }
};
/// \brief Stats about a block itself.
struct FrequencyData {
Scaled64 Scaled;
uint64_t Integer;
};
/// \brief Data about a loop.
///
/// Contains the data necessary to represent a loop as a pseudo-node once it's
/// packaged.
struct LoopData {
typedef SmallVector<std::pair<BlockNode, BlockMass>, 4> ExitMap;
typedef SmallVector<BlockNode, 4> NodeList;
typedef SmallVector<BlockMass, 1> HeaderMassList;
LoopData *Parent; ///< The parent loop.
bool IsPackaged; ///< Whether this has been packaged.
uint32_t NumHeaders; ///< Number of headers.
ExitMap Exits; ///< Successor edges (and weights).
NodeList Nodes; ///< Header and the members of the loop.
HeaderMassList BackedgeMass; ///< Mass returned to each loop header.
BlockMass Mass;
Scaled64 Scale;
LoopData(LoopData *Parent, const BlockNode &Header)
: Parent(Parent), IsPackaged(false), NumHeaders(1), Nodes(1, Header),
BackedgeMass(1) {}
template <class It1, class It2>
LoopData(LoopData *Parent, It1 FirstHeader, It1 LastHeader, It2 FirstOther,
It2 LastOther)
: Parent(Parent), IsPackaged(false), Nodes(FirstHeader, LastHeader) {
NumHeaders = Nodes.size();
Nodes.insert(Nodes.end(), FirstOther, LastOther);
BackedgeMass.resize(NumHeaders);
}
bool isHeader(const BlockNode &Node) const {
if (isIrreducible())
return std::binary_search(Nodes.begin(), Nodes.begin() + NumHeaders,
Node);
return Node == Nodes[0];
}
BlockNode getHeader() const { return Nodes[0]; }
bool isIrreducible() const { return NumHeaders > 1; }
HeaderMassList::difference_type getHeaderIndex(const BlockNode &B) {
assert(isHeader(B) && "this is only valid on loop header blocks");
if (isIrreducible())
return std::lower_bound(Nodes.begin(), Nodes.begin() + NumHeaders, B) -
Nodes.begin();
return 0;
}
NodeList::const_iterator members_begin() const {
return Nodes.begin() + NumHeaders;
}
NodeList::const_iterator members_end() const { return Nodes.end(); }
iterator_range<NodeList::const_iterator> members() const {
return make_range(members_begin(), members_end());
}
};
/// \brief Working data about a block used while distributing mass.
struct WorkingData {
BlockNode Node; ///< This node.
LoopData *Loop; ///< The loop this block is inside.
BlockMass Mass; ///< Mass distribution from the entry block.
WorkingData(const BlockNode &Node) : Node(Node), Loop(nullptr) {}
bool isLoopHeader() const { return Loop && Loop->isHeader(Node); }
bool isDoubleLoopHeader() const {
return isLoopHeader() && Loop->Parent && Loop->Parent->isIrreducible() &&
Loop->Parent->isHeader(Node);
}
LoopData *getContainingLoop() const {
if (!isLoopHeader())
return Loop;
if (!isDoubleLoopHeader())
return Loop->Parent;
return Loop->Parent->Parent;
}
/// \brief Resolve a node to its representative.
///
/// Get the node currently representing Node, which could be a containing
/// loop.
///
/// This function should only be called when distributing mass. As long as
/// there are no irreducible edges to Node, then it will have complexity
/// O(1) in this context.
///
/// In general, the complexity is O(L), where L is the number of loop
/// headers Node has been packaged into. Since this method is called in
/// the context of distributing mass, L will be the number of loop headers
/// an early exit edge jumps out of.
BlockNode getResolvedNode() const {
auto L = getPackagedLoop();
return L ? L->getHeader() : Node;
}
LoopData *getPackagedLoop() const {
if (!Loop || !Loop->IsPackaged)
return nullptr;
auto L = Loop;
while (L->Parent && L->Parent->IsPackaged)
L = L->Parent;
return L;
}
/// \brief Get the appropriate mass for a node.
///
/// Get appropriate mass for Node. If Node is a loop-header (whose loop
/// has been packaged), returns the mass of its pseudo-node. If it's a
/// node inside a packaged loop, it returns the loop's mass.
BlockMass &getMass() {
if (!isAPackage())
return Mass;
if (!isADoublePackage())
return Loop->Mass;
return Loop->Parent->Mass;
}
/// \brief Has ContainingLoop been packaged up?
bool isPackaged() const { return getResolvedNode() != Node; }
/// \brief Has Loop been packaged up?
bool isAPackage() const { return isLoopHeader() && Loop->IsPackaged; }
/// \brief Has Loop been packaged up twice?
bool isADoublePackage() const {
return isDoubleLoopHeader() && Loop->Parent->IsPackaged;
}
};
/// \brief Unscaled probability weight.
///
/// Probability weight for an edge in the graph (including the
/// successor/target node).
///
/// All edges in the original function are 32-bit. However, exit edges from
/// loop packages are taken from 64-bit exit masses, so we need 64-bits of
/// space in general.
///
/// In addition to the raw weight amount, Weight stores the type of the edge
/// in the current context (i.e., the context of the loop being processed).
/// Is this a local edge within the loop, an exit from the loop, or a
/// backedge to the loop header?
struct Weight {
enum DistType { Local, Exit, Backedge };
DistType Type;
BlockNode TargetNode;
uint64_t Amount;
Weight() : Type(Local), Amount(0) {}
Weight(DistType Type, BlockNode TargetNode, uint64_t Amount)
: Type(Type), TargetNode(TargetNode), Amount(Amount) {}
};
/// \brief Distribution of unscaled probability weight.
///
/// Distribution of unscaled probability weight to a set of successors.
///
/// This class collates the successor edge weights for later processing.
///
/// \a DidOverflow indicates whether \a Total did overflow while adding to
/// the distribution. It should never overflow twice.
struct Distribution {
typedef SmallVector<Weight, 4> WeightList;
WeightList Weights; ///< Individual successor weights.
uint64_t Total; ///< Sum of all weights.
bool DidOverflow; ///< Whether \a Total did overflow.
Distribution() : Total(0), DidOverflow(false) {}
void addLocal(const BlockNode &Node, uint64_t Amount) {
add(Node, Amount, Weight::Local);
}
void addExit(const BlockNode &Node, uint64_t Amount) {
add(Node, Amount, Weight::Exit);
}
void addBackedge(const BlockNode &Node, uint64_t Amount) {
add(Node, Amount, Weight::Backedge);
}
/// \brief Normalize the distribution.
///
/// Combines multiple edges to the same \a Weight::TargetNode and scales
/// down so that \a Total fits into 32-bits.
///
/// This is linear in the size of \a Weights. For the vast majority of
/// cases, adjacent edge weights are combined by sorting WeightList and
/// combining adjacent weights. However, for very large edge lists an
/// auxiliary hash table is used.
void normalize();
private:
void add(const BlockNode &Node, uint64_t Amount, Weight::DistType Type);
};
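// A minimal illustrative sketch (not part of the original header) of how a
// Distribution is filled before mass is distributed; the node indices are
// hypothetical:
//
// Distribution Dist;
// Dist.addLocal(BlockNode(1), 100); // edge to a block inside the loop
// Dist.addExit(BlockNode(7), 10); // edge leaving the loop
// Dist.addBackedge(BlockNode(0), 25); // edge back to the loop header
// Dist.normalize(); // coalesce duplicates, fit Total in 32 bits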
/// \brief Data about each block. This is used downstream.
std::vector<FrequencyData> Freqs;
/// \brief Loop data: see initializeLoops().
std::vector<WorkingData> Working;
/// \brief Indexed information about loops.
std::list<LoopData> Loops;
/// \brief Add all edges out of a packaged loop to the distribution.
///
/// Adds all edges leaving the packaged loop \c Loop to Dist. Calls addToDist() to add each
/// successor edge.
///
/// \return \c true unless there's an irreducible backedge.
bool addLoopSuccessorsToDist(const LoopData *OuterLoop, LoopData &Loop,
Distribution &Dist);
/// \brief Add an edge to the distribution.
///
/// Adds the edge to \c Succ to \c Dist. If \c OuterLoop is non-null, then
/// whether the edge is local/exit/backedge is determined in the context of
/// \c OuterLoop. Otherwise,
/// every edge should be a local edge (since all the loops are packaged up).
///
/// \return \c true unless aborted due to an irreducible backedge.
bool addToDist(Distribution &Dist, const LoopData *OuterLoop,
const BlockNode &Pred, const BlockNode &Succ, uint64_t Weight);
LoopData &getLoopPackage(const BlockNode &Head) {
assert(Head.Index < Working.size());
assert(Working[Head.Index].isLoopHeader());
return *Working[Head.Index].Loop;
}
/// \brief Analyze irreducible SCCs.
///
/// Separate irreducible SCCs from \c G, which is an explicit graph of \c
/// OuterLoop (or the top-level function, if \c OuterLoop is \c nullptr).
/// Insert them into \a Loops before \c Insert.
///
/// \return the \c LoopData nodes representing the irreducible SCCs.
iterator_range<std::list<LoopData>::iterator>
analyzeIrreducible(const bfi_detail::IrreducibleGraph &G, LoopData *OuterLoop,
std::list<LoopData>::iterator Insert);
/// \brief Update a loop after packaging irreducible SCCs inside of it.
///
/// Update \c OuterLoop. Before finding irreducible control flow, it was
/// partway through \a computeMassInLoop(), so \a LoopData::Exits and \a
/// LoopData::BackedgeMass need to be reset. Also, nodes that were packaged
/// up need to be removed from \a OuterLoop::Nodes.
void updateLoopWithIrreducible(LoopData &OuterLoop);
/// \brief Distribute mass according to a distribution.
///
/// Distributes the mass in Source according to Dist. If \c OuterLoop is
/// non-null, backedges and exits are stored in its entry in Loops.
///
/// Mass is distributed in parallel from two copies of the source mass.
void distributeMass(const BlockNode &Source, LoopData *OuterLoop,
Distribution &Dist);
/// \brief Compute the loop scale for a loop.
void computeLoopScale(LoopData &Loop);
/// Adjust the mass of all headers in an irreducible loop.
///
/// Initially, irreducible loops are assumed to distribute their mass
/// equally among their headers. This can lead to wrong frequency estimates
/// since some headers may be executed more frequently than others.
///
/// This adjusts header mass distribution so it matches the weights of
/// the backedges going into each of the loop headers.
void adjustLoopHeaderMass(LoopData &Loop);
/// \brief Package up a loop.
void packageLoop(LoopData &Loop);
/// \brief Unwrap loops.
void unwrapLoops();
/// \brief Finalize frequency metrics.
///
/// Calculates final frequencies and cleans up no-longer-needed data
/// structures.
void finalizeMetrics();
/// \brief Clear all memory.
void clear();
virtual std::string getBlockName(const BlockNode &Node) const;
std::string getLoopName(const LoopData &Loop) const;
virtual raw_ostream &print(raw_ostream &OS) const { return OS; }
void dump() const { print(dbgs()); }
Scaled64 getFloatingBlockFreq(const BlockNode &Node) const;
BlockFrequency getBlockFreq(const BlockNode &Node) const;
raw_ostream &printBlockFreq(raw_ostream &OS, const BlockNode &Node) const;
raw_ostream &printBlockFreq(raw_ostream &OS,
const BlockFrequency &Freq) const;
uint64_t getEntryFreq() const {
assert(!Freqs.empty());
return Freqs[0].Integer;
}
/// \brief Virtual destructor.
///
/// Need a virtual destructor to mask the compiler warning about
/// getBlockName().
virtual ~BlockFrequencyInfoImplBase() {}
};
namespace bfi_detail {
template <class BlockT> struct TypeMap {};
template <> struct TypeMap<BasicBlock> {
typedef BasicBlock BlockT;
typedef Function FunctionT;
typedef BranchProbabilityInfo BranchProbabilityInfoT;
typedef Loop LoopT;
typedef LoopInfo LoopInfoT;
};
template <> struct TypeMap<MachineBasicBlock> {
typedef MachineBasicBlock BlockT;
typedef MachineFunction FunctionT;
typedef MachineBranchProbabilityInfo BranchProbabilityInfoT;
typedef MachineLoop LoopT;
typedef MachineLoopInfo LoopInfoT;
};
/// \brief Get the name of a MachineBasicBlock.
///
/// Get the name of a MachineBasicBlock. It's templated so that including from
/// CodeGen is unnecessary (that would be a layering issue).
///
/// This is used mainly for debug output. The name is similar to
/// MachineBasicBlock::getFullName(), but skips the name of the function.
template <class BlockT> std::string getBlockName(const BlockT *BB) {
assert(BB && "Unexpected nullptr");
auto MachineName = "BB" + Twine(BB->getNumber());
if (BB->getBasicBlock())
return (MachineName + "[" + BB->getName() + "]").str();
return MachineName.str();
}
/// \brief Get the name of a BasicBlock.
template <> inline std::string getBlockName(const BasicBlock *BB) {
assert(BB && "Unexpected nullptr");
return BB->getName().str();
}
/// \brief Graph of irreducible control flow.
///
/// This graph is used for determining the SCCs in a loop (or top-level
/// function) that has irreducible control flow.
///
/// During the block frequency algorithm, the local graphs are defined in a
/// light-weight way, deferring to the \a BasicBlock or \a MachineBasicBlock
/// graphs for most edges, but getting others from \a LoopData::ExitMap. The
/// latter only has successor information.
///
/// \a IrreducibleGraph makes this graph explicit. It's in a form that can use
/// \a GraphTraits (so that \a analyzeIrreducible() can use \a scc_iterator),
/// and it explicitly lists predecessors and successors. The initialization
/// that relies on \c MachineBasicBlock is defined in the header.
struct IrreducibleGraph {
typedef BlockFrequencyInfoImplBase BFIBase;
BFIBase &BFI;
typedef BFIBase::BlockNode BlockNode;
struct IrrNode {
BlockNode Node;
unsigned NumIn;
std::deque<const IrrNode *> Edges;
IrrNode(const BlockNode &Node) : Node(Node), NumIn(0) {}
typedef std::deque<const IrrNode *>::const_iterator iterator;
iterator pred_begin() const { return Edges.begin(); }
iterator succ_begin() const { return Edges.begin() + NumIn; }
iterator pred_end() const { return succ_begin(); }
iterator succ_end() const { return Edges.end(); }
};
BlockNode Start;
const IrrNode *StartIrr;
std::vector<IrrNode> Nodes;
SmallDenseMap<uint32_t, IrrNode *, 4> Lookup;
/// \brief Construct an explicit graph containing irreducible control flow.
///
/// Construct an explicit graph of the control flow in \c OuterLoop (or the
/// top-level function, if \c OuterLoop is \c nullptr). Uses \c
/// addBlockEdges to add block successors that have not been packaged into
/// loops.
///
/// \a BlockFrequencyInfoImpl::computeIrreducibleMass() is the only expected
/// user of this.
template <class BlockEdgesAdder>
IrreducibleGraph(BFIBase &BFI, const BFIBase::LoopData *OuterLoop,
BlockEdgesAdder addBlockEdges)
: BFI(BFI), StartIrr(nullptr) {
initialize(OuterLoop, addBlockEdges);
}
template <class BlockEdgesAdder>
void initialize(const BFIBase::LoopData *OuterLoop,
BlockEdgesAdder addBlockEdges);
void addNodesInLoop(const BFIBase::LoopData &OuterLoop);
void addNodesInFunction();
void addNode(const BlockNode &Node) {
Nodes.emplace_back(Node);
BFI.Working[Node.Index].getMass() = BlockMass::getEmpty();
}
void indexNodes();
template <class BlockEdgesAdder>
void addEdges(const BlockNode &Node, const BFIBase::LoopData *OuterLoop,
BlockEdgesAdder addBlockEdges);
void addEdge(IrrNode &Irr, const BlockNode &Succ,
const BFIBase::LoopData *OuterLoop);
};
template <class BlockEdgesAdder>
void IrreducibleGraph::initialize(const BFIBase::LoopData *OuterLoop,
BlockEdgesAdder addBlockEdges) {
if (OuterLoop) {
addNodesInLoop(*OuterLoop);
for (auto N : OuterLoop->Nodes)
addEdges(N, OuterLoop, addBlockEdges);
} else {
addNodesInFunction();
for (uint32_t Index = 0; Index < BFI.Working.size(); ++Index)
addEdges(Index, OuterLoop, addBlockEdges);
}
StartIrr = Lookup[Start.Index];
}
template <class BlockEdgesAdder>
void IrreducibleGraph::addEdges(const BlockNode &Node,
const BFIBase::LoopData *OuterLoop,
BlockEdgesAdder addBlockEdges) {
auto L = Lookup.find(Node.Index);
if (L == Lookup.end())
return;
IrrNode &Irr = *L->second;
const auto &Working = BFI.Working[Node.Index];
if (Working.isAPackage())
for (const auto &I : Working.Loop->Exits)
addEdge(Irr, I.first, OuterLoop);
else
addBlockEdges(*this, Irr, OuterLoop);
}
}
/// \brief Shared implementation for block frequency analysis.
///
/// This is a shared implementation of BlockFrequencyInfo and
/// MachineBlockFrequencyInfo, and calculates the relative frequencies of
/// blocks.
///
/// LoopInfo defines a loop as a "non-trivial" SCC dominated by a single block,
/// which is called the header. A given loop, L, can have sub-loops, which are
/// loops within the subgraph of L that exclude its header. (A "trivial" SCC
/// consists of a single block that does not have a self-edge.)
///
/// In addition to loops, this algorithm has limited support for irreducible
/// SCCs, which are SCCs with multiple entry blocks. Irreducible SCCs are
/// discovered on the fly, and modelled as loops with multiple headers.
///
/// The headers of an irreducible sub-SCC consist of its entry blocks and all
/// nodes that are targets of a backedge within it (excluding backedges within
/// true sub-loops). Block frequency calculations act as if a block is
/// inserted that intercepts all the edges to the headers. All backedges and
/// entries point to this block. Its successors are the headers, which split
/// the frequency evenly.
///
/// This algorithm leverages BlockMass and ScaledNumber to maintain precision,
/// separates mass distribution from loop scaling, and dithers to eliminate
/// probability mass loss.
///
/// The implementation is split between BlockFrequencyInfoImpl, which knows the
/// type of graph being modelled (BasicBlock vs. MachineBasicBlock), and
/// BlockFrequencyInfoImplBase, which doesn't. The base class uses \a
/// BlockNode, a wrapper around a uint32_t. BlockNode is numbered from 0 in
/// reverse post-order. This gives two advantages: it's easy to compare the
/// relative ordering of two nodes, and maps keyed on BlockT can be represented
/// by vectors.
///
/// This algorithm is O(V+E), unless there is irreducible control flow, in
/// which case it's O(V*E) in the worst case.
///
/// These are the main stages:
///
/// 0. Reverse post-order traversal (\a initializeRPOT()).
///
/// Run a single post-order traversal and save it (in reverse) in RPOT.
/// All other stages make use of this ordering. Save a lookup from BlockT
/// to BlockNode (the index into RPOT) in Nodes.
///
/// 1. Loop initialization (\a initializeLoops()).
///
/// Translate LoopInfo/MachineLoopInfo into a form suitable for the rest of
/// the algorithm. In particular, store the immediate members of each loop
/// in reverse post-order.
///
/// 2. Calculate mass and scale in loops (\a computeMassInLoops()).
///
/// For each loop (bottom-up), distribute mass through the DAG resulting
/// from ignoring backedges and treating sub-loops as a single pseudo-node.
/// Track the backedge mass distributed to the loop header, and use it to
/// calculate the loop scale (number of loop iterations). Immediate
/// members that represent sub-loops will already have been visited and
/// packaged into a pseudo-node.
///
/// Distributing mass in a loop is a reverse-post-order traversal through
/// the loop. Start by assigning full mass to the Loop header. For each
/// node in the loop:
///
/// - Fetch and categorize the weight distribution for its successors.
/// If this is a packaged-subloop, the weight distribution is stored
/// in \a LoopData::Exits. Otherwise, fetch it from
/// BranchProbabilityInfo.
///
/// - Each successor is categorized as \a Weight::Local, a local edge
/// within the current loop, \a Weight::Backedge, a backedge to the
/// loop header, or \a Weight::Exit, any successor outside the loop.
/// The weight, the successor, and its category are stored in \a
/// Distribution. There can be multiple edges to each successor.
///
/// - If there's a backedge to a non-header, there's an irreducible SCC.
/// The usual flow is temporarily aborted. \a
/// computeIrreducibleMass() finds the irreducible SCCs within the
/// loop, packages them up, and restarts the flow.
///
/// - Normalize the distribution: scale weights down so that their sum
/// is 32-bits, and coalesce multiple edges to the same node.
///
/// - Distribute the mass accordingly, dithering to minimize mass loss,
/// as described in \a distributeMass().
///
/// In the case of irreducible loops, instead of a single loop header,
/// there will be several. The computation of backedge masses is similar
/// but instead of having a single backedge mass, there will be one
/// backedge per loop header. In these cases, each backedge will carry
/// a mass proportional to the edge weights along the corresponding
/// path.
///
/// At the end of propagation, the full mass assigned to the loop will be
/// distributed among the loop headers proportionally according to the
/// mass flowing through their backedges.
///
/// Finally, calculate the loop scale from the accumulated backedge mass.
///
/// 3. Distribute mass in the function (\a computeMassInFunction()).
///
/// Finally, distribute mass through the DAG resulting from packaging all
/// loops in the function. This uses the same algorithm as distributing
/// mass in a loop, except that there are no exit or backedge edges.
///
/// 4. Unpackage loops (\a unwrapLoops()).
///
/// Initialize each block's frequency to a floating point representation of
/// its mass.
///
/// Visit loops top-down, scaling the frequencies of its immediate members
/// by the loop's pseudo-node's frequency.
///
/// 5. Convert frequencies to a 64-bit range (\a finalizeMetrics()).
///
/// Using the min and max frequencies as a guide, translate floating point
/// frequencies to an appropriate range in uint64_t.
///
/// It has some known flaws.
///
/// - The model of irreducible control flow is a rough approximation.
///
/// Modelling irreducible control flow exactly involves setting up and
/// solving a group of infinite geometric series. Such precision is
/// unlikely to be worthwhile, since most of our algorithms give up on
/// irreducible control flow anyway.
///
/// Nevertheless, we might find that we need to get closer. Here's a sort
/// of TODO list for the model with diminishing returns, to be completed as
/// necessary.
///
/// - The headers for the \a LoopData representing an irreducible SCC
/// include non-entry blocks. When these extra blocks exist, they
/// indicate a self-contained irreducible sub-SCC. We could treat them
/// as sub-loops, rather than arbitrarily shoving the problematic
/// blocks into the headers of the main irreducible SCC.
///
/// - Entry frequencies are assumed to be evenly split between the
/// headers of a given irreducible SCC, which is the only option if we
/// need to compute mass in the SCC before its parent loop. Instead,
/// we could partially compute mass in the parent loop, and stop when
/// we get to the SCC. Here, we have the correct ratio of entry
/// masses, which we can use to adjust their relative frequencies.
/// Compute mass in the SCC, and then continue propagation in the
/// parent.
///
/// - We can propagate mass iteratively through the SCC, for some fixed
/// number of iterations. Each iteration starts by assigning the entry
/// blocks their backedge mass from the prior iteration. The final
/// mass for each block (and each exit, and the total backedge mass
/// used for computing loop scale) is the sum of all iterations.
/// (Running this until fixed point would "solve" the geometric
/// series by simulation.)
template <class BT> class BlockFrequencyInfoImpl : BlockFrequencyInfoImplBase {
typedef typename bfi_detail::TypeMap<BT>::BlockT BlockT;
typedef typename bfi_detail::TypeMap<BT>::FunctionT FunctionT;
typedef typename bfi_detail::TypeMap<BT>::BranchProbabilityInfoT
BranchProbabilityInfoT;
typedef typename bfi_detail::TypeMap<BT>::LoopT LoopT;
typedef typename bfi_detail::TypeMap<BT>::LoopInfoT LoopInfoT;
// This is part of a workaround for a GCC 4.7 crash on lambdas.
friend struct bfi_detail::BlockEdgesAdder<BT>;
typedef GraphTraits<const BlockT *> Successor;
typedef GraphTraits<Inverse<const BlockT *>> Predecessor;
const BranchProbabilityInfoT *BPI;
const LoopInfoT *LI;
const FunctionT *F;
// All blocks in reverse postorder.
std::vector<const BlockT *> RPOT;
DenseMap<const BlockT *, BlockNode> Nodes;
typedef typename std::vector<const BlockT *>::const_iterator rpot_iterator;
rpot_iterator rpot_begin() const { return RPOT.begin(); }
rpot_iterator rpot_end() const { return RPOT.end(); }
size_t getIndex(const rpot_iterator &I) const { return I - rpot_begin(); }
BlockNode getNode(const rpot_iterator &I) const {
return BlockNode(getIndex(I));
}
BlockNode getNode(const BlockT *BB) const { return Nodes.lookup(BB); }
const BlockT *getBlock(const BlockNode &Node) const {
assert(Node.Index < RPOT.size());
return RPOT[Node.Index];
}
/// \brief Run (and save) a post-order traversal.
///
/// Saves a reverse post-order traversal of all the nodes in \a F.
void initializeRPOT();
/// \brief Initialize loop data.
///
/// Build up \a Loops using \a LoopInfo. \a LoopInfo gives us a mapping from
/// each block to the deepest loop it's in, but we need the inverse. For each
/// loop, we store in reverse post-order its "immediate" members, defined as
/// the header, the headers of immediate sub-loops, and all other blocks in
/// the loop that are not in sub-loops.
void initializeLoops();
/// \brief Propagate to a block's successors.
///
/// In the context of distributing mass through \c OuterLoop, divide the mass
/// currently assigned to \c Node between its successors.
///
/// \return \c true unless there's an irreducible backedge.
bool propagateMassToSuccessors(LoopData *OuterLoop, const BlockNode &Node);
/// \brief Compute mass in a particular loop.
///
/// Assign mass to \c Loop's header, and then for each block in \c Loop in
/// reverse post-order, distribute mass to its successors. Only visits nodes
/// that have not been packaged into sub-loops.
///
/// \pre \a computeMassInLoop() has been called for each subloop of \c Loop.
/// \return \c true unless there's an irreducible backedge.
bool computeMassInLoop(LoopData &Loop);
/// \brief Try to compute mass in the top-level function.
///
/// Assign mass to the entry block, and then for each block in reverse
/// post-order, distribute mass to its successors. Skips nodes that have
/// been packaged into loops.
///
/// \pre \a computeMassInLoops() has been called.
/// \return \c true unless there's an irreducible backedge.
bool tryToComputeMassInFunction();
/// \brief Compute mass in (and package up) irreducible SCCs.
///
/// Find the irreducible SCCs in \c OuterLoop, add them to \a Loops (in front
/// of \c Insert), and call \a computeMassInLoop() on each of them.
///
/// If \c OuterLoop is \c nullptr, it refers to the top-level function.
///
/// \pre \a computeMassInLoop() has been called for each subloop of \c
/// OuterLoop.
/// \pre \c Insert points at the last loop successfully processed by \a
/// computeMassInLoop().
/// \pre \c OuterLoop has irreducible SCCs.
void computeIrreducibleMass(LoopData *OuterLoop,
std::list<LoopData>::iterator Insert);
/// \brief Compute mass in all loops.
///
/// For each loop bottom-up, call \a computeMassInLoop().
///
/// \a computeMassInLoop() aborts (and returns \c false) on loops that
/// contain irreducible sub-SCCs. Use \a computeIrreducibleMass() and then
/// re-enter \a computeMassInLoop().
///
/// \post \a computeMassInLoop() has returned \c true for every loop.
void computeMassInLoops();
/// \brief Compute mass in the top-level function.
///
/// Uses \a tryToComputeMassInFunction() and \a computeIrreducibleMass() to
/// compute mass in the top-level function.
///
/// \post \a tryToComputeMassInFunction() has returned \c true.
void computeMassInFunction();
std::string getBlockName(const BlockNode &Node) const override {
return bfi_detail::getBlockName(getBlock(Node));
}
public:
const FunctionT *getFunction() const { return F; }
void doFunction(const FunctionT *F, const BranchProbabilityInfoT *BPI,
const LoopInfoT *LI);
BlockFrequencyInfoImpl() : BPI(nullptr), LI(nullptr), F(nullptr) {}
using BlockFrequencyInfoImplBase::getEntryFreq;
BlockFrequency getBlockFreq(const BlockT *BB) const {
return BlockFrequencyInfoImplBase::getBlockFreq(getNode(BB));
}
Scaled64 getFloatingBlockFreq(const BlockT *BB) const {
return BlockFrequencyInfoImplBase::getFloatingBlockFreq(getNode(BB));
}
/// \brief Print the frequencies for the current function.
///
/// Prints the frequencies for the blocks in the current function.
///
/// Blocks are printed in the natural iteration order of the function, rather
/// than reverse post-order. This provides two advantages: writing -analyze
/// tests is easier (since blocks come out in source order), and even
/// unreachable blocks are printed.
///
/// \a BlockFrequencyInfoImplBase::print() only knows reverse post-order, so
/// we need to override it here.
raw_ostream &print(raw_ostream &OS) const override;
using BlockFrequencyInfoImplBase::dump;
using BlockFrequencyInfoImplBase::printBlockFreq;
raw_ostream &printBlockFreq(raw_ostream &OS, const BlockT *BB) const {
return BlockFrequencyInfoImplBase::printBlockFreq(OS, getNode(BB));
}
};
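// A minimal illustrative sketch (not part of the original header) of the
// typical driver sequence; F, BPI and LI are assumed to come from the
// enclosing analysis pass:
//
// BlockFrequencyInfoImpl<BasicBlock> BFI;
// BFI.doFunction(&F, &BPI, &LI); // run the full pipeline
// for (const BasicBlock &BB : F)
// (void)BFI.getBlockFreq(&BB).getFrequency(); // 64-bit scaled frequency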
template <class BT>
void BlockFrequencyInfoImpl<BT>::doFunction(const FunctionT *F,
const BranchProbabilityInfoT *BPI,
const LoopInfoT *LI) {
// Save the parameters.
this->BPI = BPI;
this->LI = LI;
this->F = F;
// Clean up left-over data structures.
BlockFrequencyInfoImplBase::clear();
RPOT.clear();
Nodes.clear();
// Initialize.
DEBUG(dbgs() << "\nblock-frequency: " << F->getName() << "\n================="
<< std::string(F->getName().size(), '=') << "\n");
initializeRPOT();
initializeLoops();
// Visit loops in post-order to find the local mass distribution, and then do
// the full function.
computeMassInLoops();
computeMassInFunction();
unwrapLoops();
finalizeMetrics();
}
template <class BT> void BlockFrequencyInfoImpl<BT>::initializeRPOT() {
const BlockT *Entry = F->begin();
RPOT.reserve(F->size());
std::copy(po_begin(Entry), po_end(Entry), std::back_inserter(RPOT));
std::reverse(RPOT.begin(), RPOT.end());
assert(RPOT.size() - 1 <= BlockNode::getMaxIndex() &&
"More nodes in function than Block Frequency Info supports");
DEBUG(dbgs() << "reverse-post-order-traversal\n");
for (rpot_iterator I = rpot_begin(), E = rpot_end(); I != E; ++I) {
BlockNode Node = getNode(I);
DEBUG(dbgs() << " - " << getIndex(I) << ": " << getBlockName(Node) << "\n");
Nodes[*I] = Node;
}
Working.reserve(RPOT.size());
for (size_t Index = 0; Index < RPOT.size(); ++Index)
Working.emplace_back(Index);
Freqs.resize(RPOT.size());
}
template <class BT> void BlockFrequencyInfoImpl<BT>::initializeLoops() {
DEBUG(dbgs() << "loop-detection\n");
if (LI->empty())
return;
// Visit loops top down and assign them an index.
std::deque<std::pair<const LoopT *, LoopData *>> Q;
for (const LoopT *L : *LI)
Q.emplace_back(L, nullptr);
while (!Q.empty()) {
const LoopT *Loop = Q.front().first;
LoopData *Parent = Q.front().second;
Q.pop_front();
BlockNode Header = getNode(Loop->getHeader());
assert(Header.isValid());
Loops.emplace_back(Parent, Header);
Working[Header.Index].Loop = &Loops.back();
DEBUG(dbgs() << " - loop = " << getBlockName(Header) << "\n");
for (const LoopT *L : *Loop)
Q.emplace_back(L, &Loops.back());
}
// Visit nodes in reverse post-order and add them to their deepest containing
// loop.
for (size_t Index = 0; Index < RPOT.size(); ++Index) {
// Loop headers have already been mostly mapped.
if (Working[Index].isLoopHeader()) {
LoopData *ContainingLoop = Working[Index].getContainingLoop();
if (ContainingLoop)
ContainingLoop->Nodes.push_back(Index);
continue;
}
const LoopT *Loop = LI->getLoopFor(RPOT[Index]);
if (!Loop)
continue;
// Add this node to its containing loop's member list.
BlockNode Header = getNode(Loop->getHeader());
assert(Header.isValid());
const auto &HeaderData = Working[Header.Index];
assert(HeaderData.isLoopHeader());
Working[Index].Loop = HeaderData.Loop;
HeaderData.Loop->Nodes.push_back(Index);
DEBUG(dbgs() << " - loop = " << getBlockName(Header)
<< ": member = " << getBlockName(Index) << "\n");
}
}
template <class BT> void BlockFrequencyInfoImpl<BT>::computeMassInLoops() {
// Visit loops with the deepest first, and the top-level loops last.
for (auto L = Loops.rbegin(), E = Loops.rend(); L != E; ++L) {
if (computeMassInLoop(*L))
continue;
auto Next = std::next(L);
computeIrreducibleMass(&*L, L.base());
L = std::prev(Next);
if (computeMassInLoop(*L))
continue;
llvm_unreachable("unhandled irreducible control flow");
}
}
template <class BT>
bool BlockFrequencyInfoImpl<BT>::computeMassInLoop(LoopData &Loop) {
// Compute mass in loop.
DEBUG(dbgs() << "compute-mass-in-loop: " << getLoopName(Loop) << "\n");
if (Loop.isIrreducible()) {
BlockMass Remaining = BlockMass::getFull();
for (uint32_t H = 0; H < Loop.NumHeaders; ++H) {
auto &Mass = Working[Loop.Nodes[H].Index].getMass();
Mass = Remaining * BranchProbability(1, Loop.NumHeaders - H);
Remaining -= Mass;
}
for (const BlockNode &M : Loop.Nodes)
if (!propagateMassToSuccessors(&Loop, M))
llvm_unreachable("unhandled irreducible control flow");
adjustLoopHeaderMass(Loop);
} else {
Working[Loop.getHeader().Index].getMass() = BlockMass::getFull();
if (!propagateMassToSuccessors(&Loop, Loop.getHeader()))
llvm_unreachable("irreducible control flow to loop header!?");
for (const BlockNode &M : Loop.members())
if (!propagateMassToSuccessors(&Loop, M))
// Irreducible backedge.
return false;
}
computeLoopScale(Loop);
packageLoop(Loop);
return true;
}
template <class BT>
bool BlockFrequencyInfoImpl<BT>::tryToComputeMassInFunction() {
// Compute mass in function.
DEBUG(dbgs() << "compute-mass-in-function\n");
assert(!Working.empty() && "no blocks in function");
assert(!Working[0].isLoopHeader() && "entry block is a loop header");
Working[0].getMass() = BlockMass::getFull();
for (rpot_iterator I = rpot_begin(), IE = rpot_end(); I != IE; ++I) {
// Check for nodes that have been packaged.
BlockNode Node = getNode(I);
if (Working[Node.Index].isPackaged())
continue;
if (!propagateMassToSuccessors(nullptr, Node))
return false;
}
return true;
}
template <class BT> void BlockFrequencyInfoImpl<BT>::computeMassInFunction() {
if (tryToComputeMassInFunction())
return;
computeIrreducibleMass(nullptr, Loops.begin());
if (tryToComputeMassInFunction())
return;
llvm_unreachable("unhandled irreducible control flow");
}
/// \note This should be a lambda, but that crashes GCC 4.7.
namespace bfi_detail {
template <class BT> struct BlockEdgesAdder {
typedef BT BlockT;
typedef BlockFrequencyInfoImplBase::LoopData LoopData;
typedef GraphTraits<const BlockT *> Successor;
const BlockFrequencyInfoImpl<BT> &BFI;
explicit BlockEdgesAdder(const BlockFrequencyInfoImpl<BT> &BFI)
: BFI(BFI) {}
void operator()(IrreducibleGraph &G, IrreducibleGraph::IrrNode &Irr,
const LoopData *OuterLoop) {
const BlockT *BB = BFI.RPOT[Irr.Node.Index];
for (auto I = Successor::child_begin(BB), E = Successor::child_end(BB);
I != E; ++I)
G.addEdge(Irr, BFI.getNode(*I), OuterLoop);
}
};
}
template <class BT>
void BlockFrequencyInfoImpl<BT>::computeIrreducibleMass(
LoopData *OuterLoop, std::list<LoopData>::iterator Insert) {
DEBUG(dbgs() << "analyze-irreducible-in-";
if (OuterLoop) dbgs() << "loop: " << getLoopName(*OuterLoop) << "\n";
else dbgs() << "function\n");
using namespace bfi_detail;
// Ideally, addBlockEdges() would be declared here as a lambda, but that
// crashes GCC 4.7.
BlockEdgesAdder<BT> addBlockEdges(*this);
IrreducibleGraph G(*this, OuterLoop, addBlockEdges);
for (auto &L : analyzeIrreducible(G, OuterLoop, Insert))
computeMassInLoop(L);
if (!OuterLoop)
return;
updateLoopWithIrreducible(*OuterLoop);
}
template <class BT>
bool
BlockFrequencyInfoImpl<BT>::propagateMassToSuccessors(LoopData *OuterLoop,
const BlockNode &Node) {
DEBUG(dbgs() << " - node: " << getBlockName(Node) << "\n");
// Calculate probability for successors.
Distribution Dist;
if (auto *Loop = Working[Node.Index].getPackagedLoop()) {
assert(Loop != OuterLoop && "Cannot propagate mass in a packaged loop");
if (!addLoopSuccessorsToDist(OuterLoop, *Loop, Dist))
// Irreducible backedge.
return false;
} else {
const BlockT *BB = getBlock(Node);
for (auto SI = Successor::child_begin(BB), SE = Successor::child_end(BB);
SI != SE; ++SI)
// Do not dereference SI, or getEdgeWeight() is linear in the number of
// successors.
if (!addToDist(Dist, OuterLoop, Node, getNode(*SI),
BPI->getEdgeWeight(BB, SI)))
// Irreducible backedge.
return false;
}
// Distribute mass to successors, saving exit and backedge data in the
// loop header.
distributeMass(Node, OuterLoop, Dist);
return true;
}
template <class BT>
raw_ostream &BlockFrequencyInfoImpl<BT>::print(raw_ostream &OS) const {
if (!F)
return OS;
OS << "block-frequency-info: " << F->getName() << "\n";
for (const BlockT &BB : *F)
OS << " - " << bfi_detail::getBlockName(&BB)
<< ": float = " << getFloatingBlockFreq(&BB)
<< ", int = " << getBlockFreq(&BB).getFrequency() << "\n";
// Add an extra newline for readability.
OS << "\n";
return OS;
}
} // end namespace llvm
#undef DEBUG_TYPE
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/ScalarEvolutionNormalization.h | //===- llvm/Analysis/ScalarEvolutionNormalization.h - See below -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines utilities for working with "normalized" ScalarEvolution
// expressions.
//
// The following example illustrates post-increment uses and how normalized
// expressions help.
//
// for (i=0; i!=n; ++i) {
// ...
// }
// use(i);
//
// While the expression for most uses of i inside the loop is {0,+,1}<%L>, the
// expression for the use of i outside the loop is {1,+,1}<%L>, since i is
// incremented at the end of the loop body. This is inconvenient, since it
// suggests that we need two different induction variables, one that starts
// at 0 and one that starts at 1. We'd prefer to be able to think of these as
// the same induction variable, with uses inside the loop using the
// "pre-incremented" value, and uses after the loop using the
// "post-incremented" value.
//
// Expressions for post-incremented uses are represented as an expression
// paired with a set of loops for which the expression is in "post-increment"
// mode (there may be multiple loops).
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_SCALAREVOLUTIONNORMALIZATION_H
#define LLVM_ANALYSIS_SCALAREVOLUTIONNORMALIZATION_H
#include "llvm/ADT/SmallPtrSet.h"
namespace llvm {
class Instruction;
class DominatorTree;
class Loop;
class ScalarEvolution;
class SCEV;
class Value;
/// TransformKind - Different types of transformations that
/// TransformForPostIncUse can do.
enum TransformKind {
/// Normalize - Normalize according to the given loops.
Normalize,
/// NormalizeAutodetect - Detect post-inc opportunities on new expressions,
/// update the given loop set, and normalize.
NormalizeAutodetect,
/// Denormalize - Perform the inverse transform on the expression with the
/// given loop set.
Denormalize
};
/// PostIncLoopSet - A set of loops.
typedef SmallPtrSet<const Loop *, 2> PostIncLoopSet;
/// TransformForPostIncUse - Transform the given expression according to the
/// given transformation kind.
const SCEV *TransformForPostIncUse(TransformKind Kind,
const SCEV *S,
Instruction *User,
Value *OperandValToReplace,
PostIncLoopSet &Loops,
ScalarEvolution &SE,
DominatorTree &DT);
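// A minimal illustrative sketch (not part of the original header): normalize
// an expression for a post-increment use, rewrite it, then map it back. S,
// User and OperandVal are hypothetical values owned by the caller:
//
// PostIncLoopSet Loops;
// const SCEV *N = TransformForPostIncUse(NormalizeAutodetect, S, User,
// OperandVal, Loops, SE, DT);
// // ... work with the normalized (pre-increment) expression N ...
// const SCEV *D = TransformForPostIncUse(Denormalize, N, User,
// OperandVal, Loops, SE, DT);
// // D is expected to be equivalent to the original expression S.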
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/RegionIterator.h | //===- RegionIterator.h - Iterators to iteratate over Regions ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This file defines the iterators to iterate over the elements of a Region.
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_REGIONITERATOR_H
#define LLVM_ANALYSIS_REGIONITERATOR_H
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/IR/CFG.h"
#include "llvm/Support/raw_ostream.h"
namespace llvm {
//===----------------------------------------------------------------------===//
/// @brief Hierarchical RegionNode successor iterator.
///
/// This iterator iterates over all successors of a RegionNode.
///
/// For a BasicBlock RegionNode it skips all BasicBlocks that are not part of
/// the parent Region. Furthermore for BasicBlocks that start a subregion, a
/// RegionNode representing the subregion is returned.
///
/// For a subregion RegionNode there is just one successor. The RegionNode
/// representing the exit of the subregion.
template<class NodeType, class BlockT, class RegionT>
class RNSuccIterator {
public:
using iterator_category = std::forward_iterator_tag;
using value_type = NodeType;
using difference_type = std::ptrdiff_t;
using pointer = value_type *;
using reference = value_type &;
private:
typedef GraphTraits<BlockT*> BlockTraits;
typedef typename BlockTraits::ChildIteratorType SuccIterTy;
// The iterator works in two modes, bb mode or region mode.
enum ItMode {
// In BB mode it returns all successors of this BasicBlock as its
// successors.
ItBB,
// In region mode there is only one successor: the RegionNode mapping
// to the exit block of the RegionNode.
ItRgBegin, // At the beginning of the regionnode successor.
ItRgEnd // At the end of the regionnode successor.
};
// Use two bits to represent the iterator mode.
PointerIntPair<NodeType*, 2, ItMode> Node;
// The block successor iterator.
SuccIterTy BItor;
// advanceRegionSucc - A region node has only one successor. It reaches end
// once we advance it.
void advanceRegionSucc() {
assert(Node.getInt() == ItRgBegin && "Cannot advance region successor!");
Node.setInt(ItRgEnd);
}
NodeType* getNode() const{ return Node.getPointer(); }
// isRegionMode - Is the current iterator in region mode?
bool isRegionMode() const { return Node.getInt() != ItBB; }
// Get the immediate successor. This function may return a Basic Block
// RegionNode or a subregion RegionNode.
NodeType* getISucc(BlockT* BB) const {
NodeType *succ;
succ = getNode()->getParent()->getNode(BB);
assert(succ && "BB not in Region or entered subregion!");
return succ;
}
// getRegionSucc - Return the successor basic block of a SubRegion RegionNode.
inline BlockT* getRegionSucc() const {
assert(Node.getInt() == ItRgBegin && "Cannot get the region successor!");
return getNode()->template getNodeAs<RegionT>()->getExit();
}
// isExit - Is this the exit BB of the Region?
inline bool isExit(BlockT* BB) const {
return getNode()->getParent()->getExit() == BB;
}
public:
typedef RNSuccIterator<NodeType, BlockT, RegionT> Self;
/// @brief Create begin iterator of a RegionNode.
inline RNSuccIterator(NodeType* node)
: Node(node, node->isSubRegion() ? ItRgBegin : ItBB),
BItor(BlockTraits::child_begin(node->getEntry())) {
// Skip the exit block
if (!isRegionMode())
while (BlockTraits::child_end(node->getEntry()) != BItor && isExit(*BItor))
++BItor;
if (isRegionMode() && isExit(getRegionSucc()))
advanceRegionSucc();
}
/// @brief Create an end iterator.
inline RNSuccIterator(NodeType* node, bool)
: Node(node, node->isSubRegion() ? ItRgEnd : ItBB),
BItor(BlockTraits::child_end(node->getEntry())) {}
inline bool operator==(const Self& x) const {
assert(isRegionMode() == x.isRegionMode() && "Broken iterator!");
if (isRegionMode())
return Node.getInt() == x.Node.getInt();
else
return BItor == x.BItor;
}
inline bool operator!=(const Self& x) const { return !operator==(x); }
inline pointer operator*() const {
BlockT *BB = isRegionMode() ? getRegionSucc() : *BItor;
assert(!isExit(BB) && "Iterator out of range!");
return getISucc(BB);
}
inline Self& operator++() {
if(isRegionMode()) {
// The Region only has 1 successor.
advanceRegionSucc();
} else {
// Skip the exit.
do
++BItor;
while (BItor != BlockTraits::child_end(getNode()->getEntry())
&& isExit(*BItor));
}
return *this;
}
inline Self operator++(int) {
Self tmp = *this;
++*this;
return tmp;
}
};
//===----------------------------------------------------------------------===//
/// @brief Flat RegionNode iterator.
///
/// The Flat Region iterator will iterate over all BasicBlock RegionNodes that
/// are contained in the Region and its subregions. This is close to a virtual
/// control flow graph of the Region.
template<class NodeType, class BlockT, class RegionT>
class RNSuccIterator<FlatIt<NodeType>, BlockT, RegionT> {
typedef GraphTraits<BlockT*> BlockTraits;
typedef typename BlockTraits::ChildIteratorType SuccIterTy;
NodeType* Node;
SuccIterTy Itor;
public:
using iterator_category = std::forward_iterator_tag;
using value_type = NodeType;
using difference_type = std::ptrdiff_t;
using pointer = value_type *;
using reference = value_type &;
typedef RNSuccIterator<FlatIt<NodeType>, BlockT, RegionT> Self;
/// @brief Create the iterator from a RegionNode.
///
/// Note that the incoming node must be a bb node, otherwise it will trigger
/// an assertion when we try to get a BasicBlock.
inline RNSuccIterator(NodeType* node) :
Node(node),
Itor(BlockTraits::child_begin(node->getEntry())) {
assert(!Node->isSubRegion()
&& "Subregion node not allowed in flat iterating mode!");
assert(Node->getParent() && "A BB node must have a parent!");
// Skip the exit block of the iterating region.
while (BlockTraits::child_end(Node->getEntry()) != Itor
&& Node->getParent()->getExit() == *Itor)
++Itor;
}
/// @brief Create an end iterator
inline RNSuccIterator(NodeType* node, bool) :
Node(node),
Itor(BlockTraits::child_end(node->getEntry())) {
assert(!Node->isSubRegion()
&& "Subregion node not allowed in flat iterating mode!");
}
inline bool operator==(const Self& x) const {
assert(Node->getParent() == x.Node->getParent()
&& "Cannot compare iterators of different regions!");
return Itor == x.Itor && Node == x.Node;
}
inline bool operator!=(const Self& x) const { return !operator==(x); }
inline pointer operator*() const {
BlockT *BB = *Itor;
// Get the iterating region.
RegionT *Parent = Node->getParent();
// The only case that the successor reaches out of the region is it reaches
// the exit of the region.
assert(Parent->getExit() != BB && "iterator out of range!");
return Parent->getBBNode(BB);
}
inline Self& operator++() {
// Skip the exit block of the iterating region.
do
++Itor;
while (Itor != BlockTraits::child_end(Node->getEntry())
&& Node->getParent()->getExit() == *Itor);
return *this;
}
inline Self operator++(int) {
Self tmp = *this;
++*this;
return tmp;
}
};
template<class NodeType, class BlockT, class RegionT>
inline RNSuccIterator<NodeType, BlockT, RegionT> succ_begin(NodeType* Node) {
return RNSuccIterator<NodeType, BlockT, RegionT>(Node);
}
template<class NodeType, class BlockT, class RegionT>
inline RNSuccIterator<NodeType, BlockT, RegionT> succ_end(NodeType* Node) {
return RNSuccIterator<NodeType, BlockT, RegionT>(Node, true);
}
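// A minimal illustrative sketch (not part of the original header): walking the
// successors of a RegionNode with the helpers above. The Region R is assumed
// to come from RegionInfo:
//
// RegionNode *N = R->getNode(R->getEntry());
// for (auto I = succ_begin<RegionNode, BasicBlock, Region>(N),
// E = succ_end<RegionNode, BasicBlock, Region>(N);
// I != E; ++I)
// (void)*I; // RegionNode successors within the parent Region
//
// The GraphTraits specializations below make the same nodes reachable through
// generic graph iterators such as df_iterator.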
//===--------------------------------------------------------------------===//
// RegionNode GraphTraits specialization so the bbs in the region can be
// iterated by generic graph iterators.
//
// NodeT can either be region node or const region node, otherwise child_begin
// and child_end fail.
#define RegionNodeGraphTraits(NodeT, BlockT, RegionT) \
template<> struct GraphTraits<NodeT*> { \
typedef NodeT NodeType; \
typedef RNSuccIterator<NodeType, BlockT, RegionT> ChildIteratorType; \
static NodeType *getEntryNode(NodeType* N) { return N; } \
static inline ChildIteratorType child_begin(NodeType *N) { \
return RNSuccIterator<NodeType, BlockT, RegionT>(N); \
} \
static inline ChildIteratorType child_end(NodeType *N) { \
return RNSuccIterator<NodeType, BlockT, RegionT>(N, true); \
} \
}; \
template<> struct GraphTraits<FlatIt<NodeT*>> { \
typedef NodeT NodeType; \
typedef RNSuccIterator<FlatIt<NodeT>, BlockT, RegionT > ChildIteratorType; \
static NodeType *getEntryNode(NodeType* N) { return N; } \
static inline ChildIteratorType child_begin(NodeType *N) { \
return RNSuccIterator<FlatIt<NodeType>, BlockT, RegionT>(N); \
} \
static inline ChildIteratorType child_end(NodeType *N) { \
return RNSuccIterator<FlatIt<NodeType>, BlockT, RegionT>(N, true); \
} \
}
#define RegionGraphTraits(RegionT, NodeT) \
template<> struct GraphTraits<RegionT*> \
: public GraphTraits<NodeT*> { \
typedef df_iterator<NodeType*> nodes_iterator; \
static NodeType *getEntryNode(RegionT* R) { \
return R->getNode(R->getEntry()); \
} \
static nodes_iterator nodes_begin(RegionT* R) { \
return nodes_iterator::begin(getEntryNode(R)); \
} \
static nodes_iterator nodes_end(RegionT* R) { \
return nodes_iterator::end(getEntryNode(R)); \
} \
}; \
template<> struct GraphTraits<FlatIt<RegionT*> > \
: public GraphTraits<FlatIt<NodeT*> > { \
typedef df_iterator<NodeType*, SmallPtrSet<NodeType*, 8>, false, \
GraphTraits<FlatIt<NodeType*> > > nodes_iterator; \
static NodeType *getEntryNode(RegionT* R) { \
return R->getBBNode(R->getEntry()); \
} \
static nodes_iterator nodes_begin(RegionT* R) { \
return nodes_iterator::begin(getEntryNode(R)); \
} \
static nodes_iterator nodes_end(RegionT* R) { \
return nodes_iterator::end(getEntryNode(R)); \
} \
}
RegionNodeGraphTraits(RegionNode, BasicBlock, Region);
RegionNodeGraphTraits(const RegionNode, BasicBlock, Region);
RegionGraphTraits(Region, RegionNode);
RegionGraphTraits(const Region, const RegionNode);
template <> struct GraphTraits<RegionInfo*>
: public GraphTraits<FlatIt<RegionNode*> > {
typedef df_iterator<NodeType*, SmallPtrSet<NodeType*, 8>, false,
GraphTraits<FlatIt<NodeType*> > > nodes_iterator;
static NodeType *getEntryNode(RegionInfo *RI) {
return GraphTraits<FlatIt<Region*> >::getEntryNode(RI->getTopLevelRegion());
}
static nodes_iterator nodes_begin(RegionInfo* RI) {
return nodes_iterator::begin(getEntryNode(RI));
}
static nodes_iterator nodes_end(RegionInfo *RI) {
return nodes_iterator::end(getEntryNode(RI));
}
};
template <> struct GraphTraits<RegionInfoPass*>
: public GraphTraits<RegionInfo *> {
typedef df_iterator<NodeType*, SmallPtrSet<NodeType*, 8>, false,
GraphTraits<FlatIt<NodeType*> > > nodes_iterator;
static NodeType *getEntryNode(RegionInfoPass *RI) {
return GraphTraits<RegionInfo*>::getEntryNode(&RI->getRegionInfo());
}
static nodes_iterator nodes_begin(RegionInfoPass* RI) {
return GraphTraits<RegionInfo*>::nodes_begin(&RI->getRegionInfo());
}
static nodes_iterator nodes_end(RegionInfoPass *RI) {
return GraphTraits<RegionInfo*>::nodes_end(&RI->getRegionInfo());
}
};
} // End namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/AssumptionCache.h | //===- llvm/Analysis/AssumptionCache.h - Track @llvm.assume ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that keeps track of @llvm.assume intrinsics in
// the functions of a module (allowing assumptions within any function to be
// found cheaply by other parts of the optimizer).
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_ASSUMPTIONCACHE_H
#define LLVM_ANALYSIS_ASSUMPTIONCACHE_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include <memory>
namespace llvm {
// FIXME: Replace this brittle forward declaration with the include of the new
// PassManager.h when doing so doesn't break the PassManagerBuilder.
template <typename IRUnitT> class AnalysisManager;
class PreservedAnalyses;
/// \brief A cache of @llvm.assume calls within a function.
///
/// This cache provides fast lookup of assumptions within a function by caching
/// them and amortizing the cost of scanning for them across all queries. The
/// cache is also conservatively self-updating so that it will never return
/// incorrect results about a function even as the function is being mutated.
/// However, flushing the cache and rebuilding it (or explicitly updating it)
/// may allow it to discover new assumptions.
class AssumptionCache {
/// \brief The function for which this cache is handling assumptions.
///
/// We track this to lazily populate our assumptions.
Function &F;
/// \brief Vector of weak value handles to calls of the @llvm.assume
/// intrinsic.
SmallVector<WeakTrackingVH, 4> AssumeHandles;
/// \brief Flag tracking whether we have scanned the function yet.
///
/// We want to be as lazy about this as possible, and so we scan the function
/// at the last moment.
bool Scanned;
/// \brief Scan the function for assumptions and add them to the cache.
void scanFunction();
public:
/// \brief Construct an AssumptionCache from a function by scanning all of
/// its instructions.
AssumptionCache(Function &F) : F(F), Scanned(false) {}
/// \brief Add an @llvm.assume intrinsic to this function's cache.
///
/// The call passed in must be an instruction within this function and must
/// not already be in the cache.
void registerAssumption(CallInst *CI);
/// \brief Clear the cache of @llvm.assume intrinsics for a function.
///
/// It will be re-scanned the next time it is requested.
void clear() {
AssumeHandles.clear();
Scanned = false;
}
/// \brief Access the list of assumption handles currently tracked for this
/// function.
///
/// Note that these produce weak handles that may be null. The caller must
/// handle that case.
/// FIXME: We should replace this with pointee_iterator<filter_iterator<...>>
/// when we can write that to filter out the null values. Then caller code
/// will become simpler.
MutableArrayRef<WeakTrackingVH> assumptions() {
if (!Scanned)
scanFunction();
return AssumeHandles;
}
};
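// Example (illustrative sketch): a transform that already holds an
// AssumptionCache 'AC' for the current function can walk the tracked
// @llvm.assume calls. The weak handles may be null and must be skipped.
//
//   for (auto &VH : AC.assumptions()) {
//     if (!VH)
//       continue;
//     CallInst *Assume = cast<CallInst>(VH);
//     // ... reason about Assume->getArgOperand(0) ...
//   }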
/// \brief A function analysis which provides an \c AssumptionCache.
///
/// This analysis is intended for use with the new pass manager and will vend
/// assumption caches for a given function.
class AssumptionAnalysis {
static char PassID;
public:
typedef AssumptionCache Result;
/// \brief Opaque, unique identifier for this analysis pass.
static void *ID() { return (void *)&PassID; }
/// \brief Provide a name for the analysis for debugging and logging.
static StringRef name() { return "AssumptionAnalysis"; }
AssumptionAnalysis() {}
AssumptionAnalysis(const AssumptionAnalysis &Arg) {}
AssumptionAnalysis(AssumptionAnalysis &&Arg) {}
AssumptionAnalysis &operator=(const AssumptionAnalysis &RHS) { return *this; }
AssumptionAnalysis &operator=(AssumptionAnalysis &&RHS) { return *this; }
AssumptionCache run(Function &F) { return AssumptionCache(F); }
};
/// \brief Printer pass for the \c AssumptionAnalysis results.
class AssumptionPrinterPass {
raw_ostream &OS;
public:
explicit AssumptionPrinterPass(raw_ostream &OS) : OS(OS) {}
PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM);
static StringRef name() { return "AssumptionPrinterPass"; }
};
/// \brief An immutable pass that tracks lazily created \c AssumptionCache
/// objects.
///
/// This is essentially a workaround for the legacy pass manager's weaknesses;
/// it associates each assumption cache with a Function and clears it if the
/// function is deleted. The nature of the AssumptionCache is that it is not
/// invalidated by any changes to the function body and so this is sufficient
/// to be conservatively correct.
class AssumptionCacheTracker : public ImmutablePass {
/// A callback value handle applied to function objects, which we use to
/// delete our cache of intrinsics for a function when it is deleted.
class FunctionCallbackVH : public CallbackVH {
AssumptionCacheTracker *ACT;
void deleted() override;
public:
typedef DenseMapInfo<Value *> DMI;
FunctionCallbackVH(Value *V, AssumptionCacheTracker *ACT = nullptr)
: CallbackVH(V), ACT(ACT) {}
};
friend FunctionCallbackVH;
typedef DenseMap<FunctionCallbackVH, std::unique_ptr<AssumptionCache>,
FunctionCallbackVH::DMI> FunctionCallsMap;
FunctionCallsMap AssumptionCaches;
public:
/// \brief Get the cached assumptions for a function.
///
/// If no assumptions are cached, this will scan the function. Otherwise, the
/// existing cache will be returned.
AssumptionCache &getAssumptionCache(Function &F);
AssumptionCacheTracker();
~AssumptionCacheTracker() override;
void releaseMemory() override { AssumptionCaches.shrink_and_clear(); }
void verifyAnalysis() const override;
bool doFinalization(Module &) override {
verifyAnalysis();
return false;
}
static char ID; // Pass identification, replacement for typeid
};
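// Example (sketch): a legacy pass declares a dependence on the tracker and
// then asks it for the lazily built cache of the function it is visiting.
// 'F' is assumed to be the current Function.
//
//   // In getAnalysisUsage: AU.addRequired<AssumptionCacheTracker>();
//   AssumptionCache &AC =
//       getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);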
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/Passes.h | //===-- llvm/Analysis/Passes.h - Constructors for analyses ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This header file defines prototypes for accessor functions that expose passes
// in the analysis libraries.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_PASSES_H
#define LLVM_ANALYSIS_PASSES_H
namespace llvm {
class FunctionPass;
class ImmutablePass;
class LoopPass;
class ModulePass;
class Pass;
class PassInfo;
class LibCallInfo;
//===--------------------------------------------------------------------===//
//
// createGlobalsModRefPass - This pass provides alias and mod/ref info for
// global values that do not have their addresses taken.
//
Pass *createGlobalsModRefPass();
//===--------------------------------------------------------------------===//
//
// createAliasDebugger - This pass helps debug clients of AA
//
Pass *createAliasDebugger();
//===--------------------------------------------------------------------===//
//
// createAliasAnalysisCounterPass - This pass counts alias queries and how the
// alias analysis implementation responds.
//
ModulePass *createAliasAnalysisCounterPass();
//===--------------------------------------------------------------------===//
//
// createAAEvalPass - This pass implements a simple N^2 alias analysis
// accuracy evaluator.
//
FunctionPass *createAAEvalPass();
//===--------------------------------------------------------------------===//
//
// createNoAAPass - This pass implements an "I don't know" alias analysis.
//
ImmutablePass *createNoAAPass();
//===--------------------------------------------------------------------===//
//
// createBasicAliasAnalysisPass - This pass implements the stateless alias
// analysis.
//
ImmutablePass *createBasicAliasAnalysisPass();
//===--------------------------------------------------------------------===//
//
// createCFLAliasAnalysisPass - This pass implements a set-based approach to
// alias analysis.
//
ImmutablePass *createCFLAliasAnalysisPass();
//===--------------------------------------------------------------------===//
//
/// createLibCallAliasAnalysisPass - Create an alias analysis pass that knows
/// about the semantics of a set of libcalls specified by LCI. The newly
/// constructed pass takes ownership of the pointer that is provided.
///
FunctionPass *createLibCallAliasAnalysisPass(LibCallInfo *LCI);
//===--------------------------------------------------------------------===//
//
// createScalarEvolutionAliasAnalysisPass - This pass implements a simple
// alias analysis using ScalarEvolution queries.
//
FunctionPass *createScalarEvolutionAliasAnalysisPass();
//===--------------------------------------------------------------------===//
//
// createTypeBasedAliasAnalysisPass - This pass implements metadata-based
// type-based alias analysis.
//
ImmutablePass *createTypeBasedAliasAnalysisPass();
//===--------------------------------------------------------------------===//
//
// createScopedNoAliasAAPass - This pass implements metadata-based
// scoped noalias analysis.
//
ImmutablePass *createScopedNoAliasAAPass();
//===--------------------------------------------------------------------===//
//
// createObjCARCAliasAnalysisPass - This pass implements ObjC-ARC-based
// alias analysis.
//
ImmutablePass *createObjCARCAliasAnalysisPass();
FunctionPass *createPAEvalPass();
//===--------------------------------------------------------------------===//
//
/// createLazyValueInfoPass - This creates an instance of the LazyValueInfo
/// pass.
FunctionPass *createLazyValueInfoPass();
//===--------------------------------------------------------------------===//
//
// createDependenceAnalysisPass - This creates an instance of the
// DependenceAnalysis pass.
//
FunctionPass *createDependenceAnalysisPass();
//===--------------------------------------------------------------------===//
//
// createCostModelAnalysisPass - This creates an instance of the
// CostModelAnalysis pass.
//
FunctionPass *createCostModelAnalysisPass();
//===--------------------------------------------------------------------===//
//
// createDelinearizationPass - This pass attempts to restore
// multidimensional array indices from linearized expressions.
//
FunctionPass *createDelinearizationPass();
//===--------------------------------------------------------------------===//
//
// createDivergenceAnalysisPass - This pass determines which branches in a GPU
// program are divergent.
//
FunctionPass *createDivergenceAnalysisPass();
//===--------------------------------------------------------------------===//
//
// Minor pass prototypes, allowing us to expose them through bugpoint and
// analyze.
FunctionPass *createInstCountPass();
//===--------------------------------------------------------------------===//
//
// createRegionInfoPass - This pass finds all single entry single exit regions
// in a function and builds the region hierarchy.
//
FunctionPass *createRegionInfoPass();
// Print module-level debug info metadata in human-readable form.
ModulePass *createModuleDebugInfoPrinterPass();
//===--------------------------------------------------------------------===//
//
// createMemDepPrinter - This pass exhaustively collects all memdep
// information and prints it with -analyze.
//
FunctionPass *createMemDepPrinter();
//===--------------------------------------------------------------------===//
//
// createMemDerefPrinter - This pass collects memory dereferenceability
// information and prints it with -analyze.
//
FunctionPass *createMemDerefPrinter();
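// Example (sketch): these factory functions are typically handed directly to
// a legacy pass manager, which takes ownership of the created passes. 'M' is
// assumed to be the Module being analyzed.
//
//   legacy::PassManager PM;
//   PM.add(createBasicAliasAnalysisPass());
//   PM.add(createAAEvalPass());
//   PM.run(M);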
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/DxilConstantFolding.h | //===-- DxilConstantFolding.h - Constant folding for Dxil ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
// Copyright (C) Microsoft Corporation. All rights reserved.
//===----------------------------------------------------------------------===//
//
// This file declares routines for folding dxil intrinsics into constants when
// all operands are constants.
//
// We hook into the LLVM routines for constant folding so the function
// interfaces are dictated by what llvm provides.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_HLSLCONSTANTFOLDING_H
#define LLVM_ANALYSIS_HLSLCONSTANTFOLDING_H
#include "llvm/ADT/StringRef.h"
namespace llvm {
class Constant;
class Function;
class Type;
template <typename T> class ArrayRef;
} // namespace llvm
namespace hlsl {
/// ConstantFoldScalarCall - Try to constant fold the call instruction.
/// If successful, the constant result is returned, if not, null is returned.
llvm::Constant *
ConstantFoldScalarCall(llvm::StringRef Name, llvm::Type *Ty,
llvm::ArrayRef<llvm::Constant *> Operands);
/// ConstantFoldScalarCallExt
/// Hook point for constant folding of extensions.
llvm::Constant *
ConstantFoldScalarCallExt(llvm::StringRef Name, llvm::Type *Ty,
llvm::ArrayRef<llvm::Constant *> Operands);
/// CanConstantFoldCallTo - Return true if we can potentially constant
/// fold a call to the given function.
bool CanConstantFoldCallTo(const llvm::Function *F);
/// CanConstantFoldCallToExt
/// Hook point for constant folding of extensions.
bool CanConstantFoldCallToExt(const llvm::Function *F);
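// Example (sketch): fold a call whose operands are all constants. 'F' is the
// callee and 'Ops' is assumed to hold the constant operand values.
//
//   if (hlsl::CanConstantFoldCallTo(F))
//     if (llvm::Constant *C = hlsl::ConstantFoldScalarCall(
//             F->getName(), F->getReturnType(), Ops))
//       /* replace the call instruction with C */;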
} // namespace hlsl
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/CallGraph.h | //===- CallGraph.h - Build a Module's call graph ----------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file provides interfaces used to build and manipulate a call graph,
/// which is a very useful tool for interprocedural optimization.
///
/// Every function in a module is represented as a node in the call graph. The
/// callgraph node keeps track of which functions are called by the function
/// corresponding to the node.
///
/// A call graph may contain nodes where the function that they correspond to
/// is null. These 'external' nodes are used to represent control flow that is
/// not represented (or analyzable) in the module. In particular, this
/// analysis builds one external node such that:
/// 1. All functions in the module without internal linkage will have edges
/// from this external node, indicating that they could be called by
/// functions outside of the module.
/// 2. All functions whose address is used for something more than a direct
/// call, for example being stored into a memory location will also have
/// an edge from this external node. Since they may be called by an
/// unknown caller later, they must be tracked as such.
///
/// There is a second external node added for calls that leave this module.
/// Functions have a call edge to the external node iff:
/// 1. The function is external, reflecting the fact that they could call
/// anything without internal linkage or that has its address taken.
/// 2. The function contains an indirect function call.
///
/// As an extension in the future, there may be multiple nodes with a null
/// function. These will be used when we can prove (through pointer analysis)
/// that an indirect call site can call only a specific set of functions.
///
/// Because of these properties, the CallGraph captures a conservative superset
/// of all of the caller-callee relationships, which is useful for
/// transformations.
///
/// The CallGraph class also attempts to figure out what the root of the
/// CallGraph is, which it currently does by looking for a function named
/// 'main'. If no function named 'main' is found, the external node is used as
/// the entry node, reflecting the fact that any function without internal
/// linkage could be called into (which is common for libraries).
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_CALLGRAPH_H
#define LLVM_ANALYSIS_CALLGRAPH_H
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include <map>
namespace llvm {
class Function;
class Module;
class CallGraphNode;
/// \brief The basic data container for the call graph of a \c Module of IR.
///
/// This class exposes the interface to the call graph for a module of IR.
///
/// The core call graph itself can also be updated to reflect changes to the IR.
class CallGraph {
Module &M;
typedef std::map<const Function *, std::unique_ptr<CallGraphNode>> FunctionMapTy;
/// \brief A map from \c Function* to \c CallGraphNode*.
FunctionMapTy FunctionMap;
/// \brief Root is root of the call graph, or the external node if a 'main'
/// function couldn't be found.
CallGraphNode *Root;
/// \brief This node has edges to all external functions and those internal
/// functions that have their address taken.
CallGraphNode *ExternalCallingNode;
/// \brief This node has edges to it from all functions making indirect calls
/// or calling an external function.
std::unique_ptr<CallGraphNode> CallsExternalNode;
/// \brief Replace the function represented by this node by another.
///
/// This does not rescan the body of the function, so it is suitable when
/// splicing the body of one function to another while also updating all
/// callers from the old function to the new.
void spliceFunction(const Function *From, const Function *To);
/// \brief Add a function to the call graph, and link the node to all of the
/// functions that it calls.
void addToCallGraph(Function *F);
void reset(); // HLSL Change
public:
explicit CallGraph(Module &M);
CallGraph(CallGraph &&Arg);
~CallGraph();
void print(raw_ostream &OS) const;
void dump() const;
typedef FunctionMapTy::iterator iterator;
typedef FunctionMapTy::const_iterator const_iterator;
/// \brief Returns the module the call graph corresponds to.
Module &getModule() const { return M; }
inline iterator begin() { return FunctionMap.begin(); }
inline iterator end() { return FunctionMap.end(); }
inline const_iterator begin() const { return FunctionMap.begin(); }
inline const_iterator end() const { return FunctionMap.end(); }
/// \brief Returns the call graph node for the provided function.
inline const CallGraphNode *operator[](const Function *F) const {
const_iterator I = FunctionMap.find(F);
assert(I != FunctionMap.end() && "Function not in callgraph!");
return I->second.get();
}
/// \brief Returns the call graph node for the provided function.
inline CallGraphNode *operator[](const Function *F) {
const_iterator I = FunctionMap.find(F);
assert(I != FunctionMap.end() && "Function not in callgraph!");
return I->second.get();
}
/// \brief Returns the \c CallGraphNode which is used to represent
/// undetermined calls into the callgraph.
CallGraphNode *getExternalCallingNode() const { return ExternalCallingNode; }
CallGraphNode *getCallsExternalNode() const { return CallsExternalNode.get(); }
//===---------------------------------------------------------------------
// Functions to keep a call graph up to date with a function that has been
// modified.
//
/// \brief Unlink the function from this module, returning it.
///
/// Because this removes the function from the module, the call graph node is
/// destroyed. This is only valid if the function does not call any other
/// functions (i.e., there are no edges in its CGN). The easiest way to do
/// this is to dropAllReferences before calling this.
Function *removeFunctionFromModule(CallGraphNode *CGN);
/// \brief Similar to operator[], but this will insert a new CallGraphNode for
/// \c F if one does not already exist.
CallGraphNode *getOrInsertFunction(const Function *F);
};
/// \brief A node in the call graph for a module.
///
/// Typically represents a function in the call graph. There are also special
/// "null" nodes used to represent theoretical entries in the call graph.
class CallGraphNode {
public:
/// \brief A pair of the calling instruction (a call or invoke)
/// and the call graph node being called.
typedef std::pair<WeakTrackingVH, CallGraphNode *> CallRecord;
public:
typedef std::vector<CallRecord> CalledFunctionsVector;
/// \brief Creates a node for the specified function.
inline CallGraphNode(Function *F) : F(F), NumReferences(0) {}
~CallGraphNode() {
assert(NumReferences == 0 && "Node deleted while references remain");
}
typedef std::vector<CallRecord>::iterator iterator;
typedef std::vector<CallRecord>::const_iterator const_iterator;
/// \brief Returns the function that this call graph node represents.
Function *getFunction() const { return F; }
inline iterator begin() { return CalledFunctions.begin(); }
inline iterator end() { return CalledFunctions.end(); }
inline const_iterator begin() const { return CalledFunctions.begin(); }
inline const_iterator end() const { return CalledFunctions.end(); }
inline bool empty() const { return CalledFunctions.empty(); }
inline unsigned size() const { return (unsigned)CalledFunctions.size(); }
/// \brief Returns the number of other CallGraphNodes in this CallGraph that
/// reference this node in their callee list.
unsigned getNumReferences() const { return NumReferences; }
/// \brief Returns the i'th called function.
CallGraphNode *operator[](unsigned i) const {
assert(i < CalledFunctions.size() && "Invalid index");
return CalledFunctions[i].second;
}
/// \brief Print out this call graph node.
void dump() const;
void print(raw_ostream &OS) const;
//===---------------------------------------------------------------------
// Methods to keep a call graph up to date with a function that has been
// modified
//
/// \brief Removes all edges from this CallGraphNode to any functions it
/// calls.
void removeAllCalledFunctions() {
while (!CalledFunctions.empty()) {
CalledFunctions.back().second->DropRef();
CalledFunctions.pop_back();
}
}
/// \brief Moves all the callee information from N to this node.
void stealCalledFunctionsFrom(CallGraphNode *N) {
assert(CalledFunctions.empty() &&
"Cannot steal callsite information if I already have some");
std::swap(CalledFunctions, N->CalledFunctions);
}
/// \brief Adds a function to the list of functions called by this one.
void addCalledFunction(CallSite CS, CallGraphNode *M) {
assert(!CS.getInstruction() || !CS.getCalledFunction() ||
!CS.getCalledFunction()->isIntrinsic() ||
!Intrinsic::isLeaf(CS.getCalledFunction()->getIntrinsicID()));
CalledFunctions.emplace_back(CS.getInstruction(), M);
M->AddRef();
}
void removeCallEdge(iterator I) {
I->second->DropRef();
*I = CalledFunctions.back();
CalledFunctions.pop_back();
}
/// \brief Removes the edge in the node for the specified call site.
///
/// Note that this method takes linear time, so it should be used sparingly.
void removeCallEdgeFor(CallSite CS);
/// \brief Removes all call edges from this node to the specified callee
/// function.
///
/// This takes more time to execute than removeCallEdgeTo, so it should not
/// be used unless necessary.
void removeAnyCallEdgeTo(CallGraphNode *Callee);
/// \brief Removes one edge associated with a null callsite from this node to
/// the specified callee function.
void removeOneAbstractEdgeTo(CallGraphNode *Callee);
/// \brief Replaces the edge in the node for the specified call site with a
/// new one.
///
/// Note that this method takes linear time, so it should be used sparingly.
void replaceCallEdge(CallSite CS, CallSite NewCS, CallGraphNode *NewNode);
private:
friend class CallGraph;
AssertingVH<Function> F;
std::vector<CallRecord> CalledFunctions;
/// \brief The number of times that this CallGraphNode occurs in the
/// CalledFunctions array of this or other CallGraphNodes.
unsigned NumReferences;
CallGraphNode(const CallGraphNode &) = delete;
void operator=(const CallGraphNode &) = delete;
void DropRef() { --NumReferences; }
void AddRef() { ++NumReferences; }
/// \brief A special function that should only be used by the CallGraph class.
void allReferencesDropped() { NumReferences = 0; }
};
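// Example (illustrative sketch): enumerating every direct callee recorded in
// a call graph. 'CG' is assumed to be a CallGraph built for the module.
//
//   for (auto &Entry : CG) {
//     CallGraphNode *Caller = Entry.second.get();
//     for (const auto &Record : *Caller)
//       if (Function *Callee = Record.second->getFunction())
//         /* Callee is called (directly) from Caller's function */;
//   }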
/// \brief An analysis pass to compute the \c CallGraph for a \c Module.
///
/// This class implements the concept of an analysis pass used by the \c
/// ModuleAnalysisManager to run an analysis over a module and cache the
/// resulting data.
class CallGraphAnalysis {
public:
/// \brief A formulaic typedef to inform clients of the result type.
typedef CallGraph Result;
static void *ID() { return (void *)&PassID; }
/// \brief Compute the \c CallGraph for the module \c M.
///
/// The real work here is done in the \c CallGraph constructor.
CallGraph run(Module *M) { return CallGraph(*M); }
private:
static char PassID;
};
/// \brief The \c ModulePass which wraps up a \c CallGraph and the logic to
/// build it.
///
/// This class exposes both the interface to the call graph container and the
/// module pass which runs over a module of IR and produces the call graph. The
/// call graph interface is entirely a wrapper around a \c CallGraph object
/// which is stored internally for each module.
class CallGraphWrapperPass : public ModulePass {
std::unique_ptr<CallGraph> G;
public:
static char ID; // Class identification, replacement for typeinfo
CallGraphWrapperPass();
~CallGraphWrapperPass() override;
/// \brief The internal \c CallGraph around which the rest of this interface
/// is wrapped.
const CallGraph &getCallGraph() const { return *G; }
CallGraph &getCallGraph() { return *G; }
typedef CallGraph::iterator iterator;
typedef CallGraph::const_iterator const_iterator;
/// \brief Returns the module the call graph corresponds to.
Module &getModule() const { return G->getModule(); }
inline iterator begin() { return G->begin(); }
inline iterator end() { return G->end(); }
inline const_iterator begin() const { return G->begin(); }
inline const_iterator end() const { return G->end(); }
/// \brief Returns the call graph node for the provided function.
inline const CallGraphNode *operator[](const Function *F) const {
return (*G)[F];
}
/// \brief Returns the call graph node for the provided function.
inline CallGraphNode *operator[](const Function *F) { return (*G)[F]; }
/// \brief Returns the \c CallGraphNode which is used to represent
/// undetermined calls into the callgraph.
CallGraphNode *getExternalCallingNode() const {
return G->getExternalCallingNode();
}
CallGraphNode *getCallsExternalNode() const {
return G->getCallsExternalNode();
}
//===---------------------------------------------------------------------
// Functions to keep a call graph up to date with a function that has been
// modified.
//
/// \brief Unlink the function from this module, returning it.
///
/// Because this removes the function from the module, the call graph node is
/// destroyed. This is only valid if the function does not call any other
/// functions (i.e., there are no edges in its CGN). The easiest way to do
/// this is to dropAllReferences before calling this.
Function *removeFunctionFromModule(CallGraphNode *CGN) {
return G->removeFunctionFromModule(CGN);
}
/// \brief Similar to operator[], but this will insert a new CallGraphNode for
/// \c F if one does not already exist.
CallGraphNode *getOrInsertFunction(const Function *F) {
return G->getOrInsertFunction(F);
}
//===---------------------------------------------------------------------
// Implementation of the ModulePass interface needed here.
//
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnModule(Module &M) override;
void releaseMemory() override;
void print(raw_ostream &o, const Module *) const override;
void dump() const;
};
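// Example (sketch): a legacy ModulePass obtaining the call graph through the
// wrapper pass. 'MyPass' is a hypothetical pass.
//
//   void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<CallGraphWrapperPass>();
//   }
//   bool MyPass::runOnModule(Module &M) {
//     CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
//     // ... walk CG ...
//     return false;
//   }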
// //
///////////////////////////////////////////////////////////////////////////////
// GraphTraits specializations for call graphs so that they can be treated as
// graphs by the generic graph algorithms.
//
// Provide graph traits for traversing call graphs using standard graph
// traversals.
template <> struct GraphTraits<CallGraphNode *> {
typedef CallGraphNode NodeType;
typedef CallGraphNode::CallRecord CGNPairTy;
static NodeType *getEntryNode(CallGraphNode *CGN) { return CGN; }
static CallGraphNode *CGNGetValue(CGNPairTy P) { return P.second; }
typedef mapped_iterator<NodeType::iterator, decltype(&CGNGetValue)> ChildIteratorType;
static inline ChildIteratorType child_begin(NodeType *N) {
return ChildIteratorType(N->begin(), &CGNGetValue);
}
static inline ChildIteratorType child_end(NodeType *N) {
return ChildIteratorType(N->end(), &CGNGetValue);
}
};
template <> struct GraphTraits<const CallGraphNode *> {
typedef const CallGraphNode NodeType;
typedef CallGraphNode::CallRecord CGNPairTy;
static NodeType *getEntryNode(const CallGraphNode *CGN) { return CGN; }
static const CallGraphNode *CGNGetValue(CGNPairTy P) { return P.second; }
typedef mapped_iterator<NodeType::const_iterator, decltype(&CGNGetValue)>
ChildIteratorType;
static inline ChildIteratorType child_begin(NodeType *N) {
return ChildIteratorType(N->begin(), &CGNGetValue);
}
static inline ChildIteratorType child_end(NodeType *N) {
return ChildIteratorType(N->end(), &CGNGetValue);
}
};
template <>
struct GraphTraits<CallGraph *> : public GraphTraits<CallGraphNode *> {
static NodeType *getEntryNode(CallGraph *CGN) {
return CGN->getExternalCallingNode(); // Start at the external node!
}
typedef std::pair<const Function *const, std::unique_ptr<CallGraphNode>>
PairTy;
static CallGraphNode *CGGetValuePtr(const PairTy &P) {
return P.second.get();
}
// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
typedef mapped_iterator<CallGraph::iterator, decltype(&CGGetValuePtr)> nodes_iterator;
static nodes_iterator nodes_begin(CallGraph *CG) {
return nodes_iterator(CG->begin(), &CGGetValuePtr);
}
static nodes_iterator nodes_end(CallGraph *CG) {
return nodes_iterator(CG->end(), &CGGetValuePtr);
}
};
template <>
struct GraphTraits<const CallGraph *> : public GraphTraits<
const CallGraphNode *> {
static NodeType *getEntryNode(const CallGraph *CGN) {
return CGN->getExternalCallingNode(); // Start at the external node!
}
typedef std::pair<const Function *const, std::unique_ptr<CallGraphNode>>
PairTy;
static const CallGraphNode *CGGetValuePtr(const PairTy &P) {
return P.second.get();
}
// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
typedef mapped_iterator<CallGraph::const_iterator, decltype(&CGGetValuePtr)>
nodes_iterator;
static nodes_iterator nodes_begin(const CallGraph *CG) {
return nodes_iterator(CG->begin(), &CGGetValuePtr);
}
static nodes_iterator nodes_end(const CallGraph *CG) {
return nodes_iterator(CG->end(), &CGGetValuePtr);
}
};
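// Example (illustrative sketch): with the traits above, generic algorithms
// such as depth-first traversal (llvm/ADT/DepthFirstIterator.h) can walk the
// whole call graph starting at the external calling node. 'CG' is assumed to
// be a CallGraph*.
//
//   for (auto I = df_begin(CG), E = df_end(CG); I != E; ++I) {
//     CallGraphNode *N = *I;
//     // ... visit N ...
//   }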
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/PtrUseVisitor.h | //===- PtrUseVisitor.h - InstVisitors over a pointers uses ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides a collection of visitors which walk the (instruction)
/// uses of a pointer. These visitors all provide the same essential behavior
/// as an InstVisitor with similar template-based flexibility and
/// implementation strategies.
///
/// These can be used, for example, to quickly analyze the uses of an alloca,
/// global variable, or function argument.
///
/// FIXME: Provide a variant which doesn't track offsets and is cheaper.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_PTRUSEVISITOR_H
#define LLVM_ANALYSIS_PTRUSEVISITOR_H
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Compiler.h"
namespace llvm {
namespace detail {
/// \brief Implementation of non-dependent functionality for \c PtrUseVisitor.
///
/// See \c PtrUseVisitor for the public interface and detailed comments about
/// usage. This class is just a helper base class which is not templated and
/// contains all common code to be shared between different instantiations of
/// PtrUseVisitor.
class PtrUseVisitorBase {
public:
/// \brief This class provides information about the result of a visit.
///
/// After walking all the users (recursively) of a pointer, the basic
/// infrastructure records some commonly useful information such as escape
/// analysis and whether the visit completed or aborted early.
class PtrInfo {
public:
PtrInfo() : AbortedInfo(nullptr, false), EscapedInfo(nullptr, false) {}
/// \brief Reset the pointer info, clearing all state.
void reset() {
AbortedInfo.setPointer(nullptr);
AbortedInfo.setInt(false);
EscapedInfo.setPointer(nullptr);
EscapedInfo.setInt(false);
}
/// \brief Did we abort the visit early?
bool isAborted() const { return AbortedInfo.getInt(); }
/// \brief Is the pointer escaped at some point?
bool isEscaped() const { return EscapedInfo.getInt(); }
/// \brief Get the instruction causing the visit to abort.
/// \returns a pointer to the instruction causing the abort if one is
/// available; otherwise returns null.
Instruction *getAbortingInst() const { return AbortedInfo.getPointer(); }
/// \brief Get the instruction causing the pointer to escape.
/// \returns a pointer to the instruction which escapes the pointer if one
/// is available; otherwise returns null.
Instruction *getEscapingInst() const { return EscapedInfo.getPointer(); }
/// \brief Mark the visit as aborted. Intended for use in a void return.
/// \param I The instruction which caused the visit to abort, if available.
void setAborted(Instruction *I = nullptr) {
AbortedInfo.setInt(true);
AbortedInfo.setPointer(I);
}
/// \brief Mark the pointer as escaped. Intended for use in a void return.
/// \param I The instruction which escapes the pointer, if available.
void setEscaped(Instruction *I = nullptr) {
EscapedInfo.setInt(true);
EscapedInfo.setPointer(I);
}
/// \brief Mark the pointer as escaped, and the visit as aborted. Intended
/// for use in a void return.
/// \param I The instruction which both escapes the pointer and aborts the
/// visit, if available.
void setEscapedAndAborted(Instruction *I = nullptr) {
setEscaped(I);
setAborted(I);
}
private:
PointerIntPair<Instruction *, 1, bool> AbortedInfo, EscapedInfo;
};
protected:
const DataLayout &DL;
/// \name Visitation infrastructure
/// @{
/// \brief The info collected about the pointer being visited thus far.
PtrInfo PI;
/// \brief A struct of the data needed to visit a particular use.
///
/// This is used to maintain a worklist of to-visit uses, which makes the
/// visit iterative rather than recursive.
struct UseToVisit {
typedef PointerIntPair<Use *, 1, bool> UseAndIsOffsetKnownPair;
UseAndIsOffsetKnownPair UseAndIsOffsetKnown;
APInt Offset;
};
/// \brief The worklist of to-visit uses.
SmallVector<UseToVisit, 8> Worklist;
/// \brief A set of visited uses to break cycles in unreachable code.
SmallPtrSet<Use *, 8> VisitedUses;
/// @}
/// \name Per-visit state
/// This state is reset for each instruction visited.
/// @{
/// \brief The use currently being visited.
Use *U;
/// \brief True if we have a known constant offset for the use currently
/// being visited.
bool IsOffsetKnown;
/// \brief The constant offset of the use if that is known.
APInt Offset;
/// @}
/// Note that the constructor is protected because this class must be a base
/// class; we can't create instances of this class directly.
PtrUseVisitorBase(const DataLayout &DL) : DL(DL) {}
/// \brief Enqueue the users of this instruction in the visit worklist.
///
/// This will visit the users with the same offset of the current visit
/// (including an unknown offset if that is the current state).
void enqueueUsers(Instruction &I);
/// \brief Walk the operands of a GEP and adjust the offset as appropriate.
///
/// This routine does the heavy lifting of the pointer walk by computing
/// offsets and looking through GEPs.
bool adjustOffsetForGEP(GetElementPtrInst &GEPI);
};
} // end namespace detail
/// \brief A base class for visitors over the uses of a pointer value.
///
/// Once constructed, a user can call \c visit on a pointer value, and this
/// will walk its uses and visit each instruction using an InstVisitor. It also
/// provides visit methods which will recurse through any pointer-to-pointer
/// transformations such as GEPs and bitcasts.
///
/// During the visit, the current Use* being visited is available to the
/// subclass, as well as the current offset from the original base pointer if
/// known.
///
/// The recursive visit of uses is accomplished with a worklist, so the only
/// ordering guarantee is that an instruction is visited before any uses of it
/// are visited. Note that this does *not* mean before any of its users are
/// visited! This is because users can be visited multiple times due to
/// multiple, different uses of pointers derived from the same base.
///
/// A particular Use will only be visited once, but a User may be visited
/// multiple times, once per Use. These visits may notably have different
/// offsets.
///
/// All visit methods on the underlying InstVisitor return a boolean. This
/// return short-circuits the visit, stopping it immediately.
///
/// FIXME: Generalize this for all values rather than just instructions.
template <typename DerivedT>
class PtrUseVisitor : protected InstVisitor<DerivedT>,
public detail::PtrUseVisitorBase {
friend class InstVisitor<DerivedT>;
typedef InstVisitor<DerivedT> Base;
public:
PtrUseVisitor(const DataLayout &DL) : PtrUseVisitorBase(DL) {}
/// \brief Recursively visit the uses of the given pointer.
/// \returns An info struct about the pointer. See \c PtrInfo for details.
PtrInfo visitPtr(Instruction &I) {
// This must be a pointer type. Get an integer type suitable to hold
// offsets on this pointer.
// FIXME: Support a vector of pointers.
assert(I.getType()->isPointerTy());
IntegerType *IntPtrTy = cast<IntegerType>(DL.getIntPtrType(I.getType()));
IsOffsetKnown = true;
Offset = APInt(IntPtrTy->getBitWidth(), 0);
PI.reset();
// Enqueue the uses of this pointer.
enqueueUsers(I);
// Visit all the uses off the worklist until it is empty.
while (!Worklist.empty()) {
UseToVisit ToVisit = Worklist.pop_back_val();
U = ToVisit.UseAndIsOffsetKnown.getPointer();
IsOffsetKnown = ToVisit.UseAndIsOffsetKnown.getInt();
if (IsOffsetKnown)
Offset = std::move(ToVisit.Offset);
Instruction *I = cast<Instruction>(U->getUser());
static_cast<DerivedT*>(this)->visit(I);
if (PI.isAborted())
break;
}
return PI;
}
protected:
void visitStoreInst(StoreInst &SI) {
if (SI.getValueOperand() == U->get())
PI.setEscaped(&SI);
}
void visitBitCastInst(BitCastInst &BC) {
enqueueUsers(BC);
}
void visitPtrToIntInst(PtrToIntInst &I) {
PI.setEscaped(&I);
}
void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
if (GEPI.use_empty())
return;
// If we can't walk the GEP, clear the offset.
if (!adjustOffsetForGEP(GEPI)) {
IsOffsetKnown = false;
Offset = APInt();
}
// Enqueue the users now that the offset has been adjusted.
enqueueUsers(GEPI);
}
// No-op intrinsics which we know don't escape the pointer to logic in
// some other function.
void visitDbgInfoIntrinsic(DbgInfoIntrinsic &I) {}
void visitMemIntrinsic(MemIntrinsic &I) {}
void visitIntrinsicInst(IntrinsicInst &II) {
switch (II.getIntrinsicID()) {
default:
return Base::visitIntrinsicInst(II);
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
return; // No-op intrinsics.
}
}
// Generically, arguments to calls and invokes escape the pointer to some
// other function. Mark that.
void visitCallSite(CallSite CS) {
PI.setEscaped(CS.getInstruction());
Base::visitCallSite(CS);
}
};
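// Example (a minimal sketch): a visitor that stops as soon as the visited
// pointer is written to memory. 'StoreAborter' is a hypothetical subclass.
//
//   struct StoreAborter : public PtrUseVisitor<StoreAborter> {
//     StoreAborter(const DataLayout &DL) : PtrUseVisitor<StoreAborter>(DL) {}
//     void visitStoreInst(StoreInst &SI) {
//       if (SI.getValueOperand() == U->get())
//         PI.setEscaped(&SI);
//       PI.setAborted(&SI);
//     }
//   };
//
//   // Usage: bool Stored = StoreAborter(DL).visitPtr(*SomeAlloca).isAborted();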
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/BranchProbabilityInfo.h | //===--- BranchProbabilityInfo.h - Branch Probability Analysis --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass is used to evaluate branch probabilities.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_BRANCHPROBABILITYINFO_H
#define LLVM_ANALYSIS_BRANCHPROBABILITYINFO_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/CFG.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
namespace llvm {
class LoopInfo;
class raw_ostream;
/// \brief Analysis pass providing branch probability information.
///
/// This is a function analysis pass which provides information on the relative
/// probabilities of each "edge" in the function's CFG where such an edge is
/// defined by a pair (PredBlock and an index in the successors). The
/// probability of an edge from one block is always relative to the
/// probabilities of other edges from the block. The probabilities of all edges
/// from a block sum to exactly one (100%).
/// We use a pair (PredBlock and an index in the successors) to uniquely
/// identify an edge, since we can have multiple edges from Src to Dst.
/// As an example, we can have a switch which jumps to Dst with value 0 and
/// value 10.
class BranchProbabilityInfo : public FunctionPass {
public:
static char ID;
BranchProbabilityInfo() : FunctionPass(ID) {
initializeBranchProbabilityInfoPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnFunction(Function &F) override;
void releaseMemory() override;
void print(raw_ostream &OS, const Module *M = nullptr) const override;
/// \brief Get an edge's probability, relative to other out-edges of the Src.
///
/// This routine provides access to the fractional probability between zero
/// (0%) and one (100%) of this edge executing, relative to other edges
/// leaving the 'Src' block. The returned probability is never zero, and can
/// only be one if the source block has only one successor.
BranchProbability getEdgeProbability(const BasicBlock *Src,
unsigned IndexInSuccessors) const;
/// \brief Get the probability of going from Src to Dst.
///
/// It returns the sum of all probabilities for edges from Src to Dst.
BranchProbability getEdgeProbability(const BasicBlock *Src,
const BasicBlock *Dst) const;
/// \brief Test if an edge is hot relative to other out-edges of the Src.
///
/// Check whether this edge out of the source block is 'hot'. We define hot
/// as having a relative probability >= 80%.
bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const;
/// \brief Retrieve the hot successor of a block if one exists.
///
/// Given a basic block, look through its successors and if one exists for
/// which \see isEdgeHot would return true, return that successor block.
BasicBlock *getHotSucc(BasicBlock *BB) const;
/// \brief Print an edge's probability.
///
/// Retrieves an edge's probability similarly to \see getEdgeProbability, but
/// then prints that probability to the provided stream. That stream is then
/// returned.
raw_ostream &printEdgeProbability(raw_ostream &OS, const BasicBlock *Src,
const BasicBlock *Dst) const;
/// \brief Get the raw edge weight calculated for the edge.
///
/// This returns the raw edge weight. It is guaranteed to fall between 1 and
/// UINT32_MAX. Note that the raw edge weight is not meaningful in isolation.
/// This interface should be used very carefully, and primarily by routines
/// are updating the analysis by later calling setEdgeWeight.
uint32_t getEdgeWeight(const BasicBlock *Src,
unsigned IndexInSuccessors) const;
/// \brief Get the raw edge weight calculated for the block pair.
///
/// This returns the sum of all raw edge weights from Src to Dst.
/// It is guaranteed to fall between 1 and UINT32_MAX.
uint32_t getEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst) const;
uint32_t getEdgeWeight(const BasicBlock *Src,
succ_const_iterator Dst) const;
/// \brief Set the raw edge weight for a given edge.
///
/// This allows a pass to explicitly set the edge weight for an edge. It can
/// be used when updating the CFG to update and preserve the branch
/// probability information. Read the implementation of how these edge
/// weights are calculated carefully before using!
void setEdgeWeight(const BasicBlock *Src, unsigned IndexInSuccessors,
uint32_t Weight);
static uint32_t getBranchWeightStackProtector(bool IsLikely) {
return IsLikely ? (1u << 20) - 1 : 1;
}
private:
// Since we allow duplicate edges from one basic block to another, we use
// a pair (PredBlock and an index in the successors) to specify an edge.
typedef std::pair<const BasicBlock *, unsigned> Edge;
// Default weight value. Used when we don't have information about the edge.
// TODO: DEFAULT_WEIGHT makes sense during static predication, when none of
// the successors have a weight yet. But it doesn't make sense when providing
// weight to an edge that may have siblings with non-zero weights. This can
// be handled various ways, but it's probably fine for an edge with unknown
// weight to just "inherit" the non-zero weight of an adjacent successor.
static const uint32_t DEFAULT_WEIGHT = 16;
DenseMap<Edge, uint32_t> Weights;
/// \brief Handle to the LoopInfo analysis.
LoopInfo *LI;
/// \brief Track the last function we run over for printing.
Function *LastF;
/// \brief Track the set of blocks directly succeeded by a returning block.
SmallPtrSet<BasicBlock *, 16> PostDominatedByUnreachable;
/// \brief Track the set of blocks that always lead to a cold call.
SmallPtrSet<BasicBlock *, 16> PostDominatedByColdCall;
/// \brief Get sum of the block successors' weights.
uint32_t getSumForBlock(const BasicBlock *BB) const;
bool calcUnreachableHeuristics(BasicBlock *BB);
bool calcMetadataWeights(BasicBlock *BB);
bool calcColdCallHeuristics(BasicBlock *BB);
bool calcPointerHeuristics(BasicBlock *BB);
bool calcLoopBranchHeuristics(BasicBlock *BB);
bool calcZeroHeuristics(BasicBlock *BB);
bool calcFloatingPointHeuristics(BasicBlock *BB);
bool calcInvokeHeuristics(BasicBlock *BB);
};
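// Example (sketch): a function pass that required this analysis can ask which
// successors of a block are likely. 'BB' is assumed to be a BasicBlock*.
//
//   BranchProbabilityInfo &BPI = getAnalysis<BranchProbabilityInfo>();
//   for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
//     if (BPI.isEdgeHot(BB, *SI))
//       /* this edge is taken with relative probability >= 80% */;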
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Analysis/DOTGraphTraitsPass.h | //===-- DOTGraphTraitsPass.h - Print/View dotty graphs-----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Templates to create dotty viewer and printer passes for GraphTraits graphs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_DOTGRAPHTRAITSPASS_H
#define LLVM_ANALYSIS_DOTGRAPHTRAITSPASS_H
#include "llvm/Analysis/CFGPrinter.h"
#include "llvm/Pass.h"
#include "llvm/Support/FileSystem.h"
namespace llvm {
/// \brief Default traits class for extracting a graph from an analysis pass.
///
/// This assumes that 'GraphT' is 'AnalysisT *' and so just passes it through.
template <typename AnalysisT, typename GraphT = AnalysisT *>
struct DefaultAnalysisGraphTraits {
static GraphT getGraph(AnalysisT *A) { return A; }
};
template <
typename AnalysisT, bool IsSimple, typename GraphT = AnalysisT *,
typename AnalysisGraphTraitsT = DefaultAnalysisGraphTraits<AnalysisT> >
class DOTGraphTraitsViewer : public FunctionPass {
public:
DOTGraphTraitsViewer(StringRef GraphName, char &ID)
: FunctionPass(ID), Name(GraphName) {}
bool runOnFunction(Function &F) override {
GraphT Graph = AnalysisGraphTraitsT::getGraph(&getAnalysis<AnalysisT>());
std::string GraphName = DOTGraphTraits<GraphT>::getGraphName(Graph);
std::string Title = GraphName + " for '" + F.getName().str() + "' function";
// HLSL Change Starts
if (OSOverride != nullptr) {
*OSOverride << "\ngraph: " << GraphName << ".dot\n";
llvm::WriteGraph(*OSOverride, Graph, false, Title);
return false;
}
// HLSL Change Ends
ViewGraph(Graph, Name, IsSimple, Title);
return false;
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
AU.addRequired<AnalysisT>();
}
private:
std::string Name;
};
template <
typename AnalysisT, bool IsSimple, typename GraphT = AnalysisT *,
typename AnalysisGraphTraitsT = DefaultAnalysisGraphTraits<AnalysisT> >
class DOTGraphTraitsPrinter : public FunctionPass {
public:
DOTGraphTraitsPrinter(StringRef GraphName, char &ID)
: FunctionPass(ID), Name(GraphName) {}
bool runOnFunction(Function &F) override {
GraphT Graph = AnalysisGraphTraitsT::getGraph(&getAnalysis<AnalysisT>());
// HLSL Change Starts
if (OSOverride != nullptr) {
*OSOverride << "\ngraph: " << Name + "." + F.getName().str() + ".dot\n";
llvm::WriteGraph(*OSOverride, Graph, false, DOTGraphTraits<GraphT>::getGraphName(Graph));
return false;
}
// HLSL Change Ends
std::string Filename = Name + "." + F.getName().str() + ".dot";
std::error_code EC;
errs() << "Writing '" << Filename << "'...";
raw_fd_ostream File(Filename, EC, sys::fs::F_Text);
std::string GraphName = DOTGraphTraits<GraphT>::getGraphName(Graph);
std::string Title = GraphName + " for '" + F.getName().str() + "' function";
if (!EC)
WriteGraph(File, Graph, IsSimple, Title);
else
errs() << " error opening file for writing!";
errs() << "\n";
return false;
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
AU.addRequired<AnalysisT>();
}
private:
std::string Name;
};
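// Example (a sketch): declaring a '-dot-...' style printer pass for a
// hypothetical analysis 'MyAnalysisPass' whose graph type already has
// GraphTraits and DOTGraphTraits specializations.
//
//   struct MyAnalysisDotPrinter
//       : public DOTGraphTraitsPrinter<MyAnalysisPass, /*IsSimple=*/false> {
//     static char ID;
//     MyAnalysisDotPrinter()
//         : DOTGraphTraitsPrinter<MyAnalysisPass, false>("myanalysis", ID) {}
//   };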
template <
typename AnalysisT, bool IsSimple, typename GraphT = AnalysisT *,
typename AnalysisGraphTraitsT = DefaultAnalysisGraphTraits<AnalysisT> >
class DOTGraphTraitsModuleViewer : public ModulePass {
public:
DOTGraphTraitsModuleViewer(StringRef GraphName, char &ID)
: ModulePass(ID), Name(GraphName) {}
bool runOnModule(Module &M) override {
GraphT Graph = AnalysisGraphTraitsT::getGraph(&getAnalysis<AnalysisT>());
std::string Title = DOTGraphTraits<GraphT>::getGraphName(Graph);
// HLSL Change Starts
if (OSOverride != nullptr) {
*OSOverride << "\ngraph: " << Title << ".dot\n";
llvm::WriteGraph(*OSOverride, Graph, false, Title);
return false;
}
// HLSL Change Ends
ViewGraph(Graph, Name, IsSimple, Title);
return false;
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
AU.addRequired<AnalysisT>();
}
private:
std::string Name;
};
template <
typename AnalysisT, bool IsSimple, typename GraphT = AnalysisT *,
typename AnalysisGraphTraitsT = DefaultAnalysisGraphTraits<AnalysisT> >
class DOTGraphTraitsModulePrinter : public ModulePass {
public:
DOTGraphTraitsModulePrinter(StringRef GraphName, char &ID)
: ModulePass(ID), Name(GraphName) {}
bool runOnModule(Module &M) override {
GraphT Graph = AnalysisGraphTraitsT::getGraph(&getAnalysis<AnalysisT>());
// HLSL Change Starts
if (OSOverride != nullptr) {
*OSOverride << "\ngraph: " << Name << ".dot\n";
llvm::WriteGraph(*OSOverride, Graph, false, DOTGraphTraits<GraphT>::getGraphName(Graph));
return false;
}
// HLSL Change Ends
std::string Filename = Name + ".dot";
std::error_code EC;
errs() << "Writing '" << Filename << "'...";
raw_fd_ostream File(Filename, EC, sys::fs::F_Text);
std::string Title = DOTGraphTraits<GraphT>::getGraphName(Graph);
if (!EC)
WriteGraph(File, Graph, IsSimple, Title);
else
errs() << " error opening file for writing!";
errs() << "\n";
return false;
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
AU.addRequired<AnalysisT>();
}
private:
std::string Name;
};
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Linker/Linker.h | //===- Linker.h - Module Linker Interface -----------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LINKER_LINKER_H
#define LLVM_LINKER_LINKER_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/IR/DiagnosticInfo.h"
namespace llvm {
class Module;
class StructType;
class Type;
/// This class provides the core functionality of linking in LLVM. It keeps a
/// pointer to the merged module so far. It doesn't take ownership of the
/// module since it is assumed that the user of this class will want to do
/// something with it after the linking.
class Linker {
public:
struct StructTypeKeyInfo {
struct KeyTy {
ArrayRef<Type *> ETypes;
bool IsPacked;
KeyTy(ArrayRef<Type *> E, bool P);
KeyTy(const StructType *ST);
bool operator==(const KeyTy &that) const;
bool operator!=(const KeyTy &that) const;
};
static StructType *getEmptyKey();
static StructType *getTombstoneKey();
static unsigned getHashValue(const KeyTy &Key);
static unsigned getHashValue(const StructType *ST);
static bool isEqual(const KeyTy &LHS, const StructType *RHS);
static bool isEqual(const StructType *LHS, const StructType *RHS);
};
typedef DenseSet<StructType *, StructTypeKeyInfo> NonOpaqueStructTypeSet;
typedef DenseSet<StructType *> OpaqueStructTypeSet;
struct IdentifiedStructTypeSet {
// The set of opaque types in the composite module.
OpaqueStructTypeSet OpaqueStructTypes;
// The set of identified but non opaque structures in the composite module.
NonOpaqueStructTypeSet NonOpaqueStructTypes;
void addNonOpaque(StructType *Ty);
void switchToNonOpaque(StructType *Ty);
void addOpaque(StructType *Ty);
StructType *findNonOpaque(ArrayRef<Type *> ETypes, bool IsPacked);
bool hasType(StructType *Ty);
};
Linker(Module *M, DiagnosticHandlerFunction DiagnosticHandler);
Linker(Module *M);
~Linker();
Module *getModule() const { return Composite; }
void deleteModule();
/// \brief Link \p Src into the composite. The source is destroyed.
/// Passing OverrideSymbols as true will have symbols from Src
/// shadow those in the Dest.
/// Returns true on error.
bool linkInModule(Module *Src, bool OverrideSymbols = false);
/// \brief Set the composite to the passed-in module.
void setModule(Module *Dst);
static bool LinkModules(Module *Dest, Module *Src,
DiagnosticHandlerFunction DiagnosticHandler);
static bool LinkModules(Module *Dest, Module *Src);
private:
void init(Module *M, DiagnosticHandlerFunction DiagnosticHandler);
Module *Composite;
IdentifiedStructTypeSet IdentifiedStructTypes;
DiagnosticHandlerFunction DiagnosticHandler;
};
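// Example (illustrative sketch): pulling one module into another. Both
// modules are assumed to live in the same LLVMContext; the source module is
// consumed by the link.
//
//   if (Linker::LinkModules(DestModule, SrcModule))
//     /* linking failed; a diagnostic has been reported */;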
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Target/TargetMachine.h | //===-- llvm/Target/TargetMachine.h - Target Information --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the TargetMachine and LLVMTargetMachine classes.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TARGET_TARGETMACHINE_H
#define LLVM_TARGET_TARGETMACHINE_H
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <string>
namespace llvm {
class InstrItineraryData;
class GlobalValue;
class Mangler;
class MachineFunctionInitializer;
class MCAsmInfo;
class MCCodeGenInfo;
class MCContext;
class MCInstrInfo;
class MCRegisterInfo;
class MCSubtargetInfo;
class MCSymbol;
class Target;
class DataLayout;
class TargetLibraryInfo;
class TargetFrameLowering;
class TargetIRAnalysis;
class TargetIntrinsicInfo;
class TargetLowering;
class TargetPassConfig;
class TargetRegisterInfo;
class TargetSelectionDAGInfo;
class TargetSubtargetInfo;
class TargetTransformInfo;
class formatted_raw_ostream;
class raw_ostream;
class raw_pwrite_stream;
class TargetLoweringObjectFile;
// The old pass manager infrastructure is hidden in a legacy namespace now.
namespace legacy {
class PassManagerBase;
}
using legacy::PassManagerBase;
// //
///////////////////////////////////////////////////////////////////////////////
///
/// Primary interface to the complete machine description for the target
/// machine. All target-specific information should be accessible through this
/// interface.
///
class TargetMachine {
TargetMachine(const TargetMachine &) = delete;
void operator=(const TargetMachine &) = delete;
protected: // Can only create subclasses.
TargetMachine(const Target &T, StringRef DataLayoutString,
const Triple &TargetTriple, StringRef CPU, StringRef FS,
const TargetOptions &Options);
/// The Target that this machine was created for.
const Target &TheTarget;
/// For ABI type size and alignment.
const DataLayout DL;
/// Triple string, CPU name, and target feature strings the TargetMachine
/// instance is created with.
Triple TargetTriple;
std::string TargetCPU;
std::string TargetFS;
/// Low level target information such as relocation model. Non-const to
/// allow resetting optimization level per-function.
MCCodeGenInfo *CodeGenInfo;
/// Contains target specific asm information.
const MCAsmInfo *AsmInfo;
const MCRegisterInfo *MRI;
const MCInstrInfo *MII;
const MCSubtargetInfo *STI;
unsigned RequireStructuredCFG : 1;
public:
mutable TargetOptions Options;
virtual ~TargetMachine();
const Target &getTarget() const { return TheTarget; }
const Triple &getTargetTriple() const { return TargetTriple; }
StringRef getTargetCPU() const { return TargetCPU; }
StringRef getTargetFeatureString() const { return TargetFS; }
/// Virtual method implemented by subclasses that returns a pointer to that
/// target's TargetSubtargetInfo-derived member variable.
virtual const TargetSubtargetInfo *getSubtargetImpl(const Function &) const {
return nullptr;
}
virtual TargetLoweringObjectFile *getObjFileLowering() const {
return nullptr;
}
/// This method returns a reference to the specified type of
/// TargetSubtargetInfo. In debug builds, it verifies that the object being
/// returned is of the correct type.
template <typename STC> const STC &getSubtarget(const Function &F) const {
return *static_cast<const STC*>(getSubtargetImpl(F));
}
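// Illustrative sketch: a backend normally queries its own subtarget class
// through this template. "MySubtarget" is a hypothetical placeholder type.
//
//   const MySubtarget &ST = TM.getSubtarget<MySubtarget>(F);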
/// Deprecated in 3.7, will be removed in 3.8. Use createDataLayout() instead.
///
/// This method returns a pointer to the DataLayout for the target. It should
/// be unchanging for every subtarget.
const DataLayout *getDataLayout() const { return &DL; }
/// Create a DataLayout.
const DataLayout createDataLayout() const { return DL; }
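// Illustrative sketch (TM and M are placeholder pointers): front ends
// typically copy the target's layout into the module they are compiling so
// IR-level queries agree with the backend on ABI sizes and alignments.
//
//   M->setDataLayout(TM->createDataLayout());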
/// \brief Reset the target options based on the function's attributes.
// FIXME: Remove TargetOptions that affect per-function code generation
// from TargetMachine.
void resetTargetOptions(const Function &F) const;
/// Return target specific asm information.
const MCAsmInfo *getMCAsmInfo() const { return AsmInfo; }
const MCRegisterInfo *getMCRegisterInfo() const { return MRI; }
const MCInstrInfo *getMCInstrInfo() const { return MII; }
const MCSubtargetInfo *getMCSubtargetInfo() const { return STI; }
/// If intrinsic information is available, return it. If not, return null.
virtual const TargetIntrinsicInfo *getIntrinsicInfo() const {
return nullptr;
}
bool requiresStructuredCFG() const { return RequireStructuredCFG; }
void setRequiresStructuredCFG(bool Value) { RequireStructuredCFG = Value; }
/// Returns the code generation relocation model. The choices are static, PIC,
/// dynamic-no-pic, and target default.
Reloc::Model getRelocationModel() const;
/// Returns the code model. The choices are small, kernel, medium, large, and
/// target default.
CodeModel::Model getCodeModel() const;
/// Returns the TLS model which should be used for the given global variable.
TLSModel::Model getTLSModel(const GlobalValue *GV) const;
/// Returns the optimization level: None, Less, Default, or Aggressive.
CodeGenOpt::Level getOptLevel() const;
/// \brief Overrides the optimization level.
void setOptLevel(CodeGenOpt::Level Level) const;
void setFastISel(bool Enable) { Options.EnableFastISel = Enable; }
bool shouldPrintMachineCode() const { return Options.PrintMachineCode; }
/// Returns the default value of asm verbosity.
///
bool getAsmVerbosityDefault() const {
return Options.MCOptions.AsmVerbose;
}
bool getUniqueSectionNames() const { return Options.UniqueSectionNames; }
/// Return true if data objects should be emitted into their own section,
/// corresponding to -fdata-sections.
bool getDataSections() const {
return Options.DataSections;
}
/// Return true if functions should be emitted into their own section,
/// corresponding to -ffunction-sections.
bool getFunctionSections() const {
return Options.FunctionSections;
}
/// \brief Get a \c TargetIRAnalysis appropriate for the target.
///
/// This is used to construct the new pass manager's target IR analysis pass,
/// set up appropriately for this target machine. Even the old pass manager
/// uses this to answer queries about the IR.
virtual TargetIRAnalysis getTargetIRAnalysis();
/// These enums are meant to be passed into addPassesToEmitFile to indicate
/// what type of file to emit, and returned by it to indicate what type of
/// file could actually be made.
enum CodeGenFileType {
CGFT_AssemblyFile,
CGFT_ObjectFile,
CGFT_Null // Do not emit any output.
};
/// Add passes to the specified pass manager to get the specified file
/// emitted. Typically this will involve several steps of code generation.
/// This method should return true if emission of this file type is not
/// supported, or false on success.
virtual bool addPassesToEmitFile(
PassManagerBase &, raw_pwrite_stream &, CodeGenFileType,
bool /*DisableVerify*/ = true, AnalysisID /*StartBefore*/ = nullptr,
AnalysisID /*StartAfter*/ = nullptr, AnalysisID /*StopAfter*/ = nullptr,
MachineFunctionInitializer * /*MFInitializer*/ = nullptr) {
return true;
}
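// Illustrative sketch of driving emission with the legacy pass manager; the
// module (M) and output stream (OutStream) are placeholders assumed to be set
// up elsewhere.
//
//   legacy::PassManager PM;
//   if (TM->addPassesToEmitFile(PM, OutStream, TargetMachine::CGFT_ObjectFile))
//     errs() << "this target cannot emit a file of this type\n";
//   else
//     PM.run(*M);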
/// Add passes to the specified pass manager to get machine code emitted with
/// the MCJIT. This method returns true if machine code is not supported. It
/// fills the MCContext Ctx pointer which can be used to build custom
/// MCStreamer.
///
virtual bool addPassesToEmitMC(PassManagerBase &, MCContext *&,
raw_pwrite_stream &,
bool /*DisableVerify*/ = true) {
return true;
}
void getNameWithPrefix(SmallVectorImpl<char> &Name, const GlobalValue *GV,
Mangler &Mang, bool MayAlwaysUsePrivate = false) const;
MCSymbol *getSymbol(const GlobalValue *GV, Mangler &Mang) const;
};
/// This class describes a target machine that is implemented with the LLVM
/// target-independent code generator.
///
class LLVMTargetMachine : public TargetMachine {
protected: // Can only create subclasses.
LLVMTargetMachine(const Target &T, StringRef DataLayoutString,
const Triple &TargetTriple, StringRef CPU, StringRef FS,
TargetOptions Options, Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
void initAsmInfo();
public:
/// \brief Get a TargetIRAnalysis implementation for the target.
///
/// This analysis will produce a TTI result which uses the common code
/// generator to answer queries about the IR.
TargetIRAnalysis getTargetIRAnalysis() override;
/// Create a pass configuration object to be used by addPassToEmitX methods
/// for generating a pipeline of CodeGen passes.
virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
/// Add passes to the specified pass manager to get the specified file
/// emitted. Typically this will involve several steps of code generation.
bool addPassesToEmitFile(
PassManagerBase &PM, raw_pwrite_stream &Out, CodeGenFileType FileType,
bool DisableVerify = true, AnalysisID StartBefore = nullptr,
AnalysisID StartAfter = nullptr, AnalysisID StopAfter = nullptr,
MachineFunctionInitializer *MFInitializer = nullptr) override;
/// Add passes to the specified pass manager to get machine code emitted with
/// the MCJIT. This method returns true if machine code is not supported. It
/// fills the MCContext Ctx pointer which can be used to build custom
/// MCStreamer.
bool addPassesToEmitMC(PassManagerBase &PM, MCContext *&Ctx,
raw_pwrite_stream &OS,
bool DisableVerify = true) override;
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Target/TargetInstrInfo.h | //===-- llvm/Target/TargetInstrInfo.h - Instruction Info --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the target machine instruction set to the code generator.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TARGET_TARGETINSTRINFO_H
#define LLVM_TARGET_TARGETINSTRINFO_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
namespace llvm {
class InstrItineraryData;
class LiveVariables;
class MCAsmInfo;
class MachineMemOperand;
class MachineRegisterInfo;
class MDNode;
class MCInst;
struct MCSchedModel;
class MCSymbolRefExpr;
class SDNode;
class ScheduleHazardRecognizer;
class SelectionDAG;
class ScheduleDAG;
class TargetRegisterClass;
class TargetRegisterInfo;
class BranchProbability;
class TargetSubtargetInfo;
class TargetSchedModel;
class DFAPacketizer;
template<class T> class SmallVectorImpl;
//---------------------------------------------------------------------------
///
/// TargetInstrInfo - Interface to description of machine instruction set
///
class TargetInstrInfo : public MCInstrInfo {
TargetInstrInfo(const TargetInstrInfo &) = delete;
void operator=(const TargetInstrInfo &) = delete;
public:
TargetInstrInfo(unsigned CFSetupOpcode = ~0u, unsigned CFDestroyOpcode = ~0u)
: CallFrameSetupOpcode(CFSetupOpcode),
CallFrameDestroyOpcode(CFDestroyOpcode) {
}
virtual ~TargetInstrInfo();
/// Given a machine instruction descriptor, returns the register
/// class constraint for OpNum, or NULL.
const TargetRegisterClass *getRegClass(const MCInstrDesc &TID,
unsigned OpNum,
const TargetRegisterInfo *TRI,
const MachineFunction &MF) const;
/// Return true if the instruction is trivially rematerializable, meaning it
/// has no side effects and requires no operands that aren't always available.
/// This means the only allowed uses are constants and unallocatable physical
/// registers so that the instruction's result is independent of the place
/// in the function.
bool isTriviallyReMaterializable(const MachineInstr *MI,
AliasAnalysis *AA = nullptr) const {
return MI->getOpcode() == TargetOpcode::IMPLICIT_DEF ||
(MI->getDesc().isRematerializable() &&
(isReallyTriviallyReMaterializable(MI, AA) ||
isReallyTriviallyReMaterializableGeneric(MI, AA)));
}
protected:
/// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
/// set, this hook lets the target specify whether the instruction is actually
/// trivially rematerializable, taking into consideration its operands. This
/// predicate must return false if the instruction has any side effects other
/// than producing a value, or if it requires any address registers that are
/// not always available.
/// Requirements must be checked as stated in isTriviallyReMaterializable().
virtual bool isReallyTriviallyReMaterializable(const MachineInstr *MI,
AliasAnalysis *AA) const {
return false;
}
private:
/// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
/// set and the target hook isReallyTriviallyReMaterializable returns false,
/// this function does target-independent tests to determine if the
/// instruction is really trivially rematerializable.
bool isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
AliasAnalysis *AA) const;
public:
/// These methods return the opcode of the frame setup/destroy instructions
/// if they exist (-1 otherwise). Some targets use pseudo instructions in
/// order to abstract away the difference between operating with a frame
/// pointer and operating without, through the use of these two instructions.
///
unsigned getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; }
unsigned getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; }
/// Returns the actual stack pointer adjustment made by an instruction
/// as part of a call sequence. By default, only call frame setup/destroy
/// instructions adjust the stack, but targets may want to override this
/// to enable more fine-grained adjustment, or adjust by a different value.
virtual int getSPAdjust(const MachineInstr *MI) const;
/// Return true if the instruction is a "coalescable" extension instruction.
/// That is, it's like a copy where it's legal for the source to overlap the
/// destination. e.g. X86::MOVSX64rr32. If this returns true, then it's
/// expected the pre-extension value is available as a subreg of the result
/// register. This also returns the sub-register index in SubIdx.
virtual bool isCoalescableExtInstr(const MachineInstr &MI,
unsigned &SrcReg, unsigned &DstReg,
unsigned &SubIdx) const {
return false;
}
/// If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
return 0;
}
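// Illustrative sketch of the usual caller pattern (placeholder names):
//
//   int FI;
//   if (unsigned Reg = TII->isLoadFromStackSlot(MI, FI)) {
//     // MI is a plain reload of Reg from frame index FI.
//   }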
/// Check for post-frame ptr elimination stack locations as well.
/// This uses a heuristic so it isn't reliable for correctness.
virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
int &FrameIndex) const {
return 0;
}
/// If the specified machine instruction has a load from a stack slot,
/// return true along with the FrameIndex of the loaded stack slot and the
/// machine mem operand containing the reference.
/// If not, return false. Unlike isLoadFromStackSlot, this returns true for
/// any instructions that loads from the stack. This is just a hint, as some
/// cases may be missed.
virtual bool hasLoadFromStackSlot(const MachineInstr *MI,
const MachineMemOperand *&MMO,
int &FrameIndex) const;
/// If the specified machine instruction is a direct
/// store to a stack slot, return the virtual or physical register number of
/// the source reg along with the FrameIndex of the stack slot being stored to. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
return 0;
}
/// Check for post-frame ptr elimination stack locations as well.
/// This uses a heuristic, so it isn't reliable for correctness.
virtual unsigned isStoreToStackSlotPostFE(const MachineInstr *MI,
int &FrameIndex) const {
return 0;
}
/// If the specified machine instruction has a store to a stack slot,
/// return true along with the FrameIndex of the stack slot being stored to and the
/// machine mem operand containing the reference.
/// If not, return false. Unlike isStoreToStackSlot,
/// this returns true for any instructions that stores to the
/// stack. This is just a hint, as some cases may be missed.
virtual bool hasStoreToStackSlot(const MachineInstr *MI,
const MachineMemOperand *&MMO,
int &FrameIndex) const;
/// Return true if the specified machine instruction
/// is a copy of one stack slot to another and has no other effect.
/// Provide the identity of the two frame indices.
virtual bool isStackSlotCopy(const MachineInstr *MI, int &DestFrameIndex,
int &SrcFrameIndex) const {
return false;
}
/// Compute the size in bytes and offset within a stack slot of a spilled
/// register or subregister.
///
/// \param [out] Size in bytes of the spilled value.
/// \param [out] Offset in bytes within the stack slot.
/// \returns true if both Size and Offset are successfully computed.
///
/// Not all subregisters have computable spill slots. For example,
/// subregisters may not be byte-sized, and a pair of discontiguous
/// subregisters has no single offset.
///
/// Targets with nontrivial bigendian implementations may need to override
/// this, particularly to support spilled vector registers.
virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx,
unsigned &Size, unsigned &Offset,
const MachineFunction &MF) const;
/// Return true if the instruction is as cheap as a move instruction.
///
/// Targets for different architectures need to override this, and the
/// override can be tuned further for specific micro-architectures.
virtual bool isAsCheapAsAMove(const MachineInstr *MI) const {
return MI->isAsCheapAsAMove();
}
/// Re-issue the specified 'original' instruction at the
/// specific location targeting a new destination register.
/// The register in Orig->getOperand(0).getReg() will be substituted by
/// DestReg:SubIdx. Any existing subreg index is preserved or composed with
/// SubIdx.
virtual void reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SubIdx,
const MachineInstr *Orig,
const TargetRegisterInfo &TRI) const;
/// Create a duplicate of the Orig instruction in MF. This is like
/// MachineFunction::CloneMachineInstr(), but the target may update operands
/// that are required to be unique.
///
/// The instruction must be duplicable as indicated by isNotDuplicable().
virtual MachineInstr *duplicate(MachineInstr *Orig,
MachineFunction &MF) const;
/// This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
/// may be able to convert a two-address instruction into one or more true
/// three-address instructions on demand. This allows the X86 target (for
/// example) to convert ADD and SHL instructions into LEA instructions if they
/// would require register copies due to two-addressness.
///
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the last new instruction.
///
virtual MachineInstr *
convertToThreeAddress(MachineFunction::iterator &MFI,
MachineBasicBlock::iterator &MBBI, LiveVariables *LV) const {
return nullptr;
}
/// If a target has any instructions that are commutable but require
/// converting to different instructions or making non-trivial changes to
/// commute them, this method can be overloaded to do that.
/// The default implementation simply swaps the commutable operands.
/// If NewMI is false, MI is modified in place and returned; otherwise, a
/// new machine instruction is created and returned. Do not call this
/// method for a non-commutable instruction, but there may be some cases
/// where this method fails and returns null.
virtual MachineInstr *commuteInstruction(MachineInstr *MI,
bool NewMI = false) const;
/// If the specified MI is commutable, return the two operand indices that would
/// swap values. Return false if the instruction
/// is not in a form which this routine understands.
virtual bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const;
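// Illustrative sketch: clients usually probe commutability before committing.
// MI and TII are placeholders; the default NewMI=false updates MI in place.
//
//   unsigned OpIdx1, OpIdx2;
//   if (TII->findCommutedOpIndices(MI, OpIdx1, OpIdx2))
//     TII->commuteInstruction(MI);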
/// A pair composed of a register and a sub-register index.
/// Used to give some type checking when modeling Reg:SubReg.
struct RegSubRegPair {
unsigned Reg;
unsigned SubReg;
RegSubRegPair(unsigned Reg = 0, unsigned SubReg = 0)
: Reg(Reg), SubReg(SubReg) {}
};
/// A pair composed of a pair of a register and a sub-register index,
/// and another sub-register index.
/// Used to give some type checking when modeling Reg:SubReg1, SubReg2.
struct RegSubRegPairAndIdx : RegSubRegPair {
unsigned SubIdx;
RegSubRegPairAndIdx(unsigned Reg = 0, unsigned SubReg = 0,
unsigned SubIdx = 0)
: RegSubRegPair(Reg, SubReg), SubIdx(SubIdx) {}
};
/// Build the equivalent inputs of a REG_SEQUENCE for the given \p MI
/// and \p DefIdx.
/// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of
/// the list is modeled as <Reg:SubReg, SubIdx>.
/// E.g., REG_SEQUENCE vreg1:sub1, sub0, vreg2, sub1 would produce
/// two elements:
/// - vreg1:sub1, sub0
/// - vreg2<:0>, sub1
///
/// \returns true if it is possible to build such an input sequence
/// with the pair \p MI, \p DefIdx. False otherwise.
///
/// \pre MI.isRegSequence() or MI.isRegSequenceLike().
///
/// \note The generic implementation does not provide any support for
/// MI.isRegSequenceLike(). In other words, one has to override
/// getRegSequenceLikeInputs for target specific instructions.
bool
getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx,
SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const;
/// Build the equivalent inputs of a EXTRACT_SUBREG for the given \p MI
/// and \p DefIdx.
/// \p [out] InputReg of the equivalent EXTRACT_SUBREG.
/// E.g., EXTRACT_SUBREG vreg1:sub1, sub0, sub1 would produce:
/// - vreg1:sub1, sub0
///
/// \returns true if it is possible to build such an input sequence
/// with the pair \p MI, \p DefIdx. False otherwise.
///
/// \pre MI.isExtractSubreg() or MI.isExtractSubregLike().
///
/// \note The generic implementation does not provide any support for
/// MI.isExtractSubregLike(). In other words, one has to override
/// getExtractSubregLikeInputs for target specific instructions.
bool
getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx,
RegSubRegPairAndIdx &InputReg) const;
/// Build the equivalent inputs of a INSERT_SUBREG for the given \p MI
/// and \p DefIdx.
/// \p [out] BaseReg and \p [out] InsertedReg contain
/// the equivalent inputs of INSERT_SUBREG.
/// E.g., INSERT_SUBREG vreg0:sub0, vreg1:sub1, sub3 would produce:
/// - BaseReg: vreg0:sub0
/// - InsertedReg: vreg1:sub1, sub3
///
/// \returns true if it is possible to build such an input sequence
/// with the pair \p MI, \p DefIdx. False otherwise.
///
/// \pre MI.isInsertSubreg() or MI.isInsertSubregLike().
///
/// \note The generic implementation does not provide any support for
/// MI.isInsertSubregLike(). In other words, one has to override
/// getInsertSubregLikeInputs for target specific instructions.
bool
getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx,
RegSubRegPair &BaseReg,
RegSubRegPairAndIdx &InsertedReg) const;
/// Return true if two machine instructions would produce identical values.
/// By default, this is only true when the two instructions
/// are deemed identical except for defs. If this function is called when the
/// IR is still in SSA form, the caller can pass the MachineRegisterInfo for
/// aggressive checks.
virtual bool produceSameValue(const MachineInstr *MI0,
const MachineInstr *MI1,
const MachineRegisterInfo *MRI = nullptr) const;
/// Analyze the branching code at the end of MBB, returning
/// true if it cannot be understood (e.g. it's a switch dispatch or isn't
/// implemented for a target). Upon success, this returns false and returns
/// with the following information in various cases:
///
/// 1. If this block ends with no branches (it just falls through to its succ)
/// just return false, leaving TBB/FBB null.
/// 2. If this block ends with only an unconditional branch, it sets TBB to be
/// the destination block.
/// 3. If this block ends with a conditional branch and it falls through to a
/// successor block, it sets TBB to be the branch destination block and a
/// list of operands that evaluate the condition. These operands can be
/// passed to other TargetInstrInfo methods to create new branches.
/// 4. If this block ends with a conditional branch followed by an
/// unconditional branch, it returns the 'true' destination in TBB, the
/// 'false' destination in FBB, and a list of operands that evaluate the
/// condition. These operands can be passed to other TargetInstrInfo
/// methods to create new branches.
///
/// Note that RemoveBranch and InsertBranch must be implemented to support
/// cases where this method returns success.
///
/// If AllowModify is true, then this routine is allowed to modify the basic
/// block (e.g. delete instructions after the unconditional branch).
///
virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify = false) const {
return true;
}
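// Illustrative sketch of the common client pattern (placeholder names):
//
//   MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
//   SmallVector<MachineOperand, 4> Cond;
//   if (!TII->AnalyzeBranch(MBB, TBB, FBB, Cond)) {
//     // Understood: TBB/FBB/Cond follow cases 1-4 above and may be passed
//     // back to RemoveBranch / InsertBranch.
//   }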
/// Represents a predicate at the MachineFunction level. The control flow a
/// MachineBranchPredicate represents is:
///
/// Reg <def>= LHS `Predicate` RHS == ConditionDef
/// if Reg then goto TrueDest else goto FalseDest
///
struct MachineBranchPredicate {
enum ComparePredicate {
PRED_EQ, // True if two values are equal
PRED_NE, // True if two values are not equal
PRED_INVALID // Sentinel value
};
ComparePredicate Predicate;
MachineOperand LHS;
MachineOperand RHS;
MachineBasicBlock *TrueDest;
MachineBasicBlock *FalseDest;
MachineInstr *ConditionDef;
/// SingleUseCondition is true if ConditionDef is dead except for the
/// branch(es) at the end of the basic block.
///
bool SingleUseCondition;
explicit MachineBranchPredicate()
: Predicate(PRED_INVALID), LHS(MachineOperand::CreateImm(0)),
RHS(MachineOperand::CreateImm(0)), TrueDest(nullptr),
FalseDest(nullptr), ConditionDef(nullptr), SingleUseCondition(false) {
}
};
/// Analyze the branching code at the end of MBB and parse it into the
/// MachineBranchPredicate structure if possible. Returns false on success
/// and true on failure.
///
/// If AllowModify is true, then this routine is allowed to modify the basic
/// block (e.g. delete instructions after the unconditional branch).
///
virtual bool AnalyzeBranchPredicate(MachineBasicBlock &MBB,
MachineBranchPredicate &MBP,
bool AllowModify = false) const {
return true;
}
/// Remove the branching code at the end of the specific MBB.
/// This is only invoked in cases where AnalyzeBranch returns success. It
/// returns the number of instructions that were removed.
virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const {
llvm_unreachable("Target didn't implement TargetInstrInfo::RemoveBranch!");
}
/// Insert branch code into the end of the specified MachineBasicBlock.
/// The operands to this method are the same as those
/// returned by AnalyzeBranch. This is only invoked in cases where
/// AnalyzeBranch returns success. It returns the number of instructions
/// inserted.
///
/// It is also invoked by tail merging to add unconditional branches in
/// cases where AnalyzeBranch doesn't apply because there was no original
/// branch to analyze. At least this much must be implemented, else tail
/// merging needs to be disabled.
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
ArrayRef<MachineOperand> Cond,
DebugLoc DL) const {
llvm_unreachable("Target didn't implement TargetInstrInfo::InsertBranch!");
}
/// Delete the instruction OldInst and everything after it, replacing it with
/// an unconditional branch to NewDest. This is used by the tail merging pass.
virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
MachineBasicBlock *NewDest) const;
/// Get an instruction that performs an unconditional branch to the given
/// symbol.
virtual void
getUnconditionalBranch(MCInst &MI,
const MCSymbolRefExpr *BranchTarget) const {
llvm_unreachable("Target didn't implement "
"TargetInstrInfo::getUnconditionalBranch!");
}
/// Get a machine trap instruction.
virtual void getTrap(MCInst &MI) const {
llvm_unreachable("Target didn't implement TargetInstrInfo::getTrap!");
}
/// Get a number of bytes that suffices to hold
/// either the instruction returned by getUnconditionalBranch or the
/// instruction returned by getTrap. This only makes sense because
/// getUnconditionalBranch returns a single, specific instruction. This
/// information is needed by the jumptable construction code, since it must
/// decide how many bytes to use for a jumptable entry so it can generate the
/// right mask.
///
/// Note that if the jumptable instruction requires alignment, then that
/// alignment should be factored into this required bound so that the
/// resulting bound gives the right alignment for the instruction.
virtual unsigned getJumpInstrTableEntryBound() const {
// This method gets called by LLVMTargetMachine always, so it can't fail
// just because there happens to be no implementation for this target.
// Any code that tries to use a jumptable annotation without defining
// getUnconditionalBranch on the appropriate Target will fail anyway, and
// the value returned here won't matter in that case.
return 0;
}
/// Return true if it's legal to split the given basic
/// block at the specified instruction (i.e. instruction would be the start
/// of a new basic block).
virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI) const {
return true;
}
/// Return true if it's profitable to predicate
/// instructions with accumulated instruction latency of "NumCycles"
/// of the specified basic block, where the probability of the instructions
/// being executed is given by Probability, and Confidence is a measure
/// of our confidence that it will be properly predicted.
virtual
bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
unsigned ExtraPredCycles,
const BranchProbability &Probability) const {
return false;
}
/// Second variant of isProfitableToIfCvt. This one
/// checks for the case where two basic blocks from true and false path
/// of an if-then-else (diamond) are predicated on mutually exclusive
/// predicates, where the probability of the true path being taken is given
/// by Probability, and Confidence is a measure of our confidence that it
/// will be properly predicted.
virtual bool
isProfitableToIfCvt(MachineBasicBlock &TMBB,
unsigned NumTCycles, unsigned ExtraTCycles,
MachineBasicBlock &FMBB,
unsigned NumFCycles, unsigned ExtraFCycles,
const BranchProbability &Probability) const {
return false;
}
/// Return true if it's profitable for if-converter to duplicate instructions
/// of specified accumulated instruction latencies in the specified MBB to
/// enable if-conversion.
/// The probability of the instructions being executed is given by
/// Probability, and Confidence is a measure of our confidence that it
/// will be properly predicted.
virtual bool
isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
const BranchProbability &Probability) const {
return false;
}
/// Return true if it's profitable to unpredicate
/// one side of a 'diamond', i.e. two sides of if-else predicated on mutually
/// exclusive predicates.
/// e.g.
/// subeq r0, r1, #1
/// addne r0, r1, #1
/// =>
/// sub r0, r1, #1
/// addne r0, r1, #1
///
/// This may be profitable if conditional instructions are always executed.
virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
MachineBasicBlock &FMBB) const {
return false;
}
/// Return true if it is possible to insert a select
/// instruction that chooses between TrueReg and FalseReg based on the
/// condition code in Cond.
///
/// When successful, also return the latency in cycles from TrueReg,
/// FalseReg, and Cond to the destination register. In most cases, a select
/// instruction will be 1 cycle, so CondCycles = TrueCycles = FalseCycles = 1
///
/// Some x86 implementations have 2-cycle cmov instructions.
///
/// @param MBB Block where select instruction would be inserted.
/// @param Cond Condition returned by AnalyzeBranch.
/// @param TrueReg Virtual register to select when Cond is true.
/// @param FalseReg Virtual register to select when Cond is false.
/// @param CondCycles Latency from Cond+Branch to select output.
/// @param TrueCycles Latency from TrueReg to select output.
/// @param FalseCycles Latency from FalseReg to select output.
virtual bool canInsertSelect(const MachineBasicBlock &MBB,
ArrayRef<MachineOperand> Cond,
unsigned TrueReg, unsigned FalseReg,
int &CondCycles,
int &TrueCycles, int &FalseCycles) const {
return false;
}
/// Insert a select instruction into MBB before I that will copy TrueReg to
/// DstReg when Cond is true, and FalseReg to DstReg when Cond is false.
///
/// This function can only be called after canInsertSelect() returned true.
/// The condition in Cond comes from AnalyzeBranch, and it can be assumed
/// that the same flags or registers required by Cond are available at the
/// insertion point.
///
/// @param MBB Block where select instruction should be inserted.
/// @param I Insertion point.
/// @param DL Source location for debugging.
/// @param DstReg Virtual register to be defined by select instruction.
/// @param Cond Condition as computed by AnalyzeBranch.
/// @param TrueReg Virtual register to copy when Cond is true.
/// @param FalseReg Virtual register to copy when Cond is false.
virtual void insertSelect(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DstReg, ArrayRef<MachineOperand> Cond,
unsigned TrueReg, unsigned FalseReg) const {
llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!");
}
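// Illustrative sketch pairing the two hooks; MBB, Cond, register numbers, the
// insertion point and DebugLoc are placeholders from the surrounding pass.
//
//   int CondCycles, TrueCycles, FalseCycles;
//   if (TII->canInsertSelect(MBB, Cond, TrueReg, FalseReg,
//                            CondCycles, TrueCycles, FalseCycles))
//     TII->insertSelect(MBB, InsertPt, DL, DstReg, Cond, TrueReg, FalseReg);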
/// Analyze the given select instruction, returning true if
/// it cannot be understood. It is assumed that MI->isSelect() is true.
///
/// When successful, return the controlling condition and the operands that
/// determine the true and false result values.
///
/// Result = SELECT Cond, TrueOp, FalseOp
///
/// Some targets can optimize select instructions, for example by predicating
/// the instruction defining one of the operands. Such targets should set
/// Optimizable.
///
/// @param MI Select instruction to analyze.
/// @param Cond Condition controlling the select.
/// @param TrueOp Operand number of the value selected when Cond is true.
/// @param FalseOp Operand number of the value selected when Cond is false.
/// @param Optimizable Returned as true if MI is optimizable.
/// @returns False on success.
virtual bool analyzeSelect(const MachineInstr *MI,
SmallVectorImpl<MachineOperand> &Cond,
unsigned &TrueOp, unsigned &FalseOp,
bool &Optimizable) const {
assert(MI && MI->getDesc().isSelect() && "MI must be a select instruction");
return true;
}
/// Given a select instruction that was understood by
/// analyzeSelect and returned Optimizable = true, attempt to optimize MI by
/// merging it with one of its operands. Returns NULL on failure.
///
/// When successful, returns the new select instruction. The client is
/// responsible for deleting MI.
///
/// If both sides of the select can be optimized, PreferFalse is used to pick
/// a side.
///
/// @param MI Optimizable select instruction.
/// @param NewMIs Set that records all MIs in the basic block up to \p
/// MI. Has to be updated with any newly created MI or deleted ones.
/// @param PreferFalse Try to optimize FalseOp instead of TrueOp.
/// @returns Optimized instruction or NULL.
virtual MachineInstr *optimizeSelect(MachineInstr *MI,
SmallPtrSetImpl<MachineInstr *> &NewMIs,
bool PreferFalse = false) const {
// This function must be implemented if Optimizable is ever set.
llvm_unreachable("Target must implement TargetInstrInfo::optimizeSelect!");
}
/// Emit instructions to copy a pair of physical registers.
///
/// This function should support copies within any legal register class as
/// well as any cross-class copies created during instruction selection.
///
/// The source and destination registers may overlap, which may require a
/// careful implementation when multiple copy instructions are required for
/// large registers. See for example the ARM target.
virtual void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const {
llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!");
}
/// Store the specified register of the given register class to the specified
/// stack frame index. The store instruction is to be added to the given
/// machine basic block before the specified machine instruction. If isKill
/// is true, the register operand is the last use and must be marked kill.
virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned SrcReg, bool isKill, int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
llvm_unreachable("Target didn't implement "
"TargetInstrInfo::storeRegToStackSlot!");
}
/// Load the specified register of the given register class from the specified
/// stack frame index. The load instruction is to be added to the given
/// machine basic block before the specified machine instruction.
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
llvm_unreachable("Target didn't implement "
"TargetInstrInfo::loadRegFromStackSlot!");
}
/// This function is called for all pseudo instructions
/// that remain after register allocation. Many pseudo instructions are
/// created to help register allocation. This is the place to convert them
/// into real instructions. The target can edit MI in place, or it can insert
/// new instructions and erase MI. The function should return true if
/// anything was changed.
virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
return false;
}
/// Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned.
/// The new instruction is inserted before MI, and the client is responsible
/// for removing the old instruction.
MachineInstr *foldMemoryOperand(MachineBasicBlock::iterator MI,
ArrayRef<unsigned> Ops, int FrameIndex) const;
/// Same as the previous version except it allows folding of any load and
/// store from / to any address, not just from a specific stack slot.
MachineInstr *foldMemoryOperand(MachineBasicBlock::iterator MI,
ArrayRef<unsigned> Ops,
MachineInstr *LoadMI) const;
/// Return true when there is potentially a faster code sequence
/// for an instruction chain ending in \p Root. All potential patterns are
/// returned in the \p Pattern vector. Pattern should be sorted in priority
/// order since the pattern evaluator stops checking as soon as it finds a
/// faster sequence.
/// \param Root - Instruction that could be combined with one of its operands
/// \param Pattern - Vector of possible combination patterns
virtual bool getMachineCombinerPatterns(
MachineInstr &Root,
SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Pattern) const {
return false;
}
/// When getMachineCombinerPatterns() finds patterns, this function generates
/// the instructions that could replace the original code sequence. The client
/// has to decide whether the actual replacement is beneficial or not.
/// \param Root - Instruction that could be combined with one of its operands
/// \param Pattern - Combination pattern for Root
/// \param InsInstrs - Vector of new instructions that implement P
/// \param DelInstrs - Old instructions, including Root, that could be
/// replaced by InsInstr
/// \param InstrIdxForVirtReg - map of virtual register to instruction in
/// InsInstr that defines it
virtual void genAlternativeCodeSequence(
MachineInstr &Root, MachineCombinerPattern::MC_PATTERN Pattern,
SmallVectorImpl<MachineInstr *> &InsInstrs,
SmallVectorImpl<MachineInstr *> &DelInstrs,
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
return;
}
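// Illustrative sketch of how a combiner-style pass strings these hooks
// together; Root, InsInstrs, DelInstrs and InstrIdxForVirtReg are placeholders
// owned by the calling pass.
//
//   SmallVector<MachineCombinerPattern::MC_PATTERN, 16> Patterns;
//   if (TII->getMachineCombinerPatterns(Root, Patterns))
//     for (auto P : Patterns)
//       TII->genAlternativeCodeSequence(Root, P, InsInstrs, DelInstrs,
//                                       InstrIdxForVirtReg);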
/// Return true when a target supports MachineCombiner.
virtual bool useMachineCombiner() const { return false; }
protected:
/// Target-dependent implementation for foldMemoryOperand.
/// Target-independent code in foldMemoryOperand will
/// take care of adding a MachineMemOperand to the newly created instruction.
/// The instruction and any auxiliary instructions necessary will be inserted
/// at InsertPt.
virtual MachineInstr *foldMemoryOperandImpl(
MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
return nullptr;
}
/// Target-dependent implementation for foldMemoryOperand.
/// Target-independent code in foldMemoryOperand will
/// take care of adding a MachineMemOperand to the newly created instruction.
/// The instruction and any auxiliary instructions necessary will be inserted
/// at InsertPt.
virtual MachineInstr *foldMemoryOperandImpl(
MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
return nullptr;
}
/// \brief Target-dependent implementation of getRegSequenceInputs.
///
/// \returns true if it is possible to build the equivalent
/// REG_SEQUENCE inputs with the pair \p MI, \p DefIdx. False otherwise.
///
/// \pre MI.isRegSequenceLike().
///
/// \see TargetInstrInfo::getRegSequenceInputs.
virtual bool getRegSequenceLikeInputs(
const MachineInstr &MI, unsigned DefIdx,
SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
return false;
}
/// \brief Target-dependent implementation of getExtractSubregInputs.
///
/// \returns true if it is possible to build the equivalent
/// EXTRACT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
///
/// \pre MI.isExtractSubregLike().
///
/// \see TargetInstrInfo::getExtractSubregInputs.
virtual bool getExtractSubregLikeInputs(
const MachineInstr &MI, unsigned DefIdx,
RegSubRegPairAndIdx &InputReg) const {
return false;
}
/// \brief Target-dependent implementation of getInsertSubregInputs.
///
/// \returns true if it is possible to build the equivalent
/// INSERT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
///
/// \pre MI.isInsertSubregLike().
///
/// \see TargetInstrInfo::getInsertSubregInputs.
virtual bool
getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx,
RegSubRegPair &BaseReg,
RegSubRegPairAndIdx &InsertedReg) const {
return false;
}
public:
/// Returns true for the specified load / store if folding is possible.
virtual bool canFoldMemoryOperand(const MachineInstr *MI,
ArrayRef<unsigned> Ops) const;
/// unfoldMemoryOperand - Separate a single instruction which folded a load or
/// a store or a load and a store into two or more instructions. If this is
/// possible, returns true as well as the new instructions by reference.
virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
SmallVectorImpl<MachineInstr*> &NewMIs) const{
return false;
}
virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
SmallVectorImpl<SDNode*> &NewNodes) const {
return false;
}
/// Returns the opcode of the would be new
/// instruction after load / store are unfolded from an instruction of the
/// specified opcode. It returns zero if the specified unfolding is not
/// possible. If LoadRegIndex is non-null, it is filled in with the operand
/// index of the operand which will hold the register holding the loaded
/// value.
virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
bool UnfoldLoad, bool UnfoldStore,
unsigned *LoadRegIndex = nullptr) const {
return 0;
}
/// This is used by the pre-regalloc scheduler to determine if two loads are
/// loading from the same base address. It should only return true if the base
/// pointers are the same and the only difference between the two addresses
/// is the offset. It also returns the offsets by reference.
virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
int64_t &Offset1, int64_t &Offset2) const {
return false;
}
/// This is used by the pre-regalloc scheduler to determine (in conjunction
/// with areLoadsFromSameBasePtr) if two loads should be scheduled together.
/// On some targets if two loads are loading from
/// addresses in the same cache line, it's better if they are scheduled
/// together. This function takes two integers that represent the load offsets
/// from the common base address. It returns true if it decides it's desirable
/// to schedule the two loads together. "NumLoads" is the number of loads that
/// have already been scheduled after Load1.
virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
int64_t Offset1, int64_t Offset2,
unsigned NumLoads) const {
return false;
}
/// Get the base register and byte offset of an instruction that reads/writes
/// memory.
virtual bool getMemOpBaseRegImmOfs(MachineInstr *MemOp, unsigned &BaseReg,
unsigned &Offset,
const TargetRegisterInfo *TRI) const {
return false;
}
virtual bool enableClusterLoads() const { return false; }
virtual bool shouldClusterLoads(MachineInstr *FirstLdSt,
MachineInstr *SecondLdSt,
unsigned NumLoads) const {
return false;
}
/// Return true if this target can fuse the given instructions when they are
/// scheduled adjacent to each other.
virtual bool shouldScheduleAdjacent(MachineInstr* First,
MachineInstr *Second) const {
return false;
}
/// Reverses the branch condition of the specified condition list,
/// returning false on success and true if it cannot be reversed.
virtual
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
return true;
}
/// Insert a noop into the instruction stream at the specified point.
virtual void insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const;
/// Return the noop instruction to use for a noop.
virtual void getNoopForMachoTarget(MCInst &NopInst) const;
/// Returns true if the instruction is already predicated.
virtual bool isPredicated(const MachineInstr *MI) const {
return false;
}
/// Returns true if the instruction is a
/// terminator instruction that has not been predicated.
virtual bool isUnpredicatedTerminator(const MachineInstr *MI) const;
/// Convert the instruction into a predicated instruction.
/// It returns true if the operation was successful.
virtual
bool PredicateInstruction(MachineInstr *MI,
ArrayRef<MachineOperand> Pred) const;
/// Returns true if the first specified predicate
/// subsumes the second, e.g. GE subsumes GT.
virtual
bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
ArrayRef<MachineOperand> Pred2) const {
return false;
}
/// If the specified instruction defines any predicate
/// or condition code register(s) used for predication, returns true as well
/// as the definition predicate(s) by reference.
virtual bool DefinesPredicate(MachineInstr *MI,
std::vector<MachineOperand> &Pred) const {
return false;
}
/// Return true if the specified instruction can be predicated.
/// By default, this returns true for every instruction with a
/// PredicateOperand.
virtual bool isPredicable(MachineInstr *MI) const {
return MI->getDesc().isPredicable();
}
/// Return true if it's safe to move a machine
/// instruction that defines the specified register class.
virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
return true;
}
/// Test if the given instruction should be considered a scheduling boundary.
/// This primarily includes labels and terminators.
virtual bool isSchedulingBoundary(const MachineInstr *MI,
const MachineBasicBlock *MBB,
const MachineFunction &MF) const;
/// Measure the specified inline asm to determine an approximation of its
/// length.
virtual unsigned getInlineAsmLength(const char *Str,
const MCAsmInfo &MAI) const;
/// Allocate and return a hazard recognizer to use for this target when
/// scheduling the machine instructions before register allocation.
virtual ScheduleHazardRecognizer*
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
const ScheduleDAG *DAG) const;
/// Allocate and return a hazard recognizer to use for this target when
/// scheduling the machine instructions before register allocation.
virtual ScheduleHazardRecognizer*
CreateTargetMIHazardRecognizer(const InstrItineraryData*,
const ScheduleDAG *DAG) const;
/// Allocate and return a hazard recognizer to use for this target when
/// scheduling the machine instructions after register allocation.
virtual ScheduleHazardRecognizer*
CreateTargetPostRAHazardRecognizer(const InstrItineraryData*,
const ScheduleDAG *DAG) const;
/// Provide a global flag for disabling the PreRA hazard recognizer that
/// targets may choose to honor.
bool usePreRAHazardRecognizer() const;
/// For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2 (if the instruction has two register operands), and
/// the value it compares against in Value. Return true if the comparison instruction
/// can be analyzed.
virtual bool analyzeCompare(const MachineInstr *MI,
unsigned &SrcReg, unsigned &SrcReg2,
int &Mask, int &Value) const {
return false;
}
/// See if the comparison instruction can be converted
/// into something more efficient. E.g., on ARM most instructions can set the
/// flags register, obviating the need for a separate CMP.
virtual bool optimizeCompareInstr(MachineInstr *CmpInstr,
unsigned SrcReg, unsigned SrcReg2,
int Mask, int Value,
const MachineRegisterInfo *MRI) const {
return false;
}
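// Illustrative sketch: peephole optimizers typically pair these two hooks.
// CmpMI and MRI are placeholders from the calling pass.
//
//   unsigned SrcReg, SrcReg2;
//   int Mask, Value;
//   if (TII->analyzeCompare(CmpMI, SrcReg, SrcReg2, Mask, Value))
//     TII->optimizeCompareInstr(CmpMI, SrcReg, SrcReg2, Mask, Value, MRI);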
virtual bool optimizeCondBranch(MachineInstr *MI) const { return false; }
/// Try to remove the load by folding it to a register operand at the use.
/// We fold the load instructions if and only if the
/// def and use are in the same BB. We only look at one load and see
/// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register
/// defined by the load we are trying to fold. DefMI returns the machine
/// instruction that defines FoldAsLoadDefReg, and the function returns
/// the machine instruction generated due to folding.
virtual MachineInstr* optimizeLoadInstr(MachineInstr *MI,
const MachineRegisterInfo *MRI,
unsigned &FoldAsLoadDefReg,
MachineInstr *&DefMI) const {
return nullptr;
}
/// 'Reg' is known to be defined by a move immediate instruction,
/// try to fold the immediate into the use instruction.
/// If MRI->hasOneNonDBGUse(Reg) is true, and this function returns true,
/// then the caller may assume that DefMI has been erased from its parent
/// block. The caller may assume that it will not be erased by this
/// function otherwise.
virtual bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
unsigned Reg, MachineRegisterInfo *MRI) const {
return false;
}
/// Return the number of micro-operations the given machine
/// instruction will be decoded to on the target CPU. The itinerary's
/// IssueWidth is the number of microops that can be dispatched each
/// cycle. An instruction with zero microops takes no dispatch resources.
virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
const MachineInstr *MI) const;
/// Return true for pseudo instructions that don't consume any
/// machine resources in their current form. These are common cases that the
/// scheduler should consider free, rather than conservatively handling them
/// as instructions with no itinerary.
bool isZeroCost(unsigned Opcode) const {
return Opcode <= TargetOpcode::COPY;
}
virtual int getOperandLatency(const InstrItineraryData *ItinData,
SDNode *DefNode, unsigned DefIdx,
SDNode *UseNode, unsigned UseIdx) const;
/// Compute and return the use operand latency of a given pair of def and use.
/// In most cases, the static scheduling itinerary was enough to determine the
/// operand latency. But it may not be possible for instructions with variable
/// number of defs / uses.
///
/// This is a raw interface to the itinerary that may be directly overridden
/// by a target. Use computeOperandLatency to get the best estimate of
/// latency.
virtual int getOperandLatency(const InstrItineraryData *ItinData,
const MachineInstr *DefMI, unsigned DefIdx,
const MachineInstr *UseMI,
unsigned UseIdx) const;
/// Compute and return the latency of the given data
/// dependent def and use when the operand indices are already known.
unsigned computeOperandLatency(const InstrItineraryData *ItinData,
const MachineInstr *DefMI, unsigned DefIdx,
const MachineInstr *UseMI, unsigned UseIdx)
const;
/// Compute the instruction latency of a given instruction.
/// If the instruction has higher cost when predicated, it's returned via
/// PredCost.
virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
unsigned *PredCost = nullptr) const;
virtual unsigned getPredicationCost(const MachineInstr *MI) const;
virtual int getInstrLatency(const InstrItineraryData *ItinData,
SDNode *Node) const;
/// Return the default expected latency for a def based on its opcode.
unsigned defaultDefLatency(const MCSchedModel &SchedModel,
const MachineInstr *DefMI) const;
int computeDefOperandLatency(const InstrItineraryData *ItinData,
const MachineInstr *DefMI) const;
/// Return true if this opcode has high latency to its result.
virtual bool isHighLatencyDef(int opc) const { return false; }
/// Compute operand latency between a def of 'Reg'
/// and a use in the current loop. Return true if the target considered
/// it 'high'. This is used by optimization passes such as machine LICM to
/// determine whether it makes sense to hoist an instruction out even in a
/// high register pressure situation.
virtual
bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
const MachineRegisterInfo *MRI,
const MachineInstr *DefMI, unsigned DefIdx,
const MachineInstr *UseMI, unsigned UseIdx) const {
return false;
}
/// Compute operand latency of a def of 'Reg'. Return true
/// if the target considered it 'low'.
virtual
bool hasLowDefLatency(const TargetSchedModel &SchedModel,
const MachineInstr *DefMI, unsigned DefIdx) const;
/// Perform target-specific instruction verification.
virtual
bool verifyInstruction(const MachineInstr *MI, StringRef &ErrInfo) const {
return true;
}
/// Return the current execution domain and bit mask of
/// possible domains for instruction.
///
/// Some micro-architectures have multiple execution domains, and multiple
/// opcodes that perform the same operation in different domains. For
/// example, the x86 architecture provides the por, orps, and orpd
/// instructions that all do the same thing. There is a latency penalty if a
/// register is written in one domain and read in another.
///
/// This function returns a pair (domain, mask) containing the execution
/// domain of MI, and a bit mask of possible domains. The setExecutionDomain
/// function can be used to change the opcode to one of the domains in the
/// bit mask. Instructions whose execution domain can't be changed should
/// return a 0 mask.
///
/// The execution domain numbers don't have any special meaning except domain
/// 0 is used for instructions that are not associated with any interesting
/// execution domain.
///
virtual std::pair<uint16_t, uint16_t>
getExecutionDomain(const MachineInstr *MI) const {
return std::make_pair(0, 0);
}
/// Change the opcode of MI to execute in Domain.
///
/// The bit (1 << Domain) must be set in the mask returned from
/// getExecutionDomain(MI).
virtual void setExecutionDomain(MachineInstr *MI, unsigned Domain) const {}
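// Illustrative sketch (placeholder names): check the returned mask before
// asking for a domain switch.
//
//   std::pair<uint16_t, uint16_t> Dom = TII->getExecutionDomain(MI);
//   if (Dom.second & (1u << WantedDomain))
//     TII->setExecutionDomain(MI, WantedDomain);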
/// Returns the preferred minimum clearance
/// before an instruction with an unwanted partial register update.
///
/// Some instructions only write part of a register, and implicitly need to
/// read the other parts of the register. This may cause unwanted stalls
/// preventing otherwise unrelated instructions from executing in parallel in
/// an out-of-order CPU.
///
/// For example, the x86 instruction cvtsi2ss writes its result to bits
/// [31:0] of the destination xmm register. Bits [127:32] are unaffected, so
/// the instruction needs to wait for the old value of the register to become
/// available:
///
/// addps %xmm1, %xmm0
/// movaps %xmm0, (%rax)
/// cvtsi2ss %rbx, %xmm0
///
/// In the code above, the cvtsi2ss instruction needs to wait for the addps
/// instruction before it can issue, even though the high bits of %xmm0
/// probably aren't needed.
///
/// This hook returns the preferred clearance before MI, measured in
/// instructions. Other defs of MI's operand OpNum are avoided in the last N
/// instructions before MI. It should only return a positive value for
/// unwanted dependencies. If the old bits of the defined register have
/// useful values, or if MI is determined to otherwise read the dependency,
/// the hook should return 0.
///
/// The unwanted dependency may be handled by:
///
/// 1. Allocating the same register for an MI def and use. That makes the
/// unwanted dependency identical to a required dependency.
///
/// 2. Allocating a register for the def that has no defs in the previous N
/// instructions.
///
/// 3. Calling breakPartialRegDependency() with the same arguments. This
/// allows the target to insert a dependency breaking instruction.
///
virtual unsigned
getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
const TargetRegisterInfo *TRI) const {
// The default implementation returns 0 for no partial register dependency.
return 0;
}
/// \brief Return the minimum clearance before an instruction that reads an
/// unused register.
///
/// For example, AVX instructions may copy part of a register operand into
/// the unused high bits of the destination register.
///
/// vcvtsi2sdq %rax, %xmm0<undef>, %xmm14
///
/// In the code above, vcvtsi2sdq copies %xmm0[127:64] into %xmm14 creating a
/// false dependence on any previous write to %xmm0.
///
/// This hook works similarly to getPartialRegUpdateClearance, except that it
/// does not take an operand index. Instead, it sets \p OpNum to the index of
/// the unused register.
virtual unsigned getUndefRegClearance(const MachineInstr *MI, unsigned &OpNum,
const TargetRegisterInfo *TRI) const {
// The default implementation returns 0 for no undef register dependency.
return 0;
}
/// Insert a dependency-breaking instruction
/// before MI to eliminate an unwanted dependency on OpNum.
///
/// If it wasn't possible to avoid a def in the last N instructions before MI
/// (see getPartialRegUpdateClearance), this hook will be called to break the
/// unwanted dependency.
///
/// On x86, an xorps instruction can be used as a dependency breaker:
///
/// addps %xmm1, %xmm0
/// movaps %xmm0, (%rax)
/// xorps %xmm0, %xmm0
/// cvtsi2ss %rbx, %xmm0
///
/// An <imp-kill> operand should be added to MI if an instruction was
/// inserted. This ties the instructions together in the post-ra scheduler.
///
virtual void
breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
const TargetRegisterInfo *TRI) const {}
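// Sketch of how a dependency-breaking pass might tie the two hooks together,
// assuming TII, TRI, a MachineBasicBlock::iterator MI, an operand index
// OpNum, and a Clearance value computed by the pass's own bookkeeping:
//
//   unsigned Pref = TII.getPartialRegUpdateClearance(&*MI, OpNum, &TRI);
//   if (Pref && Pref > Clearance)
//     TII.breakPartialRegDependency(MI, OpNum, &TRI);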
/// Create machine specific model for scheduling.
virtual DFAPacketizer *
CreateTargetScheduleState(const TargetSubtargetInfo &) const {
return nullptr;
}
/// Sometimes, it is possible for the target
/// to tell, even without aliasing information, that two MIs access different
/// memory addresses. This function returns true if two MIs access different
/// memory addresses and false otherwise.
virtual bool
areMemAccessesTriviallyDisjoint(MachineInstr *MIa, MachineInstr *MIb,
AliasAnalysis *AA = nullptr) const {
assert(MIa && (MIa->mayLoad() || MIa->mayStore()) &&
"MIa must load from or modify a memory location");
assert(MIb && (MIb->mayLoad() || MIb->mayStore()) &&
"MIb must load from or modify a memory location");
return false;
}
/// \brief Return the value to use for the MachineCSE's LookAheadLimit,
/// which is a heuristic used for CSE'ing phys reg defs.
virtual unsigned getMachineCSELookAheadLimit() const {
// The default lookahead is small to prevent unprofitable quadratic
// behavior.
return 5;
}
private:
unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode;
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Target/TargetCallingConv.h | //===-- llvm/Target/TargetCallingConv.h - Calling Convention ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines types for working with calling-convention information.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TARGET_TARGETCALLINGCONV_H
#define LLVM_TARGET_TARGETCALLINGCONV_H
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/MathExtras.h"
#include <string>
#include <limits.h>
namespace llvm {
namespace ISD {
struct ArgFlagsTy {
private:
static const uint64_t NoFlagSet = 0ULL;
static const uint64_t ZExt = 1ULL<<0; ///< Zero extended
static const uint64_t ZExtOffs = 0;
static const uint64_t SExt = 1ULL<<1; ///< Sign extended
static const uint64_t SExtOffs = 1;
static const uint64_t InReg = 1ULL<<2; ///< Passed in register
static const uint64_t InRegOffs = 2;
static const uint64_t SRet = 1ULL<<3; ///< Hidden struct-ret ptr
static const uint64_t SRetOffs = 3;
static const uint64_t ByVal = 1ULL<<4; ///< Struct passed by value
static const uint64_t ByValOffs = 4;
static const uint64_t Nest = 1ULL<<5; ///< Nested fn static chain
static const uint64_t NestOffs = 5;
static const uint64_t Returned = 1ULL<<6; ///< Always returned
static const uint64_t ReturnedOffs = 6;
static const uint64_t ByValAlign = 0xFULL<<7; ///< Struct alignment
static const uint64_t ByValAlignOffs = 7;
static const uint64_t Split = 1ULL<<11;
static const uint64_t SplitOffs = 11;
static const uint64_t InAlloca = 1ULL<<12; ///< Passed with inalloca
static const uint64_t InAllocaOffs = 12;
static const uint64_t OrigAlign = 0x1FULL<<27;
static const uint64_t OrigAlignOffs = 27;
static const uint64_t ByValSize = 0x3fffffffULL<<32; ///< Struct size
static const uint64_t ByValSizeOffs = 32;
static const uint64_t InConsecutiveRegsLast = 0x1ULL<<62; ///< Last part of a block of consecutive registers
static const uint64_t InConsecutiveRegsLastOffs = 62;
static const uint64_t InConsecutiveRegs = 0x1ULL<<63; ///< Part of a block of consecutive registers
static const uint64_t InConsecutiveRegsOffs = 63;
static const uint64_t One = 1ULL; ///< 1 of this type, for shifts
uint64_t Flags;
public:
ArgFlagsTy() : Flags(0) { }
bool isZExt() const { return Flags & ZExt; }
void setZExt() { Flags |= One << ZExtOffs; }
bool isSExt() const { return Flags & SExt; }
void setSExt() { Flags |= One << SExtOffs; }
bool isInReg() const { return Flags & InReg; }
void setInReg() { Flags |= One << InRegOffs; }
bool isSRet() const { return Flags & SRet; }
void setSRet() { Flags |= One << SRetOffs; }
bool isByVal() const { return Flags & ByVal; }
void setByVal() { Flags |= One << ByValOffs; }
bool isInAlloca() const { return Flags & InAlloca; }
void setInAlloca() { Flags |= One << InAllocaOffs; }
bool isNest() const { return Flags & Nest; }
void setNest() { Flags |= One << NestOffs; }
bool isReturned() const { return Flags & Returned; }
void setReturned() { Flags |= One << ReturnedOffs; }
bool isInConsecutiveRegs() const { return Flags & InConsecutiveRegs; }
void setInConsecutiveRegs() { Flags |= One << InConsecutiveRegsOffs; }
bool isInConsecutiveRegsLast() const { return Flags & InConsecutiveRegsLast; }
void setInConsecutiveRegsLast() { Flags |= One << InConsecutiveRegsLastOffs; }
unsigned getByValAlign() const {
return (unsigned)
((One << ((Flags & ByValAlign) >> ByValAlignOffs)) / 2);
}
void setByValAlign(unsigned A) {
Flags = (Flags & ~ByValAlign) |
(uint64_t(Log2_32(A) + 1) << ByValAlignOffs);
}
bool isSplit() const { return Flags & Split; }
void setSplit() { Flags |= One << SplitOffs; }
unsigned getOrigAlign() const {
return (unsigned)
((One << ((Flags & OrigAlign) >> OrigAlignOffs)) / 2);
}
void setOrigAlign(unsigned A) {
Flags = (Flags & ~OrigAlign) |
(uint64_t(Log2_32(A) + 1) << OrigAlignOffs);
}
unsigned getByValSize() const {
return (unsigned)((Flags & ByValSize) >> ByValSizeOffs);
}
void setByValSize(unsigned S) {
Flags = (Flags & ~ByValSize) | (uint64_t(S) << ByValSizeOffs);
}
/// getRawBits - Represent the flags as a bunch of bits.
uint64_t getRawBits() const { return Flags; }
};
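// Sketch of the byval alignment round trip: the alignment is stored as
// Log2_32(A) + 1 in a 4-bit field and decoded back to the power of two.
//
//   ISD::ArgFlagsTy Flags;
//   Flags.setByVal();
//   Flags.setByValAlign(16); // field holds Log2_32(16) + 1 == 5
//   assert(Flags.getByValAlign() == 16); // (1 << 5) / 2 == 16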
/// InputArg - This struct carries flags and type information about a
/// single incoming (formal) argument or incoming (from the perspective
/// of the caller) return value virtual register.
///
struct InputArg {
ArgFlagsTy Flags;
MVT VT;
EVT ArgVT;
bool Used;
/// Index of the original Function's argument.
unsigned OrigArgIndex;
/// Sentinel value for implicit machine-level input arguments.
static const unsigned NoArgIndex = UINT_MAX;
/// Offset in bytes of the current input value relative to the beginning of
/// the original argument. E.g. if the argument was split into four 32-bit
/// registers, we get 4 InputArgs with PartOffsets 0, 4, 8 and 12.
unsigned PartOffset;
InputArg() : VT(MVT::Other), Used(false) {}
InputArg(ArgFlagsTy flags, EVT vt, EVT argvt, bool used,
unsigned origIdx, unsigned partOffs)
: Flags(flags), Used(used), OrigArgIndex(origIdx), PartOffset(partOffs) {
VT = vt.getSimpleVT();
ArgVT = argvt;
}
bool isOrigArg() const {
return OrigArgIndex != NoArgIndex;
}
unsigned getOrigArgIndex() const {
assert(OrigArgIndex != NoArgIndex && "Implicit machine-level argument");
return OrigArgIndex;
}
};
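// Sketch: an i128 value passed as four 32-bit parts could be described by
// four InputArgs sharing the original argument index and differing only in
// PartOffset (the flag settings are illustrative, not a required convention).
//
//   ISD::ArgFlagsTy Flags;
//   ISD::InputArg Part0(Flags, MVT::i32, MVT::i128, /*Used=*/true,
//                       /*OrigIdx=*/0, /*PartOffs=*/0);
//   ISD::InputArg Part1(Flags, MVT::i32, MVT::i128, true, 0, 4);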
/// OutputArg - This struct carries flags and a value for a
/// single outgoing (actual) argument or outgoing (from the perspective
/// of the caller) return value virtual register.
///
struct OutputArg {
ArgFlagsTy Flags;
MVT VT;
EVT ArgVT;
/// IsFixed - Is this a "fixed" value, i.e. not passed through a vararg "...".
bool IsFixed;
/// Index of the original Function's argument.
unsigned OrigArgIndex;
/// Offset in bytes of the current output value relative to the beginning of
/// the original argument. E.g. if the argument was split into four 32-bit
/// registers, we get 4 OutputArgs with PartOffsets 0, 4, 8 and 12.
unsigned PartOffset;
OutputArg() : IsFixed(false) {}
OutputArg(ArgFlagsTy flags, EVT vt, EVT argvt, bool isfixed,
unsigned origIdx, unsigned partOffs)
: Flags(flags), IsFixed(isfixed), OrigArgIndex(origIdx),
PartOffset(partOffs) {
VT = vt.getSimpleVT();
ArgVT = argvt;
}
};
}
} // end llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Target/TargetOptions.h | //===-- llvm/Target/TargetOptions.h - Target Options ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines command line option flags that are shared across various
// targets.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TARGET_TARGETOPTIONS_H
#define LLVM_TARGET_TARGETOPTIONS_H
#include "llvm/Target/TargetRecip.h"
#include "llvm/MC/MCTargetOptions.h"
#include <string>
namespace llvm {
class MachineFunction;
class Module;
class StringRef;
namespace FloatABI {
enum ABIType {
Default, // Target-specific (either soft or hard depending on triple, etc).
Soft, // Soft float.
Hard // Hard float.
};
}
namespace FPOpFusion {
enum FPOpFusionMode {
Fast, // Enable fusion of FP ops wherever it's profitable.
Standard, // Only allow fusion of 'blessed' ops (currently just fmuladd).
Strict // Never fuse FP-ops.
};
}
namespace JumpTable {
enum JumpTableType {
Single, // Use a single table for all indirect jumptable calls.
Arity, // Use one table per number of function parameters.
Simplified, // Use one table per function type, with types projected
// into 4 types: pointer to non-function, struct,
// primitive, and function pointer.
Full // Use one table per unique function type
};
}
namespace ThreadModel {
enum Model {
POSIX, // POSIX Threads
Single // Single Threaded Environment
};
}
class TargetOptions {
public:
TargetOptions()
: PrintMachineCode(false),
LessPreciseFPMADOption(false), UnsafeFPMath(false),
NoInfsFPMath(false), NoNaNsFPMath(false),
HonorSignDependentRoundingFPMathOption(false),
NoZerosInBSS(false),
GuaranteedTailCallOpt(false),
StackAlignmentOverride(0),
EnableFastISel(false), PositionIndependentExecutable(false),
UseInitArray(false), DisableIntegratedAS(false),
CompressDebugSections(false), FunctionSections(false),
DataSections(false), UniqueSectionNames(true), TrapUnreachable(false),
FloatABIType(FloatABI::Default),
AllowFPOpFusion(FPOpFusion::Standard), Reciprocals(TargetRecip()),
JTType(JumpTable::Single),
ThreadModel(ThreadModel::POSIX) {}
/// PrintMachineCode - This flag is enabled when the -print-machineinstrs
/// option is specified on the command line, and should enable debugging
/// output from the code generator.
unsigned PrintMachineCode : 1;
/// DisableFramePointerElim - This returns true if frame pointer elimination
/// optimization should be disabled for the given machine function.
bool DisableFramePointerElim(const MachineFunction &MF) const;
/// LessPreciseFPMAD - This flag is enabled when the
/// -enable-fp-mad option is specified on the command line. When this flag is off
/// (the default), the code generator is not allowed to generate mad
/// (multiply add) if the result is "less precise" than doing those
/// operations individually.
unsigned LessPreciseFPMADOption : 1;
bool LessPreciseFPMAD() const;
/// UnsafeFPMath - This flag is enabled when the
/// -enable-unsafe-fp-math flag is specified on the command line. When
/// this flag is off (the default), the code generator is not allowed to
/// produce results that are "less precise" than IEEE allows. This includes
/// use of X86 instructions like FSIN and FCOS instead of libcalls.
/// UnsafeFPMath implies LessPreciseFPMAD.
unsigned UnsafeFPMath : 1;
/// NoInfsFPMath - This flag is enabled when the
/// -enable-no-infs-fp-math flag is specified on the command line. When
/// this flag is off (the default), the code generator is not allowed to
/// assume the FP arithmetic arguments and results are never +-Infs.
unsigned NoInfsFPMath : 1;
/// NoNaNsFPMath - This flag is enabled when the
/// -enable-no-nans-fp-math flag is specified on the command line. When
/// this flag is off (the default), the code generator is not allowed to
/// assume the FP arithmetic arguments and results are never NaNs.
unsigned NoNaNsFPMath : 1;
/// HonorSignDependentRoundingFPMath - This returns true when the
/// -enable-sign-dependent-rounding-fp-math is specified. If this returns
/// false (the default), the code generator is allowed to assume that the
/// rounding behavior is the default (round-to-zero for all floating point
/// to integer conversions, and round-to-nearest for all other arithmetic
/// truncations). If this is enabled (set to true), the code generator must
/// assume that the rounding mode may dynamically change.
unsigned HonorSignDependentRoundingFPMathOption : 1;
bool HonorSignDependentRoundingFPMath() const;
/// NoZerosInBSS - By default some codegens place zero-initialized data in
/// the .bss section. This flag disables such behaviour (necessary, e.g. for
/// crt*.o compiling).
unsigned NoZerosInBSS : 1;
/// GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is
/// specified on the command line. When the flag is on, participating targets
/// will perform tail call optimization on all calls which use the fastcc
/// calling convention and which satisfy certain target-independent
/// criteria (being at the end of a function, having the same return type
/// as their parent function, etc.), using an alternate ABI if necessary.
unsigned GuaranteedTailCallOpt : 1;
/// StackAlignmentOverride - Override default stack alignment for target.
unsigned StackAlignmentOverride;
/// EnableFastISel - This flag enables fast-path instruction selection
/// which trades away generated code quality in favor of reducing
/// compile time.
unsigned EnableFastISel : 1;
/// PositionIndependentExecutable - This flag indicates whether the code
/// will eventually be linked into a single executable, despite the PIC
/// relocation model being in use. Its value is undefined (and irrelevant)
/// if the relocation model is anything other than PIC.
unsigned PositionIndependentExecutable : 1;
/// UseInitArray - Use .init_array instead of .ctors for static
/// constructors.
unsigned UseInitArray : 1;
/// Disable the integrated assembler.
unsigned DisableIntegratedAS : 1;
/// Compress DWARF debug sections.
unsigned CompressDebugSections : 1;
/// Emit functions into separate sections.
unsigned FunctionSections : 1;
/// Emit data into separate sections.
unsigned DataSections : 1;
unsigned UniqueSectionNames : 1;
/// Emit target-specific trap instruction for 'unreachable' IR instructions.
unsigned TrapUnreachable : 1;
/// FloatABIType - This setting is set when the -float-abi=xxx option is specified
/// on the command line. This setting may either be Default, Soft, or Hard.
/// Default selects the target's default behavior. Soft selects the ABI for
/// software floating point, but does not indicate that FP hardware may not
/// be used. Such a combination is unfortunately popular (e.g.
/// arm-apple-darwin). Hard presumes that the normal FP ABI is used.
FloatABI::ABIType FloatABIType;
/// AllowFPOpFusion - This flag is set by the -fuse-fp-ops=xxx option.
/// This controls the creation of fused FP ops that store intermediate
/// results in higher precision than IEEE allows (E.g. FMAs).
///
/// Fast mode - allows formation of fused FP ops whenever they're
/// profitable.
/// Standard mode - allow fusion only for 'blessed' FP ops. At present the
/// only blessed op is the fmuladd intrinsic. In the future more blessed ops
/// may be added.
/// Strict mode - allow fusion only if/when it can be proven that the excess
/// precision won't affect the result.
///
/// Note: This option only controls formation of fused ops by the
/// optimizers. Fused operations that are explicitly specified (e.g. FMA
/// via the llvm.fma.* intrinsic) will always be honored, regardless of
/// the value of this option.
FPOpFusion::FPOpFusionMode AllowFPOpFusion;
/// This class encapsulates options for reciprocal-estimate code generation.
TargetRecip Reciprocals;
/// JTType - This flag specifies the type of jump-instruction table to
/// create for functions that have the jumptable attribute.
JumpTable::JumpTableType JTType;
/// ThreadModel - This flag specifies the type of threading model to assume
/// for things like atomics.
ThreadModel::Model ThreadModel;
/// Machine level options.
MCTargetOptions MCOptions;
};
// Comparison operators:
inline bool operator==(const TargetOptions &LHS,
const TargetOptions &RHS) {
#define ARE_EQUAL(X) LHS.X == RHS.X
return
ARE_EQUAL(UnsafeFPMath) &&
ARE_EQUAL(NoInfsFPMath) &&
ARE_EQUAL(NoNaNsFPMath) &&
ARE_EQUAL(HonorSignDependentRoundingFPMathOption) &&
ARE_EQUAL(NoZerosInBSS) &&
ARE_EQUAL(GuaranteedTailCallOpt) &&
ARE_EQUAL(StackAlignmentOverride) &&
ARE_EQUAL(EnableFastISel) &&
ARE_EQUAL(PositionIndependentExecutable) &&
ARE_EQUAL(UseInitArray) &&
ARE_EQUAL(TrapUnreachable) &&
ARE_EQUAL(FloatABIType) &&
ARE_EQUAL(AllowFPOpFusion) &&
ARE_EQUAL(Reciprocals) &&
ARE_EQUAL(JTType) &&
ARE_EQUAL(ThreadModel) &&
ARE_EQUAL(MCOptions);
#undef ARE_EQUAL
}
inline bool operator!=(const TargetOptions &LHS,
const TargetOptions &RHS) {
return !(LHS == RHS);
}
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Target/TargetSelectionDAGInfo.h | //==-- llvm/Target/TargetSelectionDAGInfo.h - SelectionDAG Info --*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the TargetSelectionDAGInfo class, which targets can
// subclass to parameterize the SelectionDAG lowering and instruction
// selection process.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TARGET_TARGETSELECTIONDAGINFO_H
#define LLVM_TARGET_TARGETSELECTIONDAGINFO_H
#include "llvm/CodeGen/SelectionDAGNodes.h"
namespace llvm {
//===----------------------------------------------------------------------===//
/// TargetSelectionDAGInfo - Targets can subclass this to parameterize the
/// SelectionDAG lowering and instruction selection process.
///
class TargetSelectionDAGInfo {
TargetSelectionDAGInfo(const TargetSelectionDAGInfo &) = delete;
void operator=(const TargetSelectionDAGInfo &) = delete;
public:
explicit TargetSelectionDAGInfo() = default;
virtual ~TargetSelectionDAGInfo();
/// EmitTargetCodeForMemcpy - Emit target-specific code that performs a
/// memcpy. This can be used by targets to provide code sequences for cases
/// that don't fit the target's parameters for simple loads/stores and can be
/// more efficient than using a library call. This function can return a null
/// SDValue if the target declines to use custom code and a different
/// lowering strategy should be used.
///
/// If AlwaysInline is true, the size is constant and the target should not
/// emit any calls and is strongly encouraged to attempt to emit inline code
/// even if it is beyond the usual threshold because this intrinsic is being
/// expanded in a place where calls are not feasible (e.g. within the prologue
/// for another call). If the target chooses to decline an AlwaysInline
/// request here, legalize will resort to using simple loads and stores.
virtual SDValue
EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Op1, SDValue Op2,
SDValue Op3, unsigned Align, bool isVolatile,
bool AlwaysInline,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo) const {
return SDValue();
}
/// EmitTargetCodeForMemmove - Emit target-specific code that performs a
/// memmove. This can be used by targets to provide code sequences for cases
/// that don't fit the target's parameters for simple loads/stores and can be
/// more efficient than using a library call. This function can return a null
/// SDValue if the target declines to use custom code and a different
/// lowering strategy should be used.
virtual SDValue
EmitTargetCodeForMemmove(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Op1, SDValue Op2,
SDValue Op3, unsigned Align, bool isVolatile,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo) const {
return SDValue();
}
/// EmitTargetCodeForMemset - Emit target-specific code that performs a
/// memset. This can be used by targets to provide code sequences for cases
/// that don't fit the target's parameters for simple stores and can be more
/// efficient than using a library call. This function can return a null
/// SDValue if the target declines to use custom code and a different
/// lowering strategy should be used.
virtual SDValue
EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Op1, SDValue Op2,
SDValue Op3, unsigned Align, bool isVolatile,
MachinePointerInfo DstPtrInfo) const {
return SDValue();
}
/// EmitTargetCodeForMemcmp - Emit target-specific code that performs a
/// memcmp, in cases where that is faster than a libcall. The first
/// returned SDValue is the result of the memcmp and the second is
/// the chain. Both SDValues can be null if a normal libcall should
/// be used.
virtual std::pair<SDValue, SDValue>
EmitTargetCodeForMemcmp(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Op1, SDValue Op2,
SDValue Op3, MachinePointerInfo Op1PtrInfo,
MachinePointerInfo Op2PtrInfo) const {
return std::make_pair(SDValue(), SDValue());
}
/// EmitTargetCodeForMemchr - Emit target-specific code that performs a
/// memchr, in cases where that is faster than a libcall. The first
/// returned SDValue is the result of the memchr and the second is
/// the chain. Both SDValues can be null if a normal libcall should
/// be used.
virtual std::pair<SDValue, SDValue>
EmitTargetCodeForMemchr(SelectionDAG &DAG, SDLoc dl, SDValue Chain,
SDValue Src, SDValue Char, SDValue Length,
MachinePointerInfo SrcPtrInfo) const {
return std::make_pair(SDValue(), SDValue());
}
/// EmitTargetCodeForStrcpy - Emit target-specific code that performs a
/// strcpy or stpcpy, in cases where that is faster than a libcall.
/// The first returned SDValue is the result of the copy (the start
/// of the destination string for strcpy, a pointer to the null terminator
/// for stpcpy) and the second is the chain. Both SDValues can be null
/// if a normal libcall should be used.
virtual std::pair<SDValue, SDValue>
EmitTargetCodeForStrcpy(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
SDValue Dest, SDValue Src,
MachinePointerInfo DestPtrInfo,
MachinePointerInfo SrcPtrInfo,
bool isStpcpy) const {
return std::make_pair(SDValue(), SDValue());
}
/// EmitTargetCodeForStrcmp - Emit target-specific code that performs a
/// strcmp, in cases where that is faster than a libcall. The first
/// returned SDValue is the result of the strcmp and the second is
/// the chain. Both SDValues can be null if a normal libcall should
/// be used.
virtual std::pair<SDValue, SDValue>
EmitTargetCodeForStrcmp(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Op1, SDValue Op2,
MachinePointerInfo Op1PtrInfo,
MachinePointerInfo Op2PtrInfo) const {
return std::make_pair(SDValue(), SDValue());
}
virtual std::pair<SDValue, SDValue>
EmitTargetCodeForStrlen(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
SDValue Src, MachinePointerInfo SrcPtrInfo) const {
return std::make_pair(SDValue(), SDValue());
}
virtual std::pair<SDValue, SDValue>
EmitTargetCodeForStrnlen(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
SDValue Src, SDValue MaxLength,
MachinePointerInfo SrcPtrInfo) const {
return std::make_pair(SDValue(), SDValue());
}
};
} // end llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Target/CostTable.h | //===-- CostTable.h - Instruction Cost Table handling -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief Cost tables and simple lookup functions
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_TARGET_COSTTABLE_H_
#define LLVM_TARGET_COSTTABLE_H_
namespace llvm {
/// Cost Table Entry
template <class TypeTy>
struct CostTblEntry {
int ISD;
TypeTy Type;
unsigned Cost;
};
/// Find in cost table, TypeTy must be comparable to CompareTy by ==
template <class TypeTy, class CompareTy>
int CostTableLookup(const CostTblEntry<TypeTy> *Tbl, unsigned len, int ISD,
CompareTy Ty) {
for (unsigned int i = 0; i < len; ++i)
if (ISD == Tbl[i].ISD && Ty == Tbl[i].Type)
return i;
// Could not find an entry.
return -1;
}
/// Find in cost table, TypeTy must be comparable to CompareTy by ==
template <class TypeTy, class CompareTy, unsigned N>
int CostTableLookup(const CostTblEntry<TypeTy>(&Tbl)[N], int ISD,
CompareTy Ty) {
return CostTableLookup(Tbl, N, ISD, Ty);
}
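// Usage sketch, with a hypothetical per-target table keyed by ISD opcode and
// type (assumes ISD opcodes and MVT are visible in the including file):
//
//   static const CostTblEntry<MVT::SimpleValueType> MyCostTbl[] = {
//     { ISD::ADD, MVT::v4i32, 1 },
//     { ISD::MUL, MVT::v4i32, 4 },
//   };
//   int Idx = CostTableLookup(MyCostTbl, ISD::MUL, MVT::v4i32);
//   if (Idx != -1)
//     return MyCostTbl[Idx].Cost;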
/// Type Conversion Cost Table
template <class TypeTy>
struct TypeConversionCostTblEntry {
int ISD;
TypeTy Dst;
TypeTy Src;
unsigned Cost;
};
/// Find in type conversion cost table, TypeTy must be comparable to CompareTy
/// by ==
template <class TypeTy, class CompareTy>
int ConvertCostTableLookup(const TypeConversionCostTblEntry<TypeTy> *Tbl,
unsigned len, int ISD, CompareTy Dst,
CompareTy Src) {
for (unsigned int i = 0; i < len; ++i)
if (ISD == Tbl[i].ISD && Src == Tbl[i].Src && Dst == Tbl[i].Dst)
return i;
// Could not find an entry.
return -1;
}
/// Find in type conversion cost table, TypeTy must be comparable to CompareTy
/// by ==
template <class TypeTy, class CompareTy, unsigned N>
int ConvertCostTableLookup(const TypeConversionCostTblEntry<TypeTy>(&Tbl)[N],
int ISD, CompareTy Dst, CompareTy Src) {
return ConvertCostTableLookup(Tbl, N, ISD, Dst, Src);
}
} // namespace llvm
#endif /* LLVM_TARGET_COSTTABLE_H_ */
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Target/TargetRecip.h | //===--------------------- llvm/Target/TargetRecip.h ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This class is used to customize machine-specific reciprocal estimate code
// generation in a target-independent way.
// If a target does not support operations in this specification, then code
// generation will default to using supported operations.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TARGET_TARGETRECIP_H
#define LLVM_TARGET_TARGETRECIP_H
#include "llvm/ADT/StringRef.h"
#include <vector>
#include <string>
#include <map>
namespace llvm {
struct TargetRecip {
public:
TargetRecip();
/// Initialize all or part of the operations from command-line options or
/// a front end.
TargetRecip(const std::vector<std::string> &Args);
/// Set whether a particular reciprocal operation is enabled and how many
/// refinement steps are needed when using it. Use "all" to set enablement
/// and refinement steps for all operations.
void setDefaults(const StringRef &Key, bool Enable, unsigned RefSteps);
/// Return true if the reciprocal operation has been enabled by default or
/// from the command-line. Return false if the operation has been disabled
/// by default or from the command-line.
bool isEnabled(const StringRef &Key) const;
/// Return the number of iterations necessary to refine the result of a
/// machine instruction for the given reciprocal operation.
unsigned getRefinementSteps(const StringRef &Key) const;
bool operator==(const TargetRecip &Other) const;
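// Usage sketch: enable every reciprocal estimate with one refinement step,
// then query a specific operation key ("vec-sqrtf" is shown for illustration;
// the accepted keys are defined by the option parsing, not by this header).
//
//   TargetRecip Recips;
//   Recips.setDefaults("all", true, 1);
//   unsigned Steps = 0;
//   if (Recips.isEnabled("vec-sqrtf"))
//     Steps = Recips.getRefinementSteps("vec-sqrtf");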
private:
enum {
Uninitialized = -1
};
struct RecipParams {
int8_t Enabled;
int8_t RefinementSteps;
RecipParams() : Enabled(Uninitialized), RefinementSteps(Uninitialized) {}
};
std::map<StringRef, RecipParams> RecipMap;
typedef std::map<StringRef, RecipParams>::iterator RecipIter;
typedef std::map<StringRef, RecipParams>::const_iterator ConstRecipIter;
bool parseGlobalParams(const std::string &Arg);
void parseIndividualParams(const std::vector<std::string> &Args);
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Target/TargetRegisterInfo.h | //=== Target/TargetRegisterInfo.h - Target Register Information -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes an abstract interface used to get information about a
// target machine's register file. This information is used for a variety of
// purposes, especially register allocation.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TARGET_TARGETREGISTERINFO_H
#define LLVM_TARGET_TARGETREGISTERINFO_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/MC/MCRegisterInfo.h"
#include <cassert>
#include <functional>
namespace llvm {
class BitVector;
class MachineFunction;
class RegScavenger;
template<class T> class SmallVectorImpl;
class VirtRegMap;
class raw_ostream;
class TargetRegisterClass {
public:
typedef const MCPhysReg* iterator;
typedef const MCPhysReg* const_iterator;
typedef const MVT::SimpleValueType* vt_iterator;
typedef const TargetRegisterClass* const * sc_iterator;
// Instance variables filled by tablegen, do not use!
const MCRegisterClass *MC;
const vt_iterator VTs;
const uint32_t *SubClassMask;
const uint16_t *SuperRegIndices;
const unsigned LaneMask;
/// Classes with a higher priority value are assigned first by register
/// allocators using a greedy heuristic. The value is in the range [0,63].
const uint8_t AllocationPriority;
/// Whether the class supports two (or more) disjunct subregister indices.
const bool HasDisjunctSubRegs;
const sc_iterator SuperClasses;
ArrayRef<MCPhysReg> (*OrderFunc)(const MachineFunction&);
/// getID() - Return the register class ID number.
///
unsigned getID() const { return MC->getID(); }
/// begin/end - Return all of the registers in this class.
///
iterator begin() const { return MC->begin(); }
iterator end() const { return MC->end(); }
/// getNumRegs - Return the number of registers in this class.
///
unsigned getNumRegs() const { return MC->getNumRegs(); }
/// getRegister - Return the specified register in the class.
///
unsigned getRegister(unsigned i) const {
return MC->getRegister(i);
}
/// contains - Return true if the specified register is included in this
/// register class. This does not include virtual registers.
bool contains(unsigned Reg) const {
return MC->contains(Reg);
}
/// contains - Return true if both registers are in this class.
bool contains(unsigned Reg1, unsigned Reg2) const {
return MC->contains(Reg1, Reg2);
}
/// getSize - Return the size of the register in bytes, which is also the size
/// of a stack slot allocated to hold a spilled copy of this register.
unsigned getSize() const { return MC->getSize(); }
/// getAlignment - Return the minimum required alignment for a register of
/// this class.
unsigned getAlignment() const { return MC->getAlignment(); }
/// getCopyCost - Return the cost of copying a value between two registers in
/// this class. A negative number means the register class is very expensive
/// to copy e.g. status flag register classes.
int getCopyCost() const { return MC->getCopyCost(); }
/// isAllocatable - Return true if this register class may be used to create
/// virtual registers.
bool isAllocatable() const { return MC->isAllocatable(); }
/// hasType - return true if this TargetRegisterClass has the ValueType vt.
///
bool hasType(MVT vt) const {
for(int i = 0; VTs[i] != MVT::Other; ++i)
if (MVT(VTs[i]) == vt)
return true;
return false;
}
/// vt_begin / vt_end - Loop over all of the value types that can be
/// represented by values in this register class.
vt_iterator vt_begin() const {
return VTs;
}
vt_iterator vt_end() const {
vt_iterator I = VTs;
while (*I != MVT::Other) ++I;
return I;
}
/// hasSubClass - return true if the specified TargetRegisterClass
/// is a proper sub-class of this TargetRegisterClass.
bool hasSubClass(const TargetRegisterClass *RC) const {
return RC != this && hasSubClassEq(RC);
}
/// hasSubClassEq - Returns true if RC is a sub-class of or equal to this
/// class.
bool hasSubClassEq(const TargetRegisterClass *RC) const {
unsigned ID = RC->getID();
return (SubClassMask[ID / 32] >> (ID % 32)) & 1;
}
/// hasSuperClass - return true if the specified TargetRegisterClass is a
/// proper super-class of this TargetRegisterClass.
bool hasSuperClass(const TargetRegisterClass *RC) const {
return RC->hasSubClass(this);
}
/// hasSuperClassEq - Returns true if RC is a super-class of or equal to this
/// class.
bool hasSuperClassEq(const TargetRegisterClass *RC) const {
return RC->hasSubClassEq(this);
}
/// getSubClassMask - Returns a bit vector of subclasses, including this one.
/// The vector is indexed by class IDs, see hasSubClassEq() above for how to
/// use it.
const uint32_t *getSubClassMask() const {
return SubClassMask;
}
/// getSuperRegIndices - Returns a 0-terminated list of sub-register indices
/// that project some super-register class into this register class. The list
/// has an entry for each Idx such that:
///
/// There exists SuperRC where:
/// For all Reg in SuperRC:
/// this->contains(Reg:Idx)
///
const uint16_t *getSuperRegIndices() const {
return SuperRegIndices;
}
/// getSuperClasses - Returns a NULL terminated list of super-classes. The
/// classes are ordered by ID which is also a topological ordering from large
/// to small classes. The list does NOT include the current class.
sc_iterator getSuperClasses() const {
return SuperClasses;
}
/// isASubClass - return true if this TargetRegisterClass is a subset
/// class of at least one other TargetRegisterClass.
bool isASubClass() const {
return SuperClasses[0] != nullptr;
}
/// getRawAllocationOrder - Returns the preferred order for allocating
/// registers from this register class in MF. The raw order comes directly
/// from the .td file and may include reserved registers that are not
/// allocatable. Register allocators should also make sure to allocate
/// callee-saved registers only after all the volatiles are used. The
/// RegisterClassInfo class provides filtered allocation orders with
/// callee-saved registers moved to the end.
///
/// The MachineFunction argument can be used to tune the allocatable
/// registers based on the characteristics of the function, subtarget, or
/// other criteria.
///
/// By default, this method returns all registers in the class.
///
ArrayRef<MCPhysReg> getRawAllocationOrder(const MachineFunction &MF) const {
return OrderFunc ? OrderFunc(MF) : makeArrayRef(begin(), getNumRegs());
}
/// Returns the combination of all lane masks of register in this class.
/// The lane masks of the registers are the combination of all lane masks
/// of their subregisters.
unsigned getLaneMask() const {
return LaneMask;
}
};
/// TargetRegisterInfoDesc - Extra information, not in MCRegisterDesc, about
/// registers. These are used by codegen, not by MC.
struct TargetRegisterInfoDesc {
unsigned CostPerUse; // Extra cost of instructions using register.
bool inAllocatableClass; // Register belongs to an allocatable regclass.
};
/// Each TargetRegisterClass has a per register weight, and weight
/// limit which must be less than the limits of its pressure sets.
struct RegClassWeight {
unsigned RegWeight;
unsigned WeightLimit;
};
/// TargetRegisterInfo base class - We assume that the target defines a static
/// array of TargetRegisterDesc objects that represent all of the machine
/// registers that the target has. As such, we simply have to track a pointer
/// to this array so that we can turn register number into a register
/// descriptor.
///
class TargetRegisterInfo : public MCRegisterInfo {
public:
typedef const TargetRegisterClass * const * regclass_iterator;
private:
const TargetRegisterInfoDesc *InfoDesc; // Extra desc array for codegen
const char *const *SubRegIndexNames; // Names of subreg indexes.
// Pointer to array of lane masks, one per sub-reg index.
const unsigned *SubRegIndexLaneMasks;
regclass_iterator RegClassBegin, RegClassEnd; // List of regclasses
unsigned CoveringLanes;
protected:
TargetRegisterInfo(const TargetRegisterInfoDesc *ID,
regclass_iterator RegClassBegin,
regclass_iterator RegClassEnd,
const char *const *SRINames,
const unsigned *SRILaneMasks,
unsigned CoveringLanes);
virtual ~TargetRegisterInfo();
public:
// Register numbers can represent physical registers, virtual registers, and
// sometimes stack slots. The unsigned values are divided into these ranges:
//
// 0 Not a register, can be used as a sentinel.
// [1;2^30) Physical registers assigned by TableGen.
// [2^30;2^31) Stack slots. (Rarely used.)
// [2^31;2^32) Virtual registers assigned by MachineRegisterInfo.
//
// Further sentinels can be allocated from the small negative integers.
// DenseMapInfo<unsigned> uses -1u and -2u.
/// isStackSlot - Sometimes it is useful to be able to store a non-negative
/// frame index in a variable that normally holds a register. isStackSlot()
/// returns true if Reg is in the range used for stack slots.
///
/// Note that isVirtualRegister() and isPhysicalRegister() cannot handle stack
/// slots, so if a variable may contain a stack slot, always check
/// isStackSlot() first.
///
static bool isStackSlot(unsigned Reg) {
return int(Reg) >= (1 << 30);
}
/// stackSlot2Index - Compute the frame index from a register value
/// representing a stack slot.
static int stackSlot2Index(unsigned Reg) {
assert(isStackSlot(Reg) && "Not a stack slot");
return int(Reg - (1u << 30));
}
/// index2StackSlot - Convert a non-negative frame index to a stack slot
/// register value.
static unsigned index2StackSlot(int FI) {
assert(FI >= 0 && "Cannot hold a negative frame index.");
return FI + (1u << 30);
}
/// isPhysicalRegister - Return true if the specified register number is in
/// the physical register namespace.
static bool isPhysicalRegister(unsigned Reg) {
assert(!isStackSlot(Reg) && "Not a register! Check isStackSlot() first.");
return int(Reg) > 0;
}
/// isVirtualRegister - Return true if the specified register number is in
/// the virtual register namespace.
static bool isVirtualRegister(unsigned Reg) {
assert(!isStackSlot(Reg) && "Not a register! Check isStackSlot() first.");
return int(Reg) < 0;
}
/// virtReg2Index - Convert a virtual register number to a 0-based index.
/// The first virtual register in a function will get the index 0.
static unsigned virtReg2Index(unsigned Reg) {
assert(isVirtualRegister(Reg) && "Not a virtual register");
return Reg & ~(1u << 31);
}
/// index2VirtReg - Convert a 0-based index to a virtual register number.
/// This is the inverse operation of VirtReg2IndexFunctor below.
static unsigned index2VirtReg(unsigned Index) {
return Index | (1u << 31);
}
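// Sketch of the virtual register numbering round trip: the first virtual
// register created in a function has index 0 and register number 1u << 31.
//
//   unsigned VReg = TargetRegisterInfo::index2VirtReg(0); // 0x80000000u
//   assert(TargetRegisterInfo::isVirtualRegister(VReg));
//   assert(TargetRegisterInfo::virtReg2Index(VReg) == 0);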
/// getMinimalPhysRegClass - Returns the Register Class of a physical
/// register of the given type, picking the smallest (most specialized)
/// register class of the right type that contains this physreg.
const TargetRegisterClass *
getMinimalPhysRegClass(unsigned Reg, MVT VT = MVT::Other) const;
/// getAllocatableClass - Return the maximal subclass of the given register
/// class that is allocatable, or NULL.
const TargetRegisterClass *
getAllocatableClass(const TargetRegisterClass *RC) const;
/// getAllocatableSet - Returns a bitset indexed by register number
/// indicating if a register is allocatable or not. If a register class is
/// specified, returns the subset for the class.
BitVector getAllocatableSet(const MachineFunction &MF,
const TargetRegisterClass *RC = nullptr) const;
/// getCostPerUse - Return the additional cost of using this register instead
/// of other registers in its class.
unsigned getCostPerUse(unsigned RegNo) const {
return InfoDesc[RegNo].CostPerUse;
}
/// isInAllocatableClass - Return true if the register is in an allocatable
/// register class.
bool isInAllocatableClass(unsigned RegNo) const {
return InfoDesc[RegNo].inAllocatableClass;
}
/// getSubRegIndexName - Return the human-readable symbolic target-specific
/// name for the specified SubRegIndex.
const char *getSubRegIndexName(unsigned SubIdx) const {
assert(SubIdx && SubIdx < getNumSubRegIndices() &&
"This is not a subregister index");
return SubRegIndexNames[SubIdx-1];
}
/// getSubRegIndexLaneMask - Return a bitmask representing the parts of a
/// register that are covered by SubIdx.
///
/// Lane masks for sub-register indices are similar to register units for
/// physical registers. The individual bits in a lane mask can't be assigned
/// any specific meaning. They can be used to check if two sub-register
/// indices overlap.
///
/// If the target has a register such that:
///
/// getSubReg(Reg, A) overlaps getSubReg(Reg, B)
///
/// then:
///
/// (getSubRegIndexLaneMask(A) & getSubRegIndexLaneMask(B)) != 0
///
/// The converse is not necessarily true. If two lane masks have a common
/// bit, the corresponding sub-registers may not overlap, but it can be
/// assumed that they usually will.
/// SubIdx == 0 is allowed, it has the lane mask ~0u.
unsigned getSubRegIndexLaneMask(unsigned SubIdx) const {
assert(SubIdx < getNumSubRegIndices() && "This is not a subregister index");
return SubRegIndexLaneMasks[SubIdx];
}
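// Sketch, assuming a TargetRegisterInfo reference TRI and two sub-register
// indices SubA and SubB: a zero intersection proves the sub-registers do not
// overlap, while a non-zero intersection only says that they may.
//
//   unsigned MaskA = TRI.getSubRegIndexLaneMask(SubA);
//   unsigned MaskB = TRI.getSubRegIndexLaneMask(SubB);
//   bool MayOverlap = (MaskA & MaskB) != 0;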
/// Returns true if the given lane mask is imprecise.
///
/// LaneMasks as given by getSubRegIndexLaneMask() have a limited number of
/// bits, so for targets with more than 31 disjunct subregister indices there
/// may be cases where:
/// getSubReg(Reg,A) does not overlap getSubReg(Reg,B)
/// but we still have
/// (getSubRegIndexLaneMask(A) & getSubRegIndexLaneMask(B)) != 0.
/// This function returns true in those cases.
static bool isImpreciseLaneMask(unsigned LaneMask) {
return LaneMask & 0x80000000u;
}
/// The lane masks returned by getSubRegIndexLaneMask() above can only be
/// used to determine if sub-registers overlap - they can't be used to
/// determine if a set of sub-registers completely cover another
/// sub-register.
///
/// The X86 general purpose registers have two lanes corresponding to the
/// sub_8bit and sub_8bit_hi sub-registers. Both sub_32bit and sub_16bit have
/// lane masks '3', but the sub_16bit sub-register doesn't fully cover the
/// sub_32bit sub-register.
///
/// On the other hand, the ARM NEON lanes fully cover their registers: The
/// dsub_0 sub-register is completely covered by the ssub_0 and ssub_1 lanes.
/// This is related to the CoveredBySubRegs property on register definitions.
///
/// This function returns a bit mask of lanes that completely cover their
/// sub-registers. More precisely, given:
///
/// Covering = getCoveringLanes();
/// MaskA = getSubRegIndexLaneMask(SubA);
/// MaskB = getSubRegIndexLaneMask(SubB);
///
/// If (MaskA & ~(MaskB & Covering)) == 0, then SubA is completely covered by
/// SubB.
unsigned getCoveringLanes() const { return CoveringLanes; }
/// regsOverlap - Returns true if the two registers are equal or alias each
/// other. The registers may be virtual registers.
bool regsOverlap(unsigned regA, unsigned regB) const {
if (regA == regB) return true;
if (isVirtualRegister(regA) || isVirtualRegister(regB))
return false;
// Regunits are numerically ordered. Find a common unit.
MCRegUnitIterator RUA(regA, this);
MCRegUnitIterator RUB(regB, this);
do {
if (*RUA == *RUB) return true;
if (*RUA < *RUB) ++RUA;
else ++RUB;
} while (RUA.isValid() && RUB.isValid());
return false;
}
/// hasRegUnit - Returns true if Reg contains RegUnit.
bool hasRegUnit(unsigned Reg, unsigned RegUnit) const {
for (MCRegUnitIterator Units(Reg, this); Units.isValid(); ++Units)
if (*Units == RegUnit)
return true;
return false;
}
/// getCalleeSavedRegs - Return a null-terminated list of all of the
/// callee saved registers on this target. The register should be in the
/// order of desired callee-save stack frame offset. The first register is
/// closest to the incoming stack pointer if stack grows down, and vice versa.
///
virtual const MCPhysReg*
getCalleeSavedRegs(const MachineFunction *MF) const = 0;
/// getCallPreservedMask - Return a mask of call-preserved registers for the
/// given calling convention on the current function. The mask should
/// include all call-preserved aliases. This is used by the register
/// allocator to determine which registers can be live across a call.
///
/// The mask is an array containing (TRI::getNumRegs()+31)/32 entries.
/// A set bit indicates that all bits of the corresponding register are
/// preserved across the function call. The bit mask is expected to be
/// sub-register complete, i.e. if A is preserved, so are all its
/// sub-registers.
///
/// Bits are numbered from the LSB, so the bit for physical register Reg can
/// be found as (Mask[Reg / 32] >> Reg % 32) & 1.
///
/// A NULL pointer means that no register mask will be used, and call
/// instructions should use implicit-def operands to indicate call clobbered
/// registers.
///
virtual const uint32_t *getCallPreservedMask(const MachineFunction &MF,
CallingConv::ID) const {
// The default mask clobbers everything. All targets should override.
return nullptr;
}
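// Sketch of the documented bit lookup, assuming TRI, MF, a calling convention
// CC, and a physical register number Reg are in scope; a null result means no
// register mask is used at all.
//
//   const uint32_t *Mask = TRI.getCallPreservedMask(MF, CC);
//   bool Preserved = Mask && ((Mask[Reg / 32] >> (Reg % 32)) & 1);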
/// Return all the call-preserved register masks defined for this target.
virtual ArrayRef<const uint32_t *> getRegMasks() const = 0;
virtual ArrayRef<const char *> getRegMaskNames() const = 0;
/// getReservedRegs - Returns a bitset indexed by physical register number
/// indicating if a register is a special register that has particular uses
/// and should be considered unavailable at all times, e.g. SP, RA. This is
/// used by register scavenger to determine what registers are free.
virtual BitVector getReservedRegs(const MachineFunction &MF) const = 0;
/// Prior to adding the live-out mask to a stackmap or patchpoint
/// instruction, provide the target the opportunity to adjust it (mainly to
/// remove pseudo-registers that should be ignored).
virtual void adjustStackMapLiveOutMask(uint32_t *Mask) const { }
/// getMatchingSuperReg - Return a super-register of the specified register
/// Reg so its sub-register of index SubIdx is Reg.
unsigned getMatchingSuperReg(unsigned Reg, unsigned SubIdx,
const TargetRegisterClass *RC) const {
return MCRegisterInfo::getMatchingSuperReg(Reg, SubIdx, RC->MC);
}
/// getMatchingSuperRegClass - Return a subclass of the specified register
/// class A so that each register in it has a sub-register of the
/// specified sub-register index which is in the specified register class B.
///
/// TableGen will synthesize missing A sub-classes.
virtual const TargetRegisterClass *
getMatchingSuperRegClass(const TargetRegisterClass *A,
const TargetRegisterClass *B, unsigned Idx) const;
/// getSubClassWithSubReg - Returns the largest legal sub-class of RC that
/// supports the sub-register index Idx.
/// If no such sub-class exists, return NULL.
/// If all registers in RC already have an Idx sub-register, return RC.
///
/// TableGen generates a version of this function that is good enough in most
/// cases. Targets can override if they have constraints that TableGen
/// doesn't understand. For example, the x86 sub_8bit sub-register index is
/// supported by the full GR32 register class in 64-bit mode, but only by the
/// GR32_ABCD register class in 32-bit mode.
///
/// TableGen will synthesize missing RC sub-classes.
virtual const TargetRegisterClass *
getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const {
assert(Idx == 0 && "Target has no sub-registers");
return RC;
}
/// composeSubRegIndices - Return the subregister index you get from composing
/// two subregister indices.
///
/// The special null sub-register index composes as the identity.
///
/// If R:a:b is the same register as R:c, then composeSubRegIndices(a, b)
/// returns c. Note that composeSubRegIndices does not tell you about illegal
/// compositions. If R does not have a subreg a, or R:a does not have a subreg
/// b, composeSubRegIndices doesn't tell you.
///
/// The ARM register Q0 has two D subregs dsub_0:D0 and dsub_1:D1. It also has
/// ssub_0:S0 - ssub_3:S3 subregs.
/// If you compose subreg indices dsub_1, ssub_0 you get ssub_2.
///
unsigned composeSubRegIndices(unsigned a, unsigned b) const {
if (!a) return b;
if (!b) return a;
return composeSubRegIndicesImpl(a, b);
}
/// Transforms a LaneMask computed for one subregister to the lanemask that
/// would have been computed when composing the subsubregisters with IdxA
/// first. @sa composeSubRegIndices()
unsigned composeSubRegIndexLaneMask(unsigned IdxA, unsigned LaneMask) const {
if (!IdxA)
return LaneMask;
return composeSubRegIndexLaneMaskImpl(IdxA, LaneMask);
}
/// Debugging helper: dump register in human readable form to dbgs() stream.
static void dumpReg(unsigned Reg, unsigned SubRegIndex = 0,
const TargetRegisterInfo* TRI = nullptr);
protected:
/// Overridden by TableGen in targets that have sub-registers.
virtual unsigned composeSubRegIndicesImpl(unsigned, unsigned) const {
llvm_unreachable("Target has no sub-registers");
}
/// Overridden by TableGen in targets that have sub-registers.
virtual unsigned
composeSubRegIndexLaneMaskImpl(unsigned, unsigned) const {
llvm_unreachable("Target has no sub-registers");
}
public:
/// getCommonSuperRegClass - Find a common super-register class if it exists.
///
/// Find a register class, SuperRC and two sub-register indices, PreA and
/// PreB, such that:
///
/// 1. PreA + SubA == PreB + SubB (using composeSubRegIndices()), and
///
/// 2. For all Reg in SuperRC: Reg:PreA in RCA and Reg:PreB in RCB, and
///
/// 3. SuperRC->getSize() >= max(RCA->getSize(), RCB->getSize()).
///
/// SuperRC will be chosen such that no super-class of SuperRC satisfies the
/// requirements, and there is no register class with a smaller spill size
/// that satisfies the requirements.
///
/// SubA and SubB must not be 0. Use getMatchingSuperRegClass() instead.
///
/// Either of the PreA and PreB sub-register indices may be returned as 0. In
/// that case, the returned register class will be a sub-class of the
/// corresponding argument register class.
///
/// The function returns NULL if no register class can be found.
///
const TargetRegisterClass*
getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
const TargetRegisterClass *RCB, unsigned SubB,
unsigned &PreA, unsigned &PreB) const;
//===--------------------------------------------------------------------===//
// Register Class Information
//
/// Register class iterators
///
regclass_iterator regclass_begin() const { return RegClassBegin; }
regclass_iterator regclass_end() const { return RegClassEnd; }
unsigned getNumRegClasses() const {
return (unsigned)(regclass_end()-regclass_begin());
}
/// getRegClass - Returns the register class associated with the enumeration
/// value. See class MCOperandInfo.
const TargetRegisterClass *getRegClass(unsigned i) const {
assert(i < getNumRegClasses() && "Register Class ID out of range");
return RegClassBegin[i];
}
/// getRegClassName - Returns the name of the register class.
const char *getRegClassName(const TargetRegisterClass *Class) const {
return MCRegisterInfo::getRegClassName(Class->MC);
}
/// getCommonSubClass - find the largest common subclass of A and B. Return
/// NULL if there is no common subclass.
const TargetRegisterClass *
getCommonSubClass(const TargetRegisterClass *A,
const TargetRegisterClass *B) const;
/// getPointerRegClass - Returns a TargetRegisterClass used for pointer
/// values. If a target supports multiple different pointer register classes,
/// kind specifies which one is indicated.
virtual const TargetRegisterClass *
getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const {
llvm_unreachable("Target didn't implement getPointerRegClass!");
}
/// getCrossCopyRegClass - Returns a legal register class to copy a register
/// in the specified class to or from. If it is possible to copy the register
/// directly without using a cross register class copy, return the specified
/// RC. Returns NULL if it is not possible to copy between two registers of
/// the specified class.
virtual const TargetRegisterClass *
getCrossCopyRegClass(const TargetRegisterClass *RC) const {
return RC;
}
/// getLargestLegalSuperClass - Returns the largest super class of RC that is
/// legal to use in the current sub-target and has the same spill size.
/// The returned register class can be used to create virtual registers which
/// means that all its registers can be copied and spilled.
virtual const TargetRegisterClass *
getLargestLegalSuperClass(const TargetRegisterClass *RC,
const MachineFunction &) const {
/// The default implementation is very conservative and doesn't allow the
/// register allocator to inflate register classes.
return RC;
}
/// getRegPressureLimit - Return the register pressure "high water mark" for
/// the specific register class. The scheduler is in high register pressure
/// mode (for the specific register class) if it goes over the limit.
///
/// Note: this is the old register pressure model that relies on a manually
/// specified representative register class per value type.
virtual unsigned getRegPressureLimit(const TargetRegisterClass *RC,
MachineFunction &MF) const {
return 0;
}
/// Get the weight in units of pressure for this register class.
virtual const RegClassWeight &getRegClassWeight(
const TargetRegisterClass *RC) const = 0;
/// Get the weight in units of pressure for this register unit.
virtual unsigned getRegUnitWeight(unsigned RegUnit) const = 0;
/// Get the number of dimensions of register pressure.
virtual unsigned getNumRegPressureSets() const = 0;
/// Get the name of this register unit pressure set.
virtual const char *getRegPressureSetName(unsigned Idx) const = 0;
/// Get the register unit pressure limit for this dimension.
/// This limit must be adjusted dynamically for reserved registers.
virtual unsigned getRegPressureSetLimit(const MachineFunction &MF,
unsigned Idx) const = 0;
/// Get the dimensions of register pressure impacted by this register class.
/// Returns a -1 terminated array of pressure set IDs.
virtual const int *getRegClassPressureSets(
const TargetRegisterClass *RC) const = 0;
/// Get the dimensions of register pressure impacted by this register unit.
/// Returns a -1 terminated array of pressure set IDs.
virtual const int *getRegUnitPressureSets(unsigned RegUnit) const = 0;
/// Get a list of 'hint' registers that the register allocator should try
/// first when allocating a physical register for the virtual register
/// VirtReg. These registers are effectively moved to the front of the
/// allocation order.
///
/// The Order argument is the allocation order for VirtReg's register class
/// as returned from RegisterClassInfo::getOrder(). The hint registers must
/// come from Order, and they must not be reserved.
///
/// The default implementation of this function can resolve
/// target-independent hints provided to MRI::setRegAllocationHint with
/// HintType == 0. Targets that override this function should defer to the
/// default implementation if they have no reason to change the allocation
/// order for VirtReg. There may be target-independent hints.
virtual void getRegAllocationHints(unsigned VirtReg,
ArrayRef<MCPhysReg> Order,
SmallVectorImpl<MCPhysReg> &Hints,
const MachineFunction &MF,
const VirtRegMap *VRM = nullptr) const;
/// updateRegAllocHint - A callback to allow the target a chance to update
/// register allocation hints when a register is "changed" (e.g. coalesced)
/// to another register. For example, on ARM some virtual registers should
/// target register pairs: if one of the pair is coalesced to another register,
/// the allocation hint of the other half of the pair should be changed to
/// point to the new register.
virtual void updateRegAllocHint(unsigned Reg, unsigned NewReg,
MachineFunction &MF) const {
// Do nothing.
}
/// Allow the target to reverse allocation order of local live ranges. This
/// will generally allocate shorter local live ranges first. For targets with
/// many registers, this could reduce regalloc compile time by a large
/// factor. It is disabled by default for three reasons:
/// (1) Top-down allocation is simpler and easier to debug for targets that
/// don't benefit from reversing the order.
/// (2) Bottom-up allocation could result in poor eviction decisions on some
/// targets affecting the performance of compiled code.
/// (3) Bottom-up allocation is no longer guaranteed to optimally color.
virtual bool reverseLocalAssignment() const { return false; }
/// Allow the target to override the cost of using a callee-saved register for
/// the first time. Default value of 0 means we will use a callee-saved
/// register if it is available.
virtual unsigned getCSRFirstUseCost() const { return 0; }
/// requiresRegisterScavenging - returns true if the target requires (and can
/// make use of) the register scavenger.
virtual bool requiresRegisterScavenging(const MachineFunction &MF) const {
return false;
}
/// useFPForScavengingIndex - returns true if the target wants to use
/// frame pointer based accesses to spill to the scavenger emergency spill
/// slot.
virtual bool useFPForScavengingIndex(const MachineFunction &MF) const {
return true;
}
/// requiresFrameIndexScavenging - returns true if the target requires post
/// PEI scavenging of registers for materializing frame index constants.
virtual bool requiresFrameIndexScavenging(const MachineFunction &MF) const {
return false;
}
/// requiresVirtualBaseRegisters - Returns true if the target wants the
/// LocalStackAllocation pass to be run and virtual base registers
/// used for more efficient stack access.
virtual bool requiresVirtualBaseRegisters(const MachineFunction &MF) const {
return false;
}
/// hasReservedSpillSlot - Return true if target has reserved a spill slot in
/// the stack frame of the given function for the specified register. e.g. On
/// x86, if the frame register is required, the first fixed stack object is
/// reserved as its spill slot. This tells PEI not to create a new stack frame
/// object for the given register. It should be called only after
/// processFunctionBeforeCalleeSavedScan().
virtual bool hasReservedSpillSlot(const MachineFunction &MF, unsigned Reg,
int &FrameIdx) const {
return false;
}
/// trackLivenessAfterRegAlloc - returns true if the live-ins should be tracked
/// after register allocation.
virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
return false;
}
/// needsStackRealignment - true if storage within the function requires the
/// stack pointer to be aligned more than the normal calling convention calls
/// for.
virtual bool needsStackRealignment(const MachineFunction &MF) const {
return false;
}
/// getFrameIndexInstrOffset - Get the offset from the referenced frame
/// index in the instruction, if there is one.
virtual int64_t getFrameIndexInstrOffset(const MachineInstr *MI,
int Idx) const {
return 0;
}
/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
virtual bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
return false;
}
/// materializeFrameBaseRegister - Insert defining instruction(s) for
/// BaseReg to be a pointer to FrameIdx before insertion point I.
virtual void materializeFrameBaseRegister(MachineBasicBlock *MBB,
unsigned BaseReg, int FrameIdx,
int64_t Offset) const {
llvm_unreachable("materializeFrameBaseRegister does not exist on this "
"target");
}
/// resolveFrameIndex - Resolve a frame index operand of an instruction
/// to reference the indicated base register plus offset instead.
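///
/// Together with needsFrameBaseReg() and materializeFrameBaseRegister(), this
/// supports a flow roughly like the following (an illustrative sketch only;
/// Entry, BaseReg, FrameIdx, Offset and Delta are assumed names, and the real
/// LocalStackSlotAllocation pass handles the offset bookkeeping):
/// \code
///   if (TRI->needsFrameBaseReg(MI, Offset)) {
///     TRI->materializeFrameBaseRegister(Entry, BaseReg, FrameIdx, Offset);
///     TRI->resolveFrameIndex(*MI, BaseReg, Delta); // Delta: remaining offset
///   }
/// \endcode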
virtual void resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
int64_t Offset) const {
llvm_unreachable("resolveFrameIndex does not exist on this target");
}
/// isFrameOffsetLegal - Determine whether a given base register plus offset
/// immediate is encodable to resolve a frame index.
virtual bool isFrameOffsetLegal(const MachineInstr *MI, unsigned BaseReg,
int64_t Offset) const {
llvm_unreachable("isFrameOffsetLegal does not exist on this target");
}
/// saveScavengerRegister - Spill the register so it can be used by the
/// register scavenger. Return true if the register was spilled, false
/// otherwise. If this function does not spill the register, the scavenger
/// will instead spill it to the emergency spill slot.
///
virtual bool saveScavengerRegister(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
MachineBasicBlock::iterator &UseMI,
const TargetRegisterClass *RC,
unsigned Reg) const {
return false;
}
/// eliminateFrameIndex - This method must be overridden to eliminate abstract
/// frame indices from instructions which may use them. The instruction
/// referenced by the iterator contains an MO_FrameIndex operand which must be
/// eliminated by this method. This method may modify or replace the
/// specified instruction, as long as it keeps the iterator pointing at the
/// finished product. SPAdj is the SP adjustment due to call frame setup
/// instruction. FIOperandNum is the FI operand number.
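///
/// A minimal sketch of a typical override (illustrative only; operand layout
/// and offset folding are target-specific):
/// \code
///   MachineInstr &Inst = *MI;
///   MachineFunction &MF = *Inst.getParent()->getParent();
///   const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
///   int FI = Inst.getOperand(FIOperandNum).getIndex();
///   unsigned FrameReg;
///   int Offset = TFI->getFrameIndexReference(MF, FI, FrameReg);
///   Inst.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*isDef=*/false);
///   Inst.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
/// \endcode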
virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI,
int SPAdj, unsigned FIOperandNum,
RegScavenger *RS = nullptr) const = 0;
//===--------------------------------------------------------------------===//
/// Subtarget Hooks
/// \brief SrcRC and DstRC will be morphed into NewRC if this returns true.
virtual bool shouldCoalesce(MachineInstr *MI,
const TargetRegisterClass *SrcRC,
unsigned SubReg,
const TargetRegisterClass *DstRC,
unsigned DstSubReg,
const TargetRegisterClass *NewRC) const
{ return true; }
//===--------------------------------------------------------------------===//
/// Debug information queries.
/// getFrameRegister - This method should return the register used as a base
/// for values allocated in the current stack frame.
virtual unsigned getFrameRegister(const MachineFunction &MF) const = 0;
};
//===----------------------------------------------------------------------===//
// SuperRegClassIterator
//===----------------------------------------------------------------------===//
//
// Iterate over the possible super-registers for a given register class. The
// iterator will visit a list of pairs (Idx, Mask) corresponding to the
// possible classes of super-registers.
//
// Each bit mask will have at least one set bit, and each set bit in Mask
// corresponds to a SuperRC such that:
//
// For all Reg in SuperRC: Reg:Idx is in RC.
//
// The iterator can include (0, RC->getSubClassMask()) as the first entry, which
// also satisfies the above requirement, assuming Reg:0 == Reg.
//
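// A minimal usage sketch (illustrative; RC and TRI are assumed to be the
// register class and TargetRegisterInfo of interest):
//
//   for (SuperRegClassIterator It(RC, TRI, /*IncludeSelf=*/true);
//        It.isValid(); ++It) {
//     unsigned SubIdx = It.getSubReg();     // 0 for the IncludeSelf entry.
//     const uint32_t *Mask = It.getMask();  // Classes projecting into RC.
//     // ... use SubIdx / Mask ...
//   }
//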
class SuperRegClassIterator {
const unsigned RCMaskWords;
unsigned SubReg;
const uint16_t *Idx;
const uint32_t *Mask;
public:
/// Create a SuperRegClassIterator that visits all the super-register classes
/// of RC. When IncludeSelf is set, also include the (0, sub-classes) entry.
SuperRegClassIterator(const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI,
bool IncludeSelf = false)
: RCMaskWords((TRI->getNumRegClasses() + 31) / 32),
SubReg(0),
Idx(RC->getSuperRegIndices()),
Mask(RC->getSubClassMask()) {
if (!IncludeSelf)
++*this;
}
/// Returns true if this iterator is still pointing at a valid entry.
bool isValid() const { return Idx; }
/// Returns the current sub-register index.
unsigned getSubReg() const { return SubReg; }
/// Returns the bit mask of register classes that getSubReg() projects into
/// RC.
const uint32_t *getMask() const { return Mask; }
/// Advance iterator to the next entry.
void operator++() {
assert(isValid() && "Cannot move iterator past end.");
Mask += RCMaskWords;
SubReg = *Idx++;
if (!SubReg)
Idx = nullptr;
}
};
// This is useful when building IndexedMaps keyed on virtual registers
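// (e.g., an illustrative sketch assuming an in-scope MachineRegisterInfo MRI
// and llvm/ADT/IndexedMap.h):
//
//   IndexedMap<unsigned, VirtReg2IndexFunctor> Weights;
//   Weights.resize(MRI.getNumVirtRegs());
//   Weights[VirtReg] = ...; // indexed directly by the virtual register number
//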
struct VirtReg2IndexFunctor {
unsigned operator()(unsigned Reg) const {
return TargetRegisterInfo::virtReg2Index(Reg);
}
};
/// PrintReg - Helper class for printing registers on a raw_ostream.
/// Prints virtual and physical registers with or without a TRI instance.
///
/// The format is:
/// %noreg - NoRegister
/// %vreg5 - a virtual register.
/// %vreg5:sub_8bit - a virtual register with sub-register index (with TRI).
/// %EAX - a physical register
/// %physreg17 - a physical register when no TRI instance given.
///
/// Usage: OS << PrintReg(Reg, TRI) << '\n';
///
class PrintReg {
const TargetRegisterInfo *TRI;
unsigned Reg;
unsigned SubIdx;
public:
explicit PrintReg(unsigned reg, const TargetRegisterInfo *tri = nullptr,
unsigned subidx = 0)
: TRI(tri), Reg(reg), SubIdx(subidx) {}
void print(raw_ostream&) const;
};
static inline raw_ostream &operator<<(raw_ostream &OS, const PrintReg &PR) {
PR.print(OS);
return OS;
}
/// PrintRegUnit - Helper class for printing register units on a raw_ostream.
///
/// Register units are named after their root registers:
///
/// AL - Single root.
/// FP0~ST7 - Dual roots.
///
/// Usage: OS << PrintRegUnit(Unit, TRI) << '\n';
///
class PrintRegUnit {
protected:
const TargetRegisterInfo *TRI;
unsigned Unit;
public:
PrintRegUnit(unsigned unit, const TargetRegisterInfo *tri)
: TRI(tri), Unit(unit) {}
void print(raw_ostream&) const;
};
static inline raw_ostream &operator<<(raw_ostream &OS, const PrintRegUnit &PR) {
PR.print(OS);
return OS;
}
/// PrintVRegOrUnit - It is often convenient to track virtual registers and
/// physical register units in the same list.
class PrintVRegOrUnit : protected PrintRegUnit {
public:
PrintVRegOrUnit(unsigned VRegOrUnit, const TargetRegisterInfo *tri)
: PrintRegUnit(VRegOrUnit, tri) {}
void print(raw_ostream&) const;
};
static inline raw_ostream &operator<<(raw_ostream &OS,
const PrintVRegOrUnit &PR) {
PR.print(OS);
return OS;
}
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Target/TargetOpcodes.h | //===-- llvm/Target/TargetOpcodes.h - Target Indep Opcodes ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the target independent instruction opcodes.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TARGET_TARGETOPCODES_H
#define LLVM_TARGET_TARGETOPCODES_H
namespace llvm {
/// Invariant opcodes: All instruction sets have these as their low opcodes.
///
/// Every instruction defined here must also appear in Target.td and the order
/// must be the same as in CodeGenTarget.cpp.
///
namespace TargetOpcode {
enum {
PHI = 0,
INLINEASM = 1,
CFI_INSTRUCTION = 2,
EH_LABEL = 3,
GC_LABEL = 4,
/// KILL - This instruction is a noop that is used only to adjust the
/// liveness of registers. This can be useful when dealing with
/// sub-registers.
KILL = 5,
/// EXTRACT_SUBREG - This instruction takes two operands: a register
/// that has subregisters, and a subregister index. It returns the
/// extracted subregister value. This is commonly used to implement
/// truncation operations on target architectures which support it.
EXTRACT_SUBREG = 6,
/// INSERT_SUBREG - This instruction takes three operands: a register that
/// has subregisters, a register providing an insert value, and a
/// subregister index. It returns the value of the first register with the
/// value of the second register inserted. The first register is often
/// defined by an IMPLICIT_DEF, because it is commonly used to implement
/// anyext operations on target architectures which support it.
INSERT_SUBREG = 7,
/// IMPLICIT_DEF - This is the MachineInstr-level equivalent of undef.
IMPLICIT_DEF = 8,
/// SUBREG_TO_REG - This instruction is similar to INSERT_SUBREG except that
/// the first operand is an immediate integer constant. This constant is
/// often zero, because it is commonly used to assert that the instruction
/// defining the register implicitly clears the high bits.
SUBREG_TO_REG = 9,
/// COPY_TO_REGCLASS - This instruction is a placeholder for a plain
/// register-to-register copy into a specific register class. This is only
/// used between instruction selection and MachineInstr creation, before
/// virtual registers have been created for all the instructions, and it's
/// only needed in cases where the register classes implied by the
/// instructions are insufficient. It is emitted as a COPY MachineInstr.
COPY_TO_REGCLASS = 10,
/// DBG_VALUE - a mapping of the llvm.dbg.value intrinsic
DBG_VALUE = 11,
/// REG_SEQUENCE - This variadic instruction is used to form a register that
/// represents a consecutive sequence of sub-registers. It's used as a
/// register coalescing / allocation aid and must be eliminated before code
/// emission.
// In SDNode form, the first operand encodes the register class created by
// the REG_SEQUENCE, while each subsequent pair names a vreg + subreg index
// pair. Once it has been lowered to a MachineInstr, the regclass operand
// is no longer present.
/// e.g. v1027 = REG_SEQUENCE v1024, 3, v1025, 4, v1026, 5
/// After register coalescing, references of v1024 should be replaced with
/// v1027:3, v1025 with v1027:4, etc.
REG_SEQUENCE = 12,
/// COPY - Target-independent register copy. This instruction can also be
/// used to copy between subregisters of virtual registers.
COPY = 13,
/// BUNDLE - This instruction represents an instruction bundle. Instructions
/// which immediately follow a BUNDLE instruction which are marked with
/// 'InsideBundle' flag are inside the bundle.
BUNDLE = 14,
/// Lifetime markers.
LIFETIME_START = 15,
LIFETIME_END = 16,
/// A Stackmap instruction captures the location of live variables at its
/// position in the instruction stream. It is followed by a shadow of bytes
/// that must lie within the function and not contain another stackmap.
STACKMAP = 17,
/// Patchable call instruction - this instruction represents a call to a
/// constant address, followed by a series of NOPs. It is intended to
/// support optimizations for dynamic languages (such as javascript) that
/// rewrite calls to runtimes with more efficient code sequences.
/// This also implies a stack map.
PATCHPOINT = 18,
/// This pseudo-instruction loads the stack guard value. Targets which need
/// to prevent the stack guard value or address from being spilled to the
/// stack should override TargetLowering::emitLoadStackGuardNode and
/// additionally expand this pseudo after register allocation.
LOAD_STACK_GUARD = 19,
/// Call instruction with associated vm state for deoptimization and list
/// of live pointers for relocation by the garbage collector. It is
/// intended to support garbage collection with fully precise relocating
/// collectors and deoptimizations in either the callee or caller.
STATEPOINT = 20,
/// Instruction that records the offset of a local stack allocation passed to
/// llvm.localescape. It has two arguments: the symbol for the label and the
/// frame index of the local stack allocation.
LOCAL_ESCAPE = 21,
/// Loading instruction that may page fault, bundled with associated
/// information on how to handle such a page fault. It is intended to support
/// "zero cost" null checks in managed languages by allowing LLVM to fold
/// comparisons into existing memory operations.
FAULTING_LOAD_OP = 22,
};
} // end namespace TargetOpcode
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Target/TargetFrameLowering.h | //===-- llvm/Target/TargetFrameLowering.h ---------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Interface to describe the layout of a stack frame on the target machine.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TARGET_TARGETFRAMELOWERING_H
#define LLVM_TARGET_TARGETFRAMELOWERING_H
#include "llvm/CodeGen/MachineBasicBlock.h"
#include <utility>
#include <vector>
namespace llvm {
class BitVector;
class CalleeSavedInfo;
class MachineFunction;
class RegScavenger;
/// Information about stack frame layout on the target. It holds the direction
/// of stack growth, the known stack alignment on entry to each function, and
/// the offset to the locals area.
///
/// The offset to the local area is the offset from the stack pointer on
/// function entry to the first location where function data (local variables,
/// spill locations) can be stored.
class TargetFrameLowering {
public:
enum StackDirection {
StackGrowsUp, // Adding to the stack increases the stack address
StackGrowsDown // Adding to the stack decreases the stack address
};
// Maps a callee saved register to a stack slot with a fixed offset.
struct SpillSlot {
unsigned Reg;
int Offset; // Offset relative to stack pointer on function entry.
};
private:
StackDirection StackDir;
unsigned StackAlignment;
unsigned TransientStackAlignment;
int LocalAreaOffset;
bool StackRealignable;
public:
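/// For example (an illustrative sketch; "MyTargetFrameLowering" is a
/// hypothetical subclass), a target with a downward-growing, 16-byte aligned
/// stack whose local area starts at the incoming stack pointer could use:
/// \code
///   MyTargetFrameLowering() : TargetFrameLowering(StackGrowsDown, 16, 0) {}
/// \endcode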
TargetFrameLowering(StackDirection D, unsigned StackAl, int LAO,
unsigned TransAl = 1, bool StackReal = true)
: StackDir(D), StackAlignment(StackAl), TransientStackAlignment(TransAl),
LocalAreaOffset(LAO), StackRealignable(StackReal) {}
virtual ~TargetFrameLowering();
// These methods return information that describes the abstract stack layout
// of the target machine.
/// getStackGrowthDirection - Return the direction the stack grows
///
StackDirection getStackGrowthDirection() const { return StackDir; }
/// getStackAlignment - This method returns the number of bytes to which the
/// stack pointer must be aligned on entry to a function. Typically, this
/// is the largest alignment for any data object in the target.
///
unsigned getStackAlignment() const { return StackAlignment; }
/// getTransientStackAlignment - This method returns the number of bytes to
/// which the stack pointer must be aligned at all times, even between
/// calls.
///
unsigned getTransientStackAlignment() const {
return TransientStackAlignment;
}
/// isStackRealignable - This method returns whether the stack can be
/// realigned.
bool isStackRealignable() const {
return StackRealignable;
}
/// getOffsetOfLocalArea - This method returns the offset of the local area
/// from the stack pointer on entrance to a function.
///
int getOffsetOfLocalArea() const { return LocalAreaOffset; }
/// isFPCloseToIncomingSP - Return true if the frame pointer is close to
/// the incoming stack pointer, false if it is close to the post-prologue
/// stack pointer.
virtual bool isFPCloseToIncomingSP() const { return true; }
/// assignCalleeSavedSpillSlots - Allows target to override spill slot
/// assignment logic. If implemented, assignCalleeSavedSpillSlots() should
/// assign frame slots to all CSI entries and return true. If this method
/// returns false, spill slots will be assigned using generic implementation.
/// assignCalleeSavedSpillSlots() may add, delete or rearrange elements of
/// CSI.
virtual bool
assignCalleeSavedSpillSlots(MachineFunction &MF,
const TargetRegisterInfo *TRI,
std::vector<CalleeSavedInfo> &CSI) const {
return false;
}
/// getCalleeSavedSpillSlots - This method returns a pointer to an array of
/// pairs, that contains an entry for each callee saved register that must be
/// spilled to a particular stack location if it is spilled.
///
/// Each entry in this array contains a <register,offset> pair, indicating the
/// fixed offset from the incoming stack pointer that each register should be
/// spilled at. If a register is not listed here, the code generator is
/// allowed to spill it anywhere it chooses.
///
virtual const SpillSlot *
getCalleeSavedSpillSlots(unsigned &NumEntries) const {
NumEntries = 0;
return nullptr;
}
/// targetHandlesStackFrameRounding - Returns true if the target is
/// responsible for rounding up the stack frame (probably at emitPrologue
/// time).
virtual bool targetHandlesStackFrameRounding() const {
return false;
}
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
virtual void emitPrologue(MachineFunction &MF,
MachineBasicBlock &MBB) const = 0;
virtual void emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const = 0;
/// Adjust the prologue to have the function use segmented stacks. This works
/// by adding a check even before the "normal" function prologue.
virtual void adjustForSegmentedStacks(MachineFunction &MF,
MachineBasicBlock &PrologueMBB) const {}
/// Adjust the prologue to add Erlang Run-Time System (ERTS) specific code in
/// the assembly prologue to explicitly handle the stack.
virtual void adjustForHiPEPrologue(MachineFunction &MF,
MachineBasicBlock &PrologueMBB) const {}
/// Adjust the prologue to add an allocation at a fixed offset from the frame
/// pointer.
virtual void
adjustForFrameAllocatePrologue(MachineFunction &MF,
MachineBasicBlock &PrologueMBB) const {}
/// spillCalleeSavedRegisters - Issues instruction(s) to spill all callee
/// saved registers and returns true if it isn't possible / profitable to do
/// so by issuing a series of store instructions via storeRegToStackSlot()
/// (i.e. the target has emitted the spill code itself). Returns false
/// otherwise, in which case the generic per-register spill code is used.
virtual bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const {
return false;
}
/// restoreCalleeSavedRegisters - Issues instruction(s) to restore all callee
/// saved registers and returns true if it isn't possible / profitable to do
/// so by issuing a series of load instructions via loadRegFromStackSlot()
/// (i.e. the target has emitted the restore code itself). Returns false
/// otherwise, in which case the generic per-register restore code is used.
virtual bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const {
return false;
}
/// Return true if the target needs to disable frame pointer elimination.
virtual bool noFramePointerElim(const MachineFunction &MF) const;
/// hasFP - Return true if the specified function should have a dedicated
/// frame pointer register. For most targets this is true only if the function
/// has variable sized allocas or if frame pointer elimination is disabled.
virtual bool hasFP(const MachineFunction &MF) const = 0;
/// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
/// not required, we reserve argument space for call sites in the function
/// immediately on entry to the current function. This eliminates the need for
/// add/sub sp brackets around call sites. Returns true if the call frame is
/// included as part of the stack frame.
virtual bool hasReservedCallFrame(const MachineFunction &MF) const {
return !hasFP(MF);
}
/// canSimplifyCallFramePseudos - When possible, it's best to simplify the
/// call frame pseudo ops before doing frame index elimination. This is
/// possible only when frame index references between the pseudos won't
/// need adjusting for the call frame adjustments. Normally, that's true
/// if the function has a reserved call frame or a frame pointer. Some
/// targets (Thumb2, for example) may have more complicated criteria,
/// however, and can override this behavior.
virtual bool canSimplifyCallFramePseudos(const MachineFunction &MF) const {
return hasReservedCallFrame(MF) || hasFP(MF);
}
// needsFrameIndexResolution - Do we need to perform FI resolution for
// this function. Normally, this is required only when the function
// has any stack objects. However, targets may want to override this.
virtual bool needsFrameIndexResolution(const MachineFunction &MF) const;
/// getFrameIndexOffset - Returns the displacement from the frame register to
/// the stack frame of the specified index.
virtual int getFrameIndexOffset(const MachineFunction &MF, int FI) const;
/// getFrameIndexReference - This method should return the base register
/// and offset used to reference a frame index location. The offset is
/// returned directly, and the base register is returned via FrameReg.
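///
/// For example (illustrative; TFI, MF and FI are assumed to be the frame
/// lowering object, machine function and frame index at hand):
/// \code
///   unsigned FrameReg;
///   int Offset = TFI->getFrameIndexReference(MF, FI, FrameReg);
///   // The slot's address is FrameReg + Offset.
/// \endcode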
virtual int getFrameIndexReference(const MachineFunction &MF, int FI,
unsigned &FrameReg) const;
/// Same as above, except that the 'base register' will always be RSP, not
/// RBP on x86. This is used exclusively for lowering STATEPOINT nodes.
/// TODO: This should really be a parameterizable choice.
virtual int getFrameIndexReferenceFromSP(const MachineFunction &MF, int FI,
unsigned &FrameReg) const {
// default to calling normal version, we override this on x86 only
llvm_unreachable("unimplemented for non-x86");
return 0;
}
/// This method determines which of the registers reported by
/// TargetRegisterInfo::getCalleeSavedRegs() should actually get saved.
/// The default implementation populates the \p SavedRegs bitset with
/// all registers which are modified in the function; targets may override
/// this function to save additional registers.
/// This method also sets up the register scavenger ensuring there is a free
/// register or a frameindex available.
virtual void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs,
RegScavenger *RS = nullptr) const;
/// processFunctionBeforeFrameFinalized - This method is called immediately
/// before the specified function's frame layout (MF.getFrameInfo()) is
/// finalized. Once the frame is finalized, MO_FrameIndex operands are
/// replaced with direct constants. This method is optional.
///
virtual void processFunctionBeforeFrameFinalized(MachineFunction &MF,
RegScavenger *RS = nullptr) const {
}
/// eliminateCallFramePseudoInstr - This method is called during prolog/epilog
/// code insertion to eliminate call frame setup and destroy pseudo
/// instructions (but only if the Target is using them). It is responsible
/// for eliminating these instructions, replacing them with concrete
/// instructions. This method need only be implemented if using call frame
/// setup/destroy pseudo instructions.
///
virtual void
eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const {
llvm_unreachable("Call Frame Pseudo Instructions do not exist on this "
"target!");
}
/// Check whether or not the given \p MBB can be used as a prologue
/// for the target.
/// The prologue will be inserted first in this basic block.
/// This method is used by the shrink-wrapping pass to decide if
/// \p MBB will be correctly handled by the target.
/// As soon as the target enables shrink-wrapping without overriding
/// this method, we assume that each basic block is a valid
/// prologue.
virtual bool canUseAsPrologue(const MachineBasicBlock &MBB) const {
return true;
}
/// Check whether or not the given \p MBB can be used as a epilogue
/// for the target.
/// The epilogue will be inserted before the first terminator of that block.
/// This method is used by the shrink-wrapping pass to decide if
/// \p MBB will be correctly handled by the target.
/// As soon as the target enables shrink-wrapping without overriding
/// this method, we assume that each basic block is a valid
/// epilogue.
virtual bool canUseAsEpilogue(const MachineBasicBlock &MBB) const {
return true;
}
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Target/TargetIntrinsicInfo.h | //===-- llvm/Target/TargetIntrinsicInfo.h - Instruction Info ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the target intrinsic instructions to the code generator.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TARGET_TARGETINTRINSICINFO_H
#define LLVM_TARGET_TARGETINTRINSICINFO_H
#include "llvm/Support/Compiler.h"
#include <string>
namespace llvm {
class Function;
class Module;
class Type;
//---------------------------------------------------------------------------
///
/// TargetIntrinsicInfo - Interface to the description of target intrinsic instructions
///
class TargetIntrinsicInfo {
TargetIntrinsicInfo(const TargetIntrinsicInfo &) = delete;
void operator=(const TargetIntrinsicInfo &) = delete;
public:
TargetIntrinsicInfo();
virtual ~TargetIntrinsicInfo();
/// Return the name of a target intrinsic, e.g. "llvm.bfin.ssync".
/// The Tys and numTys parameters are for intrinsics with overloaded types
/// (e.g., those using iAny or fAny). For a declaration for an overloaded
/// intrinsic, Tys should point to an array of numTys pointers to Type,
/// and must provide exactly one type for each overloaded type in the
/// intrinsic.
virtual std::string getName(unsigned IID, Type **Tys = nullptr,
unsigned numTys = 0) const = 0;
/// Look up target intrinsic by name. Return intrinsic ID or 0 for unknown
/// names.
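///
/// For example (an illustrative sketch; TII, Name and M are an assumed
/// TargetIntrinsicInfo pointer, StringRef and Module pointer):
/// \code
///   unsigned IID = TII->lookupName(Name.data(), Name.size());
///   Function *F = nullptr;
///   if (IID && !TII->isOverloaded(IID))
///     F = TII->getDeclaration(M, IID);
/// \endcode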
virtual unsigned lookupName(const char *Name, unsigned Len) const =0;
/// Return the target intrinsic ID of a function, or 0.
virtual unsigned getIntrinsicID(Function *F) const;
/// Returns true if the intrinsic can be overloaded.
virtual bool isOverloaded(unsigned IID) const = 0;
/// Create or insert an LLVM Function declaration for an intrinsic,
/// and return it. The Tys and numTys are for intrinsics with overloaded
/// types. See above for more information.
virtual Function *getDeclaration(Module *M, unsigned ID, Type **Tys = nullptr,
unsigned numTys = 0) const = 0;
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Target/TargetLoweringObjectFile.h | //===-- llvm/Target/TargetLoweringObjectFile.h - Object Info ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements classes used to handle lowerings specific to common
// object file formats.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TARGET_TARGETLOWERINGOBJECTFILE_H
#define LLVM_TARGET_TARGETLOWERINGOBJECTFILE_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/SectionKind.h"
namespace llvm {
class MachineModuleInfo;
class Mangler;
class MCContext;
class MCExpr;
class MCSection;
class MCSymbol;
class MCSymbolRefExpr;
class MCStreamer;
class MCValue;
class ConstantExpr;
class GlobalValue;
class TargetMachine;
class TargetLoweringObjectFile : public MCObjectFileInfo {
MCContext *Ctx;
TargetLoweringObjectFile(
const TargetLoweringObjectFile&) = delete;
void operator=(const TargetLoweringObjectFile&) = delete;
protected:
const DataLayout *DL;
bool SupportIndirectSymViaGOTPCRel;
bool SupportGOTPCRelWithOffset;
public:
MCContext &getContext() const { return *Ctx; }
TargetLoweringObjectFile() : MCObjectFileInfo(), Ctx(nullptr), DL(nullptr),
SupportIndirectSymViaGOTPCRel(false),
SupportGOTPCRelWithOffset(true) {}
virtual ~TargetLoweringObjectFile();
/// This method must be called before any actual lowering is done. This
/// specifies the current context for codegen, and gives the lowering
/// implementations a chance to set up their default sections.
virtual void Initialize(MCContext &ctx, const TargetMachine &TM);
virtual void emitPersonalityValue(MCStreamer &Streamer,
const TargetMachine &TM,
const MCSymbol *Sym) const;
/// Emit the module flags that the platform cares about.
virtual void emitModuleFlags(MCStreamer &Streamer,
ArrayRef<Module::ModuleFlagEntry> Flags,
Mangler &Mang, const TargetMachine &TM) const {}
/// Given a constant with the SectionKind, return a section that it should be
/// placed in.
virtual MCSection *getSectionForConstant(SectionKind Kind,
const Constant *C) const;
/// Classify the specified global variable into a set of target independent
/// categories embodied in SectionKind.
static SectionKind getKindForGlobal(const GlobalValue *GV,
const TargetMachine &TM);
/// This method computes the appropriate section to emit the specified global
/// variable or function definition. This should not be passed external (or
/// available externally) globals.
MCSection *SectionForGlobal(const GlobalValue *GV, SectionKind Kind,
Mangler &Mang, const TargetMachine &TM) const;
/// This method computes the appropriate section to emit the specified global
/// variable or function definition. This should not be passed external (or
/// available externally) globals.
MCSection *SectionForGlobal(const GlobalValue *GV, Mangler &Mang,
const TargetMachine &TM) const {
return SectionForGlobal(GV, getKindForGlobal(GV, TM), Mang, TM);
}
virtual void getNameWithPrefix(SmallVectorImpl<char> &OutName,
const GlobalValue *GV,
bool CannotUsePrivateLabel, Mangler &Mang,
const TargetMachine &TM) const;
virtual MCSection *getSectionForJumpTable(const Function &F, Mangler &Mang,
const TargetMachine &TM) const;
virtual bool shouldPutJumpTableInFunctionSection(bool UsesLabelDifference,
const Function &F) const;
/// Targets should implement this method to assign a section to globals with
/// an explicit section specified. The implementation of this method can
/// assume that GV->hasSection() is true.
virtual MCSection *
getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
Mangler &Mang, const TargetMachine &TM) const = 0;
/// Allow the target to completely override section assignment of a global.
virtual const MCSection *getSpecialCasedSectionGlobals(const GlobalValue *GV,
SectionKind Kind,
Mangler &Mang) const {
return nullptr;
}
/// Return an MCExpr to use for a reference to the specified global variable
/// from exception handling information.
virtual const MCExpr *
getTTypeGlobalReference(const GlobalValue *GV, unsigned Encoding,
Mangler &Mang, const TargetMachine &TM,
MachineModuleInfo *MMI, MCStreamer &Streamer) const;
/// Return the MCSymbol for a private symbol with global value name as its
/// base, with the specified suffix.
MCSymbol *getSymbolWithGlobalValueBase(const GlobalValue *GV,
StringRef Suffix, Mangler &Mang,
const TargetMachine &TM) const;
// The symbol that gets passed to .cfi_personality.
virtual MCSymbol *getCFIPersonalitySymbol(const GlobalValue *GV,
Mangler &Mang,
const TargetMachine &TM,
MachineModuleInfo *MMI) const;
const MCExpr *
getTTypeReference(const MCSymbolRefExpr *Sym, unsigned Encoding,
MCStreamer &Streamer) const;
virtual MCSection *getStaticCtorSection(unsigned Priority,
const MCSymbol *KeySym) const {
return StaticCtorSection;
}
virtual MCSection *getStaticDtorSection(unsigned Priority,
const MCSymbol *KeySym) const {
return StaticDtorSection;
}
/// \brief Create a symbol reference to describe the given TLS variable when
/// emitting the address in debug info.
virtual const MCExpr *getDebugThreadLocalSymbol(const MCSymbol *Sym) const;
virtual const MCExpr *
getExecutableRelativeSymbol(const ConstantExpr *CE, Mangler &Mang,
const TargetMachine &TM) const {
return nullptr;
}
/// \brief Return true if the target supports replacing a data "PC"-relative
/// access to a symbol through another symbol, by accessing the latter via a
/// GOT entry instead.
bool supportIndirectSymViaGOTPCRel() const {
return SupportIndirectSymViaGOTPCRel;
}
/// \brief Return true if the target's GOT "PC"-relative relocation supports
/// encoding an additional binary expression with an offset.
bool supportGOTPCRelWithOffset() const {
return SupportGOTPCRelWithOffset;
}
/// \brief Get the target specific PC relative GOT entry relocation
virtual const MCExpr *getIndirectSymViaGOTPCRel(const MCSymbol *Sym,
const MCValue &MV,
int64_t Offset,
MachineModuleInfo *MMI,
MCStreamer &Streamer) const {
return nullptr;
}
virtual void emitLinkerFlagsForGlobal(raw_ostream &OS, const GlobalValue *GV,
const Mangler &Mang) const {}
protected:
virtual MCSection *SelectSectionForGlobal(const GlobalValue *GV,
SectionKind Kind, Mangler &Mang,
const TargetMachine &TM) const = 0;
};
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Target/TargetSubtargetInfo.h | //==-- llvm/Target/TargetSubtargetInfo.h - Target Information ----*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the subtarget options of a Target machine.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TARGET_TARGETSUBTARGETINFO_H
#define LLVM_TARGET_TARGETSUBTARGETINFO_H
#include "llvm/CodeGen/PBQPRAConstraint.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/CodeGen.h"
namespace llvm {
class DataLayout;
class MachineFunction;
class MachineInstr;
class SDep;
class SUnit;
class TargetFrameLowering;
class TargetInstrInfo;
class TargetLowering;
class TargetRegisterClass;
class TargetRegisterInfo;
class TargetSchedModel;
class TargetSelectionDAGInfo;
struct MachineSchedPolicy;
template <typename T> class SmallVectorImpl;
//===----------------------------------------------------------------------===//
///
/// TargetSubtargetInfo - Generic base class for all target subtargets. All
/// Target-specific options that control code generation and printing should
/// be exposed through a TargetSubtargetInfo-derived class.
///
class TargetSubtargetInfo : public MCSubtargetInfo {
TargetSubtargetInfo(const TargetSubtargetInfo &) = delete;
void operator=(const TargetSubtargetInfo &) = delete;
TargetSubtargetInfo() = delete;
protected: // Can only create subclasses...
TargetSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS,
ArrayRef<SubtargetFeatureKV> PF,
ArrayRef<SubtargetFeatureKV> PD,
const SubtargetInfoKV *ProcSched,
const MCWriteProcResEntry *WPR,
const MCWriteLatencyEntry *WL,
const MCReadAdvanceEntry *RA, const InstrStage *IS,
const unsigned *OC, const unsigned *FP);
public:
// AntiDepBreakMode - Type of anti-dependence breaking that should
// be performed before post-RA scheduling.
typedef enum { ANTIDEP_NONE, ANTIDEP_CRITICAL, ANTIDEP_ALL } AntiDepBreakMode;
typedef SmallVectorImpl<const TargetRegisterClass *> RegClassVector;
virtual ~TargetSubtargetInfo();
// Interfaces to the major aspects of target machine information:
//
// -- Instruction opcode and operand information
// -- Pipelines and scheduling information
// -- Stack frame information
// -- Selection DAG lowering information
//
// N.B. These objects may change during compilation. It's not safe to cache
// them between functions.
virtual const TargetInstrInfo *getInstrInfo() const { return nullptr; }
virtual const TargetFrameLowering *getFrameLowering() const {
return nullptr;
}
virtual const TargetLowering *getTargetLowering() const { return nullptr; }
virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const {
return nullptr;
}
/// getRegisterInfo - If register information is available, return it. If
/// not, return null. This is kept separate from RegInfo until RegInfo has
/// details of graph coloring register allocation removed from it.
///
virtual const TargetRegisterInfo *getRegisterInfo() const { return nullptr; }
/// getInstrItineraryData - Returns instruction itinerary data for the target
/// or specific subtarget.
///
virtual const InstrItineraryData *getInstrItineraryData() const {
return nullptr;
}
/// Resolve a SchedClass at runtime, where SchedClass identifies an
/// MCSchedClassDesc with the isVariant property. This may return the ID of
/// another variant SchedClass, but repeated invocation must quickly terminate
/// in a nonvariant SchedClass.
virtual unsigned resolveSchedClass(unsigned SchedClass,
const MachineInstr *MI,
const TargetSchedModel *SchedModel) const {
return 0;
}
/// \brief True if the subtarget should run MachineScheduler after aggressive
/// coalescing.
///
/// This currently replaces the SelectionDAG scheduler with the "source" order
/// scheduler (though see below for an option to turn this off and use the
/// TargetLowering preference). It does not yet disable the postRA scheduler.
virtual bool enableMachineScheduler() const;
/// \brief True if the machine scheduler should disable the TLI preference
/// for preRA scheduling with the source level scheduler.
virtual bool enableMachineSchedDefaultSched() const { return true; }
/// \brief True if the subtarget should enable joining global copies.
///
/// By default this is enabled if the machine scheduler is enabled, but
/// can be overridden.
virtual bool enableJoinGlobalCopies() const;
/// True if the subtarget should run a scheduler after register allocation.
///
/// By default this queries the PostRAScheduling bit in the scheduling model
/// which is the preferred way to influence this.
virtual bool enablePostRAScheduler() const;
/// \brief True if the subtarget should run the atomic expansion pass.
virtual bool enableAtomicExpand() const;
/// \brief Override generic scheduling policy within a region.
///
/// This is a convenient way for targets that don't provide any custom
/// scheduling heuristics (no custom MachineSchedStrategy) to make
/// changes to the generic scheduling policy.
virtual void overrideSchedPolicy(MachineSchedPolicy &Policy,
MachineInstr *begin, MachineInstr *end,
unsigned NumRegionInstrs) const {}
// \brief Perform target specific adjustments to the latency of a schedule
// dependency.
virtual void adjustSchedDependency(SUnit *def, SUnit *use, SDep &dep) const {}
// For use with PostRAScheduling: get the anti-dependence breaking that should
// be performed before post-RA scheduling.
virtual AntiDepBreakMode getAntiDepBreakMode() const { return ANTIDEP_NONE; }
// For use with PostRAScheduling: in CriticalPathRCs, return any register
// classes that should only be considered for anti-dependence breaking if they
// are on the critical path.
virtual void getCriticalPathRCs(RegClassVector &CriticalPathRCs) const {
return CriticalPathRCs.clear();
}
// For use with PostRAScheduling: get the minimum optimization level needed
// to enable post-RA scheduling.
virtual CodeGenOpt::Level getOptLevelToEnablePostRAScheduler() const {
return CodeGenOpt::Default;
}
/// \brief True if the subtarget should run the local reassignment
/// heuristic of the register allocator.
/// This heuristic may be compile time intensive, \p OptLevel provides
/// a finer grain to tune the register allocator.
virtual bool enableRALocalReassignment(CodeGenOpt::Level OptLevel) const;
/// \brief Enable use of alias analysis during code generation (during MI
/// scheduling, DAGCombine, etc.).
virtual bool useAA() const;
/// \brief Enable the use of the early if conversion pass.
virtual bool enableEarlyIfConversion() const { return false; }
/// \brief Return PBQPConstraint(s) for the target.
///
/// Override to provide custom PBQP constraints.
virtual std::unique_ptr<PBQPRAConstraint> getCustomPBQPConstraints() const {
return nullptr;
}
/// Enable tracking of subregister liveness in register allocator.
virtual bool enableSubRegLiveness() const { return false; }
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Target/TargetLowering.h | //===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file describes how to lower LLVM code to machine code. This has three
/// main components:
///
/// 1. Which ValueTypes are natively supported by the target.
/// 2. Which operations are supported for supported ValueTypes.
/// 3. Cost thresholds for alternative implementations of certain operations.
///
/// In addition it has a few other components, like information about FP
/// immediates.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_TARGET_TARGETLOWERING_H
#define LLVM_TARGET_TARGETLOWERING_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Target/TargetCallingConv.h"
#include "llvm/Target/TargetMachine.h"
#include <climits>
#include <map>
#include <vector>
namespace llvm {
class CallInst;
class CCState;
class FastISel;
class FunctionLoweringInfo;
class ImmutableCallSite;
class IntrinsicInst;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineJumpTableInfo;
class MachineLoop;
class Mangler;
class MCContext;
class MCExpr;
class MCSymbol;
template<typename T> class SmallVectorImpl;
class DataLayout;
class TargetRegisterClass;
class TargetLibraryInfo;
class TargetLoweringObjectFile;
class Value;
namespace Sched {
enum Preference {
None, // No preference
Source, // Follow source order.
RegPressure, // Scheduling for lowest register pressure.
Hybrid, // Scheduling for both latency and register pressure.
ILP, // Scheduling for ILP in low register pressure mode.
VLIW // Scheduling for VLIW targets.
};
}
/// This base class for TargetLowering contains the SelectionDAG-independent
/// parts that can be used from the rest of CodeGen.
class TargetLoweringBase {
TargetLoweringBase(const TargetLoweringBase&) = delete;
void operator=(const TargetLoweringBase&) = delete;
public:
/// This enum indicates whether operations are valid for a target, and if not,
/// what action should be used to make them valid.
enum LegalizeAction {
Legal, // The target natively supports this operation.
Promote, // This operation should be executed in a larger type.
Expand, // Try to expand this to other ops, otherwise use a libcall.
Custom // Use the LowerOperation hook to implement custom lowering.
};
/// This enum indicates whether types are legal for a target, and if not,
/// what action should be used to make them valid.
enum LegalizeTypeAction {
TypeLegal, // The target natively supports this type.
TypePromoteInteger, // Replace this integer with a larger one.
TypeExpandInteger, // Split this integer into two of half the size.
TypeSoftenFloat, // Convert this float to a same size integer type.
TypeExpandFloat, // Split this float into two of half the size.
TypeScalarizeVector, // Replace this one-element vector with its element.
TypeSplitVector, // Split this vector into two of half the size.
TypeWidenVector, // This vector should be widened into a larger vector.
TypePromoteFloat // Replace this float with a larger one.
};
/// LegalizeKind holds the legalization kind that needs to happen to EVT
/// in order to type-legalize it.
typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind;
/// Enum that describes how the target represents true/false values.
enum BooleanContent {
UndefinedBooleanContent, // Only bit 0 counts, the rest can hold garbage.
ZeroOrOneBooleanContent, // All bits zero except for bit 0.
ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
};
/// Enum that describes what type of support for selects the target has.
enum SelectSupportKind {
ScalarValSelect, // The target supports scalar selects (ex: cmov).
ScalarCondVectorVal, // The target supports selects with a scalar condition
// and vector values (ex: cmov).
VectorMaskSelect // The target supports vector selects with a vector
// mask (ex: x86 blends).
};
/// Enum that specifies what an AtomicRMWInst is expanded to, if at all. Exists
/// because different targets have different levels of support for these
/// atomic RMW instructions, and also have different options w.r.t. what they
/// should expand to.
enum class AtomicRMWExpansionKind {
None, // Don't expand the instruction.
LLSC, // Expand the instruction into loadlinked/storeconditional; used
// by ARM/AArch64. Implies `hasLoadLinkedStoreConditional`
// returns true.
CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
};
static ISD::NodeType getExtendForContent(BooleanContent Content) {
switch (Content) {
case UndefinedBooleanContent:
// Extend by adding rubbish bits.
return ISD::ANY_EXTEND;
case ZeroOrOneBooleanContent:
// Extend by adding zero bits.
return ISD::ZERO_EXTEND;
case ZeroOrNegativeOneBooleanContent:
// Extend by copying the sign bit.
return ISD::SIGN_EXTEND;
}
llvm_unreachable("Invalid content kind");
}
/// NOTE: The TargetMachine owns TLOF.
explicit TargetLoweringBase(const TargetMachine &TM);
virtual ~TargetLoweringBase() {}
protected:
/// \brief Initialize all of the actions to default values.
void initActions();
public:
const TargetMachine &getTargetMachine() const { return TM; }
virtual bool useSoftFloat() const { return false; }
/// Return the pointer type for the given address space, defaults to
/// the pointer type from the data layout.
/// FIXME: The default needs to be removed once all the code is updated.
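/// For example (illustrative, assuming a SelectionDAG named DAG is in scope):
/// \code
///   MVT PtrVT = getPointerTy(DAG.getDataLayout());
/// \endcode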
MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
}
/// EVT is not used in-tree, but is used by out-of-tree targets.
/// Documentation for this function would be nice...
virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const;
/// Returns the type to be used for the index operand of:
/// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
/// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
virtual MVT getVectorIdxTy(const DataLayout &DL) const {
return getPointerTy(DL);
}
/// Return true if the select operation is expensive for this target.
bool isSelectExpensive() const { return SelectIsExpensive; }
virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
return true;
}
/// Return true if multiple condition registers are available.
bool hasMultipleConditionRegisters() const {
return HasMultipleConditionRegisters;
}
/// Return true if the target has BitExtract instructions.
bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }
/// Return the preferred vector type legalization action.
virtual TargetLoweringBase::LegalizeTypeAction
getPreferredVectorAction(EVT VT) const {
// The default action for one element vectors is to scalarize
if (VT.getVectorNumElements() == 1)
return TypeScalarizeVector;
// The default action for other vectors is to promote
return TypePromoteInteger;
}
// There are two general methods for expanding a BUILD_VECTOR node:
// 1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
// them together.
// 2. Build the vector on the stack and then load it.
// If this function returns true, then method (1) will be used, subject to
// the constraint that all of the necessary shuffles are legal (as determined
// by isShuffleMaskLegal). If this function returns false, then method (2) is
// always used. The vector type, and the number of defined values, are
// provided.
virtual bool
shouldExpandBuildVectorWithShuffles(EVT /* VT */,
unsigned DefinedValues) const {
return DefinedValues < 3;
}
/// Return true if integer divide is usually cheaper than a sequence of
/// several shifts, adds, and multiplies for this target.
bool isIntDivCheap() const { return IntDivIsCheap; }
/// Return true if sqrt(x) is as cheap or cheaper than 1 / rsqrt(x)
bool isFsqrtCheap() const {
return FsqrtIsCheap;
}
/// Returns true if target has indicated at least one type should be bypassed.
bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }
/// Returns map of slow types for division or remainder with corresponding
/// fast types
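///
/// For example (an illustrative sketch), a target where 64-bit division is
/// slow can register a 32-bit bypass in its TargetLowering constructor:
/// \code
///   addBypassSlowDiv(64, 32); // try a 32-bit divide when the operands fit
/// \endcode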
const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
return BypassSlowDivWidths;
}
/// Return true if pow2 sdiv is cheaper than a chain of sra/srl/add/sra.
bool isPow2SDivCheap() const { return Pow2SDivIsCheap; }
/// Return true if Flow Control is an expensive operation that should be
/// avoided.
bool isJumpExpensive() const { return JumpIsExpensive; }
/// Return true if selects are only cheaper than branches if the branch is
/// unlikely to be predicted right.
bool isPredictableSelectExpensive() const {
return PredictableSelectIsExpensive;
}
/// isLoadBitCastBeneficial() - Return true if the following transform
/// is beneficial.
/// fold (conv (load x)) -> (load (conv*)x)
/// On architectures that don't natively support some vector loads
/// efficiently, casting the load to a smaller vector of larger types and
/// loading is more efficient, however, this can be undone by optimizations in
/// dag combiner.
virtual bool isLoadBitCastBeneficial(EVT /* Load */,
EVT /* Bitcast */) const {
return true;
}
/// Return true if it is expected to be cheaper to do a store of a non-zero
/// vector constant with the given size and type for the address space than to
/// store the individual scalar element constants.
virtual bool storeOfVectorConstantIsCheap(EVT MemVT,
unsigned NumElem,
unsigned AddrSpace) const {
return false;
}
/// \brief Return true if it is cheap to speculate a call to intrinsic cttz.
virtual bool isCheapToSpeculateCttz() const {
return false;
}
/// \brief Return true if it is cheap to speculate a call to intrinsic ctlz.
virtual bool isCheapToSpeculateCtlz() const {
return false;
}
/// \brief Return if the target supports combining a
/// chain like:
/// \code
/// %andResult = and %val1, #imm-with-one-bit-set;
/// %icmpResult = icmp %andResult, 0
/// br i1 %icmpResult, label %dest1, label %dest2
/// \endcode
/// into a single machine instruction of a form like:
/// \code
/// brOnBitSet %register, #bitNumber, dest
/// \endcode
bool isMaskAndBranchFoldingLegal() const {
return MaskAndBranchFoldingIsLegal;
}
/// \brief Return true if the target wants to use the optimization that
/// turns ext(promotableInst1(...(promotableInstN(load)))) into
/// promotedInst1(...(promotedInstN(ext(load)))).
bool enableExtLdPromotion() const { return EnableExtLdPromotion; }
/// Return true if the target can combine store(extractelement VectorTy,
/// Idx).
/// \p Cost[out] gives the cost of that transformation when this is true.
virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
unsigned &Cost) const {
return false;
}
/// Return true if target supports floating point exceptions.
bool hasFloatingPointExceptions() const {
return HasFloatingPointExceptions;
}
/// Return true if the target always benefits from combining into FMA for a
/// given value type. This must typically return false on targets where FMA
/// takes more cycles to execute than FADD.
virtual bool enableAggressiveFMAFusion(EVT VT) const {
return false;
}
/// Return the ValueType of the result of SETCC operations.
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
EVT VT) const;
/// Return the ValueType for comparison libcalls. Comparison libcalls include
/// floating point comparison calls, and Ordered/Unordered check calls on
/// floating point numbers.
virtual
MVT::SimpleValueType getCmpLibcallReturnType() const;
/// For targets without i1 registers, this gives the nature of the high-bits
/// of boolean values held in types wider than i1.
///
/// "Boolean values" are special true/false values produced by nodes like
/// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
/// Not to be confused with general values promoted from i1. Some cpus
/// distinguish between vectors of boolean and scalars; the isVec parameter
/// selects between the two kinds. For example on X86 a scalar boolean should
/// be zero extended from i1, while the elements of a vector of booleans
/// should be sign extended from i1.
///
/// Some cpus also treat floating point types the same way as they treat
/// vectors instead of the way they treat scalars.
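///
/// For example (illustrative; VT is an assumed EVT), code that needs to know
/// whether a "true" value of type VT is materialized as all-ones can check:
/// \code
///   bool TrueIsAllOnes =
///       getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent;
/// \endcode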
BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
if (isVec)
return BooleanVectorContents;
return isFloat ? BooleanFloatContents : BooleanContents;
}
BooleanContent getBooleanContents(EVT Type) const {
return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
}
/// Return target scheduling preference.
Sched::Preference getSchedulingPreference() const {
return SchedPreferenceInfo;
}
/// Some schedulers, e.g. hybrid, can switch to different scheduling heuristics
/// for different nodes. This function returns the preference (or none) for
/// the given node.
virtual Sched::Preference getSchedulingPreference(SDNode *) const {
return Sched::None;
}
/// Return the register class that should be used for the specified value
/// type.
virtual const TargetRegisterClass *getRegClassFor(MVT VT) const {
const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
assert(RC && "This value type is not natively supported!");
return RC;
}
/// Return the 'representative' register class for the specified value
/// type.
///
/// The 'representative' register class is the largest legal super-reg
/// register class for the register class of the value type. For example, on
/// i386 the rep register class for i8, i16, and i32 is GR32; on x86_64 the
/// rep register class for those types is GR64.
virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
return RC;
}
/// Return the cost of the 'representative' register class for the specified
/// value type.
virtual uint8_t getRepRegClassCostFor(MVT VT) const {
return RepRegClassCostForVT[VT.SimpleTy];
}
/// Return true if the target has native support for the specified value type.
/// This means that it has a register that directly holds it without
/// promotions or expansions.
bool isTypeLegal(EVT VT) const {
assert(!VT.isSimple() ||
(unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
}
class ValueTypeActionImpl {
/// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
/// that indicates how instruction selection should deal with the type.
uint8_t ValueTypeActions[MVT::LAST_VALUETYPE];
public:
ValueTypeActionImpl() {
std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions), 0);
}
LegalizeTypeAction getTypeAction(MVT VT) const {
return (LegalizeTypeAction)ValueTypeActions[VT.SimpleTy];
}
void setTypeAction(MVT VT, LegalizeTypeAction Action) {
unsigned I = VT.SimpleTy;
ValueTypeActions[I] = Action;
}
};
const ValueTypeActionImpl &getValueTypeActions() const {
return ValueTypeActions;
}
/// Return how we should legalize values of this type, either it is already
/// legal (return 'Legal') or we need to promote it to a larger type (return
/// 'Promote'), or we need to expand it into multiple registers of smaller
/// integer type (return 'Expand'). 'Custom' is not an option.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
return getTypeConversion(Context, VT).first;
}
LegalizeTypeAction getTypeAction(MVT VT) const {
return ValueTypeActions.getTypeAction(VT);
}
/// For types supported by the target, this is an identity function. For
/// types that must be promoted to larger types, this returns the larger type
/// to promote to. For integer types that are larger than the largest integer
/// register, this contains one step in the expansion to get to the smaller
/// register. For illegal floating point types, this returns the integer type
/// to transform to.
EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
return getTypeConversion(Context, VT).second;
}
/// For types supported by the target, this is an identity function. For
/// types that must be expanded (i.e. integer types that are larger than the
/// largest integer register or illegal floating point types), this returns
/// the largest legal type it will be expanded to.
EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
assert(!VT.isVector());
while (true) {
switch (getTypeAction(Context, VT)) {
case TypeLegal:
return VT;
case TypeExpandInteger:
VT = getTypeToTransformTo(Context, VT);
break;
default:
llvm_unreachable("Type is not legal nor is it to be expanded!");
}
}
}
/// Vector types are broken down into some number of legal first class types.
/// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
/// promoted EVT::f64 values with the X86 FP stack. Similarly, EVT::v2i64
/// turns into 4 EVT::i32 values with both PPC and X86.
///
/// This method returns the number of registers needed, and the VT for each
/// register. It also returns the VT and quantity of the intermediate values
/// before they are promoted/expanded.
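///
/// Illustrative query (TLI and Ctx are assumed to be a TargetLowering
/// reference and an LLVMContext; the results depend on the target's legal
/// vector types):
/// \code
///   EVT IntermediateVT;
///   MVT RegisterVT;
///   unsigned NumIntermediates;
///   unsigned NumRegs = TLI.getVectorTypeBreakdown(
///       Ctx, EVT::getVectorVT(Ctx, MVT::f32, 8), IntermediateVT,
///       NumIntermediates, RegisterVT);
///   // With only v4f32 legal, NumRegs == 2 and RegisterVT == MVT::v4f32.
/// \endcode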
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
EVT &IntermediateVT,
unsigned &NumIntermediates,
MVT &RegisterVT) const;
struct IntrinsicInfo {
unsigned opc; // target opcode
EVT memVT; // memory VT
const Value* ptrVal; // value representing memory location
int offset; // offset off of ptrVal
unsigned size; // the size of the memory location
// (taken from memVT if zero)
unsigned align; // alignment
bool vol; // is volatile?
bool readMem; // reads memory?
bool writeMem; // writes memory?
IntrinsicInfo() : opc(0), ptrVal(nullptr), offset(0), size(0), align(1),
vol(false), readMem(false), writeMem(false) {}
};
/// Given an intrinsic, checks if on the target the intrinsic will need to map
/// to a MemIntrinsicNode (touches memory). If this is the case, it returns
/// true and stores the intrinsic information into the IntrinsicInfo that was
/// passed to the function.
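///
/// Sketch of an override (illustrative only; the target class, intrinsic
/// name, and sizes below are hypothetical):
/// \code
///   bool MyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
///                                             const CallInst &I,
///                                             unsigned Intrinsic) const {
///     if (Intrinsic == Intrinsic::mytarget_load_atomic) { // hypothetical
///       Info.opc = ISD::INTRINSIC_W_CHAIN;
///       Info.memVT = MVT::i64;
///       Info.ptrVal = I.getArgOperand(0);
///       Info.offset = 0;
///       Info.align = 8;
///       Info.vol = false;
///       Info.readMem = true;
///       Info.writeMem = false;
///       return true;
///     }
///     return false;
///   }
/// \endcode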
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
unsigned /*Intrinsic*/) const {
return false;
}
/// Returns true if the target can instruction select the specified FP
/// immediate natively. If false, the legalizer will materialize the FP
/// immediate as a load from a constant pool.
virtual bool isFPImmLegal(const APFloat &/*Imm*/, EVT /*VT*/) const {
return false;
}
/// Targets can use this to indicate that they only support *some*
/// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
/// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
/// legal.
virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
EVT /*VT*/) const {
return true;
}
/// Returns true if the operation can trap for the value type.
///
/// VT must be a legal type. By default, we optimistically assume most
/// operations don't trap except for divide and remainder.
virtual bool canOpTrap(unsigned Op, EVT VT) const;
/// Similar to isShuffleMaskLegal. Targets can use this to indicate whether
/// there is a suitable VECTOR_SHUFFLE that can be used to replace
/// a VAND with a constant pool entry.
virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
EVT /*VT*/) const {
return false;
}
/// Return how this operation should be treated: either it is legal, needs to
/// be promoted to a larger size, needs to be expanded to some other code
/// sequence, or the target has a custom expander for it.
LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
if (VT.isExtended()) return Expand;
// If a target-specific SDNode requires legalization, require the target
// to provide custom legalization for it.
if (Op > array_lengthof(OpActions[0])) return Custom;
unsigned I = (unsigned) VT.getSimpleVT().SimpleTy;
return (LegalizeAction)OpActions[I][Op];
}
/// Return true if the specified operation is legal on this target or can be
/// made legal with custom lowering. This is used to help guide high-level
/// lowering decisions.
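///
/// Typical use in DAG combining code (TLI, DAG, DL, VT, and Op are assumed
/// to be in scope):
/// \code
///   if (TLI.isOperationLegalOrCustom(ISD::CTPOP, VT))
///     return DAG.getNode(ISD::CTPOP, DL, VT, Op);
/// \endcode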
bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
return (VT == MVT::Other || isTypeLegal(VT)) &&
(getOperationAction(Op, VT) == Legal ||
getOperationAction(Op, VT) == Custom);
}
/// Return true if the specified operation is legal on this target or can be
/// made legal using promotion. This is used to help guide high-level lowering
/// decisions.
bool isOperationLegalOrPromote(unsigned Op, EVT VT) const {
return (VT == MVT::Other || isTypeLegal(VT)) &&
(getOperationAction(Op, VT) == Legal ||
getOperationAction(Op, VT) == Promote);
}
/// Return true if the specified operation is illegal on this target or
/// unlikely to be made legal with custom lowering. This is used to help guide
/// high-level lowering decisions.
bool isOperationExpand(unsigned Op, EVT VT) const {
return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
}
/// Return true if the specified operation is legal on this target.
bool isOperationLegal(unsigned Op, EVT VT) const {
return (VT == MVT::Other || isTypeLegal(VT)) &&
getOperationAction(Op, VT) == Legal;
}
/// Return how this load with extension should be treated: either it is legal,
/// needs to be promoted to a larger size, needs to be expanded to some other
/// code sequence, or the target has a custom expander for it.
LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
EVT MemVT) const {
if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::LAST_VALUETYPE &&
MemI < MVT::LAST_VALUETYPE && "Table isn't big enough!");
return (LegalizeAction)LoadExtActions[ValI][MemI][ExtType];
}
/// Return true if the specified load with extension is legal on this target.
bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
return ValVT.isSimple() && MemVT.isSimple() &&
getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
}
/// Return true if the specified load with extension is legal or custom
/// on this target.
bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
return ValVT.isSimple() && MemVT.isSimple() &&
(getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
getLoadExtAction(ExtType, ValVT, MemVT) == Custom);
}
/// Return how this store with truncation should be treated: either it is
/// legal, needs to be promoted to a larger size, needs to be expanded to some
/// other code sequence, or the target has a custom expander for it.
LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
assert(ValI < MVT::LAST_VALUETYPE && MemI < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
return (LegalizeAction)TruncStoreActions[ValI][MemI];
}
/// Return true if the specified store with truncation is legal on this
/// target.
bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
return isTypeLegal(ValVT) && MemVT.isSimple() &&
getTruncStoreAction(ValVT.getSimpleVT(), MemVT.getSimpleVT()) == Legal;
}
/// Return how the indexed load should be treated: either it is legal, needs
/// to be promoted to a larger size, needs to be expanded to some other code
/// sequence, or the target has a custom expander for it.
LegalizeAction
getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
"Table isn't big enough!");
unsigned Ty = (unsigned)VT.SimpleTy;
return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
}
/// Return true if the specified indexed load is legal on this target.
bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
return VT.isSimple() &&
(getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
}
/// Return how the indexed store should be treated: either it is legal, needs
/// to be promoted to a larger size, needs to be expanded to some other code
/// sequence, or the target has a custom expander for it.
LegalizeAction
getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
"Table isn't big enough!");
unsigned Ty = (unsigned)VT.SimpleTy;
return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
}
/// Return true if the specified indexed store is legal on this target.
bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
return VT.isSimple() &&
(getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
}
/// Return how the condition code should be treated: either it is legal, needs
/// to be expanded to some other code sequence, or the target has a custom
/// expander for it.
LegalizeAction
getCondCodeAction(ISD::CondCode CC, MVT VT) const {
assert((unsigned)CC < array_lengthof(CondCodeActions) &&
((unsigned)VT.SimpleTy >> 4) < array_lengthof(CondCodeActions[0]) &&
"Table isn't big enough!");
// See setCondCodeAction for how this is encoded.
uint32_t Shift = 2 * (VT.SimpleTy & 0xF);
uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 4];
LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0x3);
assert(Action != Promote && "Can't promote condition code!");
return Action;
}
/// Return true if the specified condition code is legal on this target.
bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
return
getCondCodeAction(CC, VT) == Legal ||
getCondCodeAction(CC, VT) == Custom;
}
/// If the action for this operation is to promote, this method returns the
/// ValueType to promote to.
MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
assert(getOperationAction(Op, VT) == Promote &&
"This operation isn't promoted!");
// See if this has an explicit type specified.
std::map<std::pair<unsigned, MVT::SimpleValueType>,
MVT::SimpleValueType>::const_iterator PTTI =
PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
if (PTTI != PromoteToType.end()) return PTTI->second;
assert((VT.isInteger() || VT.isFloatingPoint()) &&
"Cannot autopromote this type, add it with AddPromotedToType.");
MVT NVT = VT;
do {
NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
"Didn't find type to promote to!");
} while (!isTypeLegal(NVT) ||
getOperationAction(Op, NVT) == Promote);
return NVT;
}
/// Return the EVT corresponding to this LLVM type. This is fixed by the LLVM
/// operations except for the pointer size. If AllowUnknown is true, this
/// will return MVT::Other for types with no EVT counterpart (e.g. structs),
/// otherwise it will assert.
EVT getValueType(const DataLayout &DL, Type *Ty,
bool AllowUnknown = false) const {
// Lower scalar pointers to native pointer types.
if (PointerType *PTy = dyn_cast<PointerType>(Ty))
return getPointerTy(DL, PTy->getAddressSpace());
if (Ty->isVectorTy()) {
VectorType *VTy = cast<VectorType>(Ty);
Type *Elm = VTy->getElementType();
// Lower vectors of pointers to native pointer types.
if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
EVT PointerTy(getPointerTy(DL, PT->getAddressSpace()));
Elm = PointerTy.getTypeForEVT(Ty->getContext());
}
return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
VTy->getNumElements());
}
return EVT::getEVT(Ty, AllowUnknown);
}
/// Return the MVT corresponding to this LLVM type. See getValueType.
MVT getSimpleValueType(const DataLayout &DL, Type *Ty,
bool AllowUnknown = false) const {
return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
}
/// Return the desired alignment for ByVal or InAlloca aggregate function
/// arguments in the caller parameter area. This is the actual alignment, not
/// its logarithm.
virtual unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;
/// Return the type of registers that this ValueType will eventually require.
MVT getRegisterType(MVT VT) const {
assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
return RegisterTypeForVT[VT.SimpleTy];
}
/// Return the type of registers that this ValueType will eventually require.
MVT getRegisterType(LLVMContext &Context, EVT VT) const {
if (VT.isSimple()) {
assert((unsigned)VT.getSimpleVT().SimpleTy <
array_lengthof(RegisterTypeForVT));
return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
}
if (VT.isVector()) {
EVT VT1;
MVT RegisterVT;
unsigned NumIntermediates;
(void)getVectorTypeBreakdown(Context, VT, VT1,
NumIntermediates, RegisterVT);
return RegisterVT;
}
if (VT.isInteger()) {
return getRegisterType(Context, getTypeToTransformTo(Context, VT));
}
llvm_unreachable("Unsupported extended type!");
}
/// Return the number of registers that this ValueType will eventually
/// require.
///
/// This is one for any types promoted to live in larger registers, but may be
/// more than one for types (like i64) that are split into pieces. For types
/// like i140, which are first promoted then expanded, it is the number of
/// registers needed to hold all the bits of the original type. For an i140
/// on a 32 bit machine this means 5 registers.
unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
if (VT.isSimple()) {
assert((unsigned)VT.getSimpleVT().SimpleTy <
array_lengthof(NumRegistersForVT));
return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
}
if (VT.isVector()) {
EVT VT1;
MVT VT2;
unsigned NumIntermediates;
return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
}
if (VT.isInteger()) {
unsigned BitWidth = VT.getSizeInBits();
unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
return (BitWidth + RegWidth - 1) / RegWidth;
}
llvm_unreachable("Unsupported extended type!");
}
/// If true, then instruction selection should seek to shrink the FP constant
/// of the specified type to a smaller type in order to save space and / or
/// reduce runtime.
virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
// Return true if it is profitable to reduce the given load node to a smaller
// type.
//
// e.g. (i16 (trunc (i32 (load x)))) -> i16 load x should be performed
virtual bool shouldReduceLoadWidth(SDNode *Load,
ISD::LoadExtType ExtTy,
EVT NewVT) const {
return true;
}
/// When splitting a value of the specified type into parts, does the Lo
/// or Hi part come first? This usually follows the endianness, except
/// for ppcf128, where the Hi part always comes first.
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
return DL.isBigEndian() || VT == MVT::ppcf128;
}
/// If true, the target has custom DAG combine transformations that it can
/// perform for the specified node.
bool hasTargetDAGCombine(ISD::NodeType NT) const {
assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
}
/// \brief Get maximum # of store operations permitted for llvm.memset
///
/// This function returns the maximum number of store operations permitted
/// to replace a call to llvm.memset. The value is set by the target at the
/// performance threshold for such a replacement. If OptSize is true,
/// return the limit for functions that have OptSize attribute.
unsigned getMaxStoresPerMemset(bool OptSize) const {
return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
}
/// \brief Get maximum # of store operations permitted for llvm.memcpy
///
/// This function returns the maximum number of store operations permitted
/// to replace a call to llvm.memcpy. The value is set by the target at the
/// performance threshold for such a replacement. If OptSize is true,
/// return the limit for functions that have OptSize attribute.
unsigned getMaxStoresPerMemcpy(bool OptSize) const {
return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
}
/// \brief Get maximum # of store operations permitted for llvm.memmove
///
/// This function returns the maximum number of store operations permitted
/// to replace a call to llvm.memmove. The value is set by the target at the
/// performance threshold for such a replacement. If OptSize is true,
/// return the limit for functions that have OptSize attribute.
unsigned getMaxStoresPerMemmove(bool OptSize) const {
return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
}
/// \brief Determine if the target supports unaligned memory accesses.
///
/// This function returns true if the target allows unaligned memory accesses
/// of the specified type in the given address space. If true, it also returns
/// whether the unaligned memory access is "fast" in the last argument by
/// reference. This is used, for example, in situations where an array
/// copy/move/set is converted to a sequence of store operations. Its use
/// helps to ensure that such replacements don't generate code that causes an
/// alignment error (trap) on the target machine.
virtual bool allowsMisalignedMemoryAccesses(EVT,
unsigned AddrSpace = 0,
unsigned Align = 1,
bool * /*Fast*/ = nullptr) const {
return false;
}
/// Returns the target specific optimal type for load and store operations as
/// a result of memset, memcpy, and memmove lowering.
///
/// If DstAlign is zero, it means the destination alignment can satisfy any
/// constraint. Similarly, if SrcAlign is zero it means there isn't
/// a need to check it against alignment requirement, probably because the
/// source does not need to be loaded. If 'IsMemset' is true, that means it's
/// expanding a memset. If 'ZeroMemset' is true, that means it's a memset of
/// zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
/// does not need to be loaded. It returns EVT::Other if the type should be
/// determined using generic target-independent logic.
virtual EVT getOptimalMemOpType(uint64_t /*Size*/,
unsigned /*DstAlign*/, unsigned /*SrcAlign*/,
bool /*IsMemset*/,
bool /*ZeroMemset*/,
bool /*MemcpyStrSrc*/,
MachineFunction &/*MF*/) const {
return MVT::Other;
}
/// Returns true if it's safe to use load / store of the specified type to
/// expand memcpy / memset inline.
///
/// This is mostly true for all types except for some special cases. For
/// example, on X86 targets without SSE2 f64 load / store are done with fldl /
/// fstpl which also does type conversion. Note the specified type doesn't
/// have to be legal as the hook is used before type legalization.
virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }
/// Determine if we should use _setjmp or setjmp to implement llvm.setjmp.
bool usesUnderscoreSetJmp() const {
return UseUnderscoreSetJmp;
}
/// Determine if we should use _longjmp or longjmp to implement llvm.longjmp.
bool usesUnderscoreLongJmp() const {
return UseUnderscoreLongJmp;
}
/// Return the integer threshold on the number of blocks at which to use a
/// jump table rather than an if sequence.
int getMinimumJumpTableEntries() const {
return MinimumJumpTableEntries;
}
/// If a physical register, this specifies the register that
/// llvm.savestack/llvm.restorestack should save and restore.
unsigned getStackPointerRegisterToSaveRestore() const {
return StackPointerRegisterToSaveRestore;
}
/// If a physical register, this returns the register that receives the
/// exception address on entry to a landing pad.
unsigned getExceptionPointerRegister() const {
return ExceptionPointerRegister;
}
/// If a physical register, this returns the register that receives the
/// exception typeid on entry to a landing pad.
unsigned getExceptionSelectorRegister() const {
return ExceptionSelectorRegister;
}
/// Returns the target's jmp_buf size in bytes (if never set, the default is
/// 200)
unsigned getJumpBufSize() const {
return JumpBufSize;
}
/// Returns the target's jmp_buf alignment in bytes (if never set, the default
/// is 0)
unsigned getJumpBufAlignment() const {
return JumpBufAlignment;
}
/// Return the minimum stack alignment of an argument.
unsigned getMinStackArgumentAlignment() const {
return MinStackArgumentAlignment;
}
/// Return the minimum function alignment.
unsigned getMinFunctionAlignment() const {
return MinFunctionAlignment;
}
/// Return the preferred function alignment.
unsigned getPrefFunctionAlignment() const {
return PrefFunctionAlignment;
}
/// Return the preferred loop alignment.
virtual unsigned getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
return PrefLoopAlignment;
}
/// Return whether the DAG builder should automatically insert fences and
/// reduce ordering for atomics.
bool getInsertFencesForAtomic() const {
return InsertFencesForAtomic;
}
/// Return true if the target stores stack protector cookies at a fixed offset
/// in some non-standard address space, and populates the address space and
/// offset as appropriate.
virtual bool getStackCookieLocation(unsigned &/*AddressSpace*/,
unsigned &/*Offset*/) const {
return false;
}
/// Returns true if a cast between SrcAS and DestAS is a noop.
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
return false;
}
/// Return true if the pointer arguments to CI should be aligned by aligning
/// the object whose address is being passed. If so then MinSize is set to the
/// minimum size the object must be to be aligned and PrefAlign is set to the
/// preferred alignment.
virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
unsigned & /*PrefAlign*/) const {
return false;
}
//===--------------------------------------------------------------------===//
/// \name Helpers for TargetTransformInfo implementations
/// @{
/// Get the ISD node that corresponds to the Instruction class opcode.
int InstructionOpcodeToISD(unsigned Opcode) const;
/// Estimate the cost of type-legalization and the legalized type.
std::pair<unsigned, MVT> getTypeLegalizationCost(const DataLayout &DL,
Type *Ty) const;
/// @}
//===--------------------------------------------------------------------===//
/// \name Helpers for atomic expansion.
/// @{
/// True if AtomicExpandPass should use emitLoadLinked/emitStoreConditional
/// and expand AtomicCmpXchgInst.
virtual bool hasLoadLinkedStoreConditional() const { return false; }
/// Perform a load-linked operation on Addr, returning a "Value *" with the
/// corresponding pointee type. This may entail some non-trivial operations to
/// truncate or reconstruct types that will be illegal in the backend. See
/// ARMISelLowering for an example implementation.
virtual Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
AtomicOrdering Ord) const {
llvm_unreachable("Load linked unimplemented on this target");
}
/// Perform a store-conditional operation to Addr. Return the status of the
/// store. This should be 0 if the store succeeded, non-zero otherwise.
virtual Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
Value *Addr, AtomicOrdering Ord) const {
llvm_unreachable("Store conditional unimplemented on this target");
}
/// Inserts in the IR a target-specific intrinsic specifying a fence.
/// It is called by AtomicExpandPass before expanding an
/// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad.
/// RMW and CmpXchg set both IsStore and IsLoad to true.
/// This function should either return a nullptr, or a pointer to an IR-level
/// Instruction*. Even complex fence sequences can be represented by a
/// single Instruction* through an intrinsic to be lowered later.
/// Backends with !getInsertFencesForAtomic() should keep a no-op here.
/// Backends should override this method to produce target-specific intrinsic
/// for their fences.
/// FIXME: Please note that the default implementation here in terms of
/// IR-level fences exists for historical/compatibility reasons and is
/// *unsound* ! Fences cannot, in general, be used to restore sequential
/// consistency. For example, consider the following:
/// atomic<int> x = y = 0;
/// int r1, r2, r3, r4;
/// Thread 0:
/// x.store(1);
/// Thread 1:
/// y.store(1);
/// Thread 2:
/// r1 = x.load();
/// r2 = y.load();
/// Thread 3:
/// r3 = y.load();
/// r4 = x.load();
/// r1 = r3 = 1 and r2 = r4 = 0 is impossible as long as the accesses are all
/// seq_cst. But if they are lowered to monotonic accesses, no amount of
/// IR-level fences can prevent it.
/// @{
virtual Instruction *emitLeadingFence(IRBuilder<> &Builder,
AtomicOrdering Ord, bool IsStore,
bool IsLoad) const {
if (!getInsertFencesForAtomic())
return nullptr;
if (isAtLeastRelease(Ord) && IsStore)
return Builder.CreateFence(Ord);
else
return nullptr;
}
virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
AtomicOrdering Ord, bool IsStore,
bool IsLoad) const {
if (!getInsertFencesForAtomic())
return nullptr;
if (isAtLeastAcquire(Ord))
return Builder.CreateFence(Ord);
else
return nullptr;
}
/// @}
/// Returns true if the given (atomic) store should be expanded by the
/// IR-level AtomicExpand pass into an "atomic xchg" which ignores its input.
virtual bool shouldExpandAtomicStoreInIR(StoreInst *SI) const {
return false;
}
/// Returns true if arguments should be sign-extended in lib calls.
virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
return IsSigned;
}
/// Returns true if the given (atomic) load should be expanded by the
/// IR-level AtomicExpand pass into a load-linked instruction
/// (through emitLoadLinked()).
virtual bool shouldExpandAtomicLoadInIR(LoadInst *LI) const { return false; }
/// Returns how the IR-level AtomicExpand pass should expand the given
/// AtomicRMW, if at all. Default is to never expand.
virtual AtomicRMWExpansionKind
shouldExpandAtomicRMWInIR(AtomicRMWInst *) const {
return AtomicRMWExpansionKind::None;
}
/// On some platforms, an AtomicRMW that never actually modifies the value
/// (such as fetch_add of 0) can be turned into a fence followed by an
/// atomic load. This may sound useless, but it makes it possible for the
/// processor to keep the cacheline shared, dramatically improving
/// performance. And such idempotent RMWs are useful for implementing some
/// kinds of locks, see for example (justification + benchmarks):
/// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
/// This method tries doing that transformation, returning the atomic load if
/// it succeeds, and nullptr otherwise.
/// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
/// another round of expansion.
virtual LoadInst *
lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
return nullptr;
}
/// Returns true if we should normalize
/// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
/// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely
/// that it saves us from materializing N0 and N1 in an integer register.
/// Targets that are able to perform and/or on flags should return false here.
virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
EVT VT) const {
// If a target has multiple condition registers, then it likely has logical
// operations on those registers.
if (hasMultipleConditionRegisters())
return false;
// Only do the transform if the value won't be split into multiple
// registers.
LegalizeTypeAction Action = getTypeAction(Context, VT);
return Action != TypeExpandInteger && Action != TypeExpandFloat &&
Action != TypeSplitVector;
}
//===--------------------------------------------------------------------===//
// TargetLowering Configuration Methods - These methods should be invoked by
// the derived class constructor to configure this object for the target.
//
protected:
/// Specify how the target extends the result of integer and floating point
/// boolean values from i1 to a wider type. See getBooleanContents.
void setBooleanContents(BooleanContent Ty) {
BooleanContents = Ty;
BooleanFloatContents = Ty;
}
/// Specify how the target extends the result of integer and floating point
/// boolean values from i1 to a wider type. See getBooleanContents.
void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
BooleanContents = IntTy;
BooleanFloatContents = FloatTy;
}
/// Specify how the target extends the result of a vector boolean value from a
/// vector of i1 to a wider type. See getBooleanContents.
void setBooleanVectorContents(BooleanContent Ty) {
BooleanVectorContents = Ty;
}
/// Specify the target scheduling preference.
void setSchedulingPreference(Sched::Preference Pref) {
SchedPreferenceInfo = Pref;
}
/// Indicate whether this target prefers to use _setjmp to implement
/// llvm.setjmp or the version without _. Defaults to false.
void setUseUnderscoreSetJmp(bool Val) {
UseUnderscoreSetJmp = Val;
}
/// Indicate whether this target prefers to use _longjmp to implement
/// llvm.longjmp or the version without _. Defaults to false.
void setUseUnderscoreLongJmp(bool Val) {
UseUnderscoreLongJmp = Val;
}
/// Indicate the minimum number of blocks needed to generate a jump table
/// rather than an if sequence.
void setMinimumJumpTableEntries(int Val) {
MinimumJumpTableEntries = Val;
}
/// If set to a physical register, this specifies the register that
/// llvm.savestack/llvm.restorestack should save and restore.
void setStackPointerRegisterToSaveRestore(unsigned R) {
StackPointerRegisterToSaveRestore = R;
}
/// If set to a physical register, this sets the register that receives the
/// exception address on entry to a landing pad.
void setExceptionPointerRegister(unsigned R) {
ExceptionPointerRegister = R;
}
/// If set to a physical register, this sets the register that receives the
/// exception typeid on entry to a landing pad.
void setExceptionSelectorRegister(unsigned R) {
ExceptionSelectorRegister = R;
}
/// Tells the code generator not to expand operations into sequences that use
/// the select operations if possible.
void setSelectIsExpensive(bool isExpensive = true) {
SelectIsExpensive = isExpensive;
}
/// Tells the code generator that the target has multiple (allocatable)
/// condition registers that can be used to store the results of comparisons
/// for use by selects and conditional branches. With multiple condition
/// registers, the code generator will not aggressively sink comparisons into
/// the blocks of their users.
void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
HasMultipleConditionRegisters = hasManyRegs;
}
/// Tells the code generator that the target has BitExtract instructions.
/// The code generator will aggressively sink "shift"s into the blocks of
/// their users if the users will generate "and" instructions which can be
/// combined with "shift" to BitExtract instructions.
void setHasExtractBitsInsn(bool hasExtractInsn = true) {
HasExtractBitsInsn = hasExtractInsn;
}
/// Tells the code generator not to expand logic operations on comparison
/// predicates into separate sequences that increase the amount of flow
/// control.
void setJumpIsExpensive(bool isExpensive = true);
/// Tells the code generator whether integer divide is cheap. When it is not,
/// integer divides should, if possible, be replaced by an alternate sequence
/// of instructions not containing an integer divide.
void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }
/// Tells the code generator that fsqrt is cheap, and should not be replaced
/// with an alternative sequence of instructions.
void setFsqrtIsCheap(bool isCheap = true) { FsqrtIsCheap = isCheap; }
/// Tells the code generator that this target supports floating point
/// exceptions and cares about preserving floating point exception behavior.
void setHasFloatingPointExceptions(bool FPExceptions = true) {
HasFloatingPointExceptions = FPExceptions;
}
/// Tells the code generator which bitwidths to bypass.
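///
/// For example, to bypass 32-bit division with 8-bit division when the
/// operands are small enough:
/// \code
///   addBypassSlowDiv(32, 8);
/// \endcode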
void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
}
/// Tells the code generator that it shouldn't generate sra/srl/add/sra for a
/// signed divide by power of two; let the target handle it.
void setPow2SDivIsCheap(bool isCheap = true) { Pow2SDivIsCheap = isCheap; }
/// Add the specified register class as an available regclass for the
/// specified value type. This indicates the selector can handle values of
/// that class natively.
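///
/// Typical use in a target constructor (the register class name below is
/// hypothetical):
/// \code
///   addRegisterClass(MVT::i32, &MyTarget::GPR32RegClass);
/// \endcode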
void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
AvailableRegClasses.push_back(std::make_pair(VT, RC));
RegClassForVT[VT.SimpleTy] = RC;
}
/// Remove all register classes.
void clearRegisterClasses() {
memset(RegClassForVT, 0, MVT::LAST_VALUETYPE * sizeof(TargetRegisterClass*));
AvailableRegClasses.clear();
}
/// \brief Remove all operation actions.
void clearOperationActions() {
}
/// Return the largest legal super-reg register class of the register class
/// for the specified type and its associated "cost".
virtual std::pair<const TargetRegisterClass *, uint8_t>
findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;
/// Once all of the register classes are added, this allows us to compute
/// derived properties we expose.
void computeRegisterProperties(const TargetRegisterInfo *TRI);
/// Indicate that the specified operation does not work with the specified
/// type and indicate what to do about it.
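///
/// Typical use in a target's TargetLowering constructor (illustrative
/// choices, not from any particular backend):
/// \code
///   setOperationAction(ISD::SDIV,  MVT::i32, Expand);
///   setOperationAction(ISD::FSIN,  MVT::f64, Expand);
///   setOperationAction(ISD::BR_CC, MVT::i32, Custom);
/// \endcode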
void setOperationAction(unsigned Op, MVT VT,
LegalizeAction Action) {
assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
OpActions[(unsigned)VT.SimpleTy][Op] = (uint8_t)Action;
}
/// Indicate that the specified load with extension does not work with the
/// specified type and indicate what to do about it.
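///
/// For example, a target without extending f32 -> f64 loads might do
/// (illustrative):
/// \code
///   setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
/// \endcode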
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
LegalizeAction Action) {
assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
MemVT.isValid() && "Table isn't big enough!");
LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy][ExtType] = (uint8_t)Action;
}
/// Indicate that the specified truncating store does not work with the
/// specified type and indicate what to do about it.
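///
/// For example, a target without truncating f64 -> f32 stores might do
/// (illustrative):
/// \code
///   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
/// \endcode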
void setTruncStoreAction(MVT ValVT, MVT MemVT,
LegalizeAction Action) {
assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
TruncStoreActions[ValVT.SimpleTy][MemVT.SimpleTy] = (uint8_t)Action;
}
/// Indicate that the specified indexed load does or does not work with the
/// specified type and indicate what to do about it.
///
/// NOTE: All indexed mode loads are initialized to Expand in
/// TargetLowering.cpp
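///
/// Illustrative use for a target with post-increment loads of i32 only:
/// \code
///   setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
/// \endcode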
void setIndexedLoadAction(unsigned IdxMode, MVT VT,
LegalizeAction Action) {
assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
(unsigned)Action < 0xf && "Table isn't big enough!");
// Load action are kept in the upper half.
IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
}
/// Indicate that the specified indexed store does or does not work with the
/// specified type and indicate what to do about it.
///
/// NOTE: All indexed mode stores are initialized to Expand in
/// TargetLowering.cpp
void setIndexedStoreAction(unsigned IdxMode, MVT VT,
LegalizeAction Action) {
assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
(unsigned)Action < 0xf && "Table isn't big enough!");
// Store action are kept in the lower half.
IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
}
/// Indicate that the specified condition code is or isn't supported on the
/// target and indicate what to do about it.
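///
/// For example, a target without native unordered FP compares might do
/// (illustrative):
/// \code
///   setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
/// \endcode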
void setCondCodeAction(ISD::CondCode CC, MVT VT,
LegalizeAction Action) {
assert(VT.isValid() && (unsigned)CC < array_lengthof(CondCodeActions) &&
"Table isn't big enough!");
/// The lower 4 bits of the SimpleTy select the Nth 2-bit chunk of the 32-bit
/// value, and the remaining upper bits index into the second dimension of
/// the array to select which 32-bit value to use.
uint32_t Shift = 2 * (VT.SimpleTy & 0xF);
CondCodeActions[CC][VT.SimpleTy >> 4] &= ~((uint32_t)0x3 << Shift);
CondCodeActions[CC][VT.SimpleTy >> 4] |= (uint32_t)Action << Shift;
}
/// If Opc/OrigVT is specified as being promoted, the promotion code defaults
/// to trying a larger integer/fp until it can find one that works. If that
/// default is insufficient, this method can be used by the target to override
/// the default.
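///
/// Illustrative use, promoting i8 SETCC results to i32:
/// \code
///   setOperationAction(ISD::SETCC, MVT::i8, Promote);
///   AddPromotedToType(ISD::SETCC, MVT::i8, MVT::i32);
/// \endcode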
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
}
/// Targets should invoke this method for each target independent node that
/// they want to provide a custom DAG combiner for by implementing the
/// PerformDAGCombine virtual method.
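///
/// Illustrative use in a target constructor:
/// \code
///   setTargetDAGCombine(ISD::ADD);          // handled in PerformDAGCombine
///   setTargetDAGCombine(ISD::SIGN_EXTEND);
/// \endcode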
void setTargetDAGCombine(ISD::NodeType NT) {
assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
}
/// Set the target's required jmp_buf buffer size (in bytes); default is 200
void setJumpBufSize(unsigned Size) {
JumpBufSize = Size;
}
/// Set the target's required jmp_buf buffer alignment (in bytes); default is
/// 0
void setJumpBufAlignment(unsigned Align) {
JumpBufAlignment = Align;
}
/// Set the target's minimum function alignment (in log2(bytes))
void setMinFunctionAlignment(unsigned Align) {
MinFunctionAlignment = Align;
}
/// Set the target's preferred function alignment. This should be set if
/// there is a performance benefit to higher-than-minimum alignment (in
/// log2(bytes))
void setPrefFunctionAlignment(unsigned Align) {
PrefFunctionAlignment = Align;
}
/// Set the target's preferred loop alignment. Default alignment is zero, it
/// means the target does not care about loop alignment. The alignment is
/// specified in log2(bytes). The target may also override
/// getPrefLoopAlignment to provide per-loop values.
void setPrefLoopAlignment(unsigned Align) {
PrefLoopAlignment = Align;
}
/// Set the minimum stack alignment of an argument (in log2(bytes)).
void setMinStackArgumentAlignment(unsigned Align) {
MinStackArgumentAlignment = Align;
}
/// Set if the DAG builder should automatically insert fences and reduce the
/// order of atomic memory operations to Monotonic.
void setInsertFencesForAtomic(bool fence) {
InsertFencesForAtomic = fence;
}
public:
//===--------------------------------------------------------------------===//
// Addressing mode description hooks (used by LSR etc).
//
/// CodeGenPrepare sinks address calculations into the same BB as Load/Store
/// instructions reading the address. This allows as much computation as
/// possible to be done in the address mode for that operand. This hook lets
/// targets also pass back when this should be done on intrinsics which
/// load/store.
virtual bool GetAddrModeArguments(IntrinsicInst * /*I*/,
SmallVectorImpl<Value*> &/*Ops*/,
Type *&/*AccessTy*/,
unsigned AddrSpace = 0) const {
return false;
}
/// This represents an addressing mode of:
/// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
/// If BaseGV is null, there is no BaseGV.
/// If BaseOffs is zero, there is no base offset.
/// If HasBaseReg is false, there is no base register.
/// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
/// no scale.
struct AddrMode {
GlobalValue *BaseGV;
int64_t BaseOffs;
bool HasBaseReg;
int64_t Scale;
AddrMode() : BaseGV(nullptr), BaseOffs(0), HasBaseReg(false), Scale(0) {}
};
/// Return true if the addressing mode represented by AM is legal for this
/// target, for a load/store of the specified type.
///
/// The type may be VoidTy, in which case only return true if the addressing
/// mode is legal for a load/store of any legal type. TODO: Handle
/// pre/postinc as well.
///
/// If the address space cannot be determined, it will be -1.
///
/// TODO: Remove default argument
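///
/// Illustrative query for the mode [BaseReg + 4*ScaleReg + 16] (TLI, DL, and
/// Int32Ty are assumed to be in scope):
/// \code
///   TargetLowering::AddrMode AM;
///   AM.BaseOffs = 16;
///   AM.HasBaseReg = true;
///   AM.Scale = 4;
///   bool Ok = TLI.isLegalAddressingMode(DL, AM, Int32Ty, /*AddrSpace=*/0);
/// \endcode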
virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
Type *Ty, unsigned AddrSpace) const;
/// \brief Return the cost of the scaling factor used in the addressing mode
/// represented by AM for this target, for a load/store of the specified type.
///
/// If the AM is supported, the return value must be >= 0.
/// If the AM is not supported, it returns a negative value.
/// TODO: Handle pre/postinc as well.
/// TODO: Remove default argument
virtual int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM,
Type *Ty, unsigned AS = 0) const {
// Default: assume that any scaling factor used in a legal AM is free.
if (isLegalAddressingMode(DL, AM, Ty, AS))
return 0;
return -1;
}
/// Return true if the specified immediate is legal icmp immediate, that is
/// the target has icmp instructions which can compare a register against the
/// immediate without having to materialize the immediate into a register.
virtual bool isLegalICmpImmediate(int64_t) const {
return true;
}
/// Return true if the specified immediate is legal add immediate, that is the
/// target has add instructions which can add a register with the immediate
/// without having to materialize the immediate into a register.
virtual bool isLegalAddImmediate(int64_t) const {
return true;
}
/// Return true if it's significantly cheaper to shift a vector by a uniform
/// scalar than by an amount which will vary across each lane. On x86, for
/// example, there is a "psllw" instruction for the former case, but no simple
/// instruction for a general "a << b" operation on vectors.
virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
return false;
}
/// Return true if it's free to truncate a value of type Ty1 to type
/// Ty2. e.g. On x86 it's free to truncate a i32 value in register EAX to i16
/// by referencing its sub-register AX.
virtual bool isTruncateFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
return false;
}
/// Return true if a truncation from Ty1 to Ty2 is permitted when deciding
/// whether a call is in tail position. Typically this means that both results
/// would be assigned to the same register or stack slot, but it could mean
/// the target performs adequate checks of its own before proceeding with the
/// tail call.
virtual bool allowTruncateForTailCall(Type * /*Ty1*/, Type * /*Ty2*/) const {
return false;
}
virtual bool isTruncateFree(EVT /*VT1*/, EVT /*VT2*/) const {
return false;
}
virtual bool isProfitableToHoist(Instruction *I) const { return true; }
/// Return true if the extension represented by \p I is free.
/// Unlike the is[Z|FP]ExtFree family, which is based on types,
/// this method can use the context provided by \p I to decide
/// whether or not \p I is free.
/// This method extends the behavior of the is[Z|FP]ExtFree family.
/// In other words, if is[Z|FP]ExtFree returns true, then this method
/// returns true as well. The converse is not true.
/// The target can perform the adequate checks by overriding isExtFreeImpl.
/// \pre \p I must be a sign, zero, or fp extension.
bool isExtFree(const Instruction *I) const {
switch (I->getOpcode()) {
case Instruction::FPExt:
if (isFPExtFree(EVT::getEVT(I->getType())))
return true;
break;
case Instruction::ZExt:
if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
return true;
break;
case Instruction::SExt:
break;
default:
llvm_unreachable("Instruction is not an extension");
}
return isExtFreeImpl(I);
}
/// Return true if any actual instruction that defines a value of type Ty1
/// implicitly zero-extends the value to Ty2 in the result register.
///
/// This does not necessarily include registers defined in unknown ways, such
/// as incoming arguments, or copies from unknown virtual registers. Also, if
/// isTruncateFree(Ty2, Ty1) is true, this does not necessarily apply to
/// truncate instructions. e.g. on x86-64, all instructions that define 32-bit
/// values implicit zero-extend the result out to 64 bits.
virtual bool isZExtFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
return false;
}
virtual bool isZExtFree(EVT /*VT1*/, EVT /*VT2*/) const {
return false;
}
/// Return true if the target supplies and combines to a paired load
/// two loaded values of type LoadedType next to each other in memory.
/// RequiredAlignment gives the minimal alignment constraints that must be met
/// to be able to select this paired load.
///
/// This information is *not* used to generate actual paired loads, but it is
/// used to generate a sequence of loads that is easier to combine into a
/// paired load.
/// For instance, something like this:
/// a = load i64* addr
/// b = trunc i64 a to i32
/// c = lshr i64 a, 32
/// d = trunc i64 c to i32
/// will be optimized into:
/// b = load i32* addr1
/// d = load i32* addr2
/// Where addr1 = addr2 +/- sizeof(i32).
///
/// In other words, unless the target performs a post-isel load combining,
/// this information should not be provided because it will generate more
/// loads.
virtual bool hasPairedLoad(Type * /*LoadedType*/,
unsigned & /*RequiredAlignment*/) const {
return false;
}
virtual bool hasPairedLoad(EVT /*LoadedType*/,
unsigned & /*RequiredAlignment*/) const {
return false;
}
/// \brief Get the maximum supported factor for interleaved memory accesses.
/// Default to be the minimum interleave factor: 2.
virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }
/// \brief Lower an interleaved load to target specific intrinsics. Return
/// true on success.
///
/// \p LI is the vector load instruction.
/// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
/// \p Indices is the corresponding indices for each shufflevector.
/// \p Factor is the interleave factor.
virtual bool lowerInterleavedLoad(LoadInst *LI,
ArrayRef<ShuffleVectorInst *> Shuffles,
ArrayRef<unsigned> Indices,
unsigned Factor) const {
return false;
}
/// \brief Lower an interleaved store to target specific intrinsics. Return
/// true on success.
///
/// \p SI is the vector store instruction.
/// \p SVI is the shufflevector to RE-interleave the stored vector.
/// \p Factor is the interleave factor.
virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
unsigned Factor) const {
return false;
}
/// Return true if zero-extending the specific node Val to type VT2 is free
/// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
/// because it's folded such as X86 zero-extending loads).
virtual bool isZExtFree(SDValue Val, EVT VT2) const {
return isZExtFree(Val.getValueType(), VT2);
}
/// Return true if an fpext operation is free (for instance, because
/// single-precision floating-point numbers are implicitly extended to
/// double-precision).
virtual bool isFPExtFree(EVT VT) const {
assert(VT.isFloatingPoint());
return false;
}
/// Return true if folding a vector load into ExtVal (a sign, zero, or any
/// extend node) is profitable.
virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }
/// Return true if an fneg operation is free to the point where it is never
/// worthwhile to replace it with a bitwise operation.
virtual bool isFNegFree(EVT VT) const {
assert(VT.isFloatingPoint());
return false;
}
/// Return true if an fabs operation is free to the point where it is never
/// worthwhile to replace it with a bitwise operation.
virtual bool isFAbsFree(EVT VT) const {
assert(VT.isFloatingPoint());
return false;
}
/// Return true if an FMA operation is faster than a pair of fmul and fadd
/// instructions. fmuladd intrinsics will be expanded to FMAs when this method
/// returns true, otherwise fmuladd is expanded to fmul + fadd.
///
/// NOTE: This may be called before legalization on types for which FMAs are
/// not legal, but should return true if those types will eventually legalize
/// to types that support FMAs. After legalization, it will only be called on
/// types that support FMAs (via Legal or Custom actions)
virtual bool isFMAFasterThanFMulAndFAdd(EVT) const {
return false;
}
/// Return true if it's profitable to narrow operations of type VT1 to
/// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
/// i32 to i16.
virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
return false;
}
/// \brief Return true if it is beneficial to convert a load of a constant to
/// just the constant itself.
/// On some targets it might be more efficient to use a combination of
/// arithmetic instructions to materialize the constant instead of loading it
/// from a constant pool.
virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
Type *Ty) const {
return false;
}
/// Return true if EXTRACT_SUBVECTOR is cheap for this result type
/// with this index. This is needed because EXTRACT_SUBVECTOR usually
/// has custom lowering that depends on the index of the first element,
/// and only the target knows which lowering is cheap.
virtual bool isExtractSubvectorCheap(EVT ResVT, unsigned Index) const {
return false;
}
//===--------------------------------------------------------------------===//
// Runtime Library hooks
//
/// Rename the default libcall routine name for the specified libcall.
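///
/// For example, a hypothetical embedded target could redirect memcpy calls:
/// \code
///   setLibcallName(RTLIB::MEMCPY, "__mytarget_memcpy"); // hypothetical name
/// \endcode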
void setLibcallName(RTLIB::Libcall Call, const char *Name) {
LibcallRoutineNames[Call] = Name;
}
/// Get the libcall routine name for the specified libcall.
const char *getLibcallName(RTLIB::Libcall Call) const {
return LibcallRoutineNames[Call];
}
/// Override the default CondCode to be used to test the result of the
/// comparison libcall against zero.
void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
CmpLibcallCCs[Call] = CC;
}
/// Get the CondCode that's to be used to test the result of the comparison
/// libcall against zero.
ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
return CmpLibcallCCs[Call];
}
/// Set the CallingConv that should be used for the specified libcall.
void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
LibcallCallingConvs[Call] = CC;
}
/// Get the CallingConv that should be used for the specified libcall.
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
return LibcallCallingConvs[Call];
}
private:
const TargetMachine &TM;
/// Tells the code generator not to expand operations into sequences that use
/// the select operations if possible.
bool SelectIsExpensive;
/// Tells the code generator that the target has multiple (allocatable)
/// condition registers that can be used to store the results of comparisons
/// for use by selects and conditional branches. With multiple condition
/// registers, the code generator will not aggressively sink comparisons into
/// the blocks of their users.
bool HasMultipleConditionRegisters;
/// Tells the code generator that the target has BitExtract instructions.
/// The code generator will aggressively sink "shift"s into the blocks of
/// their users if the users will generate "and" instructions which can be
/// combined with "shift" to BitExtract instructions.
bool HasExtractBitsInsn;
/// Tells the code generator not to expand integer divides by constants into a
/// sequence of muls, adds, and shifts. This is a hack until a real cost
/// model is in place. If we ever optimize for size, this will be set to true
/// unconditionally.
bool IntDivIsCheap;
// Don't expand fsqrt with an approximation based on the inverse sqrt.
bool FsqrtIsCheap;
/// Tells the code generator to bypass slow divide or remainder
/// instructions. For example, BypassSlowDivWidths[32,8] tells the code
/// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
/// div/rem when the operands are positive and less than 256.
DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
/// Tells the code generator that it shouldn't generate sra/srl/add/sra for a
/// signed divide by power of two; let the target handle it.
bool Pow2SDivIsCheap;
/// Tells the code generator that it shouldn't generate extra flow control
/// instructions and should attempt to combine flow control instructions via
/// predication.
bool JumpIsExpensive;
/// Whether the target supports or cares about preserving floating point
/// exception behavior.
bool HasFloatingPointExceptions;
/// This target prefers to use _setjmp to implement llvm.setjmp.
///
/// Defaults to false.
bool UseUnderscoreSetJmp;
/// This target prefers to use _longjmp to implement llvm.longjmp.
///
/// Defaults to false.
bool UseUnderscoreLongJmp;
/// Number of blocks threshold to use jump tables.
int MinimumJumpTableEntries;
/// Information about the contents of the high-bits in boolean values held in
/// a type wider than i1. See getBooleanContents.
BooleanContent BooleanContents;
/// Information about the contents of the high-bits in boolean values held in
/// a type wider than i1. See getBooleanContents.
BooleanContent BooleanFloatContents;
/// Information about the contents of the high-bits in boolean vector values
/// when the element type is wider than i1. See getBooleanContents.
BooleanContent BooleanVectorContents;
/// The target scheduling preference: shortest possible total cycles or lowest
/// register usage.
Sched::Preference SchedPreferenceInfo;
/// The size, in bytes, of the target's jmp_buf buffers
unsigned JumpBufSize;
/// The alignment, in bytes, of the target's jmp_buf buffers
unsigned JumpBufAlignment;
/// The minimum alignment that any argument on the stack needs to have.
unsigned MinStackArgumentAlignment;
/// The minimum function alignment (used when optimizing for size, and to
/// prevent explicitly provided alignment from leading to incorrect code).
unsigned MinFunctionAlignment;
/// The preferred function alignment (used when alignment unspecified and
/// optimizing for speed).
unsigned PrefFunctionAlignment;
/// The preferred loop alignment.
unsigned PrefLoopAlignment;
/// Whether the DAG builder should automatically insert fences and reduce
/// ordering for atomics. (This will be set for most architectures with
/// weak memory ordering.)
bool InsertFencesForAtomic;
/// If set to a physical register, this specifies the register that
/// llvm.savestack/llvm.restorestack should save and restore.
unsigned StackPointerRegisterToSaveRestore;
/// If set to a physical register, this specifies the register that receives
/// the exception address on entry to a landing pad.
unsigned ExceptionPointerRegister;
/// If set to a physical register, this specifies the register that receives
/// the exception typeid on entry to a landing pad.
unsigned ExceptionSelectorRegister;
/// This indicates the default register class to use for each ValueType the
/// target supports natively.
const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
/// This indicates the "representative" register class to use for each
/// ValueType the target supports natively. This information is used by the
/// scheduler to track register pressure. By default, the representative
/// register class is the largest legal super-reg register class of the
/// register class of the specified type. e.g. On x86, i8, i16, and i32's
/// representative class would be GR32.
const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];
/// This indicates the "cost" of the "representative" register class for each
/// ValueType. The cost is used by the scheduler to approximate register
/// pressure.
uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
/// For any value types we are promoting or expanding, this contains the value
/// type that we are changing to. For Expanded types, this contains one step
/// of the expand (e.g. i64 -> i32), even if there are multiple steps required
/// (e.g. i64 -> i16). For types natively supported by the system, this holds
/// the same type (e.g. i32 -> i32).
MVT TransformToType[MVT::LAST_VALUETYPE];
/// For each operation and each value type, keep a LegalizeAction that
/// indicates how instruction selection should deal with the operation. Most
/// operations are Legal (aka, supported natively by the target), but
/// operations that are not should be described. Note that operations on
/// non-legal value types are not described here.
uint8_t OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
/// For each load extension type and each value type, keep a LegalizeAction
/// that indicates how instruction selection should deal with a load of a
/// specific value type and extension type.
uint8_t LoadExtActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE]
[ISD::LAST_LOADEXT_TYPE];
/// For each value type pair keep a LegalizeAction that indicates whether a
/// truncating store of a specific value type and truncating type is legal.
uint8_t TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
/// For each indexed mode and each value type, keep a pair of LegalizeAction
/// that indicates how instruction selection should deal with the load /
/// store.
///
/// The first dimension is the value_type for the reference. The second
/// dimension represents the various modes for load store.
uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
/// For each condition code (ISD::CondCode) keep a LegalizeAction that
/// indicates how instruction selection should deal with the condition code.
///
/// Because each CC action takes up 2 bits, we need to have the array size be
/// large enough to fit all of the value types. This can be done by rounding
/// up the MVT::LAST_VALUETYPE value to the next multiple of 16.
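/// For example, with 2 bits per entry one uint32_t word covers 16 value
/// types, so a hypothetical MVT::LAST_VALUETYPE of 58 would need
/// (58 + 15) / 16 = 4 words per condition code.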
uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE + 15) / 16];
ValueTypeActionImpl ValueTypeActions;
private:
LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;
private:
std::vector<std::pair<MVT, const TargetRegisterClass*> > AvailableRegClasses;
/// Targets can specify ISD nodes that they would like PerformDAGCombine
/// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
/// array.
unsigned char
TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
/// For operations that must be promoted to a specific type, this holds the
/// destination type. This map should be sparse, so don't hold it as an
/// array.
///
/// Targets add entries to this map with AddPromotedToType(..), clients access
/// this with getTypeToPromoteTo(..).
std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
PromoteToType;
/// Stores the name of each libcall.
const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL];
/// The ISD::CondCode that should be used to test the result of each of the
/// comparison libcall against zero.
ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
/// Stores the CallingConv that should be used for each libcall.
CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
protected:
/// Return true if the extension represented by \p I is free.
/// \pre \p I is a sign, zero, or fp extension and
/// is[Z|FP]ExtFree of the related types is not true.
virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
/// \brief Specify maximum number of store instructions per memset call.
///
/// When lowering \@llvm.memset this field specifies the maximum number of
/// store operations that may be substituted for the call to memset. Targets
/// must set this value based on the cost threshold for that target. Targets
/// should assume that the memset will be done using as many of the largest
/// store operations first, followed by smaller ones, if necessary, per
/// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
/// with 16-bit alignment would result in four 2-byte stores and one 1-byte
/// store. This only applies to setting a constant array of a constant size.
unsigned MaxStoresPerMemset;
/// Maximum number of store operations that may be substituted for the call
/// to memset, used for functions with OptSize attribute.
unsigned MaxStoresPerMemsetOptSize;
/// \brief Specify maximum number of store instructions per memcpy call.
///
/// When lowering \@llvm.memcpy this field specifies the maximum number of
/// store operations that may be substituted for a call to memcpy. Targets
/// must set this value based on the cost threshold for that target. Targets
/// should assume that the memcpy will be done using as many of the largest
/// store operations first, followed by smaller ones, if necessary, per
/// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
/// with 32-bit alignment would result in one 4-byte store, one 2-byte store,
/// and one 1-byte store. This only applies to copying a constant array of
/// constant size.
unsigned MaxStoresPerMemcpy;
/// Maximum number of store operations that may be substituted for a call to
/// memcpy, used for functions with OptSize attribute.
unsigned MaxStoresPerMemcpyOptSize;
/// \brief Specify maximum number of store instructions per memmove call.
///
/// When lowering \@llvm.memmove this field specifies the maximum number of
/// store instructions that may be substituted for a call to memmove. Targets
/// must set this value based on the cost threshold for that target. Targets
/// should assume that the memmove will be done using as many of the largest
/// store operations first, followed by smaller ones, if necessary, per
/// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
/// with 8-bit alignment would result in nine 1-byte stores. This only
/// applies to copying a constant array of constant size.
unsigned MaxStoresPerMemmove;
/// Maximum number of store instructions that may be substituted for a call to
/// memmove, used for functions with OptSize attribute.
unsigned MaxStoresPerMemmoveOptSize;
/// Tells the code generator that select is more expensive than a branch if
/// the branch is usually predicted right.
bool PredictableSelectIsExpensive;
/// MaskAndBranchFoldingIsLegal - Indicates if the target supports folding
/// a mask of a single bit, a compare, and a branch into a single instruction.
bool MaskAndBranchFoldingIsLegal;
/// \see enableExtLdPromotion.
bool EnableExtLdPromotion;
protected:
/// Return true if the value types that can be represented by the specified
/// register class are all legal.
bool isLegalRC(const TargetRegisterClass *RC) const;
/// Replace/modify any TargetFrameIndex operands with a target-dependent
/// sequence of memory operands that is recognized by PrologEpilogInserter.
MachineBasicBlock *emitPatchPoint(MachineInstr *MI,
MachineBasicBlock *MBB) const;
};
/// This class defines information used to lower LLVM code to legal SelectionDAG
/// operators that the target instruction selector can accept natively.
///
/// This class also defines callbacks that targets must implement to lower
/// target-specific constructs to SelectionDAG operators.
class TargetLowering : public TargetLoweringBase {
TargetLowering(const TargetLowering&) = delete;
void operator=(const TargetLowering&) = delete;
public:
/// NOTE: The TargetMachine owns TLOF.
explicit TargetLowering(const TargetMachine &TM);
/// Returns true (by value) and sets the base pointer, offset pointer, and
/// addressing mode (by reference) if the node's address can be legally
/// represented as a pre-indexed load / store address.
virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
SDValue &/*Offset*/,
ISD::MemIndexedMode &/*AM*/,
SelectionDAG &/*DAG*/) const {
return false;
}
/// Returns true (by value) and sets the base pointer, offset pointer, and
/// addressing mode (by reference) if this node can be combined with a load /
/// store to form a post-indexed load / store.
virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
SDValue &/*Base*/,
SDValue &/*Offset*/,
ISD::MemIndexedMode &/*AM*/,
SelectionDAG &/*DAG*/) const {
return false;
}
/// Return the entry encoding for a jump table in the current function. The
/// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
virtual unsigned getJumpTableEncoding() const;
virtual const MCExpr *
LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
MCContext &/*Ctx*/) const {
llvm_unreachable("Need to implement this hook if target has custom JTIs");
}
/// Returns relocation base for the given PIC jumptable.
virtual SDValue getPICJumpTableRelocBase(SDValue Table,
SelectionDAG &DAG) const;
/// This returns the relocation base for the given PIC jumptable, the same as
/// getPICJumpTableRelocBase, but as an MCExpr.
virtual const MCExpr *
getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
unsigned JTI, MCContext &Ctx) const;
/// Return true if folding a constant offset with the given GlobalAddress is
/// legal. It is frequently not legal in PIC relocation models.
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
SDValue &Chain) const;
void softenSetCCOperands(SelectionDAG &DAG, EVT VT,
SDValue &NewLHS, SDValue &NewRHS,
ISD::CondCode &CCCode, SDLoc DL) const;
/// Returns a pair of (return value, chain).
/// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
EVT RetVT, const SDValue *Ops,
unsigned NumOps, bool isSigned,
SDLoc dl, bool doesNotReturn = false,
bool isReturnValueUsed = true) const;
//===--------------------------------------------------------------------===//
// TargetLowering Optimization Methods
//
/// A convenience struct that encapsulates a DAG, and two SDValues for
/// returning information from TargetLowering to its clients that want to
/// combine.
struct TargetLoweringOpt {
SelectionDAG &DAG;
bool LegalTys;
bool LegalOps;
SDValue Old;
SDValue New;
explicit TargetLoweringOpt(SelectionDAG &InDAG,
bool LT, bool LO) :
DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
bool LegalTypes() const { return LegalTys; }
bool LegalOperations() const { return LegalOps; }
bool CombineTo(SDValue O, SDValue N) {
Old = O;
New = N;
return true;
}
/// Check to see if the specified operand of the specified instruction is a
/// constant integer. If so, check to see if there are any bits set in the
/// constant that are not demanded. If so, shrink the constant and return
/// true.
bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded);
/// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. This
/// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
/// generalized for targets with other types of implicit widening casts.
bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
SDLoc dl);
};
/// Look at Op. At this point, we know that only the DemandedMask bits of the
/// result of Op are ever used downstream. If we can use this information to
/// simplify Op, create a new simplified DAG node and return true, returning
/// the original and new nodes in Old and New. Otherwise, analyze the
/// expression and return a mask of KnownOne and KnownZero bits for the
/// expression (used to simplify the caller). The KnownZero/One bits may only
/// be accurate for those bits in the DemandedMask.
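///
/// A typical use from a target's PerformDAGCombine looks roughly like the
/// sketch below (illustrative only; N is the node being combined and the
/// demanded mask is a made-up example):
/// \code
///   TargetLoweringOpt TLO(DCI.DAG, !DCI.isBeforeLegalize(),
///                         !DCI.isBeforeLegalizeOps());
///   APInt KnownZero, KnownOne;
///   APInt Demanded = APInt::getLowBitsSet(32, 16); // only low 16 bits used
///   if (SimplifyDemandedBits(N->getOperand(0), Demanded, KnownZero, KnownOne,
///                            TLO)) {
///     DCI.CommitTargetLoweringOpt(TLO);
///     return SDValue(N, 0);
///   }
/// \endcode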
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
APInt &KnownZero, APInt &KnownOne,
TargetLoweringOpt &TLO, unsigned Depth = 0) const;
/// Determine which of the bits specified in Mask are known to be either zero
/// or one and return them in the KnownZero/KnownOne bitsets.
virtual void computeKnownBitsForTargetNode(const SDValue Op,
APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
unsigned Depth = 0) const;
/// This method can be implemented by targets that want to expose additional
/// information about sign bits to the DAG Combiner.
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
const SelectionDAG &DAG,
unsigned Depth = 0) const;
struct DAGCombinerInfo {
void *DC; // The DAG Combiner object.
CombineLevel Level;
bool CalledByLegalizer;
public:
SelectionDAG &DAG;
DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
: DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
bool isAfterLegalizeVectorOps() const {
return Level == AfterLegalizeDAG;
}
CombineLevel getDAGCombineLevel() { return Level; }
bool isCalledByLegalizer() const { return CalledByLegalizer; }
void AddToWorklist(SDNode *N);
void RemoveFromWorklist(SDNode *N);
SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
};
/// Return true if N is a constant or constant vector equal to the true value
/// from getBooleanContents().
bool isConstTrueVal(const SDNode *N) const;
/// Return true if N is a constant or constant vector equal to the false value
/// from getBooleanContents().
bool isConstFalseVal(const SDNode *N) const;
/// Try to simplify a setcc built with the specified operands and cc. If it is
/// unable to simplify it, return a null SDValue.
SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
ISD::CondCode Cond, bool foldBooleans,
DAGCombinerInfo &DCI, SDLoc dl) const;
/// Returns true (and the GlobalValue and the offset) if the node is a
/// GlobalAddress + offset.
virtual bool
isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
/// This method will be invoked for all target nodes and for any
/// target-independent nodes that the target has registered an interest in
/// via setTargetDAGCombine().
///
/// The semantics are as follows:
/// Return Value:
/// SDValue.Val == 0 - No change was made
/// SDValue.Val == N - N was replaced, is dead, and is already handled.
/// otherwise - N should be replaced by the returned Operand.
///
/// In addition, methods provided by DAGCombinerInfo may be used to perform
/// more complex transformations.
///
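/// A minimal target override might look like the following sketch
/// (MYTGTISD::FOO and MyTargetLowering are hypothetical names):
/// \code
///   SDValue MyTargetLowering::PerformDAGCombine(SDNode *N,
///                                               DAGCombinerInfo &DCI) const {
///     if (N->getOpcode() == MYTGTISD::FOO && !DCI.isBeforeLegalize())
///       return DCI.DAG.getNode(ISD::ADD, SDLoc(N), N->getValueType(0),
///                              N->getOperand(0), N->getOperand(1));
///     return SDValue(); // no change
///   }
/// \endcode
///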
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
/// Return true if it is profitable to move a following shift through this
/// node, adjusting any immediate operands as necessary to preserve semantics.
/// This transformation may not be desirable if it disrupts a particularly
/// auspicious target-specific tree (e.g. bitfield extraction in AArch64).
/// By default, it returns true.
virtual bool isDesirableToCommuteWithShift(const SDNode *N /*Op*/) const {
return true;
}
/// Return true if the target has native support for the specified value type
/// and it is 'desirable' to use the type for the given node type. e.g. On x86
/// i16 is legal, but undesirable since i16 instruction encodings are longer
/// and some i16 instructions are slow.
virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
// By default, assume all legal types are desirable.
return isTypeLegal(VT);
}
/// Return true if it is profitable for dag combiner to transform a floating
/// point op of the specified opcode to an equivalent op of an integer
/// type. e.g. f32 load -> i32 load can be profitable on ARM.
virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
EVT /*VT*/) const {
return false;
}
/// This method queries the target whether it is beneficial for dag combiner to
/// promote the specified node. If true, it should return the desired
/// promotion type by reference.
virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
return false;
}
//===--------------------------------------------------------------------===//
// Lowering methods - These methods must be implemented by targets so that
// the SelectionDAGBuilder code knows how to lower these.
//
/// This hook must be implemented to lower the incoming (formal) arguments,
/// described by the Ins array, into the specified DAG. The implementation
/// should fill in the InVals array with legal-type argument values, and
/// return the resulting token chain value.
///
virtual SDValue
LowerFormalArguments(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
bool /*isVarArg*/,
const SmallVectorImpl<ISD::InputArg> &/*Ins*/,
SDLoc /*dl*/, SelectionDAG &/*DAG*/,
SmallVectorImpl<SDValue> &/*InVals*/) const {
llvm_unreachable("Not Implemented");
}
struct ArgListEntry {
SDValue Node;
Type* Ty;
bool isSExt : 1;
bool isZExt : 1;
bool isInReg : 1;
bool isSRet : 1;
bool isNest : 1;
bool isByVal : 1;
bool isInAlloca : 1;
bool isReturned : 1;
uint16_t Alignment;
ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
isSRet(false), isNest(false), isByVal(false), isInAlloca(false),
isReturned(false), Alignment(0) { }
void setAttributes(ImmutableCallSite *CS, unsigned AttrIdx);
};
typedef std::vector<ArgListEntry> ArgListTy;
/// This structure contains all information that is necessary for lowering
/// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
/// needs to lower a call, and targets will see this struct in their LowerCall
/// implementation.
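///
/// Typical use when lowering a call (illustrative sketch; Chain, dl, RetTy,
/// Callee and Args are assumed to have been built by the caller):
/// \code
///   CallLoweringInfo CLI(DAG);
///   CLI.setDebugLoc(dl)
///      .setChain(Chain)
///      .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));
///   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
/// \endcode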
struct CallLoweringInfo {
SDValue Chain;
Type *RetTy;
bool RetSExt : 1;
bool RetZExt : 1;
bool IsVarArg : 1;
bool IsInReg : 1;
bool DoesNotReturn : 1;
bool IsReturnValueUsed : 1;
// IsTailCall should be modified by implementations of
// TargetLowering::LowerCall that perform tail call conversions.
bool IsTailCall;
unsigned NumFixedArgs;
CallingConv::ID CallConv;
SDValue Callee;
ArgListTy Args;
SelectionDAG &DAG;
SDLoc DL;
ImmutableCallSite *CS;
bool IsPatchPoint;
SmallVector<ISD::OutputArg, 32> Outs;
SmallVector<SDValue, 32> OutVals;
SmallVector<ISD::InputArg, 32> Ins;
CallLoweringInfo(SelectionDAG &DAG)
: RetTy(nullptr), RetSExt(false), RetZExt(false), IsVarArg(false),
IsInReg(false), DoesNotReturn(false), IsReturnValueUsed(true),
IsTailCall(false), NumFixedArgs(-1), CallConv(CallingConv::C),
DAG(DAG), CS(nullptr), IsPatchPoint(false) {}
CallLoweringInfo &setDebugLoc(SDLoc dl) {
DL = dl;
return *this;
}
CallLoweringInfo &setChain(SDValue InChain) {
Chain = InChain;
return *this;
}
CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
SDValue Target, ArgListTy &&ArgsList,
unsigned FixedArgs = -1) {
RetTy = ResultType;
Callee = Target;
CallConv = CC;
NumFixedArgs =
(FixedArgs == static_cast<unsigned>(-1) ? Args.size() : FixedArgs);
Args = std::move(ArgsList);
return *this;
}
CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
SDValue Target, ArgListTy &&ArgsList,
ImmutableCallSite &Call) {
RetTy = ResultType;
IsInReg = Call.paramHasAttr(0, Attribute::InReg);
DoesNotReturn = Call.doesNotReturn();
IsVarArg = FTy->isVarArg();
IsReturnValueUsed = !Call.getInstruction()->use_empty();
RetSExt = Call.paramHasAttr(0, Attribute::SExt);
RetZExt = Call.paramHasAttr(0, Attribute::ZExt);
Callee = Target;
CallConv = Call.getCallingConv();
NumFixedArgs = FTy->getNumParams();
Args = std::move(ArgsList);
CS = &Call;
return *this;
}
CallLoweringInfo &setInRegister(bool Value = true) {
IsInReg = Value;
return *this;
}
CallLoweringInfo &setNoReturn(bool Value = true) {
DoesNotReturn = Value;
return *this;
}
CallLoweringInfo &setVarArg(bool Value = true) {
IsVarArg = Value;
return *this;
}
CallLoweringInfo &setTailCall(bool Value = true) {
IsTailCall = Value;
return *this;
}
CallLoweringInfo &setDiscardResult(bool Value = true) {
IsReturnValueUsed = !Value;
return *this;
}
CallLoweringInfo &setSExtResult(bool Value = true) {
RetSExt = Value;
return *this;
}
CallLoweringInfo &setZExtResult(bool Value = true) {
RetZExt = Value;
return *this;
}
CallLoweringInfo &setIsPatchPoint(bool Value = true) {
IsPatchPoint = Value;
return *this;
}
ArgListTy &getArgs() {
return Args;
}
};
/// This function lowers an abstract call to a function into an actual call.
/// This returns a pair of operands. The first element is the return value
/// for the function (if RetTy is not VoidTy). The second element is the
/// outgoing token chain. It calls LowerCall to do the actual lowering.
std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
/// This hook must be implemented to lower calls into the specified
/// DAG. The outgoing arguments to the call are described by the Outs array,
/// and the values to be returned by the call are described by the Ins
/// array. The implementation should fill in the InVals array with legal-type
/// return values from the call, and return the resulting token chain value.
virtual SDValue
LowerCall(CallLoweringInfo &/*CLI*/,
SmallVectorImpl<SDValue> &/*InVals*/) const {
llvm_unreachable("Not Implemented");
}
/// Target-specific cleanup for formal ByVal parameters.
virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}
/// This hook should be implemented to check whether the return values
/// described by the Outs array can fit into the return registers. If false
/// is returned, an sret-demotion is performed.
virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
MachineFunction &/*MF*/, bool /*isVarArg*/,
const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
LLVMContext &/*Context*/) const
{
// Return true by default to get preexisting behavior.
return true;
}
/// This hook must be implemented to lower outgoing return values, described
/// by the Outs array, into the specified DAG. The implementation should
/// return the resulting token chain value.
virtual SDValue
LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
bool /*isVarArg*/,
const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
const SmallVectorImpl<SDValue> &/*OutVals*/,
SDLoc /*dl*/, SelectionDAG &/*DAG*/) const {
llvm_unreachable("Not Implemented");
}
/// Return true if result of the specified node is used by a return node
/// only. It also computes and returns the input chain for the tail call.
///
/// This is used to determine whether it is possible to codegen a libcall as
/// tail call at legalization time.
virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
return false;
}
/// Return true if the target may be able to emit the call instruction as a tail
/// call. This is used by optimization passes to determine if it's profitable
/// to duplicate return instructions to enable tailcall optimization.
virtual bool mayBeEmittedAsTailCall(CallInst *) const {
return false;
}
/// Return the builtin name for the __builtin___clear_cache intrinsic.
/// The default is to invoke the clear cache library call.
virtual const char * getClearCacheBuiltinName() const {
return "__clear_cache";
}
/// Return the register ID of the name passed in. Used by named register
/// global variables extension. There is no target-independent behaviour
/// so the default action is to bail.
virtual unsigned getRegisterByName(const char* RegName, EVT VT,
SelectionDAG &DAG) const {
report_fatal_error("Named registers not implemented for this target");
}
/// Return the type that should be used to zero or sign extend a
/// zeroext/signext integer argument or return value. FIXME: Most C calling
/// conventions require the return type to be promoted, but this is not true
/// all the time, e.g. i1 on x86-64. It is also not necessary for non-C
/// calling conventions. The frontend should handle this and include all of
/// the necessary information.
virtual EVT getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
ISD::NodeType /*ExtendKind*/) const {
EVT MinVT = getRegisterType(Context, MVT::i32);
return VT.bitsLT(MinVT) ? MinVT : VT;
}
/// For some targets, an LLVM struct type must be broken down into multiple
/// simple types, but the calling convention specifies that the entire struct
/// must be passed in a block of consecutive registers.
virtual bool
functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv,
bool isVarArg) const {
return false;
}
/// Returns a 0 terminated array of registers that can be safely used as
/// scratch registers.
virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
return nullptr;
}
/// This callback is used to prepare for a volatile or atomic load.
/// It takes a chain node as input and returns the chain for the load itself.
///
/// Having a callback like this is necessary for targets like SystemZ,
/// which allows a CPU to reuse the result of a previous load indefinitely,
/// even if a cache-coherent store is performed by another CPU. The default
/// implementation does nothing.
virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, SDLoc DL,
SelectionDAG &DAG) const {
return Chain;
}
/// This callback is invoked by the type legalizer to legalize nodes with an
/// illegal operand type but legal result types. It replaces the
/// LowerOperation callback in the type Legalizer. The reason we can not do
/// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
/// use this callback.
///
/// TODO: Consider merging with ReplaceNodeResults.
///
/// The target places new result values for the node in Results (their number
/// and types must exactly match those of the original return values of
/// the node), or leaves Results empty, which indicates that the node is not
/// to be custom lowered after all.
/// The default implementation calls LowerOperation.
virtual void LowerOperationWrapper(SDNode *N,
SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const;
/// This callback is invoked for operations that are unsupported by the
/// target, which are registered to use 'custom' lowering, and whose defined
/// values are all legal. If the target has no operations that require custom
/// lowering, it need not implement this. The default implementation of this
/// aborts.
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
/// This callback is invoked when a node result type is illegal for the
/// target, and the operation was registered to use 'custom' lowering for that
/// result type. The target places new result values for the node in Results
/// (their number and types must exactly match those of the original return
/// values of the node), or leaves Results empty, which indicates that the
/// node is not to be custom lowered after all.
///
/// If the target has no operations that require custom lowering, it need not
/// implement this. The default implementation aborts.
virtual void ReplaceNodeResults(SDNode * /*N*/,
SmallVectorImpl<SDValue> &/*Results*/,
SelectionDAG &/*DAG*/) const {
llvm_unreachable("ReplaceNodeResults not implemented for this target!");
}
/// This method returns the name of a target specific DAG node.
virtual const char *getTargetNodeName(unsigned Opcode) const;
/// This method returns a target specific FastISel object, or null if the
/// target does not support "fast" ISel.
virtual FastISel *createFastISel(FunctionLoweringInfo &,
const TargetLibraryInfo *) const {
return nullptr;
}
bool verifyReturnAddressArgumentIsConstant(SDValue Op,
SelectionDAG &DAG) const;
//===--------------------------------------------------------------------===//
// Inline Asm Support hooks
//
/// This hook allows the target to expand an inline asm call to be explicit
/// llvm code if it wants to. This is useful for turning simple inline asms
/// into LLVM intrinsics, which gives the compiler more information about the
/// behavior of the code.
virtual bool ExpandInlineAsm(CallInst *) const {
return false;
}
enum ConstraintType {
C_Register, // Constraint represents specific register(s).
C_RegisterClass, // Constraint represents any of register(s) in class.
C_Memory, // Memory constraint.
C_Other, // Something else.
C_Unknown // Unsupported constraint.
};
enum ConstraintWeight {
// Generic weights.
CW_Invalid = -1, // No match.
CW_Okay = 0, // Acceptable.
CW_Good = 1, // Good weight.
CW_Better = 2, // Better weight.
CW_Best = 3, // Best weight.
// Well-known weights.
CW_SpecificReg = CW_Okay, // Specific register operands.
CW_Register = CW_Good, // Register operands.
CW_Memory = CW_Better, // Memory operands.
CW_Constant = CW_Best, // Constant operand.
CW_Default = CW_Okay // Default or don't know type.
};
/// This contains information for each constraint that we are lowering.
struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
/// This contains the actual string for the code, like "m". TargetLowering
/// picks the 'best' code from ConstraintInfo::Codes that most closely
/// matches the operand.
std::string ConstraintCode;
/// Information about the constraint code, e.g. Register, RegisterClass,
/// Memory, Other, Unknown.
TargetLowering::ConstraintType ConstraintType;
/// If this is the result output operand or a clobber, this is null,
/// otherwise it is the incoming operand to the CallInst. This gets
/// modified as the asm is processed.
Value *CallOperandVal;
/// The ValueType for the operand value.
MVT ConstraintVT;
/// Return true if this is an input operand that is a matching constraint
/// like "4".
bool isMatchingInputConstraint() const;
/// If this is an input matching constraint, this method returns the output
/// operand it matches.
unsigned getMatchedOperand() const;
/// Copy constructor for copying from a ConstraintInfo.
AsmOperandInfo(InlineAsm::ConstraintInfo Info)
: InlineAsm::ConstraintInfo(std::move(Info)),
ConstraintType(TargetLowering::C_Unknown), CallOperandVal(nullptr),
ConstraintVT(MVT::Other) {}
};
typedef std::vector<AsmOperandInfo> AsmOperandInfoVector;
/// Split up the constraint string from the inline assembly value into the
/// specific constraints and their prefixes, and also tie in the associated
/// operand values. If this returns an empty vector, and if the constraint
/// string itself isn't empty, there was an error parsing.
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL,
const TargetRegisterInfo *TRI,
ImmutableCallSite CS) const;
/// Examine constraint type and operand type and determine a weight value.
/// The operand object must already have been set up with the operand type.
virtual ConstraintWeight getMultipleConstraintMatchWeight(
AsmOperandInfo &info, int maIndex) const;
/// Examine constraint string and operand type and determine a weight value.
/// The operand object must already have been set up with the operand type.
virtual ConstraintWeight getSingleConstraintMatchWeight(
AsmOperandInfo &info, const char *constraint) const;
/// Determines the constraint code and constraint type to use for the specific
/// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
/// If the actual operand being passed in is available, it can be passed in as
/// Op, otherwise an empty SDValue can be passed.
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
SDValue Op,
SelectionDAG *DAG = nullptr) const;
/// Given a constraint, return the type of constraint it is for this target.
virtual ConstraintType getConstraintType(StringRef Constraint) const;
/// Given a physical register constraint (e.g. {edx}), return the register
/// number and the register class for the register.
///
/// Given a register class constraint, like 'r', if this corresponds directly
/// to an LLVM register class, return a register of 0 and the register class
/// pointer.
///
/// This should only be used for C_Register constraints. On error, this
/// returns a register number of 0 and a null register class pointer.
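///
/// For example, a target might handle a specific physical register constraint
/// as sketched below (illustrative only; the register and register class names
/// are borrowed from the X86 backend):
/// \code
///   if (Constraint == "{edx}")
///     return std::make_pair(X86::EDX, &X86::GR32RegClass);
/// \endcode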
virtual std::pair<unsigned, const TargetRegisterClass *>
getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
StringRef Constraint, MVT VT) const;
virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const {
if (ConstraintCode == "i")
return InlineAsm::Constraint_i;
else if (ConstraintCode == "m")
return InlineAsm::Constraint_m;
return InlineAsm::Constraint_Unknown;
}
/// Try to replace an X constraint, which matches anything, with another that
/// has more specific requirements based on the type of the corresponding
/// operand. This returns null if there is no replacement to make.
virtual const char *LowerXConstraint(EVT ConstraintVT) const;
/// Lower the specified operand into the Ops vector. If it is invalid, don't
/// add anything to Ops.
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const;
//===--------------------------------------------------------------------===//
// Div utility functions
//
SDValue BuildSDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
bool IsAfterLegalization,
std::vector<SDNode *> *Created) const;
SDValue BuildUDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
bool IsAfterLegalization,
std::vector<SDNode *> *Created) const;
virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor,
SelectionDAG &DAG,
std::vector<SDNode *> *Created) const {
return SDValue();
}
/// Indicate whether this target prefers to combine the given number of FDIVs
/// with the same divisor.
virtual bool combineRepeatedFPDivisors(unsigned NumUsers) const {
return false;
}
/// Hooks for building estimates in place of slower divisions and square
/// roots.
/// Return a reciprocal square root estimate value for the input operand.
/// The RefinementSteps output is the number of Newton-Raphson refinement
/// iterations required to generate a sufficient (though not necessarily
/// IEEE-754 compliant) estimate for the value type.
/// The boolean UseOneConstNR output is used to select a Newton-Raphson
/// algorithm implementation that uses one constant or two constants.
/// A target may choose to implement its own refinement within this function.
/// If that's true, then return '0' as the number of RefinementSteps to avoid
/// any further refinement of the estimate.
/// An empty SDValue return means no estimate sequence can be created.
virtual SDValue getRsqrtEstimate(SDValue Operand, DAGCombinerInfo &DCI,
unsigned &RefinementSteps,
bool &UseOneConstNR) const {
return SDValue();
}
/// Return a reciprocal estimate value for the input operand.
/// The RefinementSteps output is the number of Newton-Raphson refinement
/// iterations required to generate a sufficient (though not necessarily
/// IEEE-754 compliant) estimate for the value type.
/// A target may choose to implement its own refinement within this function.
/// If that's true, then return '0' as the number of RefinementSteps to avoid
/// any further refinement of the estimate.
/// An empty SDValue return means no estimate sequence can be created.
virtual SDValue getRecipEstimate(SDValue Operand, DAGCombinerInfo &DCI,
unsigned &RefinementSteps) const {
return SDValue();
}
//===--------------------------------------------------------------------===//
// Legalization utility functions
//
/// Expand a MUL into two nodes. One that computes the high bits of
/// the result and one that computes the low bits.
/// \param HiLoVT The value type to use for the Lo and Hi nodes.
/// \param LL Low bits of the LHS of the MUL. You can use this parameter
/// if you want to control how low bits are extracted from the LHS.
/// \param LH High bits of the LHS of the MUL. See LL for meaning.
/// \param RL Low bits of the RHS of the MUL. See LL for meaning.
/// \param RH High bits of the RHS of the MUL. See LL for meaning.
/// \returns true if the node has been expanded, false if it has not.
bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
SelectionDAG &DAG, SDValue LL = SDValue(),
SDValue LH = SDValue(), SDValue RL = SDValue(),
SDValue RH = SDValue()) const;
/// Expand float(f32) to SINT(i64) conversion
/// \param N Node to expand
/// \param Result output after conversion
/// \returns True, if the expansion was successful, false otherwise
bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
//===--------------------------------------------------------------------===//
// Instruction Emitting Hooks
//
/// This method should be implemented by targets that mark instructions with
/// the 'usesCustomInserter' flag. These instructions are special in various
/// ways, which require special support to insert. The specified MachineInstr
/// is created but not inserted into any basic blocks, and this method is
/// called to expand it into a sequence of instructions, potentially also
/// creating new basic blocks and control flow.
/// As long as the returned basic block is different (i.e., we created a new
/// one), the custom inserter is free to modify the rest of \p MBB.
virtual MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;
/// This method should be implemented by targets that mark instructions with
/// the 'hasPostISelHook' flag. These instructions must be adjusted after
/// instruction selection by target hooks. e.g. To fill in optional defs for
/// ARM 's' setting instructions.
virtual void
AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;
/// If this function returns true, SelectionDAGBuilder emits a
/// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
virtual bool useLoadStackGuardNode() const {
return false;
}
};
/// Given an LLVM IR type and return type attributes, compute the return value
/// EVTs and flags, and optionally also the offsets, if the return value is
/// being lowered to memory.
void GetReturnInfo(Type *ReturnType, AttributeSet attr,
SmallVectorImpl<ISD::OutputArg> &Outs,
const TargetLowering &TLI, const DataLayout &DL);
} // end llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Object/COFFYAML.h | //===- COFFYAML.h - COFF YAMLIO implementation ------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares classes for handling the YAML representation of COFF.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_OBJECT_COFFYAML_H
#define LLVM_OBJECT_COFFYAML_H
#include "llvm/ADT/Optional.h"
#include "llvm/MC/YAML.h"
#include "llvm/Support/COFF.h"
namespace llvm {
namespace COFF {
inline Characteristics operator|(Characteristics a, Characteristics b) {
uint32_t Ret = static_cast<uint32_t>(a) | static_cast<uint32_t>(b);
return static_cast<Characteristics>(Ret);
}
inline SectionCharacteristics operator|(SectionCharacteristics a,
SectionCharacteristics b) {
uint32_t Ret = static_cast<uint32_t>(a) | static_cast<uint32_t>(b);
return static_cast<SectionCharacteristics>(Ret);
}
inline DLLCharacteristics operator|(DLLCharacteristics a,
DLLCharacteristics b) {
uint16_t Ret = static_cast<uint16_t>(a) | static_cast<uint16_t>(b);
return static_cast<DLLCharacteristics>(Ret);
}
}
// The structure of the yaml files is not an exact 1:1 match to COFF. In order
// to use yaml::IO, we use these structures which are closer to the source.
namespace COFFYAML {
LLVM_YAML_STRONG_TYPEDEF(uint8_t, COMDATType)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, WeakExternalCharacteristics)
LLVM_YAML_STRONG_TYPEDEF(uint8_t, AuxSymbolType)
struct Relocation {
uint32_t VirtualAddress;
uint16_t Type;
StringRef SymbolName;
};
struct Section {
COFF::section Header;
unsigned Alignment;
yaml::BinaryRef SectionData;
std::vector<Relocation> Relocations;
StringRef Name;
Section();
};
struct Symbol {
COFF::symbol Header;
COFF::SymbolBaseType SimpleType;
COFF::SymbolComplexType ComplexType;
Optional<COFF::AuxiliaryFunctionDefinition> FunctionDefinition;
Optional<COFF::AuxiliarybfAndefSymbol> bfAndefSymbol;
Optional<COFF::AuxiliaryWeakExternal> WeakExternal;
StringRef File;
Optional<COFF::AuxiliarySectionDefinition> SectionDefinition;
Optional<COFF::AuxiliaryCLRToken> CLRToken;
StringRef Name;
Symbol();
};
struct PEHeader {
COFF::PE32Header Header;
Optional<COFF::DataDirectory> DataDirectories[COFF::NUM_DATA_DIRECTORIES];
};
struct Object {
Optional<PEHeader> OptionalHeader;
COFF::header Header;
std::vector<Section> Sections;
std::vector<Symbol> Symbols;
Object();
};
}
}
LLVM_YAML_IS_SEQUENCE_VECTOR(COFFYAML::Section)
LLVM_YAML_IS_SEQUENCE_VECTOR(COFFYAML::Symbol)
LLVM_YAML_IS_SEQUENCE_VECTOR(COFFYAML::Relocation)
namespace llvm {
namespace yaml {
template <>
struct ScalarEnumerationTraits<COFFYAML::WeakExternalCharacteristics> {
static void enumeration(IO &IO, COFFYAML::WeakExternalCharacteristics &Value);
};
template <>
struct ScalarEnumerationTraits<COFFYAML::AuxSymbolType> {
static void enumeration(IO &IO, COFFYAML::AuxSymbolType &Value);
};
template <>
struct ScalarEnumerationTraits<COFFYAML::COMDATType> {
static void enumeration(IO &IO, COFFYAML::COMDATType &Value);
};
template <>
struct ScalarEnumerationTraits<COFF::MachineTypes> {
static void enumeration(IO &IO, COFF::MachineTypes &Value);
};
template <>
struct ScalarEnumerationTraits<COFF::SymbolBaseType> {
static void enumeration(IO &IO, COFF::SymbolBaseType &Value);
};
template <>
struct ScalarEnumerationTraits<COFF::SymbolStorageClass> {
static void enumeration(IO &IO, COFF::SymbolStorageClass &Value);
};
template <>
struct ScalarEnumerationTraits<COFF::SymbolComplexType> {
static void enumeration(IO &IO, COFF::SymbolComplexType &Value);
};
template <>
struct ScalarEnumerationTraits<COFF::RelocationTypeI386> {
static void enumeration(IO &IO, COFF::RelocationTypeI386 &Value);
};
template <>
struct ScalarEnumerationTraits<COFF::RelocationTypeAMD64> {
static void enumeration(IO &IO, COFF::RelocationTypeAMD64 &Value);
};
template <>
struct ScalarEnumerationTraits<COFF::WindowsSubsystem> {
static void enumeration(IO &IO, COFF::WindowsSubsystem &Value);
};
template <>
struct ScalarBitSetTraits<COFF::Characteristics> {
static void bitset(IO &IO, COFF::Characteristics &Value);
};
template <>
struct ScalarBitSetTraits<COFF::SectionCharacteristics> {
static void bitset(IO &IO, COFF::SectionCharacteristics &Value);
};
template <>
struct ScalarBitSetTraits<COFF::DLLCharacteristics> {
static void bitset(IO &IO, COFF::DLLCharacteristics &Value);
};
template <>
struct MappingTraits<COFFYAML::Relocation> {
static void mapping(IO &IO, COFFYAML::Relocation &Rel);
};
template <>
struct MappingTraits<COFFYAML::PEHeader> {
static void mapping(IO &IO, COFFYAML::PEHeader &PH);
};
template <>
struct MappingTraits<COFF::DataDirectory> {
static void mapping(IO &IO, COFF::DataDirectory &DD);
};
template <>
struct MappingTraits<COFF::header> {
static void mapping(IO &IO, COFF::header &H);
};
template <> struct MappingTraits<COFF::AuxiliaryFunctionDefinition> {
static void mapping(IO &IO, COFF::AuxiliaryFunctionDefinition &AFD);
};
template <> struct MappingTraits<COFF::AuxiliarybfAndefSymbol> {
static void mapping(IO &IO, COFF::AuxiliarybfAndefSymbol &AAS);
};
template <> struct MappingTraits<COFF::AuxiliaryWeakExternal> {
static void mapping(IO &IO, COFF::AuxiliaryWeakExternal &AWE);
};
template <> struct MappingTraits<COFF::AuxiliarySectionDefinition> {
static void mapping(IO &IO, COFF::AuxiliarySectionDefinition &ASD);
};
template <> struct MappingTraits<COFF::AuxiliaryCLRToken> {
static void mapping(IO &IO, COFF::AuxiliaryCLRToken &ACT);
};
template <>
struct MappingTraits<COFFYAML::Symbol> {
static void mapping(IO &IO, COFFYAML::Symbol &S);
};
template <>
struct MappingTraits<COFFYAML::Section> {
static void mapping(IO &IO, COFFYAML::Section &Sec);
};
template <>
struct MappingTraits<COFFYAML::Object> {
static void mapping(IO &IO, COFFYAML::Object &Obj);
};
} // end namespace yaml
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Object/MachO.h | //===- MachO.h - MachO object file implementation ---------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the MachOObjectFile class, which implements the ObjectFile
// interface for MachO files.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_OBJECT_MACHO_H
#define LLVM_OBJECT_MACHO_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/MachO.h"
namespace llvm {
namespace object {
/// DiceRef - This is a value type class that represents a single
/// data-in-code entry in the table in a Mach-O object file.
class DiceRef {
DataRefImpl DicePimpl;
const ObjectFile *OwningObject;
public:
DiceRef() : OwningObject(nullptr) { }
DiceRef(DataRefImpl DiceP, const ObjectFile *Owner);
bool operator==(const DiceRef &Other) const;
bool operator<(const DiceRef &Other) const;
void moveNext();
std::error_code getOffset(uint32_t &Result) const;
std::error_code getLength(uint16_t &Result) const;
std::error_code getKind(uint16_t &Result) const;
DataRefImpl getRawDataRefImpl() const;
const ObjectFile *getObjectFile() const;
};
typedef content_iterator<DiceRef> dice_iterator;
/// ExportEntry encapsulates the current-state-of-the-walk used when doing a
/// non-recursive walk of the trie data structure. This allows you to iterate
/// across all exported symbols using:
/// for (const llvm::object::ExportEntry &AnExport : Obj->exports()) {
/// }
class ExportEntry {
public:
ExportEntry(ArrayRef<uint8_t> Trie);
StringRef name() const;
uint64_t flags() const;
uint64_t address() const;
uint64_t other() const;
StringRef otherName() const;
uint32_t nodeOffset() const;
bool operator==(const ExportEntry &) const;
void moveNext();
private:
friend class MachOObjectFile;
void moveToFirst();
void moveToEnd();
uint64_t readULEB128(const uint8_t *&p);
void pushDownUntilBottom();
void pushNode(uint64_t Offset);
// Represents a node in the mach-o exports trie.
struct NodeState {
NodeState(const uint8_t *Ptr);
const uint8_t *Start;
const uint8_t *Current;
uint64_t Flags;
uint64_t Address;
uint64_t Other;
const char *ImportName;
unsigned ChildCount;
unsigned NextChildIndex;
unsigned ParentStringLength;
bool IsExportNode;
};
ArrayRef<uint8_t> Trie;
SmallString<256> CumulativeString;
SmallVector<NodeState, 16> Stack;
bool Malformed;
bool Done;
};
typedef content_iterator<ExportEntry> export_iterator;
/// MachORebaseEntry encapsulates the current state in the decompression of
/// rebasing opcodes. This allows you to iterate through the compressed table of
/// rebasing using:
/// for (const llvm::object::MachORebaseEntry &Entry : Obj->rebaseTable()) {
/// }
class MachORebaseEntry {
public:
MachORebaseEntry(ArrayRef<uint8_t> opcodes, bool is64Bit);
uint32_t segmentIndex() const;
uint64_t segmentOffset() const;
StringRef typeName() const;
bool operator==(const MachORebaseEntry &) const;
void moveNext();
private:
friend class MachOObjectFile;
void moveToFirst();
void moveToEnd();
uint64_t readULEB128();
ArrayRef<uint8_t> Opcodes;
const uint8_t *Ptr;
uint64_t SegmentOffset;
uint32_t SegmentIndex;
uint64_t RemainingLoopCount;
uint64_t AdvanceAmount;
uint8_t RebaseType;
uint8_t PointerSize;
bool Malformed;
bool Done;
};
typedef content_iterator<MachORebaseEntry> rebase_iterator;
/// MachOBindEntry encapsulates the current state in the decompression of
/// binding opcodes. This allows you to iterate through the compressed table of
/// bindings using:
/// for (const llvm::object::MachOBindEntry &Entry : Obj->bindTable()) {
/// }
class MachOBindEntry {
public:
enum class Kind { Regular, Lazy, Weak };
MachOBindEntry(ArrayRef<uint8_t> Opcodes, bool is64Bit, MachOBindEntry::Kind);
uint32_t segmentIndex() const;
uint64_t segmentOffset() const;
StringRef typeName() const;
StringRef symbolName() const;
uint32_t flags() const;
int64_t addend() const;
int ordinal() const;
bool operator==(const MachOBindEntry &) const;
void moveNext();
private:
friend class MachOObjectFile;
void moveToFirst();
void moveToEnd();
uint64_t readULEB128();
int64_t readSLEB128();
ArrayRef<uint8_t> Opcodes;
const uint8_t *Ptr;
uint64_t SegmentOffset;
uint32_t SegmentIndex;
StringRef SymbolName;
int Ordinal;
uint32_t Flags;
int64_t Addend;
uint64_t RemainingLoopCount;
uint64_t AdvanceAmount;
uint8_t BindType;
uint8_t PointerSize;
Kind TableKind;
bool Malformed;
bool Done;
};
typedef content_iterator<MachOBindEntry> bind_iterator;
class MachOObjectFile : public ObjectFile {
public:
struct LoadCommandInfo {
const char *Ptr; // Where in memory the load command is.
MachO::load_command C; // The command itself.
};
typedef SmallVector<LoadCommandInfo, 4> LoadCommandList;
typedef LoadCommandList::const_iterator load_command_iterator;
MachOObjectFile(MemoryBufferRef Object, bool IsLittleEndian, bool Is64Bits,
std::error_code &EC);
void moveSymbolNext(DataRefImpl &Symb) const override;
uint64_t getNValue(DataRefImpl Sym) const;
ErrorOr<StringRef> getSymbolName(DataRefImpl Symb) const override;
// MachO specific.
std::error_code getIndirectName(DataRefImpl Symb, StringRef &Res) const;
unsigned getSectionType(SectionRef Sec) const;
ErrorOr<uint64_t> getSymbolAddress(DataRefImpl Symb) const override;
uint32_t getSymbolAlignment(DataRefImpl Symb) const override;
uint64_t getCommonSymbolSizeImpl(DataRefImpl Symb) const override;
SymbolRef::Type getSymbolType(DataRefImpl Symb) const override;
uint32_t getSymbolFlags(DataRefImpl Symb) const override;
std::error_code getSymbolSection(DataRefImpl Symb,
section_iterator &Res) const override;
unsigned getSymbolSectionID(SymbolRef Symb) const;
unsigned getSectionID(SectionRef Sec) const;
void moveSectionNext(DataRefImpl &Sec) const override;
std::error_code getSectionName(DataRefImpl Sec,
StringRef &Res) const override;
uint64_t getSectionAddress(DataRefImpl Sec) const override;
uint64_t getSectionSize(DataRefImpl Sec) const override;
std::error_code getSectionContents(DataRefImpl Sec,
StringRef &Res) const override;
uint64_t getSectionAlignment(DataRefImpl Sec) const override;
bool isSectionText(DataRefImpl Sec) const override;
bool isSectionData(DataRefImpl Sec) const override;
bool isSectionBSS(DataRefImpl Sec) const override;
bool isSectionVirtual(DataRefImpl Sec) const override;
relocation_iterator section_rel_begin(DataRefImpl Sec) const override;
relocation_iterator section_rel_end(DataRefImpl Sec) const override;
void moveRelocationNext(DataRefImpl &Rel) const override;
uint64_t getRelocationOffset(DataRefImpl Rel) const override;
symbol_iterator getRelocationSymbol(DataRefImpl Rel) const override;
section_iterator getRelocationSection(DataRefImpl Rel) const;
uint64_t getRelocationType(DataRefImpl Rel) const override;
void getRelocationTypeName(DataRefImpl Rel,
SmallVectorImpl<char> &Result) const override;
uint8_t getRelocationLength(DataRefImpl Rel) const;
// MachO specific.
std::error_code getLibraryShortNameByIndex(unsigned Index, StringRef &) const;
section_iterator getRelocationRelocatedSection(relocation_iterator Rel) const;
// TODO: Would be useful to have an iterator based version
// of the load command interface too.
basic_symbol_iterator symbol_begin_impl() const override;
basic_symbol_iterator symbol_end_impl() const override;
// MachO specific.
basic_symbol_iterator getSymbolByIndex(unsigned Index) const;
section_iterator section_begin() const override;
section_iterator section_end() const override;
uint8_t getBytesInAddress() const override;
StringRef getFileFormatName() const override;
unsigned getArch() const override;
Triple getArch(const char **McpuDefault, Triple *ThumbTriple) const;
relocation_iterator section_rel_begin(unsigned Index) const;
relocation_iterator section_rel_end(unsigned Index) const;
dice_iterator begin_dices() const;
dice_iterator end_dices() const;
load_command_iterator begin_load_commands() const;
load_command_iterator end_load_commands() const;
iterator_range<load_command_iterator> load_commands() const;
/// For use iterating over all exported symbols.
iterator_range<export_iterator> exports() const;
/// For use examining a trie not in a MachOObjectFile.
static iterator_range<export_iterator> exports(ArrayRef<uint8_t> Trie);
/// For use iterating over all rebase table entries.
iterator_range<rebase_iterator> rebaseTable() const;
/// For use examining rebase opcodes not in a MachOObjectFile.
static iterator_range<rebase_iterator> rebaseTable(ArrayRef<uint8_t> Opcodes,
bool is64);
/// For use iterating over all bind table entries.
iterator_range<bind_iterator> bindTable() const;
/// For use iterating over all lazy bind table entries.
iterator_range<bind_iterator> lazyBindTable() const;
/// For use iterating over all weak bind table entries.
iterator_range<bind_iterator> weakBindTable() const;
/// For use examining bind opcodes not in a MachOObjectFile.
static iterator_range<bind_iterator> bindTable(ArrayRef<uint8_t> Opcodes,
bool is64,
MachOBindEntry::Kind);
// In a MachO file, sections have a segment name. This is used in the .o
// files. They have a single segment, but this field specifies which segment
// a section should be placed in when building the final object.
StringRef getSectionFinalSegmentName(DataRefImpl Sec) const;
// Names are stored as 16 bytes. These return the raw 16 bytes without
// interpreting them as a C string.
ArrayRef<char> getSectionRawName(DataRefImpl Sec) const;
ArrayRef<char> getSectionRawFinalSegmentName(DataRefImpl Sec) const;
// MachO specific Info about relocations.
bool isRelocationScattered(const MachO::any_relocation_info &RE) const;
unsigned getPlainRelocationSymbolNum(
const MachO::any_relocation_info &RE) const;
bool getPlainRelocationExternal(const MachO::any_relocation_info &RE) const;
bool getScatteredRelocationScattered(
const MachO::any_relocation_info &RE) const;
uint32_t getScatteredRelocationValue(
const MachO::any_relocation_info &RE) const;
uint32_t getScatteredRelocationType(
const MachO::any_relocation_info &RE) const;
unsigned getAnyRelocationAddress(const MachO::any_relocation_info &RE) const;
unsigned getAnyRelocationPCRel(const MachO::any_relocation_info &RE) const;
unsigned getAnyRelocationLength(const MachO::any_relocation_info &RE) const;
unsigned getAnyRelocationType(const MachO::any_relocation_info &RE) const;
SectionRef getAnyRelocationSection(const MachO::any_relocation_info &RE) const;
// MachO specific structures.
MachO::section getSection(DataRefImpl DRI) const;
MachO::section_64 getSection64(DataRefImpl DRI) const;
MachO::section getSection(const LoadCommandInfo &L, unsigned Index) const;
MachO::section_64 getSection64(const LoadCommandInfo &L,unsigned Index) const;
MachO::nlist getSymbolTableEntry(DataRefImpl DRI) const;
MachO::nlist_64 getSymbol64TableEntry(DataRefImpl DRI) const;
MachO::linkedit_data_command
getLinkeditDataLoadCommand(const LoadCommandInfo &L) const;
MachO::segment_command
getSegmentLoadCommand(const LoadCommandInfo &L) const;
MachO::segment_command_64
getSegment64LoadCommand(const LoadCommandInfo &L) const;
MachO::linker_option_command
getLinkerOptionLoadCommand(const LoadCommandInfo &L) const;
MachO::version_min_command
getVersionMinLoadCommand(const LoadCommandInfo &L) const;
MachO::dylib_command
getDylibIDLoadCommand(const LoadCommandInfo &L) const;
MachO::dyld_info_command
getDyldInfoLoadCommand(const LoadCommandInfo &L) const;
MachO::dylinker_command
getDylinkerCommand(const LoadCommandInfo &L) const;
MachO::uuid_command
getUuidCommand(const LoadCommandInfo &L) const;
MachO::rpath_command
getRpathCommand(const LoadCommandInfo &L) const;
MachO::source_version_command
getSourceVersionCommand(const LoadCommandInfo &L) const;
MachO::entry_point_command
getEntryPointCommand(const LoadCommandInfo &L) const;
MachO::encryption_info_command
getEncryptionInfoCommand(const LoadCommandInfo &L) const;
MachO::encryption_info_command_64
getEncryptionInfoCommand64(const LoadCommandInfo &L) const;
MachO::sub_framework_command
getSubFrameworkCommand(const LoadCommandInfo &L) const;
MachO::sub_umbrella_command
getSubUmbrellaCommand(const LoadCommandInfo &L) const;
MachO::sub_library_command
getSubLibraryCommand(const LoadCommandInfo &L) const;
MachO::sub_client_command
getSubClientCommand(const LoadCommandInfo &L) const;
MachO::routines_command
getRoutinesCommand(const LoadCommandInfo &L) const;
MachO::routines_command_64
getRoutinesCommand64(const LoadCommandInfo &L) const;
MachO::thread_command
getThreadCommand(const LoadCommandInfo &L) const;
MachO::any_relocation_info getRelocation(DataRefImpl Rel) const;
MachO::data_in_code_entry getDice(DataRefImpl Rel) const;
const MachO::mach_header &getHeader() const;
const MachO::mach_header_64 &getHeader64() const;
uint32_t
getIndirectSymbolTableEntry(const MachO::dysymtab_command &DLC,
unsigned Index) const;
MachO::data_in_code_entry getDataInCodeTableEntry(uint32_t DataOffset,
unsigned Index) const;
MachO::symtab_command getSymtabLoadCommand() const;
MachO::dysymtab_command getDysymtabLoadCommand() const;
MachO::linkedit_data_command getDataInCodeLoadCommand() const;
MachO::linkedit_data_command getLinkOptHintsLoadCommand() const;
ArrayRef<uint8_t> getDyldInfoRebaseOpcodes() const;
ArrayRef<uint8_t> getDyldInfoBindOpcodes() const;
ArrayRef<uint8_t> getDyldInfoWeakBindOpcodes() const;
ArrayRef<uint8_t> getDyldInfoLazyBindOpcodes() const;
ArrayRef<uint8_t> getDyldInfoExportsTrie() const;
ArrayRef<uint8_t> getUuid() const;
StringRef getStringTableData() const;
bool is64Bit() const;
void ReadULEB128s(uint64_t Index, SmallVectorImpl<uint64_t> &Out) const;
static StringRef guessLibraryShortName(StringRef Name, bool &isFramework,
StringRef &Suffix);
static Triple::ArchType getArch(uint32_t CPUType);
static Triple getArch(uint32_t CPUType, uint32_t CPUSubType,
const char **McpuDefault = nullptr);
static Triple getThumbArch(uint32_t CPUType, uint32_t CPUSubType,
const char **McpuDefault = nullptr);
static Triple getArch(uint32_t CPUType, uint32_t CPUSubType,
const char **McpuDefault, Triple *ThumbTriple);
static bool isValidArch(StringRef ArchFlag);
static Triple getHostArch();
bool isRelocatableObject() const override;
bool hasPageZeroSegment() const { return HasPageZeroSegment; }
static bool classof(const Binary *v) {
return v->isMachO();
}
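/// Example (illustrative sketch; "Bin" is assumed to be a Binary* the caller
/// already obtained): classof() lets LLVM's RTTI helpers identify Mach-O
/// binaries.
/// \code
///   if (auto *MachO = dyn_cast<MachOObjectFile>(Bin))
///     ; // use the Mach-O specific accessors on MachO
/// \endcode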
private:
uint64_t getSymbolValueImpl(DataRefImpl Symb) const override;
union {
MachO::mach_header_64 Header64;
MachO::mach_header Header;
};
typedef SmallVector<const char*, 1> SectionList;
SectionList Sections;
typedef SmallVector<const char*, 1> LibraryList;
LibraryList Libraries;
LoadCommandList LoadCommands;
typedef SmallVector<StringRef, 1> LibraryShortName;
mutable LibraryShortName LibrariesShortNames;
const char *SymtabLoadCmd;
const char *DysymtabLoadCmd;
const char *DataInCodeLoadCmd;
const char *LinkOptHintsLoadCmd;
const char *DyldInfoLoadCmd;
const char *UuidLoadCmd;
bool HasPageZeroSegment;
};
/// DiceRef
inline DiceRef::DiceRef(DataRefImpl DiceP, const ObjectFile *Owner)
  : DicePimpl(DiceP), OwningObject(Owner) {}
inline bool DiceRef::operator==(const DiceRef &Other) const {
return DicePimpl == Other.DicePimpl;
}
inline bool DiceRef::operator<(const DiceRef &Other) const {
return DicePimpl < Other.DicePimpl;
}
inline void DiceRef::moveNext() {
const MachO::data_in_code_entry *P =
reinterpret_cast<const MachO::data_in_code_entry *>(DicePimpl.p);
DicePimpl.p = reinterpret_cast<uintptr_t>(P + 1);
}
// Since a Mach-O data-in-code reference, a DiceRef, can only be created when
// the OwningObject ObjectFile is a MachOObjectFile, a static_cast<> is used in
// the methods that read the fields of the reference.
inline std::error_code DiceRef::getOffset(uint32_t &Result) const {
const MachOObjectFile *MachOOF =
static_cast<const MachOObjectFile *>(OwningObject);
MachO::data_in_code_entry Dice = MachOOF->getDice(DicePimpl);
Result = Dice.offset;
return std::error_code();
}
inline std::error_code DiceRef::getLength(uint16_t &Result) const {
const MachOObjectFile *MachOOF =
static_cast<const MachOObjectFile *>(OwningObject);
MachO::data_in_code_entry Dice = MachOOF->getDice(DicePimpl);
Result = Dice.length;
return std::error_code();
}
inline std::error_code DiceRef::getKind(uint16_t &Result) const {
const MachOObjectFile *MachOOF =
static_cast<const MachOObjectFile *>(OwningObject);
MachO::data_in_code_entry Dice = MachOOF->getDice(DicePimpl);
Result = Dice.kind;
return std::error_code();
}
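// Example (illustrative sketch; "Dice" is assumed to be a DiceRef obtained
// from a MachOObjectFile's data-in-code entries): reading one entry's fields.
//   uint32_t Offset; uint16_t Length, Kind;
//   Dice.getOffset(Offset);
//   Dice.getLength(Length);
//   Dice.getKind(Kind);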
inline DataRefImpl DiceRef::getRawDataRefImpl() const {
return DicePimpl;
}
inline const ObjectFile *DiceRef::getObjectFile() const {
return OwningObject;
}
}
}
#endif
|
0 | repos/DirectXShaderCompiler/include/llvm | repos/DirectXShaderCompiler/include/llvm/Object/Binary.h | //===- Binary.h - A generic binary file -------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the Binary class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_OBJECT_BINARY_H
#define LLVM_OBJECT_BINARY_H
#include "llvm/Object/Error.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
namespace llvm {
class LLVMContext;
class StringRef;
namespace object {
class Binary {
private:
Binary() = delete;
Binary(const Binary &other) = delete;
unsigned int TypeID;
protected:
MemoryBufferRef Data;
Binary(unsigned int Type, MemoryBufferRef Source);
enum {
ID_Archive,
ID_MachOUniversalBinary,
ID_IR, // LLVM IR
// Object and children.
ID_StartObjects,
ID_COFF,
ID_ELF32L, // ELF 32-bit, little endian
ID_ELF32B, // ELF 32-bit, big endian
ID_ELF64L, // ELF 64-bit, little endian
ID_ELF64B, // ELF 64-bit, big endian
ID_MachO32L, // MachO 32-bit, little endian
ID_MachO32B, // MachO 32-bit, big endian
ID_MachO64L, // MachO 64-bit, little endian
ID_MachO64B, // MachO 64-bit, big endian
ID_EndObjects
};
static inline unsigned int getELFType(bool isLE, bool is64Bits) {
if (isLE)
return is64Bits ? ID_ELF64L : ID_ELF32L;
else
return is64Bits ? ID_ELF64B : ID_ELF32B;
}
static unsigned int getMachOType(bool isLE, bool is64Bits) {
if (isLE)
return is64Bits ? ID_MachO64L : ID_MachO32L;
else
return is64Bits ? ID_MachO64B : ID_MachO32B;
}
public:
virtual ~Binary();
StringRef getData() const;
StringRef getFileName() const;
MemoryBufferRef getMemoryBufferRef() const;
// Cast methods.
unsigned int getType() const { return TypeID; }
// Convenience methods
bool isObject() const {
return TypeID > ID_StartObjects && TypeID < ID_EndObjects;
}
bool isSymbolic() const {
return isIR() || isObject();
}
bool isArchive() const {
return TypeID == ID_Archive;
}
bool isMachOUniversalBinary() const {
return TypeID == ID_MachOUniversalBinary;
}
bool isELF() const {
return TypeID >= ID_ELF32L && TypeID <= ID_ELF64B;
}
bool isMachO() const {
return TypeID >= ID_MachO32L && TypeID <= ID_MachO64B;
}
bool isCOFF() const {
return TypeID == ID_COFF;
}
bool isIR() const {
return TypeID == ID_IR;
}
bool isLittleEndian() const {
return !(TypeID == ID_ELF32B || TypeID == ID_ELF64B ||
TypeID == ID_MachO32B || TypeID == ID_MachO64B);
}
};
/// @brief Create a Binary from Source, autodetecting the file type.
///
/// @param Source The data to create the Binary from.
ErrorOr<std::unique_ptr<Binary>> createBinary(MemoryBufferRef Source,
LLVMContext *Context = nullptr);
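/// Example (illustrative sketch; error handling elided, and "Buf" is assumed
/// to be a std::unique_ptr<MemoryBuffer> the caller already loaded):
/// \code
///   ErrorOr<std::unique_ptr<Binary>> BinOrErr =
///       createBinary(Buf->getMemBufferRef());
///   if (BinOrErr && (*BinOrErr)->isObject())
///     ; // inspect the object file
/// \endcode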
template <typename T> class OwningBinary {
std::unique_ptr<T> Bin;
std::unique_ptr<MemoryBuffer> Buf;
public:
OwningBinary();
OwningBinary(std::unique_ptr<T> Bin, std::unique_ptr<MemoryBuffer> Buf);
OwningBinary(OwningBinary<T>&& Other);
OwningBinary<T> &operator=(OwningBinary<T> &&Other);
std::pair<std::unique_ptr<T>, std::unique_ptr<MemoryBuffer>> takeBinary();
T* getBinary();
const T* getBinary() const;
};
template <typename T>
OwningBinary<T>::OwningBinary(std::unique_ptr<T> Bin,
std::unique_ptr<MemoryBuffer> Buf)
: Bin(std::move(Bin)), Buf(std::move(Buf)) {}
template <typename T> OwningBinary<T>::OwningBinary() {}
template <typename T>
OwningBinary<T>::OwningBinary(OwningBinary &&Other)
: Bin(std::move(Other.Bin)), Buf(std::move(Other.Buf)) {}
template <typename T>
OwningBinary<T> &OwningBinary<T>::operator=(OwningBinary &&Other) {
Bin = std::move(Other.Bin);
Buf = std::move(Other.Buf);
return *this;
}
template <typename T>
std::pair<std::unique_ptr<T>, std::unique_ptr<MemoryBuffer>>
OwningBinary<T>::takeBinary() {
return std::make_pair(std::move(Bin), std::move(Buf));
}
template <typename T> T* OwningBinary<T>::getBinary() {
return Bin.get();
}
template <typename T> const T* OwningBinary<T>::getBinary() const {
return Bin.get();
}
ErrorOr<OwningBinary<Binary>> createBinary(StringRef Path);
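/// Example (illustrative sketch; "foo.o" is a placeholder path): opening a
/// binary from disk. The returned OwningBinary keeps the backing MemoryBuffer
/// alive for as long as the Binary is in use.
/// \code
///   ErrorOr<OwningBinary<Binary>> BinOrErr = createBinary("foo.o");
///   if (BinOrErr && BinOrErr->getBinary()->isMachO())
///     ; // work with the Mach-O file
/// \endcode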
}
}
#endif
|